Use Optional.orElseGet rather than orElse to avoid unnecessary calculation
hantangwangd authored and tdcmeehan committed Dec 17, 2024
1 parent b2da48d commit 85cc36a
Showing 62 changed files with 89 additions and 82 deletions.
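Every hunk in this commit applies the same mechanical fix, so the rationale is worth stating once. Optional.orElse(expression) evaluates its argument before the Optional is even consulted, so the default is computed and then discarded whenever a value is present; Optional.orElseGet(supplier) invokes the supplier only when the Optional is empty. A minimal, self-contained sketch (not taken from the diff) makes the difference observable:

import java.util.Optional;

public class OrElseVsOrElseGet
{
    public static void main(String[] args)
    {
        Optional<String> present = Optional.of("cached");

        // orElse: expensiveDefault() runs even though a value is present.
        String a = present.orElse(expensiveDefault());

        // orElseGet: the supplier is never invoked for a present Optional.
        String b = present.orElseGet(OrElseVsOrElseGet::expensiveDefault);

        System.out.println(a + " " + b);
    }

    private static String expensiveDefault()
    {
        System.out.println("computing default...");
        return "default";
    }
}

Running this prints "computing default..." exactly once, triggered by the orElse call, even though that result is thrown away.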
@@ -113,7 +113,7 @@ public static Object execute(Function<Scope, BytecodeNode> nodeGenerator, Parame
             System.out.println(tree);
         }

-        ClassLoader classLoader = parentClassLoader.orElse(BytecodeExpressionAssertions.class.getClassLoader());
+        ClassLoader classLoader = parentClassLoader.orElseGet(BytecodeExpressionAssertions.class::getClassLoader);

         return classGenerator(classLoader)
                 .defineClass(classDefinition, Object.class)
@@ -280,7 +280,8 @@ public ConnectorTableMetadata getTableMetadata(ConnectorSession session, Connect
     @Override
     public List<SchemaTableName> listTables(ConnectorSession session, Optional<String> schemaName)
     {
-        List<String> schemaNames = schemaName.<List<String>>map(ImmutableList::of).orElse(listSchemaNames(session));
+        List<String> schemaNames = schemaName.<List<String>>map(ImmutableList::of)
+                .orElseGet(() -> listSchemaNames(session));
         ImmutableList.Builder<SchemaTableName> tableNames = ImmutableList.builder();
         for (String schema : schemaNames) {
             for (String tableName : metastore.getAllTables(metastoreContext(session), schema).orElse(emptyList())) {
@@ -253,7 +253,7 @@ private static ConnectorPageSource createParquetPageSource(
                 .map(type -> new MessageType(fileSchema.getName(), type))
                 .reduce(MessageType::union);

-        MessageType requestedSchema = message.orElse(new MessageType(fileSchema.getName(), ImmutableList.of()));
+        MessageType requestedSchema = message.orElseGet(() -> new MessageType(fileSchema.getName(), ImmutableList.of()));

        ImmutableList.Builder<BlockMetaData> footerBlocks = ImmutableList.builder();
        for (BlockMetaData block : parquetMetadata.getBlocks()) {
@@ -161,7 +161,7 @@ public Map<SchemaTableName, List<ColumnMetadata>> listTableColumns(ConnectorSess
     @Override
     public List<SchemaTableName> listTables(ConnectorSession session, Optional<String> schemaName)
     {
-        String schema = schemaName.orElse(getOnlyElement(SCHEMAS));
+        String schema = schemaName.orElseGet(() -> getOnlyElement(SCHEMAS));

        if (listSchemaNames().contains(schema)) {
            return sheetsClient.getTableNames().stream()
@@ -1179,7 +1179,7 @@ private ConnectorCommitHandle commitShared()
                     hdfsContext.getSession().map(MetastoreUtil::isUserDefinedTypeEncodingEnabled).orElse(false),
                     columnConverterProvider,
                     hdfsContext.getSession().map(ConnectorSession::getWarningCollector).orElse(NOOP),
-                    hdfsContext.getSession().map(ConnectorSession::getRuntimeStats).orElse(new RuntimeStats()));
+                    hdfsContext.getSession().map(ConnectorSession::getRuntimeStats).orElseGet(RuntimeStats::new));
             switch (action.getType()) {
                 case DROP:
                     committer.prepareDropTable(metastoreContext, schemaTableName);

@@ -1213,7 +1213,7 @@ private ConnectorCommitHandle commitShared()
                     hdfsContext.getSession().map(MetastoreUtil::isUserDefinedTypeEncodingEnabled).orElse(false),
                     columnConverterProvider,
                     hdfsContext.getSession().map(ConnectorSession::getWarningCollector).orElse(NOOP),
-                    hdfsContext.getSession().map(ConnectorSession::getRuntimeStats).orElse(new RuntimeStats()));
+                    hdfsContext.getSession().map(ConnectorSession::getRuntimeStats).orElseGet(RuntimeStats::new));
             switch (action.getType()) {
                 case DROP:
                     committer.prepareDropPartition(metastoreContext, schemaTableName, partitionValues);
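Two patterns recur throughout the diff, both visible in the hunks above. Suppliers are written as constructor or method references where possible (RuntimeStats::new), the idiomatic zero-argument Supplier. And defaults that are constants or already-existing values — orElse(false), orElse(NOOP) above, orElse(emptyList()) earlier — are deliberately left as orElse, since the rewrite only pays off when the argument is freshly computed on each call. A sketch of that rule of thumb, using an illustrative Stats class rather than anything from the Presto codebase:

import java.util.Optional;

public class OrElseRuleOfThumb
{
    private static final Stats EMPTY_STATS = new Stats(); // shared, pre-built constant

    static Stats pick(Optional<Stats> maybeStats)
    {
        // Fine: the default already exists, so orElse costs nothing extra.
        Stats a = maybeStats.orElse(EMPTY_STATS);

        // Wasteful: a new Stats is allocated even when maybeStats is present.
        Stats b = maybeStats.orElse(new Stats());

        // Lazy: the constructor reference runs only when maybeStats is empty.
        Stats c = maybeStats.orElseGet(Stats::new);

        return c;
    }

    static class Stats {}
}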
@@ -1720,7 +1720,7 @@ public Optional<ConnectorOutputMetadata> finishCreateTable(ConnectorSession sess
             HiveBasicStatistics basicStatistics = partitionUpdates.stream()
                     .map(PartitionUpdate::getStatistics)
                     .reduce((first, second) -> reduce(first, second, ADD))
-                    .orElse(createZeroStatistics());
+                    .orElseGet(() -> createZeroStatistics());
             tableStatistics = createPartitionStatistics(session, basicStatistics, columnTypes, getColumnStatistics(partitionComputedStatistics, ImmutableList.of()), timeZone);
         }
         else {

@@ -3180,14 +3180,14 @@ public Set<RoleGrant> listRoleGrants(ConnectorSession session, PrestoPrincipal p
     public void grantRoles(ConnectorSession session, Set<String> roles, Set<PrestoPrincipal> grantees, boolean withAdminOption, Optional<PrestoPrincipal> grantor)
     {
         MetastoreContext metastoreContext = getMetastoreContext(session);
-        metastore.grantRoles(metastoreContext, roles, grantees, withAdminOption, grantor.orElse(new PrestoPrincipal(USER, session.getUser())));
+        metastore.grantRoles(metastoreContext, roles, grantees, withAdminOption, grantor.orElseGet(() -> new PrestoPrincipal(USER, session.getUser())));
     }

     @Override
     public void revokeRoles(ConnectorSession session, Set<String> roles, Set<PrestoPrincipal> grantees, boolean adminOptionFor, Optional<PrestoPrincipal> grantor)
     {
         MetastoreContext metastoreContext = getMetastoreContext(session);
-        metastore.revokeRoles(metastoreContext, roles, grantees, adminOptionFor, grantor.orElse(new PrestoPrincipal(USER, session.getUser())));
+        metastore.revokeRoles(metastoreContext, roles, grantees, adminOptionFor, grantor.orElseGet(() -> new PrestoPrincipal(USER, session.getUser())));
     }

     @Override
@@ -98,7 +98,7 @@ public HivePageSource(
         }
         else if (isRowIdColumnHandle(columnMapping.getHiveColumnHandle())) {
             // If there's no row ID partition component, then path + row numbers will be supplied for $row_id
-            byte[] component = rowIdPartitionComponent.orElse(new byte[0]);
+            byte[] component = rowIdPartitionComponent.orElseGet(() -> new byte[0]);
             String rowGroupId = Paths.get(path).getFileName().toString();
             coercers[columnIndex] = new RowIDCoercer(component, rowGroupId);
         }
@@ -1237,8 +1237,9 @@ private static TypeSignature translateHiveUnsupportedTypeSignatureForTemporaryTa
             TypeSignatureParameter typeSignatureParameter = parameters.get(i);
             checkArgument(typeSignatureParameter.isNamedTypeSignature(), "unexpected row type signature parameter: %s", typeSignatureParameter);
             NamedTypeSignature namedTypeSignature = typeSignatureParameter.getNamedTypeSignature();
+            int parameterIdx = i;
             updatedParameters.add(TypeSignatureParameter.of(new NamedTypeSignature(
-                    Optional.of(namedTypeSignature.getFieldName().orElse(new RowFieldName("_field_" + i, false))),
+                    Optional.of(namedTypeSignature.getFieldName().orElseGet(() -> new RowFieldName("_field_" + parameterIdx, false))),
                     translateHiveUnsupportedTypeSignatureForTemporaryTable(namedTypeSignature.getTypeSignature()))));
         }
         return new TypeSignature(StandardTypes.ROW, updatedParameters.build());
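The extra int parameterIdx = i; line in this hunk is not incidental: a lambda may only capture local variables that are effectively final, and the loop index i is mutated on every iteration, so it must be copied into a fresh local before the orElseGet supplier can use it. The constraint reproduced in isolation:

import java.util.function.Supplier;

public class LambdaCapture
{
    public static void main(String[] args)
    {
        for (int i = 0; i < 3; i++) {
            // Supplier<String> s = () -> "_field_" + i; // does not compile: i is not effectively final
            int idx = i; // fresh, effectively final copy per iteration
            Supplier<String> s = () -> "_field_" + idx;
            System.out.println(s.get());
        }
    }
}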
@@ -401,7 +401,7 @@ public HiveWriter createWriter(Page partitionColumns, int position, OptionalInt
             hiveFileWriter = sortingFileWriterFactory.get().createSortingFileWriter(
                     path,
                     hiveFileWriter,
-                    bucketNumber.orElse(abs(path.hashCode() % 1024)),
+                    bucketNumber.orElseGet(() -> abs(path.hashCode() % 1024)),
                     writerParameters.getWriteInfo().getTempPath());
         }
@@ -157,7 +157,7 @@ private InternalHiveSplitFactory createInternalHiveSplitFactory(
         String inputFormatName = storage.getStorageFormat().getInputFormat();
         int partitionDataColumnCount = partition.getPartition()
                 .map(p -> p.getColumns().size())
-                .orElse(table.getDataColumns().size());
+                .orElseGet(table.getDataColumns()::size);
         List<HivePartitionKey> partitionKeys = getPartitionKeys(table, partition.getPartition(), partitionName);
         Path path = new Path(getPartitionLocation(table, partition.getPartition()));
         Configuration configuration = hdfsEnvironment.getConfiguration(hdfsContext, path);
@@ -261,7 +261,7 @@ public ListenableFuture<?> loadPartition(HivePartitionMetadata partition, HiveSp
         String inputFormatName = storage.getStorageFormat().getInputFormat();
         int partitionDataColumnCount = partition.getPartition()
                 .map(p -> p.getColumns().size())
-                .orElse(table.getDataColumns().size());
+                .orElseGet(table.getDataColumns()::size);
         List<HivePartitionKey> partitionKeys = getPartitionKeys(table, partition.getPartition(), partitionName);
         String location = getPartitionLocation(table, partition.getPartition());
         if (location.isEmpty()) {
@@ -241,7 +241,7 @@ public static ConnectorPageSource createOrcPageSource(
                 systemMemoryUsage,
                 INITIAL_BATCH_SIZE);

-        byte[] partitionID = rowIDPartitionComponent.orElse(new byte[0]);
+        byte[] partitionID = rowIDPartitionComponent.orElseGet(() -> new byte[0]);
         String rowGroupID = path.getName();

         // none of the columns are row numbers
@@ -285,7 +285,7 @@ public static ConnectorPageSource createOrcPageSource(

         boolean supplyRowIDs = selectedColumns.stream().anyMatch(column -> HiveColumnHandle.isRowIdColumnHandle(column));
         checkArgument(!supplyRowIDs || rowIDPartitionComponent.isPresent(), "rowIDPartitionComponent required when supplying row IDs");
-        byte[] partitionID = rowIDPartitionComponent.orElse(new byte[0]);
+        byte[] partitionID = rowIDPartitionComponent.orElseGet(() -> new byte[0]);
         String rowGroupId = path.getName();

         DataSize maxMergeDistance = getOrcMaxMergeDistance(session);
@@ -214,7 +214,7 @@ public static ConnectorPageSource createParquetPageSource(
                 .map(type -> new MessageType(fileSchema.getName(), type))
                 .reduce(MessageType::union);

-        MessageType requestedSchema = message.orElse(new MessageType(fileSchema.getName(), ImmutableList.of()));
+        MessageType requestedSchema = message.orElseGet(() -> new MessageType(fileSchema.getName(), ImmutableList.of()));

         ImmutableList.Builder<BlockMetaData> footerBlocks = ImmutableList.builder();
@@ -218,7 +218,7 @@ public static DistributedQueryRunner createQueryRunner(
         queryRunner.createCatalog("tpchstandard", "tpch", tpchProperties);

         ExtendedHiveMetastore metastore;
-        metastore = externalMetastore.orElse(getFileHiveMetastore(queryRunner));
+        metastore = externalMetastore.orElseGet(() -> getFileHiveMetastore(queryRunner));

         queryRunner.installPlugin(new HivePlugin(HIVE_CATALOG, Optional.of(metastore)));
@@ -315,7 +315,7 @@ private WriteContext createWriter(Optional<PartitionData> partitionData)
     {
         String fileName = fileFormat.addExtension(randomUUID().toString());
         Path outputPath = partitionData.map(partition -> new Path(locationProvider.newDataLocation(partitionSpec, partition, fileName)))
-                .orElse(new Path(locationProvider.newDataLocation(fileName)));
+                .orElseGet(() -> new Path(locationProvider.newDataLocation(fileName)));

         IcebergFileWriter writer = fileWriterFactory.createFileWriter(
                 outputPath,
@@ -282,7 +282,7 @@ private static ConnectorPageSourceWithRowPositions createParquetPageSource(
                 .map(type -> new MessageType(fileSchema.getName(), type))
                 .reduce(MessageType::union);

-        MessageType requestedSchema = messageType.orElse(new MessageType(fileSchema.getName(), ImmutableList.of()));
+        MessageType requestedSchema = messageType.orElseGet(() -> new MessageType(fileSchema.getName(), ImmutableList.of()));
         Map<List<String>, RichColumnDescriptor> descriptorsByPath = getDescriptors(fileSchema, requestedSchema);
         TupleDomain<ColumnDescriptor> parquetTupleDomain = getParquetTupleDomain(descriptorsByPath, effectivePredicate);
         Predicate parquetPredicate = buildPredicate(requestedSchema, parquetTupleDomain, descriptorsByPath);
@@ -90,7 +90,8 @@ public ConnectorSplitSource getSplits(
         if (table.getIcebergTableName().getTableType() == CHANGELOG) {
             // if the snapshot isn't specified, grab the oldest available version of the table
             long fromSnapshot = table.getIcebergTableName().getSnapshotId().orElseGet(() -> SnapshotUtil.oldestAncestor(icebergTable).snapshotId());
-            long toSnapshot = table.getIcebergTableName().getChangelogEndSnapshot().orElse(icebergTable.currentSnapshot().snapshotId());
+            long toSnapshot = table.getIcebergTableName().getChangelogEndSnapshot()
+                    .orElseGet(icebergTable.currentSnapshot()::snapshotId);
             IncrementalChangelogScan scan = icebergTable.newIncrementalChangelogScan()
                     .fromSnapshotExclusive(fromSnapshot)
                     .toSnapshot(toSnapshot);
@@ -104,7 +104,7 @@ public IcebergDeletePageSink(
         this.partitionData = partitionDataFromJson(partitionSpec, partitionDataAsJson);
         String fileName = fileFormat.addExtension(String.format("delete_file_%s", randomUUID().toString()));
         this.outputPath = partitionData.map(partition -> new Path(locationProvider.newDataLocation(partitionSpec, partition, fileName)))
-                .orElse(new Path(locationProvider.newDataLocation(fileName)));
+                .orElseGet(() -> new Path(locationProvider.newDataLocation(fileName)));
         this.positionDeleteWriter = new IcebergPositionDeleteWriter();
     }
@@ -182,7 +182,7 @@ public PlanNode visitTableScan(TableScanNode node, RewriteContext<Void> context)
         TupleDomain<IcebergColumnHandle> predicate = icebergTableLayoutHandle
                 .map(IcebergTableLayoutHandle::getValidPredicate)
                 .map(IcebergUtil::getNonMetadataColumnConstraints)
-                .orElse(TupleDomain.all());
+                .orElseGet(TupleDomain::all);

         // Collect info about each unique delete schema to join by
         ImmutableMap<Set<Integer>, DeleteSetInfo> deleteSchemas = collectDeleteInformation(icebergTable, predicate, tableName.getSnapshotId().get());
@@ -169,7 +169,7 @@ protected SessionContext convertSession(ConnectorSession session)
                     .setIdentity(session.getUser())
                     .setCredentials(credentials.build())
                     .setProperties(properties);
-        }).orElse(builder(session).setSessionId(randomUUID().toString()));
+        }).orElseGet(() -> builder(session).setSessionId(randomUUID().toString()));
         return sessionContextBuilder.build();
     }
@@ -133,7 +133,7 @@ public static EnumSet<ColumnStatisticType> decodeMergeFlags(String input)
                         .collect(Collectors.toSet()))
                 .filter(set -> !set.isEmpty())
                 .map(EnumSet::copyOf)
-                .orElse(EnumSet.noneOf(ColumnStatisticType.class));
+                .orElseGet(() -> EnumSet.noneOf(ColumnStatisticType.class));
     }

     public static String encodeMergeFlags(EnumSet<ColumnStatisticType> flags)
@@ -1064,7 +1064,7 @@ private TableStatistics getTableStats(String name, Optional<Long> snapshot, Sess
                 handle,
                 new ArrayList<>(columns
                         .map(columnSet -> Maps.filterKeys(resolver.getColumnHandles(handle), columnSet::contains))
-                        .orElse(resolver.getColumnHandles(handle)).values()),
+                        .orElseGet(() -> resolver.getColumnHandles(handle)).values()),
                 Constraint.alwaysTrue());
     }
3 changes: 2 additions & 1 deletion presto-main/src/main/java/com/facebook/presto/Session.java
@@ -845,7 +845,8 @@ public Session build()
                 Optional.ofNullable(userAgent),
                 Optional.ofNullable(clientInfo),
                 clientTags,
-                Optional.ofNullable(resourceEstimates).orElse(new ResourceEstimateBuilder().build()),
+                Optional.ofNullable(resourceEstimates)
+                        .orElseGet(new ResourceEstimateBuilder()::build),
                 startTime,
                 systemProperties,
                 connectorProperties,
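One subtlety in the hunk above: a bound method reference evaluates its receiver at the point where the reference is created, so new ResourceEstimateBuilder()::build still constructs a builder every time this code runs — only the build() invocation itself is deferred. A lambda would defer both steps; whether that matters depends on how cheap the builder is. A sketch of the difference, using a stand-in Builder class rather than Presto's:

import java.util.Optional;

public class BoundReferenceCaveat
{
    static class Builder
    {
        Builder() { System.out.println("builder allocated"); }
        String build() { return "built"; }
    }

    public static void main(String[] args)
    {
        Optional<String> present = Optional.of("value");

        // Bound method reference: the receiver (new Builder()) is created NOW,
        // even though build() never runs for a present Optional.
        present.orElseGet(new Builder()::build);

        // Lambda: nothing is created unless the Optional is empty.
        present.orElseGet(() -> new Builder().build());
    }
}

Running this prints "builder allocated" once — from the method-reference line, not the lambda.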
@@ -174,7 +174,8 @@ private double calculateFilterFactor(VariableStatsEstimate variableStatistics, S
         Estimate filterEstimate;
         if (useHistograms) {
             Estimate distinctEstimate = isNaN(variableStatistics.getDistinctValuesCount()) ? Estimate.unknown() : Estimate.of(variableRange.getDistinctValuesCount());
-            filterEstimate = HistogramCalculator.calculateFilterFactor(intersectRange.toPrestoRange(), intersectRange.getDistinctValuesCount(), variableStatistics.getHistogram().orElse(new UniformDistributionHistogram(variableStatistics.getLowValue(), variableStatistics.getHighValue())), distinctEstimate, true);
+            filterEstimate = HistogramCalculator.calculateFilterFactor(intersectRange.toPrestoRange(), intersectRange.getDistinctValuesCount(),
+                    variableStatistics.getHistogram().orElseGet(() -> new UniformDistributionHistogram(variableStatistics.getLowValue(), variableStatistics.getHighValue())), distinctEstimate, true);
             if (log.isDebugEnabled()) {
                 double expressionFilter = variableRange.overlapPercentWith(intersectRange);
                 if (!Double.isNaN(expressionFilter) &&
@@ -299,23 +299,23 @@ public Duration getTotalCpuTime()
     {
         return tryGetQueryExecution()
                 .map(QueryExecution::getTotalCpuTime)
-                .orElse(new Duration(0, MILLISECONDS));
+                .orElseGet(() -> new Duration(0, MILLISECONDS));
     }

     @Override
     public DataSize getTotalMemoryReservation()
     {
         return tryGetQueryExecution()
                 .map(QueryExecution::getTotalMemoryReservation)
-                .orElse(new DataSize(0, BYTE));
+                .orElseGet(() -> new DataSize(0, BYTE));
     }

     @Override
     public DataSize getUserMemoryReservation()
     {
         return tryGetQueryExecution()
                 .map(QueryExecution::getUserMemoryReservation)
-                .orElse(new DataSize(0, BYTE));
+                .orElseGet(() -> new DataSize(0, BYTE));
     }

     public int getRunningTaskCount()

@@ -328,7 +328,7 @@ public BasicQueryInfo getBasicQueryInfo()
     {
         return tryGetQueryExecution()
                 .map(QueryExecution::getBasicQueryInfo)
-                .orElse(stateMachine.getBasicQueryInfo(Optional.empty()));
+                .orElseGet(() -> stateMachine.getBasicQueryInfo(Optional.empty()));
     }

     @Override
@@ -610,7 +610,7 @@ private void updateTaskStatus(TaskId taskId, TaskStatus taskStatus)
                 .findFirst()
                 .map(this::rewriteTransportFailure)
                 .map(ExecutionFailureInfo::toException)
-                .orElse(new PrestoException(GENERIC_INTERNAL_ERROR, "A task failed for an unknown reason"));
+                .orElseGet(() -> new PrestoException(GENERIC_INTERNAL_ERROR, "A task failed for an unknown reason"));
         if (isRecoverable(taskStatus.getFailures())) {
             try {
                 stageTaskRecoveryCallback.get().recover(taskId);
@@ -219,7 +219,7 @@ public Node<E> findLeaf()
         int rightDescendants = right.map(node -> node.descendants).orElse(0);

         if (leftDescendants == 0 && rightDescendants == 0) {
-            return left.orElse(right.orElse(this));
+            return left.orElseGet(() -> right.orElse(this));
         }
         if (leftDescendants > rightDescendants) {
             return left.get().findLeaf();
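Note the asymmetry kept in this last hunk: the inner right.orElse(this) stays eager, presumably because this is already in hand and costs nothing to pass, while the outer call becomes lazy so that right is not consulted at all when left is present.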