Code Review Fix for Naming #326

Open · wants to merge 1 commit into master
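The single commit applies one naming convention across the files below: static final logger fields are renamed from log to LOG, following the usual Java rule that constants (static final fields) get uppercase names. A minimal sketch of the pattern, assuming airlift's io.airlift.log.Logger (the logger these classes appear to use) and a hypothetical MyClass:

    import io.airlift.log.Logger;

    public class MyClass
    {
        // Before: private static final Logger log = Logger.get(MyClass.class);
        // After: static final constants take uppercase names.
        private static final Logger LOG = Logger.get(MyClass.class);

        public void doWork()
        {
            // Call sites are unchanged apart from the field name.
            LOG.debug("doWork started");
        }
    }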
(file name not captured)

@@ -11,6 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 package io.hetu.core.transport.execution.buffer;

 import io.airlift.compress.Compressor;
(file name not captured)

@@ -11,6 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 package io.hetu.core.transport.execution.buffer;

 import com.fasterxml.jackson.annotation.JsonCreator;
5 changes: 3 additions & 2 deletions presto-cli/src/main/java/io/prestosql/cli/Query.java

@@ -11,6 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 package io.prestosql.cli;

 import com.google.common.base.Splitter;
@@ -58,7 +59,7 @@ public class Query
 {
     private static final Signal SIGINT = new Signal("INT");

-    private static final Logger log = Logger.get(Query.class);
+    private static final Logger LOG = Logger.get(Query.class);

     private final AtomicBoolean ignoreUserInterrupt = new AtomicBoolean();
     private final StatementClient client;
@@ -201,7 +202,7 @@ private void processInitialStatusUpdates(WarningsPrinter warningsPrinter)
             client.advance();
         }
         catch (RuntimeException e) {
-            log.debug(e, "error printing status");
+            LOG.debug(e, "error printing status");
         }
     }
     List<Warning> warnings;
HiveConnector.java

@@ -44,7 +44,7 @@
 public class HiveConnector
         implements Connector
 {
-    private static final Logger log = Logger.get(HiveConnector.class);
+    private static final Logger LOG = Logger.get(HiveConnector.class);

     private final LifeCycleManager lifeCycleManager;
     private final Supplier<TransactionalMetadata> metadataFactory;
@@ -216,7 +216,7 @@ public final void shutdown()
             lifeCycleManager.stop();
         }
         catch (Exception e) {
-            log.error(e, "Error shutting down connector");
+            LOG.error(e, "Error shutting down connector");
         }
     }

HiveMetadata.java

@@ -11,6 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 package io.prestosql.plugin.hive;

 import com.google.common.annotations.VisibleForTesting;
@@ -198,7 +199,7 @@
 public class HiveMetadata
         implements TransactionalMetadata
 {
-    private static final Logger log = Logger.get(HiveMetadata.class);
+    private static final Logger LOG = Logger.get(HiveMetadata.class);

     public static final String PRESTO_VERSION_NAME = "presto_version";
     public static final String PRESTO_QUERY_ID_NAME = "presto_query_id";
@@ -491,7 +492,7 @@ protected ConnectorTableMetadata doGetTableMetadata(ConnectorSession session, Sc
             properties.put(HiveTableProperties.STORAGE_FORMAT_PROPERTY, format);
         }
         catch (PrestoException ignored) {
-            log.debug("Format is not known error");
+            LOG.debug("Format is not known error");
         }

         // Partitioning property
@@ -668,7 +669,7 @@ public long getTableModificationTime(ConnectorSession session, ConnectorTableHan
         }
         // We want to make sure the query doesn't fail because of star-tree not being able to get last modified time
         catch (Exception e) {
-            log.error("Exception thrown while trying to get modified time", e);
+            LOG.error("Exception thrown while trying to get modified time", e);
             return -1L;
         }
     }
@@ -684,10 +685,10 @@ public Map<SchemaTableName, List<ColumnMetadata>> listTableColumns(ConnectorSess
                 columns.put(tableName, getTableMetadata(session, tableName).getColumns());
             }
             catch (HiveViewNotSupportedException e) {
-                log.debug("View is not supported error");
+                LOG.debug("View is not supported error");
             }
             catch (TableNotFoundException e) {
-                log.debug("Table disappeared during listing operation error");
+                LOG.debug("Table disappeared during listing operation error");
             }
         }
         return columns.build();
@@ -1445,7 +1446,7 @@ public Optional<ConnectorOutputMetadata> finishCreateTable(
                 future.get();
             }
             catch (InterruptedException | ExecutionException ignore) {
-                log.debug("Get future error");
+                LOG.debug("Get future error");
             }
         });
     }
@@ -3022,13 +3023,13 @@ else if (isSnapshotFile(status.getPath().getName(), queryId)) {
                 long subFileIndex = getSnapshotSubFileIndex(fileName, queryId);
                 // Remove any merged files and subfiles that are after the snapshot being resumed to
                 if (subFileIndex < 0 || subFileIndex >= snapshotIndex) {
-                    log.debug("Deleting file resume=true: %s", fileName);
+                    LOG.debug("Deleting file resume=true: %s", fileName);
                     fileSystem.delete(status.getPath());
                 }
             }
             else {
                 if (isSnapshotSubFile(fileName, queryId)) {
-                    log.debug("Deleting sub file resume=false: %s", fileName);
+                    LOG.debug("Deleting sub file resume=false: %s", fileName);
                     fileSystem.delete(status.getPath());
                 }
                 else {
@@ -3037,12 +3038,12 @@ else if (isSnapshotFile(status.getPath().getName(), queryId)) {
                     // For transaqctional tables, the file's parent folder is part of the output file list
                     if (mergedFileNames.contains(fileName) || mergedFileNames.contains(status.getPath().getParent().getName())) {
                         String newName = removeSnapshotFileName(fileName, queryId);
-                        log.debug("Renaming merged file resume=false: %s to %s", fileName, newName);
+                        LOG.debug("Renaming merged file resume=false: %s to %s", fileName, newName);
                         fileSystem.rename(status.getPath(), new Path(folder, newName));
                     }
                     else {
                         // Remove files that are not part of the final output files. (e.g. those produced by abandoned tasks.)
-                        log.debug("Deleting old merged file resume=false: %s", fileName);
+                        LOG.debug("Deleting old merged file resume=false: %s", fileName);
                         fileSystem.delete(status.getPath());
                     }
                 }
@@ -3178,7 +3179,7 @@ else if (isSortingColumnsNotPresent) {
         if ((partitionedBy.size() + sortedColumnNames.size() < groupKeyNames.size()) ||
                 (partitionedBy.size() > groupKeyNames.size())) {
             //sorted columns are less than join criteria columns
-            log.debug("number of sorted columns " + sortedColumnNames.size() + "are less join column size " + groupKeyNames.size());
+            LOG.debug("number of sorted columns " + sortedColumnNames.size() + "are less join column size " + groupKeyNames.size());
             return partialAndFinalAggregationType;
         }

@@ -3217,17 +3218,17 @@ else if (isSortingColumnsNotPresent) {
         boolean bucketedColumnsResult = !singleOrZeroBucketedColumn && (!groupKeyNames.get(numOfComparedKeys).equals(bucketedColumns.get(numOfComparedKeys)));
         if ((!groupKeyNames.get(numOfCmpKeysAfterPartitionedBy).equals(sortedColumnNames.get(numOfComparedKeys))) ||
                 (!singleOrZeroBucketedColumn && bucketedColumnsResult)) {
-            if (log.isDebugEnabled()) {
+            if (LOG.isDebugEnabled()) {
                 final String[] dbgGroupKeyNames = {new String("")};
                 groupKeyNames.stream().forEach(k -> dbgGroupKeyNames[0] = dbgGroupKeyNames[0].concat(k + " , "));
                 final String[] dbgSortedColumnNames = {new String("")};
                 sortedColumnNames.stream().forEach(k -> dbgSortedColumnNames[0] = dbgSortedColumnNames[0].concat(k + " , "));
                 if ((null != bucketedColumns) && (bucketedColumns.size() > 0)) {
                     final String[] dbgbucketedColumns = {new String("")};
                     bucketedColumns.stream().forEach(k -> dbgbucketedColumns[0] = dbgbucketedColumns[0].concat(k + " , "));
-                    log.debug("Not matching sortedColumnNames: " + dbgSortedColumnNames + " group columns name: " + dbgGroupKeyNames + " bucketedColumns :" + dbgbucketedColumns);
+                    LOG.debug("Not matching sortedColumnNames: " + dbgSortedColumnNames + " group columns name: " + dbgGroupKeyNames + " bucketedColumns :" + dbgbucketedColumns);
                 }
-                log.debug("Not matching sortedColumnNames: " + dbgSortedColumnNames + " group columns name: " + dbgGroupKeyNames);
+                LOG.debug("Not matching sortedColumnNames: " + dbgSortedColumnNames + " group columns name: " + dbgGroupKeyNames);
             }
             return partialAndFinalAggregationType;
         }
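A reviewer-style aside on the isDebugEnabled() block above (an observation about the surrounding code, not a change this commit makes): the messages concatenate the String[] holders themselves, e.g. " + dbgSortedColumnNames + ", so the logged line contains an array reference like [Ljava.lang.String;@1a2b3c rather than the joined column names; dbgSortedColumnNames[0] was presumably intended. The single-element-array-plus-forEach accumulation can also be replaced by a plain join. A minimal sketch, assuming the name collections are List<String>:

    import java.util.List;

    final class DebugMessageSketch
    {
        private DebugMessageSketch() {}

        // Builds the same " , "-separated listing without mutable array holders.
        static String notMatchingMessage(List<String> sortedColumnNames, List<String> groupKeyNames)
        {
            return "Not matching sortedColumnNames: " + String.join(" , ", sortedColumnNames)
                    + " group columns name: " + String.join(" , ", groupKeyNames);
        }
    }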
HivePageSink.java

@@ -11,6 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 package io.prestosql.plugin.hive;

 import com.google.common.collect.ImmutableList;
@@ -92,7 +93,7 @@
 public class HivePageSink
         implements ConnectorPageSink
 {
-    private static final Logger log = Logger.get(HivePageSink.class);
+    private static final Logger LOG = Logger.get(HivePageSink.class);

     private static final int MAX_PAGE_POSITIONS = 4096;

@@ -344,7 +345,7 @@ private ListenableFuture<Collection<Slice>> mergeFiles()
             return doFinish();
         }
         catch (IOException e) {
-            log.debug("exception '%s' while merging subfile", e);
+            LOG.debug("exception '%s' while merging subfile", e);
             throw new RuntimeException(e);
         }
     }
@@ -375,7 +376,7 @@ private void doAbort(boolean isCancel)
                 writer.rollback(isCancel);
             }
             catch (Exception e) {
-                log.warn("exception '%s' while rollback on %s", e, writer);
+                LOG.warn("exception '%s' while rollback on %s", e, writer);
                 rollbackException = Optional.of(e);
             }
         }
HivePageSource.java

@@ -67,7 +67,7 @@
 public class HivePageSource
         implements ConnectorPageSource
 {
-    private static final Logger log = Logger.get(HivePageSource.class);
+    private static final Logger LOG = Logger.get(HivePageSource.class);

     private final List<ColumnMapping> columnMappings;
     private final Optional<BucketAdapter> bucketAdapter;
@@ -76,7 +76,6 @@ public class HivePageSource
     private final TypeManager typeManager;
     private final List<Optional<Function<Block, Block>>> coercers;
     private final int rowFilteringThreshold;
-    protected boolean eligibleForRowFiltering;

     private final ConnectorPageSource delegate;

(file name not captured)

@@ -11,6 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 package io.prestosql.plugin.hive;

 import com.google.common.collect.AbstractIterator;
HiveSplitSource.java

@@ -81,7 +81,7 @@
 class HiveSplitSource
         implements ConnectorSplitSource
 {
-    private static final Logger log = Logger.get(HiveSplitSource.class);
+    private static final Logger LOG = Logger.get(HiveSplitSource.class);

     private final String queryId;
     private final String databaseName;
@@ -329,7 +329,7 @@ ListenableFuture<?> addToQueue(InternalHiveSplit split)
             // If it's hit, it means individual splits are huge.
             if (loggedHighMemoryWarning.compareAndSet(false, true)) {
                 highMemorySplitSourceCounter.update(1);
-                log.warn("Split buffering for %s.%s in query %s exceeded memory limit (%s). %s splits are buffered.",
+                LOG.warn("Split buffering for %s.%s in query %s exceeded memory limit (%s). %s splits are buffered.",
                         databaseName, tableName, queryId, succinctBytes(maxOutstandingSplitsBytes), getBufferedInternalSplitCount());
             }
             throw new PrestoException(HiveErrorCode.HIVE_EXCEEDED_SPLIT_BUFFERING_LIMIT, format(
@@ -521,7 +521,7 @@ private boolean matchesUserDefinedCachedPredicates(List<HivePartitionKey> partit
             }
         }
         catch (Exception ex) {
-            log.warn(ex, "Unable to match partition keys %s with cached predicates. Ignoring this partition key. Error = %s", partitionKeys, ex.getMessage());
+            LOG.warn(ex, "Unable to match partition keys %s with cached predicates. Ignoring this partition key. Error = %s", partitionKeys, ex.getMessage());
         }
         return false;
     }
@@ -728,7 +728,7 @@ Number of files should not cross MinValue(max-splits-to-group, Total number of f
                 numberOfSplitsGrouped += 1;
                 if ((maxSplitBytes < totalSize) || (avgSplitsPerNode < numberOfSplitsGrouped) || (maxSmallSplitsCanBeGrouped < numberOfSplitsGrouped)) {
                     connectorSplitList.add(HiveSplitWrapper.wrap(groupedHiveSplit, bucketNumberPresent ? OptionalInt.of(bucketNumber) : OptionalInt.empty()));
-                    log.debug("info table %s, groupedHiveSplit size %d, maxSplitBytes %d, totalSize %d, avgSplitsPerNode %d, numberOfSplitsGrouped %d, maxSmallSplitsCanBeGrouped %d, numberOfSplitsGrouped %d ",
+                    LOG.debug("info table %s, groupedHiveSplit size %d, maxSplitBytes %d, totalSize %d, avgSplitsPerNode %d, numberOfSplitsGrouped %d, maxSmallSplitsCanBeGrouped %d, numberOfSplitsGrouped %d ",
                             groupedHiveSplit.get(0).getTable(), groupedHiveSplit.size(), maxSplitBytes, totalSize, avgSplitsPerNode, numberOfSplitsGrouped, maxSmallSplitsCanBeGrouped, numberOfSplitsGrouped);
                     totalSize = 0;
                     numberOfSplitsGrouped = 0;
@@ -747,7 +747,7 @@ Number of files should not cross MinValue(max-splits-to-group, Total number of f
                 numberOfSplitsGrouped += 1;
                 if ((maxSplitBytes < totalSize) || (avgSplitsPerNode < numberOfSplitsGrouped) || (maxSmallSplitsCanBeGrouped < numberOfSplitsGrouped)) {
                     connectorSplitList.add(HiveSplitWrapper.wrap(groupedHiveSplit, bucketNumberPresent ? OptionalInt.of(bucketNumber) : OptionalInt.empty()));
-                    log.debug("info table %s, groupedHiveSplit size %d, maxSplitBytes %d, totalSize %d, avgSplitsPerNode %d, numberOfSplitsGrouped %d, maxSmallSplitsCanBeGrouped %d, numberOfSplitsGrouped %d ",
+                    LOG.debug("info table %s, groupedHiveSplit size %d, maxSplitBytes %d, totalSize %d, avgSplitsPerNode %d, numberOfSplitsGrouped %d, maxSmallSplitsCanBeGrouped %d, numberOfSplitsGrouped %d ",
                             groupedHiveSplit.get(0).getTable(), groupedHiveSplit.size(), maxSplitBytes, totalSize, avgSplitsPerNode, numberOfSplitsGrouped, maxSmallSplitsCanBeGrouped, numberOfSplitsGrouped);
                     totalSize = 0;
                     numberOfSplitsGrouped = 0;
@@ -764,7 +764,7 @@ Number of files should not cross MinValue(max-splits-to-group, Total number of f
             }
         }
         List<ConnectorSplit> resultConnectorSplits = connectorSplitList.build();
-        log.debug("info resultBuilder size %d", resultConnectorSplits.size());
+        LOG.debug("info resultBuilder size %d", resultConnectorSplits.size());
         return resultConnectorSplits;
     }

@@ -797,7 +797,7 @@ private boolean getSmallerSplits(List<HiveSplitWrapper> hiveSplitWrappers, Multi
             return false;
         }

-        log.info("info total Split %d, numSmallSplits %d ", hiveSplitWrappers.size(), numSmallSplits);
+        LOG.info("info total Split %d, numSmallSplits %d ", hiveSplitWrappers.size(), numSmallSplits);
         return true;
     }
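One more review observation on the two grouping hunks above: the debug format string includes numberOfSplitsGrouped %d twice, and the argument list also passes numberOfSplitsGrouped twice, which reads like a copy-paste slip; one of the two pairs is redundant.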
HiveUtil.java

@@ -167,7 +167,7 @@

 public final class HiveUtil
 {
-    public static final Logger log = Logger.get(HiveUtil.class);
+    public static final Logger LOG = Logger.get(HiveUtil.class);

     public static final String PRESTO_VIEW_FLAG = "presto_view";

@@ -398,7 +398,7 @@ public static boolean isSplittable(InputFormat<?, ?> inputFormat, FileSystem fil
                 break;
             }
             catch (NoSuchMethodException ignored) {
-                log.warn("GetDeclaredMethod error(FileSystem = %s, path = %s)", FileSystem.class.getName(), Path.class.getName());
+                LOG.warn("GetDeclaredMethod error(FileSystem = %s, path = %s)", FileSystem.class.getName(), Path.class.getName());
             }
         }

@@ -1136,7 +1136,7 @@ public static boolean isPartitionFiltered(List<HivePartitionKey> partitionKeys,
             }
         }
         catch (PrestoException | ClassCastException e) {
-            log.error("cannot cast class" + e.getMessage());
+            LOG.error("cannot cast class" + e.getMessage());
             return false;
         }
         //return if this dynamic filter is not filtering
HiveWriterFactory.java

@@ -11,6 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 package io.prestosql.plugin.hive;

 import com.google.common.base.Strings;
@@ -108,7 +109,7 @@

 public class HiveWriterFactory
 {
-    private static Logger log = Logger.get(HiveWriterFactory.class);
+    private static final Logger LOG = Logger.get(HiveWriterFactory.class);

     private static final int MAX_BUCKET_COUNT = 100_000;
     private static final int BUCKET_NUMBER_PADDING = Integer.toString(MAX_BUCKET_COUNT - 1).length();
@@ -985,15 +986,15 @@ public void mergeSubFiles(List<HiveWriter> writers)
     private void logContainingFolderInfo(FileSystem fileSystem, Path path, String message, Object... params)
     {
         try {
-            if (log.isDebugEnabled()) {
-                log.debug(message, params);
+            if (LOG.isDebugEnabled()) {
+                LOG.debug(message, params);
                 Arrays.stream(fileSystem.listStatus(path.getParent())).forEach(file -> {
-                    log.debug("%d\t%s", file.getLen(), file.getPath());
+                    LOG.debug("%d\t%s", file.getLen(), file.getPath());
                 });
             }
         }
         catch (IOException e) {
-            log.debug(e, "Failed to list folder content for %s: %s", path, e.getMessage());
+            LOG.debug(e, "Failed to list folder content for %s: %s", path, e.getMessage());
         }
     }

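This file's change goes slightly beyond the rename: private static Logger log becomes private static final Logger LOG, i.e. the field is also made final, which is what justifies the uppercase constant name. The isDebugEnabled() guard retained above is the standard way to avoid building expensive log arguments when debug output is off. A minimal sketch of that guard pattern, with a hypothetical expensiveSummary() standing in for costly work such as listing a directory:

    import io.airlift.log.Logger;

    public class GuardedLoggingExample
    {
        private static final Logger LOG = Logger.get(GuardedLoggingExample.class);

        public void logFolderInfo()
        {
            // Build the costly message only when debug logging is enabled.
            if (LOG.isDebugEnabled()) {
                LOG.debug("folder summary: %s", expensiveSummary());
            }
        }

        // Hypothetical stand-in for expensive work.
        private String expensiveSummary()
        {
            return "42 files, 1.3 GB";
        }
    }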
OrcFileWriter.java

@@ -64,7 +64,7 @@
 public class OrcFileWriter
         implements HiveFileWriter
 {
-    private static final Logger log = Logger.get(OrcFileWriter.class);
+    private static final Logger LOG = Logger.get(OrcFileWriter.class);

     private static final int INSTANCE_SIZE = ClassLayout.parseClass(OrcFileWriter.class).instanceSize();
     private static final ThreadMXBean THREAD_MX_BEAN = ManagementFactory.getThreadMXBean();
@@ -402,7 +402,7 @@ public void commit()
                 rollbackAction.call();
             }
             catch (Exception ignored) {
-                log.warn("RollbackAction error after roc commit error");
+                LOG.warn("RollbackAction error after roc commit error");
             }
             throw new PrestoException(HIVE_WRITER_CLOSE_ERROR, "Error committing write to Hive", e);
         }
(file name not captured)

@@ -11,6 +11,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 package io.prestosql.plugin.hive;

 import com.google.common.collect.ImmutableSet;