Skip to content

Commit

Permalink
rename property & replace branch with early return
Browse files Browse the repository at this point in the history
  • Loading branch information
twuebi committed Oct 28, 2024
1 parent 302c1b4 commit 3033ab0
Show file tree
Hide file tree
Showing 2 changed files with 27 additions and 24 deletions.
9 changes: 4 additions & 5 deletions core/src/main/java/org/apache/iceberg/CatalogProperties.java
Original file line number Diff line number Diff line change
Expand Up @@ -79,13 +79,12 @@ private CatalogProperties() {}
public static final boolean IO_MANIFEST_CACHE_ENABLED_DEFAULT = false;

/**
* Controls if client-side purging of files is enabled.
*
* <p>When set to false, the client will not
 * Controls whether engines using a REST catalog should delegate drop-table purge requests to the catalog.
 * Defaults to false, in which case the engine uses its own implementation for purging.
*/
public static final String IO_CLIENT_SIDE_PURGE_ENABLED = "io.client-side.purge-enabled";
public static final String REST_SERVER_SIDE_PURGE = "rest.server-side-purge";

public static final boolean IO_CLIENT_SIDE_PURGE_ENABLED_DEFAULT = true;
public static final boolean REST_SERVER_SIDE_PURGE_DEFAULT = false;

/**
* Controls the maximum duration for which an entry stays in the manifest cache.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@
*/
package org.apache.iceberg.spark;

import static org.apache.iceberg.CatalogProperties.IO_CLIENT_SIDE_PURGE_ENABLED;
import static org.apache.iceberg.CatalogProperties.IO_CLIENT_SIDE_PURGE_ENABLED_DEFAULT;
import static org.apache.iceberg.CatalogProperties.REST_SERVER_SIDE_PURGE;
import static org.apache.iceberg.CatalogProperties.REST_SERVER_SIDE_PURGE_DEFAULT;
import static org.apache.iceberg.TableProperties.GC_ENABLED;
import static org.apache.iceberg.TableProperties.GC_ENABLED_DEFAULT;

Expand Down Expand Up @@ -62,6 +62,8 @@
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.rest.RESTCatalog;
import org.apache.iceberg.rest.RESTSessionCatalog;
import org.apache.iceberg.spark.actions.SparkActions;
import org.apache.iceberg.spark.source.SparkChangelogTable;
import org.apache.iceberg.spark.source.SparkTable;
Expand Down Expand Up @@ -139,7 +141,7 @@ public class SparkCatalog extends BaseCatalog
private ViewCatalog asViewCatalog = null;
private String[] defaultNamespace = null;
private HadoopTables tables;
private boolean clientPurgeEnabled;
private boolean restServerPurgeEnabled;

/**
* Build an Iceberg {@link Catalog} to be used by this Spark catalog adapter.
Expand Down Expand Up @@ -368,22 +370,25 @@ public boolean purgeTable(Identifier ident) {
String metadataFileLocation =
((HasTableOperations) table).operations().current().metadataFileLocation();

if (this.clientPurgeEnabled) {
boolean dropped = dropTableWithoutPurging(ident);
if ((this.icebergCatalog instanceof RESTCatalog
|| this.icebergCatalog instanceof RESTSessionCatalog)
&& this.restServerPurgeEnabled) {
return dropTableWithPurging(ident);
}

if (dropped) {
// check whether the metadata file exists because HadoopCatalog/HadoopTables
// will drop the warehouse directly and ignore the `purge` argument
boolean metadataFileExists = table.io().newInputFile(metadataFileLocation).exists();
boolean dropped = dropTableWithoutPurging(ident);

if (metadataFileExists) {
SparkActions.get().deleteReachableFiles(metadataFileLocation).io(table.io()).execute();
}
if (dropped) {
// check whether the metadata file exists because HadoopCatalog/HadoopTables
// will drop the warehouse directly and ignore the `purge` argument
boolean metadataFileExists = table.io().newInputFile(metadataFileLocation).exists();

if (metadataFileExists) {
SparkActions.get().deleteReachableFiles(metadataFileLocation).io(table.io()).execute();
}
return dropped;
} else {
return dropTableWithPurging(ident);
}

return dropped;
} catch (org.apache.iceberg.exceptions.NoSuchTableException e) {
return false;
}
Expand Down Expand Up @@ -758,10 +763,9 @@ public final void initialize(String name, CaseInsensitiveStringMap options) {
CatalogProperties.CACHE_EXPIRATION_INTERVAL_MS,
CatalogProperties.CACHE_EXPIRATION_INTERVAL_MS_DEFAULT);

this.clientPurgeEnabled = PropertyUtil.propertyAsBoolean(
options,
IO_CLIENT_SIDE_PURGE_ENABLED,
IO_CLIENT_SIDE_PURGE_ENABLED_DEFAULT);
this.restServerPurgeEnabled =
PropertyUtil.propertyAsBoolean(
options, REST_SERVER_SIDE_PURGE, REST_SERVER_SIDE_PURGE_DEFAULT);

// An expiration interval of 0ms effectively disables caching.
// Do not wrap with CachingCatalog.
Expand Down

0 comments on commit 3033ab0

Please sign in to comment.