-
Notifications
You must be signed in to change notification settings - Fork 293
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Allow for in-place restores. #1094
base: 4.x
Are you sure you want to change the base?
Changes from all commits
6bb0c1b
65137ee
b80bfb3
d51863c
7bb2a38
ce6c7c1
8d18f51
646973d
8c23800
aa3dffe
4d6d07f
f10289f
6b3d45a
4ece8d3
9557275
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -18,10 +18,14 @@ | |
package com.netflix.priam.backup; | ||
|
||
import com.google.common.collect.ImmutableMap; | ||
import com.google.common.collect.ImmutableSet; | ||
import com.netflix.priam.backupv2.IMetaProxy; | ||
import com.netflix.priam.utils.DateUtil; | ||
import java.io.IOException; | ||
import java.nio.file.DirectoryStream; | ||
import java.nio.file.Files; | ||
import java.nio.file.Path; | ||
import java.time.Instant; | ||
import java.nio.file.Paths; | ||
import java.util.*; | ||
import java.util.regex.Pattern; | ||
import java.util.stream.Collectors; | ||
|
@@ -83,19 +87,6 @@ public static List<AbstractBackupPath> getMostRecentSnapshotPaths( | |
return snapshotPaths; | ||
} | ||
|
||
public static List<AbstractBackupPath> getIncrementalPaths( | ||
AbstractBackupPath latestValidMetaFile, | ||
DateUtil.DateRange dateRange, | ||
IMetaProxy metaProxy) { | ||
Instant snapshotTime; | ||
snapshotTime = latestValidMetaFile.getLastModified(); | ||
DateUtil.DateRange incrementalDateRange = | ||
new DateUtil.DateRange(snapshotTime, dateRange.getEndTime()); | ||
List<AbstractBackupPath> incrementalPaths = new ArrayList<>(); | ||
metaProxy.getIncrementals(incrementalDateRange).forEachRemaining(incrementalPaths::add); | ||
return incrementalPaths; | ||
} | ||
|
||
public static Map<String, List<String>> getFilter(String inputFilter) | ||
throws IllegalArgumentException { | ||
if (StringUtils.isEmpty(inputFilter)) return null; | ||
|
@@ -144,4 +135,35 @@ public final boolean isFiltered(String keyspace, String columnFamilyDir) { | |
|| includeFilter.get(keyspace).contains(columnFamilyName))); | ||
return false; | ||
} | ||
|
||
/** | ||
* Get all the backup directories for Cassandra. | ||
* | ||
* @param dataDirectory the location of the data folder. | ||
* @param monitoringFolder folder where Cassandra backups are configured. | ||
* @return Set of the path(s) containing the backup folder for each columnfamily. | ||
* @throws IOException | ||
*/ | ||
public static ImmutableSet<Path> getBackupDirectories( | ||
String dataDirectory, String monitoringFolder) throws IOException { | ||
ImmutableSet.Builder<Path> backupPaths = ImmutableSet.builder(); | ||
Path dataPath = Paths.get(dataDirectory); | ||
if (Files.exists(dataPath) && Files.isDirectory(dataPath)) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The |
||
try (DirectoryStream<Path> directoryStream = | ||
Files.newDirectoryStream(dataPath, Files::isDirectory)) { | ||
for (Path keyspaceDirPath : directoryStream) { | ||
try (DirectoryStream<Path> keyspaceStream = | ||
Files.newDirectoryStream(keyspaceDirPath, Files::isDirectory)) { | ||
for (Path columnfamilyDirPath : keyspaceStream) { | ||
Path backupDirPath = | ||
Paths.get(columnfamilyDirPath.toString(), monitoringFolder); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I believe you can use |
||
if (Files.exists(backupDirPath) && Files.isDirectory(backupDirPath)) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Same comment here that the |
||
backupPaths.add(backupDirPath); | ||
} | ||
} | ||
} | ||
} | ||
} | ||
return backupPaths.build(); | ||
} | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -74,9 +74,7 @@ public void execute() throws Exception { | |
return; | ||
} | ||
|
||
if (instanceState.getRestoreStatus() != null | ||
&& instanceState.getRestoreStatus().getStatus() != null | ||
&& instanceState.getRestoreStatus().getStatus() == Status.STARTED) { | ||
if (instanceState.isRestoring()) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I like this cleanup 🙌 |
||
logger.info("Skipping backup verification. Priam is in restore mode."); | ||
return; | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -17,12 +17,12 @@ | |
|
||
package com.netflix.priam.backupv2; | ||
|
||
import com.google.common.collect.ImmutableList; | ||
import com.netflix.priam.backup.AbstractBackupPath; | ||
import com.netflix.priam.backup.BackupRestoreException; | ||
import com.netflix.priam.backup.BackupVerificationResult; | ||
import com.netflix.priam.utils.DateUtil; | ||
import java.nio.file.Path; | ||
import java.util.Iterator; | ||
import java.util.List; | ||
|
||
/** Proxy to do management tasks for meta files. Created by aagrawal on 12/18/18. */ | ||
|
@@ -78,7 +78,7 @@ public interface IMetaProxy { | |
* @param dateRange the time period to scan in the remote file system for incremental files. | ||
* @return iterator containing the list of path on the remote file system satisfying criteria. | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. update javadoc if you are going to keep |
||
*/ | ||
Iterator<AbstractBackupPath> getIncrementals(DateUtil.DateRange dateRange); | ||
ImmutableList<AbstractBackupPath> getIncrementals(DateUtil.DateRange dateRange); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. mostly curious: why this change? the iterator gives us flexibility that the data could be "streamed" where as this locks us in to determining all the incrementals up front. Likely not an issue but something I noticed. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Looking at the implementation we have converted from something that would get called on each iteration to something that gets called all during the execution of this method. Not sure thats a problem or not but wanted to call it out. |
||
|
||
/** | ||
* Validate that all the files mentioned in the meta file actually exists on remote file system. | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -17,6 +17,7 @@ | |
|
||
package com.netflix.priam.backupv2; | ||
|
||
import com.google.common.collect.ImmutableList; | ||
import com.netflix.priam.backup.AbstractBackupPath; | ||
import com.netflix.priam.backup.BackupRestoreException; | ||
import com.netflix.priam.backup.BackupVerificationResult; | ||
|
@@ -31,8 +32,6 @@ | |
import java.util.*; | ||
import javax.inject.Inject; | ||
import javax.inject.Provider; | ||
import org.apache.commons.collections4.iterators.FilterIterator; | ||
import org.apache.commons.collections4.iterators.TransformIterator; | ||
import org.apache.commons.io.FileUtils; | ||
import org.apache.commons.io.filefilter.FileFilterUtils; | ||
import org.apache.commons.io.filefilter.IOFileFilter; | ||
|
@@ -84,40 +83,34 @@ private String getMatch( | |
} | ||
|
||
@Override | ||
public Iterator<AbstractBackupPath> getIncrementals(DateUtil.DateRange dateRange) { | ||
String incrementalPrefix = getMatch(dateRange, AbstractBackupPath.BackupFileType.SST_V2); | ||
String marker = | ||
getMatch( | ||
new DateUtil.DateRange(dateRange.getStartTime(), null), | ||
AbstractBackupPath.BackupFileType.SST_V2); | ||
public ImmutableList<AbstractBackupPath> getIncrementals(DateUtil.DateRange dateRange) { | ||
return new ImmutableList.Builder<AbstractBackupPath>() | ||
.addAll(getIncrementals(dateRange, AbstractBackupPath.BackupFileType.SST_V2)) | ||
.addAll( | ||
getIncrementals( | ||
dateRange, AbstractBackupPath.BackupFileType.SECONDARY_INDEX_V2)) | ||
.build(); | ||
} | ||
|
||
private ImmutableList<AbstractBackupPath> getIncrementals( | ||
DateUtil.DateRange dateRange, AbstractBackupPath.BackupFileType type) { | ||
String incrementalPrefix = getMatch(dateRange, type); | ||
String marker = getMatch(new DateUtil.DateRange(dateRange.getStartTime(), null), type); | ||
logger.info( | ||
"Listing filesystem with prefix: {}, marker: {}, daterange: {}", | ||
incrementalPrefix, | ||
marker, | ||
dateRange); | ||
Iterator<String> iterator = fs.listFileSystem(incrementalPrefix, null, marker); | ||
Iterator<AbstractBackupPath> transformIterator = | ||
new TransformIterator<>( | ||
iterator, | ||
s -> { | ||
AbstractBackupPath path = abstractBackupPathProvider.get(); | ||
path.parseRemote(s); | ||
return path; | ||
}); | ||
|
||
return new FilterIterator<>( | ||
transformIterator, | ||
abstractBackupPath -> | ||
(abstractBackupPath.getLastModified().isAfter(dateRange.getStartTime()) | ||
&& abstractBackupPath | ||
.getLastModified() | ||
.isBefore(dateRange.getEndTime())) | ||
|| abstractBackupPath | ||
.getLastModified() | ||
.equals(dateRange.getStartTime()) | ||
|| abstractBackupPath | ||
.getLastModified() | ||
.equals(dateRange.getEndTime())); | ||
ImmutableList.Builder<AbstractBackupPath> results = ImmutableList.builder(); | ||
while (iterator.hasNext()) { | ||
AbstractBackupPath path = abstractBackupPathProvider.get(); | ||
path.parseRemote(iterator.next()); | ||
if (dateRange.contains(path.getLastModified())) { | ||
results.add(path); | ||
} | ||
} | ||
return results.build(); | ||
} | ||
|
||
@Override | ||
|
@@ -136,10 +129,7 @@ public List<AbstractBackupPath> findMetaFiles(DateUtil.DateRange dateRange) { | |
AbstractBackupPath abstractBackupPath = abstractBackupPathProvider.get(); | ||
abstractBackupPath.parseRemote(iterator.next()); | ||
logger.debug("Meta file found: {}", abstractBackupPath); | ||
if (abstractBackupPath.getLastModified().toEpochMilli() | ||
>= dateRange.getStartTime().toEpochMilli() | ||
&& abstractBackupPath.getLastModified().toEpochMilli() | ||
<= dateRange.getEndTime().toEpochMilli()) { | ||
if (dateRange.contains(abstractBackupPath.getLastModified())) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Really like this sort of cleanup as well. |
||
metas.add(abstractBackupPath); | ||
} | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -125,11 +125,6 @@ default int getBackupRetentionDays() { | |
return 0; | ||
} | ||
|
||
/** @return Get list of racs to backup. Backup all racs if empty */ | ||
default List<String> getBackupRacs() { | ||
return Collections.EMPTY_LIST; | ||
} | ||
|
||
/** | ||
* Backup location i.e. remote file system to upload backups. e.g. for S3 it will be s3 bucket | ||
* name | ||
|
@@ -403,6 +398,10 @@ default String getRestoreSnapshot() { | |
return StringUtils.EMPTY; | ||
} | ||
|
||
default String getRestoreDataLocation() { | ||
return getCassandraBaseDirectory() + "/restore"; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. To confirm my understanding on our systems this would be something like |
||
} | ||
|
||
/** @return Get the region to connect to SDB for instance identity */ | ||
default String getSDBInstanceIdentityRegion() { | ||
return "us-east-1"; | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
While we are in here, consider renaming this variable; it's not idiomatic Java.