Skip to content

Commit

Permalink
DC-1261- Re-enable and update Firestore connected tests (#1817)
Browse files Browse the repository at this point in the history
  • Loading branch information
snf2ye authored Sep 25, 2024
1 parent 32fc183 commit ff92169
Show file tree
Hide file tree
Showing 9 changed files with 297 additions and 321 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/int-and-connected-test-run.yml
Original file line number Diff line number Diff line change
Expand Up @@ -73,8 +73,8 @@ jobs:
# extract service account credentials
base64 --decode <<< ${{ secrets.SA_B64_CREDENTIALS }} > ${GOOGLE_APPLICATION_CREDENTIALS}
base64 --decode <<< ${{ secrets.B64_RBS_APPLICATION_CREDENTIALS }} > ${RBS_CLIENT_CREDENTIAL_FILE_PATH}
# assemble code and run connected tests
./gradlew assemble -w testConnected --scan
# run connected tests
./gradlew --scan --warn testConnected
test_integration:
name: Integration tests
runs-on: ubuntu-latest
Expand Down Expand Up @@ -137,7 +137,7 @@ jobs:
# wait until api is ready
timeout 30 bash -c 'until curl -s ${IT_JADE_API_URL}/status; do sleep 1; done'
# run integration tests
./gradlew -w testIntegration --scan
./gradlew --scan --warn testIntegration
- name: Upload API logs
if: always()
uses: actions/upload-artifact@v4
Expand Down
2 changes: 0 additions & 2 deletions src/main/java/bio/terra/service/configuration/ConfigEnum.java
Original file line number Diff line number Diff line change
Expand Up @@ -53,8 +53,6 @@ public enum ConfigEnum {
SNAPSHOT_GRANT_ACCESS_FAULT,
SNAPSHOT_GRANT_FILE_ACCESS_FAULT,

FIRESTORE_RETRIEVE_FAULT,

CRITICAL_SYSTEM_FAULT,

// Faults to test the fault system
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
import static bio.terra.service.configuration.ConfigEnum.FILE_INGEST_UNLOCK_RETRY_FAULT;
import static bio.terra.service.configuration.ConfigEnum.FIRESTORE_QUERY_BATCH_SIZE;
import static bio.terra.service.configuration.ConfigEnum.FIRESTORE_RETRIES;
import static bio.terra.service.configuration.ConfigEnum.FIRESTORE_RETRIEVE_FAULT;
import static bio.terra.service.configuration.ConfigEnum.FIRESTORE_SNAPSHOT_BATCH_SIZE;
import static bio.terra.service.configuration.ConfigEnum.FIRESTORE_VALIDATE_BATCH_SIZE;
import static bio.terra.service.configuration.ConfigEnum.LOAD_BULK_ARRAY_FILES_MAX;
Expand Down Expand Up @@ -254,8 +253,5 @@ private void setConfiguration() {
SNAPSHOT_GRANT_ACCESS_FAULT, 0, 3, 100, ConfigFaultCountedModel.RateStyleEnum.FIXED);
addFaultCounted(
SNAPSHOT_GRANT_FILE_ACCESS_FAULT, 0, 3, 100, ConfigFaultCountedModel.RateStyleEnum.FIXED);

addFaultCounted(
FIRESTORE_RETRIEVE_FAULT, 0, 11, 100, ConfigFaultCountedModel.RateStyleEnum.FIXED);
}
}
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
package bio.terra.service.filedata.google.firestore;

import bio.terra.service.configuration.ConfigEnum;
import bio.terra.service.configuration.ConfigurationService;
import bio.terra.service.filedata.exception.FileAlreadyExistsException;
import bio.terra.service.filedata.exception.FileSystemCorruptException;
Expand All @@ -14,8 +13,6 @@
import com.google.cloud.firestore.Query;
import com.google.cloud.firestore.Transaction;
import com.google.cloud.firestore.WriteResult;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
Expand Down Expand Up @@ -159,11 +156,6 @@ FireStoreFile retrieveFileMetadata(Firestore firestore, String datasetId, String
xn -> {
DocumentSnapshot docSnap = lookupByFileId(firestore, collectionId, fileId, xn);

// Fault insertion to test retry
if (configurationService.testInsertFault(ConfigEnum.FIRESTORE_RETRIEVE_FAULT)) {
throw new StatusRuntimeException(Status.fromCodeValue(500));
}

return Optional.ofNullable(docSnap)
.map(d -> docSnap.toObject(FireStoreFile.class))
.orElse(null);
Expand Down
4 changes: 3 additions & 1 deletion src/test/java/bio/terra/common/category/Connected.java
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,6 @@
* Connected test category. Tests in this category require credentials allowing access to a GCP
* project for operating BigQuery and GCS buckets.
*/
public interface Connected {}
public interface Connected {
// JUnit 5 tag value identifying connected tests; applied to test classes
// via @Tag(Connected.TAG) so the Gradle testConnected task can select them.
String TAG = "bio.terra.common.category.Connected";
}
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,18 @@
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.hamcrest.Matchers.nullValue;

import bio.terra.app.configuration.ConnectedTestConfiguration;
import bio.terra.common.EmbeddedDatabaseTest;
import bio.terra.common.category.Connected;
import bio.terra.common.fixtures.ConnectedOperations;
import bio.terra.model.BillingProfileModel;
import bio.terra.model.DatasetSummaryModel;
import bio.terra.service.auth.iam.IamProviderInterface;
import bio.terra.service.configuration.ConfigurationService;
import bio.terra.service.dataset.Dataset;
import bio.terra.service.filedata.FileMetadataUtils;
import bio.terra.service.filedata.SnapshotCompute;
Expand All @@ -22,56 +26,73 @@
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.web.servlet.AutoConfigureMockMvc;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.context.junit.jupiter.SpringExtension;

@RunWith(SpringRunner.class)
@ExtendWith(SpringExtension.class)
@SpringBootTest
@AutoConfigureMockMvc
@ActiveProfiles({"google", "connectedtest"})
@Ignore
@Category(Connected.class)
@Tag(Connected.TAG)
@EmbeddedDatabaseTest
public class FireStoreDaoTest {
private final Logger logger = LoggerFactory.getLogger(FireStoreDaoTest.class);

class FireStoreDaoTest {
@Autowired private FireStoreDirectoryDao directoryDao;

@Autowired private FireStoreFileDao fileDao;

@Autowired private FireStoreDao dao;

@Autowired private FireStoreUtils fireStoreUtils;

@Autowired private FireStoreDependencyDao fireStoreDependencyDao;
@Autowired private ConnectedOperations connectedOperations;
@Autowired private ConnectedTestConfiguration testConfig;
@MockBean private IamProviderInterface samService;
@Autowired private ConfigurationService configService;

private Firestore firestore;
private UUID datasetId;
private UUID snapshotId;
private String datasetId;
private String snapshotId;
private Dataset dataset;
private Snapshot snapshot;

@Before
@BeforeEach
public void setup() throws Exception {
firestore = TestFirestoreProvider.getFirestore();
datasetId = UUID.randomUUID();
snapshotId = UUID.randomUUID();
connectedOperations.stubOutSamCalls(samService);
configService.reset();

// Create dataset so that we have a firestore instance to test with
BillingProfileModel billingProfile =
connectedOperations.createProfileForAccount(testConfig.getGoogleBillingAccountId());
DatasetSummaryModel summaryModel =
connectedOperations.createDataset(billingProfile, "dataset-minimal.json");
GoogleProjectResource projectResource =
new GoogleProjectResource().googleProjectId(summaryModel.getDataProject());
dataset = new Dataset().id(summaryModel.getId()).projectResource(projectResource);
datasetId = summaryModel.getId().toString();
var snapshotIdUUID = UUID.randomUUID();
snapshotId = snapshotIdUUID.toString();
snapshot = new Snapshot().id(snapshotIdUUID).projectResource(projectResource);

// real case will have separate dataset and snapshot instances
// But, we can share this firestore instance for this test
firestore = TestFirestoreProvider.getFirestore(summaryModel.getDataProject());
}

@After
@AfterEach
public void cleanup() throws Exception {
directoryDao.deleteDirectoryEntriesFromCollection(firestore, snapshotId.toString());
directoryDao.deleteDirectoryEntriesFromCollection(firestore, datasetId.toString());
fileDao.deleteFilesFromDataset(firestore, datasetId.toString(), i -> {});
if (datasetId != null) {
directoryDao.deleteDirectoryEntriesFromCollection(firestore, datasetId);
directoryDao.deleteDirectoryEntriesFromCollection(firestore, snapshotId);
fireStoreDependencyDao.deleteSnapshotFileDependencies(dataset, snapshotId);
fileDao.deleteFilesFromDataset(firestore, datasetId, f -> {});
}
connectedOperations.teardown();
}

// Test for snapshot file system
Expand All @@ -82,22 +103,19 @@ public void cleanup() throws Exception {
// - do the compute and validate
// Use binary for the sizes so each size combo will be unique
@Test
public void snapshotTest() throws Exception {
GoogleProjectResource projectResource =
new GoogleProjectResource().googleProjectId(System.getenv("GOOGLE_CLOUD_DATA_PROJECT"));
Dataset dataset = new Dataset().id(datasetId).projectResource(projectResource);
Snapshot snapshot = new Snapshot().id(snapshotId).projectResource(projectResource);
void snapshotTest() throws Exception {

// Make files that will be in the snapshot
List<FireStoreDirectoryEntry> snapObjects = new ArrayList<>();
snapObjects.add(makeFileObject(datasetId.toString(), "/adir/A1", 1));
snapObjects.add(makeFileObject(datasetId.toString(), "/adir/bdir/B1", 2));
snapObjects.add(makeFileObject(datasetId.toString(), "/adir/bdir/cdir/C1", 4));
snapObjects.add(makeFileObject(datasetId.toString(), "/adir/bdir/cdir/C2", 8));
snapObjects.add(makeFileObject(datasetId, "/adir/A1", 1));
snapObjects.add(makeFileObject(datasetId, "/adir/bdir/B1", 2));
snapObjects.add(makeFileObject(datasetId, "/adir/bdir/cdir/C1", 4));
snapObjects.add(makeFileObject(datasetId, "/adir/bdir/cdir/C2", 8));

// And some files that won't be in the snapshot
List<FireStoreDirectoryEntry> dsetObjects = new ArrayList<>();
dsetObjects.add(makeFileObject(datasetId.toString(), "/adir/bdir/B2", 16));
dsetObjects.add(makeFileObject(datasetId.toString(), "/adir/A2", 32));
dsetObjects.add(makeFileObject(datasetId, "/adir/bdir/B2", 16));
dsetObjects.add(makeFileObject(datasetId, "/adir/A2", 32));

List<String> dsfileIdList =
Streams.concat(
Expand All @@ -109,26 +127,19 @@ public void snapshotTest() throws Exception {
List<FireStoreDirectoryEntry> fileObjects = new ArrayList<>(snapObjects);
fileObjects.addAll(dsetObjects);
for (FireStoreDirectoryEntry fireStoreDirectoryEntry : fileObjects) {
directoryDao.createDirectoryEntry(firestore, datasetId.toString(), fireStoreDirectoryEntry);
directoryDao.createDirectoryEntry(firestore, datasetId, fireStoreDirectoryEntry);
}

// Make the snapshot file system
List<String> snapfileIdList =
snapObjects.stream().map(FireStoreDirectoryEntry::getFileId).toList();
directoryDao.addEntriesToSnapshot(
firestore,
datasetId.toString(),
"dataset",
firestore,
snapshotId.toString(),
snapfileIdList,
false);
firestore, datasetId, "dataset", firestore, snapshotId, snapfileIdList, false);

// Validate we can lookup files in the snapshot
for (FireStoreDirectoryEntry dsetObject : snapObjects) {
FireStoreDirectoryEntry snapObject =
directoryDao.retrieveById(firestore, snapshotId.toString(), dsetObject.getFileId());
assertNotNull("object found in snapshot", snapObject);
directoryDao.retrieveById(firestore, snapshotId, dsetObject.getFileId());
assertThat("objectId matches", snapObject.getFileId(), equalTo(dsetObject.getFileId()));
assertThat("path does not match", snapObject.getPath(), not(equalTo(dsetObject.getPath())));
}
Expand All @@ -137,45 +148,40 @@ public void snapshotTest() throws Exception {
// Before setting up the dependency file system, assert datasetHasSnapshotReference returns
// false
boolean noDependencies = fireStoreDependencyDao.datasetHasSnapshotReference(dataset);
assertFalse("Dataset should not yet have dependencies", noDependencies);
assertThat("Dataset should not yet have dependencies", noDependencies, is(false));

// Create dependency file system
fireStoreDependencyDao.storeSnapshotFileDependencies(
dataset, snapshotId.toString(), snapfileIdList);
fireStoreDependencyDao.storeSnapshotFileDependencies(dataset, snapshotId, snapfileIdList);

// Snapshot and File Dependency should now exist for dataset
boolean hasReference = fireStoreDependencyDao.datasetHasSnapshotReference(dataset);
assertTrue("Dataset should have dependencies", hasReference);
assertThat("Dataset should have dependencies", hasReference);

boolean hasFileReference =
fireStoreDependencyDao.fileHasSnapshotReference(dataset, snapObjects.get(0).getFileId());
assertTrue("File should be referenced in snapshot", hasFileReference);
assertThat("File should be referenced in snapshot", hasFileReference);

// Validate dataset files do not have references
boolean noFileReference =
fireStoreDependencyDao.fileHasSnapshotReference(dataset, dsetObjects.get(0).getFileId());
assertFalse("No dependency on files not referenced in snapshot", noFileReference);
assertThat("No dependency on files not referenced in snapshot", noFileReference, is(false));

// Validate we cannot lookup dataset files in the snapshot
for (FireStoreDirectoryEntry dsetObject : dsetObjects) {
FireStoreDirectoryEntry snapObject =
directoryDao.retrieveById(firestore, snapshotId.toString(), dsetObject.getFileId());
assertNull("object not found in snapshot", snapObject);
directoryDao.retrieveById(firestore, snapshotId, dsetObject.getFileId());
assertThat("object not found in snapshot", snapObject, is(nullValue()));
}

// Compute the size and checksums
FireStoreDirectoryEntry topDir =
directoryDao.retrieveByPath(firestore, snapshotId.toString(), "/");
FireStoreDirectoryEntry topDir = directoryDao.retrieveByPath(firestore, snapshotId, "/");
List<FireStoreDirectoryEntry> updateBatch = new ArrayList<>();
FireStoreDao.FirestoreComputeHelper helper =
dao.getHelper(firestore, firestore, snapshotId.toString());
FireStoreDao.FirestoreComputeHelper helper = dao.getHelper(firestore, firestore, snapshotId);
SnapshotCompute.computeDirectory(helper, topDir, updateBatch);
directoryDao.batchStoreDirectoryEntry(firestore, snapshotId.toString(), updateBatch);
directoryDao.batchStoreDirectoryEntry(firestore, snapshotId, updateBatch);

// Check the accumulated size on the root dir
FireStoreDirectoryEntry snapObject =
directoryDao.retrieveByPath(firestore, snapshotId.toString(), "/");
assertNotNull("root exists", snapObject);
FireStoreDirectoryEntry snapObject = directoryDao.retrieveByPath(firestore, snapshotId, "/");
assertThat("Total size is correct", snapObject.getSize(), equalTo(15L));

// Check that we can retrieve all with or without directories
Expand Down
Loading

0 comments on commit ff92169

Please sign in to comment.