diff --git a/.doc_gen/metadata/medical-imaging_metadata.yaml b/.doc_gen/metadata/medical-imaging_metadata.yaml
index 3da2705a497..37976f5e879 100644
--- a/.doc_gen/metadata/medical-imaging_metadata.yaml
+++ b/.doc_gen/metadata/medical-imaging_metadata.yaml
@@ -26,6 +26,15 @@ medical-imaging_Hello:
- description: Code for the hello_health_imaging.cpp source file.
snippet_tags:
- cpp.example_code.medical-imaging.hello_medical-imaging
+ Python:
+ versions:
+ - sdk_version: 3
+ github: python/example_code/medical-imaging/imaging_set_and_frames_workflow
+ github_note_at_bottom: true
+ excerpts:
+ - description:
+ snippet_tags:
+ - python.example_code.medical-imaging.Hello
services:
medical-imaging: {ListDatastores}
medical-imaging_CreateDatastore:
@@ -394,6 +403,12 @@ medical-imaging_SearchImageSets:
previously persisted.
snippet_tags:
- python.example_code.medical-imaging.SearchImageSets.use_case3
+ - description: >
+ Use case #4: EQUAL operator on DICOMSeriesInstanceUID and
+ BETWEEN on updatedAt and sort response in ASC order on
+ updatedAt field.
+ snippet_tags:
+ - python.example_code.medical-imaging.SearchImageSets.use_case4
- description: >
The following code instantiates the MedicalImagingWrapper object.
snippet_tags:
@@ -423,6 +438,13 @@ medical-imaging_SearchImageSets:
snippet_tags:
- medical-imaging.JavaScript.resource.searchImageSetV3.datastoreID
- medical-imaging.JavaScript.resource.searchImageSetV3.betweenFilter2
+ - description: >
+ Use case #4: EQUAL operator on DICOMSeriesInstanceUID and
+ BETWEEN on updatedAt and sort response in ASC order on
+ updatedAt field.
+ snippet_tags:
+ - medical-imaging.JavaScript.resource.searchImageSetV3.datastoreID
+ - medical-imaging.JavaScript.resource.searchImageSetV3.sortAndFilter
Java:
versions:
- sdk_version: 2
@@ -445,6 +467,12 @@ medical-imaging_SearchImageSets:
previously persisted.
snippet_tags:
- medicalimaging.java2.search_imagesets.use_case3
+ - description: >
+ Use case #4: EQUAL operator on DICOMSeriesInstanceUID and
+ BETWEEN on updatedAt and sort response in ASC order on
+ updatedAt field.
+ snippet_tags:
+ - medicalimaging.java2.search_imagesets.use_case4
C++:
versions:
- sdk_version: 1
@@ -467,6 +495,12 @@ medical-imaging_SearchImageSets:
previously persisted.
snippet_tags:
- cpp.example_code.medical_imaging.SearchImageSets.use_case3
+ - description: >
+ Use case #4: EQUAL operator on DICOMSeriesInstanceUID and
+ BETWEEN on updatedAt and sort response in ASC order on
+ updatedAt field.
+ snippet_tags:
+ - cpp.example_code.medical_imaging.SearchImageSets.use_case4
services:
medical-imaging: {SearchImageSets}
medical-imaging_GetImageSet:
@@ -1193,21 +1227,72 @@ medical-imaging_Scenario_ImageSetsAndFrames:
snippet_tags:
- cpp.example_code.medical-imaging.image-sets-workflow.clean_up
- cpp.example_code.medical-imaging.image-sets-workflow.empty_data_store
+ Python:
+ versions:
+ - sdk_version: 3
+ github: python/example_code/medical-imaging/imaging_set_and_frames_workflow
+ github_note_at_bottom: true
+ excerpts:
+ - description: Create an &CFN; stack with the necessary resources.
+ snippet_tags:
+ - python.example_code.medical-imaging.workflow.deploy
+ - description: Copy DICOM files to the &S3; import bucket.
+ snippet_tags:
+ - python.example_code.medical-imaging.workflow.copy
+ - description: Import the DICOM files to the &S3; data store.
+ snippet_tags:
+ - python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl
+ - python.example_code.medical-imaging.workflow.StartDICOMImportJob
+ - description: Get image sets created by the DICOM import job.
+ snippet_tags:
+ - python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl
+ - python.example_code.medical-imaging.workflow.GetImageSetsForImportJob
+ - python.example_code.medical-imaging.workflow.GetImageSet
+ - description: Get image frame information for image sets.
+ snippet_tags:
+ - python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl
+ - python.example_code.medical-imaging.workflow.GetImageFrames
+ - python.example_code.medical-imaging.workflow.GetImageSetMetadata
+ - description: Download, decode and verify image frames.
+ snippet_tags:
+ - python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl
+ - python.example_code.medical-imaging.workflow.GetPixelData
+ - python.example_code.medical-imaging.workflow.downloadAndCheck
+ - description: Clean up resources.
+ snippet_tags:
+ - python.example_code.medical-imaging.workflow.destroy
+ - python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl
+ - python.example_code.medical-imaging.workflow.SearchImageSets
+ - python.example_code.medical-imaging.workflow.DeleteImageSet
JavaScript:
versions:
- sdk_version: 3
github: javascriptv3/example_code/medical-imaging
github_note_at_bottom: true
excerpts:
- - description:
+ - description: index.js - Orchestrate steps.
snippet_files:
- javascriptv3/example_code/medical-imaging/scenarios/health-image-sets/index.js
+ - description: step-1.js - Deploy resources.
+ snippet_files:
- javascriptv3/example_code/medical-imaging/scenarios/health-image-sets/step-1.js
+ - description: step-2.js - Copy DICOM files.
+ snippet_files:
- javascriptv3/example_code/medical-imaging/scenarios/health-image-sets/step-2.js
+ - description: step-3.js - Start import into datastore.
+ snippet_files:
- javascriptv3/example_code/medical-imaging/scenarios/health-image-sets/step-3.js
+ - description: step-4.js - Get image set IDs.
+ snippet_files:
- javascriptv3/example_code/medical-imaging/scenarios/health-image-sets/step-4.js
+ - description: step-5.js - Get image frame IDs.
+ snippet_files:
- javascriptv3/example_code/medical-imaging/scenarios/health-image-sets/step-5.js
+ - description: step-6.js - Verify image frames. The &AWS; HealthImaging Pixel Data Verification library was used for verification.
+ snippet_files:
- javascriptv3/example_code/medical-imaging/scenarios/health-image-sets/step-6.js
+ - description: step-7.js - Destroy resources.
+ snippet_files:
- javascriptv3/example_code/medical-imaging/scenarios/health-image-sets/step-7.js
services:
medical-imaging:
diff --git a/.github/workflows/docker-push.yml b/.github/workflows/docker-push.yml
index c12c7d40eb2..fdddafadf0f 100644
--- a/.github/workflows/docker-push.yml
+++ b/.github/workflows/docker-push.yml
@@ -17,8 +17,7 @@ jobs:
name: Push Docker image to ECR Public
if: github.event.pull_request.merged == true
env:
- REGISTRY: public.ecr.aws
- REGISTRY_ALIAS: b4v4v1s0
+ REGISTRY: 808326389482.dkr.ecr.us-east-1.amazonaws.com
IMAGE_TAG: latest
REGION: us-east-1
runs-on: ubuntu-latest
@@ -51,10 +50,10 @@ jobs:
env:
REPOSITORY: ruby
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile ./$REPOSITORY
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
#################################
@@ -66,10 +65,10 @@ jobs:
env:
REPOSITORY: python
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile ./$REPOSITORY
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
#################################
@@ -81,10 +80,10 @@ jobs:
env:
REPOSITORY: javascriptv3
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile .
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
#################################
@@ -96,10 +95,10 @@ jobs:
env:
REPOSITORY: rustv1
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile ./$REPOSITORY
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
#################################
@@ -111,10 +110,10 @@ jobs:
env:
REPOSITORY: gov2
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile ./$REPOSITORY
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
#################################
@@ -126,10 +125,10 @@ jobs:
env:
REPOSITORY: cpp
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile ./$REPOSITORY
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
#################################
@@ -141,10 +140,10 @@ jobs:
env:
REPOSITORY: dotnetv3
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile ./$REPOSITORY
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
#################################
@@ -156,10 +155,10 @@ jobs:
env:
REPOSITORY: javav2
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile ./$REPOSITORY
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
#################################
@@ -171,10 +170,10 @@ jobs:
env:
REPOSITORY: kotlin
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile ./$REPOSITORY
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
#################################
@@ -186,10 +185,10 @@ jobs:
env:
REPOSITORY: php
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile ./$REPOSITORY
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
#################################
@@ -201,8 +200,8 @@ jobs:
env:
REPOSITORY: swift
run: |
- aws ecr-public get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY/$REGISTRY_ALIAS
+ aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin $REGISTRY
docker build -t $REPOSITORY -f ./$REPOSITORY/Dockerfile ./$REPOSITORY
- docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
- docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG
+ docker tag $REPOSITORY:$IMAGE_TAG $REGISTRY/$REPOSITORY:$IMAGE_TAG
+ docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG
continue-on-error: true
diff --git a/.github/workflows/validate-doc-metadata.yml b/.github/workflows/validate-doc-metadata.yml
index cd8daeed315..2b273e0026d 100644
--- a/.github/workflows/validate-doc-metadata.yml
+++ b/.github/workflows/validate-doc-metadata.yml
@@ -18,4 +18,4 @@ jobs:
- name: checkout repo content
uses: actions/checkout@v4
- name: validate metadata
- uses: awsdocs/aws-doc-sdk-examples-tools@2024-04-10-A
+ uses: awsdocs/aws-doc-sdk-examples-tools@2024-04-19-A
diff --git a/cpp/example_code/medical-imaging/medical-imaging_samples.h b/cpp/example_code/medical-imaging/medical-imaging_samples.h
index 3459c6cb027..8e4d6446c17 100644
--- a/cpp/example_code/medical-imaging/medical-imaging_samples.h
+++ b/cpp/example_code/medical-imaging/medical-imaging_samples.h
@@ -91,11 +91,10 @@ namespace AwsDoc {
\param clientConfig: Aws client configuration.
\return bool: Function succeeded.
*/
- bool searchImageSets(
- const Aws::String &dataStoreID,
- const Aws::MedicalImaging::Model::SearchCriteria &searchCriteria,
- Aws::Vector<Aws::String> &imageSetResults,
- const Aws::Client::ClientConfiguration &clientConfig);
+ bool searchImageSets(const Aws::String &dataStoreID,
+ const Aws::MedicalImaging::Model::SearchCriteria &searchCriteria,
+ Aws::Vector<Aws::String> &imageSetResults,
+ const Aws::Client::ClientConfiguration &clientConfig);
} // namespace Medical_Imaging
diff --git a/cpp/example_code/medical-imaging/search_image_sets.cpp b/cpp/example_code/medical-imaging/search_image_sets.cpp
index 2a17f6d1e19..0db01f25a6a 100644
--- a/cpp/example_code/medical-imaging/search_image_sets.cpp
+++ b/cpp/example_code/medical-imaging/search_image_sets.cpp
@@ -30,11 +30,10 @@
\param clientConfig: Aws client configuration.
\return bool: Function succeeded.
*/
-bool AwsDoc::Medical_Imaging::searchImageSets(
- const Aws::String &dataStoreID,
- const Aws::MedicalImaging::Model::SearchCriteria &searchCriteria,
- Aws::Vector<Aws::String> &imageSetResults,
- const Aws::Client::ClientConfiguration &clientConfig) {
+bool AwsDoc::Medical_Imaging::searchImageSets(const Aws::String &dataStoreID,
+ const Aws::MedicalImaging::Model::SearchCriteria &searchCriteria,
+ Aws::Vector<Aws::String> &imageSetResults,
+ const Aws::Client::ClientConfiguration &clientConfig) {
Aws::MedicalImaging::MedicalImagingClient client(clientConfig);
Aws::MedicalImaging::Model::SearchImageSetsRequest request;
request.SetDatastoreId(dataStoreID);
@@ -71,7 +70,7 @@ bool AwsDoc::Medical_Imaging::searchImageSets(
*
* main function
*
- * Usage: 'run_search_image_sets <datastore_id> <patient_id>'
+ * Usage: 'run_search_image_sets <datastore_id> <patient_id> <dicom_series_instance_uid>'
*
* Prerequisites: A HealthImaging data store containing image sets to search.
*
@@ -80,9 +79,9 @@ bool AwsDoc::Medical_Imaging::searchImageSets(
#ifndef TESTING_BUILD
int main(int argc, char **argv) {
- if (argc != 3) {
+ if (argc != 4) {
std::cout
- << "Usage: 'run_search_image_sets <datastore_id> <patient_id>'"
+ << "Usage: 'run_search_image_sets <datastore_id> <patient_id> <dicom_series_instance_uid>'"
<< std::endl;
return 1;
}
@@ -91,6 +90,7 @@ int main(int argc, char **argv) {
{
Aws::String dataStoreID = argv[1];
Aws::String patientID = argv[2];
+ Aws::String dicomSeriesInstanceUID = argv[3];
Aws::Client::ClientConfiguration clientConfig;
// Optional: Set to the AWS Region in which the bucket was created (overrides config file).
@@ -104,8 +104,11 @@ int main(int argc, char **argv) {
Aws::MedicalImaging::Model::SearchFilter().WithOperator(Aws::MedicalImaging::Model::Operator::EQUAL)
.WithValues({Aws::MedicalImaging::Model::SearchByAttributeValue().WithDICOMPatientId(patientID)})
};
+
searchCriteriaEqualsPatientID.SetFilters(patientIDSearchFilters);
- bool result = AwsDoc::Medical_Imaging::searchImageSets(dataStoreID, searchCriteriaEqualsPatientID, imageIDsForPatientID,
+ bool result = AwsDoc::Medical_Imaging::searchImageSets(dataStoreID,
+ searchCriteriaEqualsPatientID,
+ imageIDsForPatientID,
clientConfig);
if (result) {
std::cout << imageIDsForPatientID.size() << " image sets found for the patient with ID '"
@@ -137,8 +140,10 @@ int main(int argc, char **argv) {
useCase2SearchCriteria.SetFilters({useCase2SearchFilter});
 Aws::Vector<Aws::String> usesCase2Results;
- result = AwsDoc::Medical_Imaging::searchImageSets(dataStoreID, useCase2SearchCriteria, usesCase2Results,
- clientConfig);
+ result = AwsDoc::Medical_Imaging::searchImageSets(dataStoreID,
+ useCase2SearchCriteria,
+ usesCase2Results,
+ clientConfig);
if (result) {
std::cout << usesCase2Results.size() << " image sets found for between 1999/01/01 and present."
<< std::endl;
@@ -165,7 +170,9 @@ int main(int argc, char **argv) {
useCase3SearchCriteria.SetFilters({useCase3SearchFilter});
 Aws::Vector<Aws::String> usesCase3Results;
- result = AwsDoc::Medical_Imaging::searchImageSets(dataStoreID, useCase3SearchCriteria, usesCase3Results,
+ result = AwsDoc::Medical_Imaging::searchImageSets(dataStoreID,
+ useCase3SearchCriteria,
+ usesCase3Results,
clientConfig);
if (result) {
std::cout << usesCase3Results.size() << " image sets found for created between 2023/11/30 and present."
@@ -176,6 +183,50 @@ int main(int argc, char **argv) {
}
//snippet-end:[cpp.example_code.medical_imaging.SearchImageSets.use_case3]
+ // Use case #4: EQUAL operator on DICOMSeriesInstanceUID and BETWEEN on updatedAt and sort response
+ // in ASC order on updatedAt field.
+ //snippet-start:[cpp.example_code.medical_imaging.SearchImageSets.use_case4]
+ Aws::MedicalImaging::Model::SearchByAttributeValue useCase4StartDate;
+ useCase4StartDate.SetUpdatedAt(Aws::Utils::DateTime("20231130T000000000Z",Aws::Utils::DateFormat::ISO_8601_BASIC));
+
+ Aws::MedicalImaging::Model::SearchByAttributeValue useCase4EndDate;
+ useCase4EndDate.SetUpdatedAt(Aws::Utils::DateTime(std::chrono::system_clock::now()));
+
+ Aws::MedicalImaging::Model::SearchFilter useCase4SearchFilterBetween;
+ useCase4SearchFilterBetween.SetValues({useCase4StartDate, useCase4EndDate});
+ useCase4SearchFilterBetween.SetOperator(Aws::MedicalImaging::Model::Operator::BETWEEN);
+
+ Aws::MedicalImaging::Model::SearchByAttributeValue seriesInstanceUID;
+ seriesInstanceUID.SetDICOMSeriesInstanceUID(dicomSeriesInstanceUID);
+
+ Aws::MedicalImaging::Model::SearchFilter useCase4SearchFilterEqual;
+ useCase4SearchFilterEqual.SetValues({seriesInstanceUID});
+ useCase4SearchFilterEqual.SetOperator(Aws::MedicalImaging::Model::Operator::EQUAL);
+
+ Aws::MedicalImaging::Model::SearchCriteria useCase4SearchCriteria;
+ useCase4SearchCriteria.SetFilters({useCase4SearchFilterBetween, useCase4SearchFilterEqual});
+
+ Aws::MedicalImaging::Model::Sort useCase4Sort;
+ useCase4Sort.SetSortField(Aws::MedicalImaging::Model::SortField::updatedAt);
+ useCase4Sort.SetSortOrder(Aws::MedicalImaging::Model::SortOrder::ASC);
+
+ useCase4SearchCriteria.SetSort(useCase4Sort);
+
+ Aws::Vector<Aws::String> usesCase4Results;
+ result = AwsDoc::Medical_Imaging::searchImageSets(dataStoreID,
+ useCase4SearchCriteria,
+ usesCase4Results,
+ clientConfig);
+ if (result) {
+ std::cout << usesCase4Results.size() << " image sets found for EQUAL operator "
+ << "on DICOMSeriesInstanceUID and BETWEEN on updatedAt and sort response\n"
+ << "in ASC order on updatedAt field." << std::endl;
+ for (auto &imageSetResult : usesCase4Results) {
+ std::cout << " Image set with ID '" << imageSetResult << std::endl;
+ }
+ }
+ //snippet-end:[cpp.example_code.medical_imaging.SearchImageSets.use_case4]
+
}
Aws::ShutdownAPI(options);
diff --git a/javascriptv3/example_code/medical-imaging/actions/search-image-sets.js b/javascriptv3/example_code/medical-imaging/actions/search-image-sets.js
index 0b9c8049fae..4f66bd10c49 100644
--- a/javascriptv3/example_code/medical-imaging/actions/search-image-sets.js
+++ b/javascriptv3/example_code/medical-imaging/actions/search-image-sets.js
@@ -1,129 +1,167 @@
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
-import { fileURLToPath } from "url";
+import {fileURLToPath} from "url";
// snippet-start:[medical-imaging.JavaScript.resource.searchImageSetV3]
-import { paginateSearchImageSets } from "@aws-sdk/client-medical-imaging";
-import { medicalImagingClient } from "../libs/medicalImagingClient.js";
+import {paginateSearchImageSets} from "@aws-sdk/client-medical-imaging";
+import {medicalImagingClient} from "../libs/medicalImagingClient.js";
/**
* @param {string} datastoreId - The data store's ID.
* @param { import('@aws-sdk/client-medical-imaging').SearchFilter[] } filters - The search criteria filters.
+ * @param { import('@aws-sdk/client-medical-imaging').Sort } sort - The search criteria sort.
*/
export const searchImageSets = async (
- datastoreId = "xxxxxxxx",
- filters = []
+ datastoreId = "xxxxxxxx",
+ searchCriteria = {}
) => {
- const paginatorConfig = {
- client: medicalImagingClient,
- pageSize: 50,
- };
-
- const commandParams = {
- datastoreId: datastoreId,
- searchCriteria: {
- filters,
- },
- };
-
- const paginator = paginateSearchImageSets(paginatorConfig, commandParams);
-
- const imageSetsMetadataSummaries = [];
- for await (const page of paginator) {
- // Each page contains a list of `jobSummaries`. The list is truncated if is larger than `pageSize`.
- imageSetsMetadataSummaries.push(...page["imageSetsMetadataSummaries"]);
- console.log(page);
- }
- // {
- // '$metadata': {
- // httpStatusCode: 200,
- // requestId: 'f009ea9c-84ca-4749-b5b6-7164f00a5ada',
- // extendedRequestId: undefined,
- // cfId: undefined,
- // attempts: 1,
- // totalRetryDelay: 0
- // },
- // imageSetsMetadataSummaries: [
- // {
- // DICOMTags: [Object],
- // createdAt: "2023-09-19T16:59:40.551Z",
- // imageSetId: '7f75e1b5c0f40eac2b24cf712f485f50',
- // updatedAt: "2023-09-19T16:59:40.551Z",
- // version: 1
- // }]
- // }
-
- return imageSetsMetadataSummaries;
+ const paginatorConfig = {
+ client: medicalImagingClient,
+ pageSize: 50,
+ };
+
+ const commandParams = {
+ datastoreId: datastoreId,
+ searchCriteria: searchCriteria,
+ };
+
+ const paginator = paginateSearchImageSets(paginatorConfig, commandParams);
+
+ const imageSetsMetadataSummaries = [];
+ for await (const page of paginator) {
+ // Each page contains a list of `jobSummaries`. The list is truncated if is larger than `pageSize`.
+ imageSetsMetadataSummaries.push(...page["imageSetsMetadataSummaries"]);
+ console.log(page);
+ }
+ // {
+ // '$metadata': {
+ // httpStatusCode: 200,
+ // requestId: 'f009ea9c-84ca-4749-b5b6-7164f00a5ada',
+ // extendedRequestId: undefined,
+ // cfId: undefined,
+ // attempts: 1,
+ // totalRetryDelay: 0
+ // },
+ // imageSetsMetadataSummaries: [
+ // {
+ // DICOMTags: [Object],
+ // createdAt: "2023-09-19T16:59:40.551Z",
+ // imageSetId: '7f75e1b5c0f40eac2b24cf712f485f50',
+ // updatedAt: "2023-09-19T16:59:40.551Z",
+ // version: 1
+ // }]
+ // }
+
+ return imageSetsMetadataSummaries;
};
// snippet-end:[medical-imaging.JavaScript.resource.searchImageSetV3]
// Invoke the following code if this file is being run directly.
if (process.argv[1] === fileURLToPath(import.meta.url)) {
- // snippet-start:[medical-imaging.JavaScript.resource.searchImageSetV3.datastoreID]
- const datastoreId = "12345678901234567890123456789012";
- // snippet-end:[medical-imaging.JavaScript.resource.searchImageSetV3.datastoreID]
- // Search using EQUAL operator.
- // snippet-start:[medical-imaging.JavaScript.resource.searchImageSetV3.equalFilter]
- try {
- const filters = [
- {
- values: [{ DICOMPatientId: "9227465" }],
- operator: "EQUAL",
- },
- ];
-
- await searchImageSets(datastoreId, filters);
- } catch (err) {
- console.error(err);
- }
- // snippet-end:[medical-imaging.JavaScript.resource.searchImageSetV3.equalFilter]
-
- // Search with BETWEEN operator using DICOMStudyDate and DICOMStudyTime.
- // snippet-start:[medical-imaging.JavaScript.resource.searchImageSetV3.betweenFilter1]
- try {
- const filters = [
- {
- values: [
- {
- DICOMStudyDateAndTime: {
- DICOMStudyDate: "19900101",
- DICOMStudyTime: "000000",
- },
- },
- {
- DICOMStudyDateAndTime: {
- DICOMStudyDate: "20230901",
- DICOMStudyTime: "000000",
- },
- },
- ],
- operator: "BETWEEN",
- },
- ];
-
- await searchImageSets(datastoreId, filters);
- } catch (err) {
- console.error(err);
- }
- // snippet-end:[medical-imaging.JavaScript.resource.searchImageSetV3.betweenFilter1]
-
- // Search with BETWEEN operator and createdAt date.
- // snippet-start:[medical-imaging.JavaScript.resource.searchImageSetV3.betweenFilter2]
- try {
- const filters = [
- {
- values: [
- { createdAt: new Date("1985-04-12T23:20:50.52Z") },
- { createdAt: new Date("2023-09-12T23:20:50.52Z") },
- ],
- operator: "BETWEEN",
- },
- ];
-
- await searchImageSets(datastoreId, filters);
- } catch (err) {
- console.error(err);
- }
- // snippet-end:[medical-imaging.JavaScript.resource.searchImageSetV3.betweenFilter2]
+ // snippet-start:[medical-imaging.JavaScript.resource.searchImageSetV3.datastoreID]
+ const datastoreId = "12345678901234567890123456789012";
+ // snippet-end:[medical-imaging.JavaScript.resource.searchImageSetV3.datastoreID]
+ // Search using EQUAL operator.
+ // snippet-start:[medical-imaging.JavaScript.resource.searchImageSetV3.equalFilter]
+ try {
+ const searchCriteria = {
+ filters: [
+ {
+ values: [{DICOMPatientId: "1234567"}],
+ operator: "EQUAL",
+ },
+ ]
+ };
+
+ await searchImageSets(datastoreId, searchCriteria);
+ } catch (err) {
+ console.error(err);
+ }
+ // snippet-end:[medical-imaging.JavaScript.resource.searchImageSetV3.equalFilter]
+
+ // Search with BETWEEN operator using DICOMStudyDate and DICOMStudyTime.
+ // snippet-start:[medical-imaging.JavaScript.resource.searchImageSetV3.betweenFilter1]
+ try {
+ const searchCriteria = {
+ filters: [
+ {
+ values: [
+ {
+ DICOMStudyDateAndTime: {
+ DICOMStudyDate: "19900101",
+ DICOMStudyTime: "000000",
+ },
+ },
+ {
+ DICOMStudyDateAndTime: {
+ DICOMStudyDate: "20230901",
+ DICOMStudyTime: "000000",
+ },
+ },
+ ],
+ operator: "BETWEEN",
+ },
+ ]
+ };
+
+ await searchImageSets(datastoreId, searchCriteria);
+ } catch (err) {
+ console.error(err);
+ }
+ // snippet-end:[medical-imaging.JavaScript.resource.searchImageSetV3.betweenFilter1]
+
+ // Search with BETWEEN operator and createdAt date.
+ // snippet-start:[medical-imaging.JavaScript.resource.searchImageSetV3.betweenFilter2]
+ try {
+ const searchCriteria = {
+ filters: [
+ {
+ values: [
+ {createdAt: new Date("1985-04-12T23:20:50.52Z")},
+ {createdAt: new Date()},
+ ],
+ operator: "BETWEEN",
+ },
+ ]
+ };
+
+ await searchImageSets(datastoreId, searchCriteria);
+ } catch (err) {
+ console.error(err);
+ }
+ // snippet-end:[medical-imaging.JavaScript.resource.searchImageSetV3.betweenFilter2]
+
+ // Search with EQUAL operator on DICOMSeriesInstanceUID and BETWEEN on updatedAt and sort response in ASC
+ // order on updatedAt field.
+ // snippet-start:[medical-imaging.JavaScript.resource.searchImageSetV3.sortAndFilter]
+ try {
+ const searchCriteria = {
+ filters: [
+ {
+ values: [
+ {updatedAt: new Date("1985-04-12T23:20:50.52Z")},
+ {updatedAt: new Date()},
+ ],
+ operator: "BETWEEN",
+ },
+ {
+ values: [
+ {DICOMSeriesInstanceUID: "1.1.123.123456.1.12.1.1234567890.1234.12345678.123"},
+ ],
+ operator: "EQUAL",
+ },
+ ],
+ sort: {
+ sortOrder: "ASC",
+ sortField: "updatedAt",
+ }
+ };
+
+ await searchImageSets(datastoreId, searchCriteria);
+ } catch (err) {
+ console.error(err);
+ }
+ // snippet-end:[medical-imaging.JavaScript.resource.searchImageSetV3.sortAndFilter]
+
}
diff --git a/javascriptv3/example_code/medical-imaging/tests/search-image-set.unit.test.js b/javascriptv3/example_code/medical-imaging/tests/search-image-set.unit.test.js
index f380e3e0846..e125cfba14d 100644
--- a/javascriptv3/example_code/medical-imaging/tests/search-image-set.unit.test.js
+++ b/javascriptv3/example_code/medical-imaging/tests/search-image-set.unit.test.js
@@ -1,61 +1,63 @@
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
-import { describe, it, expect, vi } from "vitest";
+import {describe, it, expect, vi} from "vitest";
const paginateSearchImageSets = vi.fn();
vi.doMock("@aws-sdk/client-medical-imaging", async () => {
- const actual = await vi.importActual("@aws-sdk/client-medical-imaging");
+ const actual = await vi.importActual("@aws-sdk/client-medical-imaging");
- return {
- ...actual,
- paginateSearchImageSets,
- };
+ return {
+ ...actual,
+ paginateSearchImageSets,
+ };
});
-const { searchImageSets } = await import("../actions/search-image-sets.js");
+const {searchImageSets} = await import("../actions/search-image-sets.js");
describe("search-image-sets", () => {
- it("should log the response", async () => {
- const logSpy = vi.spyOn(console, "log");
- const datastoreId = "12345678901234567890123456789012";
- const filters = [
- {
- values: [
- { createdAt: new Date("1985-04-12T23:20:50.52Z") },
- { createdAt: new Date("2023-09-12T23:20:50.52Z") },
- ],
- operator: "BETWEEN",
- },
- ];
-
- const response = {
- $metadata: {
- httpStatusCode: 200,
- requestId: "f009ea9c-84ca-4749-b5b6-7164f00a5ada",
- extendedRequestId: undefined,
- cfId: undefined,
- attempts: 1,
- totalRetryDelay: 0,
- },
- imageSetsMetadataSummaries: [
- {
- DICOMTags: [Object],
- createdAt: "2023-09-19T16:59:40.551Z",
- imageSetId: "7f75e1b5c0f40eac2b24cf712f485f50",
- updatedAt: "2023-09-19T16:59:40.551Z",
- version: 1,
- },
- ],
- };
-
- paginateSearchImageSets.mockImplementationOnce(async function* () {
- yield response;
+ it("should log the response", async () => {
+ const logSpy = vi.spyOn(console, "log");
+ const datastoreId = "12345678901234567890123456789012";
+ const searchCriteria = {
+ filters: [
+ {
+ values: [
+ {createdAt: new Date("1985-04-12T23:20:50.52Z")},
+ {createdAt: new Date()},
+ ],
+ operator: "BETWEEN",
+ },
+ ]
+ };
+
+ const response = {
+ $metadata: {
+ httpStatusCode: 200,
+ requestId: "f009ea9c-84ca-4749-b5b6-7164f00a5ada",
+ extendedRequestId: undefined,
+ cfId: undefined,
+ attempts: 1,
+ totalRetryDelay: 0,
+ },
+ imageSetsMetadataSummaries: [
+ {
+ DICOMTags: [Object],
+ createdAt: "2023-09-19T16:59:40.551Z",
+ imageSetId: "7f75e1b5c0f40eac2b24cf712f485f50",
+ updatedAt: "2023-09-19T16:59:40.551Z",
+ version: 1,
+ },
+ ],
+ };
+
+ paginateSearchImageSets.mockImplementationOnce(async function* () {
+ yield response;
+ });
+
+ await searchImageSets(datastoreId, searchCriteria);
+
+ expect(logSpy).toHaveBeenCalledWith(response);
});
-
- await searchImageSets(datastoreId, filters);
-
- expect(logSpy).toHaveBeenCalledWith(response);
- });
});
diff --git a/javav2/example_code/medicalimaging/README.md b/javav2/example_code/medicalimaging/README.md
index 855e90bfee5..91096f6dcc4 100644
--- a/javav2/example_code/medicalimaging/README.md
+++ b/javav2/example_code/medicalimaging/README.md
@@ -49,7 +49,7 @@ Code excerpts that show you how to call individual service functions.
- [List import jobs for a data store](src/main/java/com/example/medicalimaging/ListDicomImportJobs.java#L58) (`ListDICOMImportJobs`)
- [List tags for a resource](src/main/java/com/example/medicalimaging/ListTagsForResource.java#L56) (`ListTagsForResource`)
- [Remove a tag from a resource](src/main/java/com/example/medicalimaging/UntagResource.java#L54) (`UntagResource`)
-- [Search image sets](src/main/java/com/example/medicalimaging/SearchImageSets.java#L130) (`SearchImageSets`)
+- [Search image sets](src/main/java/com/example/medicalimaging/SearchImageSets.java#L182) (`SearchImageSets`)
- [Update image set metadata](src/main/java/com/example/medicalimaging/UpdateImageSetMetadata.java#L144) (`UpdateImageSetMetadata`)
### Scenarios
diff --git a/javav2/example_code/medicalimaging/pom.xml b/javav2/example_code/medicalimaging/pom.xml
index 6168ca7f3c4..bf1a283a6e0 100644
--- a/javav2/example_code/medicalimaging/pom.xml
+++ b/javav2/example_code/medicalimaging/pom.xml
@@ -8,8 +8,8 @@
1.0-SNAPSHOT
UTF-8
- 17
- 2.20.132
+ 1.8
+ 2.25.25
diff --git a/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/SearchImageSets.java b/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/SearchImageSets.java
index 76f003629e6..f5e88eb4a03 100644
--- a/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/SearchImageSets.java
+++ b/javav2/example_code/medicalimaging/src/main/java/com/example/medicalimaging/SearchImageSets.java
@@ -15,6 +15,7 @@
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.List;
// snippet-end:[medicalimaging.java2.search_imagesets.import]
@@ -30,126 +31,177 @@
public class SearchImageSets {
- public static void main(String[] args) {
- final String usage = "\n" +
- "Usage:\n" +
- " \n\n" +
- "Where:\n" +
- " datastoreId - The ID of the data store.\n" +
- " patientId - The ID of the patient to search for.\\n";
-
- if (args.length != 2) {
- System.out.println(usage);
- System.exit(1);
- }
-
- String datastoreId = args[0];
- String patientId = args[1];
-
- Region region = Region.US_WEST_2;
- MedicalImagingClient medicalImagingClient = MedicalImagingClient.builder()
- .region(region)
- .credentialsProvider(ProfileCredentialsProvider.create())
- .build();
-
- // Use case #1: EQUAL operator.
- // snippet-start:[medicalimaging.java2.search_imagesets.use_case1]
- List searchFilters = Collections.singletonList(SearchFilter.builder()
- .operator(Operator.EQUAL)
- .values(SearchByAttributeValue.builder()
- .dicomPatientId(patientId)
- .build())
- .build());
-
- List imageSetsMetadataSummaries = searchMedicalImagingImageSets(
- medicalImagingClient,
- datastoreId, searchFilters);
- if (imageSetsMetadataSummaries != null) {
- System.out.println("The image sets for patient " + patientId + " are:\n"
- + imageSetsMetadataSummaries);
- System.out.println();
- }
- // snippet-end:[medicalimaging.java2.search_imagesets.use_case1]
-
- // Use case #2: BETWEEN operator using DICOMStudyDate and DICOMStudyTime.
- // snippet-start:[medicalimaging.java2.search_imagesets.use_case2]
- DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyyMMdd");
- searchFilters = Collections.singletonList(SearchFilter.builder()
- .operator(Operator.BETWEEN)
- .values(SearchByAttributeValue.builder()
- .dicomStudyDateAndTime(DICOMStudyDateAndTime.builder()
- .dicomStudyDate("19990101")
- .dicomStudyTime("000000.000")
- .build())
- .build(),
- SearchByAttributeValue.builder()
- .dicomStudyDateAndTime(DICOMStudyDateAndTime.builder()
- .dicomStudyDate((LocalDate.now()
- .format(formatter)))
- .dicomStudyTime("000000.000")
- .build())
- .build())
- .build());
-
- imageSetsMetadataSummaries = searchMedicalImagingImageSets(medicalImagingClient,
- datastoreId, searchFilters);
- if (imageSetsMetadataSummaries != null) {
- System.out.println(
- "The image sets searched with BETWEEN operator using DICOMStudyDate and DICOMStudyTime are:\n"
- +
- imageSetsMetadataSummaries);
- System.out.println();
- }
- // snippet-end:[medicalimaging.java2.search_imagesets.use_case2]
-
- // Use case #3: BETWEEN operator using createdAt. Time studies were previously
- // persisted.
- // snippet-start:[medicalimaging.java2.search_imagesets.use_case3]
- searchFilters = Collections.singletonList(SearchFilter.builder()
- .operator(Operator.BETWEEN)
- .values(SearchByAttributeValue.builder()
- .createdAt(Instant.parse("1985-04-12T23:20:50.52Z"))
- .build(),
- SearchByAttributeValue.builder()
- .createdAt(Instant.now())
- .build())
- .build());
-
- imageSetsMetadataSummaries = searchMedicalImagingImageSets(medicalImagingClient,
- datastoreId, searchFilters);
- if (imageSetsMetadataSummaries != null) {
- System.out.println("The image sets searched with BETWEEN operator using createdAt are:\n "
- + imageSetsMetadataSummaries);
- System.out.println();
- }
- // snippet-end:[medicalimaging.java2.search_imagesets.use_case3]
-
- medicalImagingClient.close();
+ public static void main(String[] args) {
+ final String usage = "\n" +
+ "Usage:\n" +
+ " \n\n" +
+ "Where:\n" +
+ " datastoreId - The ID of the data store.\n" +
+ " patientId - The ID of the patient to search for.\n" +
+    " seriesInstanceUID - The ID of the series instance to search for.\n";
+
+
+ if (args.length != 3) {
+ System.out.println(usage);
+ System.exit(1);
}
- // snippet-start:[medicalimaging.java2.search_imagesets.main]
- public static List searchMedicalImagingImageSets(
- MedicalImagingClient medicalImagingClient,
- String datastoreId, List searchFilters) {
- try {
- SearchImageSetsRequest datastoreRequest = SearchImageSetsRequest.builder()
- .datastoreId(datastoreId)
- .searchCriteria(SearchCriteria.builder().filters(searchFilters).build())
- .build();
- SearchImageSetsIterable responses = medicalImagingClient
- .searchImageSetsPaginator(datastoreRequest);
- List imageSetsMetadataSummaries = new ArrayList<>();
-
- responses.stream().forEach(response -> imageSetsMetadataSummaries
- .addAll(response.imageSetsMetadataSummaries()));
-
- return imageSetsMetadataSummaries;
- } catch (MedicalImagingException e) {
- System.err.println(e.awsErrorDetails().errorMessage());
- System.exit(1);
- }
-
- return null;
+ String datastoreId = args[0];
+ String patientId = args[1];
+ String seriesInstanceUID = args[2];
+
+ Region region = Region.US_EAST_1;
+ MedicalImagingClient medicalImagingClient = MedicalImagingClient.builder()
+ .region(region)
+ .credentialsProvider(ProfileCredentialsProvider.create())
+ .build();
+
+ // Use case #1: EQUAL operator.
+ // snippet-start:[medicalimaging.java2.search_imagesets.use_case1]
+ List searchFilters = Collections.singletonList(SearchFilter.builder()
+ .operator(Operator.EQUAL)
+ .values(SearchByAttributeValue.builder()
+ .dicomPatientId(patientId)
+ .build())
+ .build());
+
+ SearchCriteria searchCriteria = SearchCriteria.builder()
+ .filters(searchFilters)
+ .build();
+
+ List imageSetsMetadataSummaries = searchMedicalImagingImageSets(
+ medicalImagingClient,
+ datastoreId, searchCriteria);
+ if (imageSetsMetadataSummaries != null) {
+ System.out.println("The image sets for patient " + patientId + " are:\n"
+ + imageSetsMetadataSummaries);
+ System.out.println();
+ }
+ // snippet-end:[medicalimaging.java2.search_imagesets.use_case1]
+
+ // Use case #2: BETWEEN operator using DICOMStudyDate and DICOMStudyTime.
+ // snippet-start:[medicalimaging.java2.search_imagesets.use_case2]
+ DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyyMMdd");
+ searchFilters = Collections.singletonList(SearchFilter.builder()
+ .operator(Operator.BETWEEN)
+ .values(SearchByAttributeValue.builder()
+ .dicomStudyDateAndTime(DICOMStudyDateAndTime.builder()
+ .dicomStudyDate("19990101")
+ .dicomStudyTime("000000.000")
+ .build())
+ .build(),
+ SearchByAttributeValue.builder()
+ .dicomStudyDateAndTime(DICOMStudyDateAndTime.builder()
+ .dicomStudyDate((LocalDate.now()
+ .format(formatter)))
+ .dicomStudyTime("000000.000")
+ .build())
+ .build())
+ .build());
+
+ searchCriteria = SearchCriteria.builder()
+ .filters(searchFilters)
+ .build();
+
+ imageSetsMetadataSummaries = searchMedicalImagingImageSets(medicalImagingClient,
+ datastoreId, searchCriteria);
+ if (imageSetsMetadataSummaries != null) {
+ System.out.println(
+ "The image sets searched with BETWEEN operator using DICOMStudyDate and DICOMStudyTime are:\n"
+ +
+ imageSetsMetadataSummaries);
+ System.out.println();
+ }
+ // snippet-end:[medicalimaging.java2.search_imagesets.use_case2]
+
+ // Use case #3: BETWEEN operator using createdAt. Time studies were previously
+ // persisted.
+ // snippet-start:[medicalimaging.java2.search_imagesets.use_case3]
+ searchFilters = Collections.singletonList(SearchFilter.builder()
+ .operator(Operator.BETWEEN)
+ .values(SearchByAttributeValue.builder()
+ .createdAt(Instant.parse("1985-04-12T23:20:50.52Z"))
+ .build(),
+ SearchByAttributeValue.builder()
+ .createdAt(Instant.now())
+ .build())
+ .build());
+
+ searchCriteria = SearchCriteria.builder()
+ .filters(searchFilters)
+ .build();
+ imageSetsMetadataSummaries = searchMedicalImagingImageSets(medicalImagingClient,
+ datastoreId, searchCriteria);
+ if (imageSetsMetadataSummaries != null) {
+ System.out.println("The image sets searched with BETWEEN operator using createdAt are:\n "
+ + imageSetsMetadataSummaries);
+ System.out.println();
}
- // snippet-end:[medicalimaging.java2.search_imagesets.main]
+ // snippet-end:[medicalimaging.java2.search_imagesets.use_case3]
+
+ // Use case #4: EQUAL operator on DICOMSeriesInstanceUID and BETWEEN on updatedAt and sort response
+ // in ASC order on updatedAt field.
+ // snippet-start:[medicalimaging.java2.search_imagesets.use_case4]
+ Instant startDate = Instant.parse("1985-04-12T23:20:50.52Z");
+ Instant endDate = Instant.now();
+
+ searchFilters = Arrays.asList(
+ SearchFilter.builder()
+ .operator(Operator.EQUAL)
+ .values(SearchByAttributeValue.builder()
+ .dicomSeriesInstanceUID(seriesInstanceUID)
+ .build())
+ .build(),
+ SearchFilter.builder()
+ .operator(Operator.BETWEEN)
+ .values(
+ SearchByAttributeValue.builder().updatedAt(startDate).build(),
+ SearchByAttributeValue.builder().updatedAt(endDate).build()
+ ).build());
+
+ Sort sort = Sort.builder().sortOrder(SortOrder.ASC).sortField(SortField.UPDATED_AT).build();
+
+ searchCriteria = SearchCriteria.builder()
+ .filters(searchFilters)
+ .sort(sort)
+ .build();
+
+ imageSetsMetadataSummaries = searchMedicalImagingImageSets(medicalImagingClient,
+ datastoreId, searchCriteria);
+ if (imageSetsMetadataSummaries != null) {
+ System.out.println("The image sets searched with EQUAL operator on DICOMSeriesInstanceUID and BETWEEN on updatedAt and sort response\n" +
+ "in ASC order on updatedAt field are:\n "
+ + imageSetsMetadataSummaries);
+ System.out.println();
+ }
+ // snippet-end:[medicalimaging.java2.search_imagesets.use_case4]
+
+ medicalImagingClient.close();
+ }
+
+ // snippet-start:[medicalimaging.java2.search_imagesets.main]
+ public static List searchMedicalImagingImageSets(
+ MedicalImagingClient medicalImagingClient,
+ String datastoreId, SearchCriteria searchCriteria) {
+ try {
+ SearchImageSetsRequest datastoreRequest = SearchImageSetsRequest.builder()
+ .datastoreId(datastoreId)
+ .searchCriteria(searchCriteria)
+ .build();
+ SearchImageSetsIterable responses = medicalImagingClient
+ .searchImageSetsPaginator(datastoreRequest);
+ List imageSetsMetadataSummaries = new ArrayList<>();
+
+ responses.stream().forEach(response -> imageSetsMetadataSummaries
+ .addAll(response.imageSetsMetadataSummaries()));
+
+ return imageSetsMetadataSummaries;
+ } catch (MedicalImagingException e) {
+ System.err.println(e.awsErrorDetails().errorMessage());
+ System.exit(1);
+ }
+
+ return null;
+ }
+ // snippet-end:[medicalimaging.java2.search_imagesets.main]
}
diff --git a/python/example_code/medical-imaging/README.md b/python/example_code/medical-imaging/README.md
index 9954c1e7c5d..dea9d5accf9 100644
--- a/python/example_code/medical-imaging/README.md
+++ b/python/example_code/medical-imaging/README.md
@@ -34,6 +34,11 @@ python -m pip install -r requirements.txt
+### Get started
+
+- [Hello HealthImaging](imaging_set_and_frames_workflow/hello.py#L4) (`ListDatastores`)
+
+
### Single actions
Code excerpts that show you how to call individual service functions.
@@ -62,6 +67,7 @@ Code excerpts that show you how to call individual service functions.
Code examples that show you how to accomplish a specific task by calling multiple
functions within the same service.
+- [Get started with image sets and image frames](imaging_set_and_frames_workflow/imaging_set_and_frames.py)
- [Tagging a data store](tagging_data_stores.py)
- [Tagging an image set](tagging_image_sets.py)
@@ -77,7 +83,41 @@ functions within the same service.
+#### Hello HealthImaging
+
+This example shows you how to get started using HealthImaging.
+
+```
+python hello.py
+```
+
+
+#### Get started with image sets and image frames
+
+This example shows you how to import DICOM files and download image frames in HealthImaging.
+ The implementation is structured as a workflow command-line
+ application.
+
+
+- Set up resources for a DICOM import.
+- Import DICOM files into a data store.
+- Retrieve the image set IDs for the import job.
+- Retrieve the image frame IDs for the image sets.
+- Download, decode and verify the image frames.
+- Clean up resources.
+
+
+
+
+Start the example by running the following at a command prompt:
+
+```
+python imaging_set_and_frames_workflow/imaging_set_and_frames.py
+```
+
+
+
#### Tagging a data store
diff --git a/python/example_code/medical-imaging/imaging_set_and_frames_workflow/README.md b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/README.md
new file mode 100644
index 00000000000..cfa53adf2e1
--- /dev/null
+++ b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/README.md
@@ -0,0 +1,73 @@
+# Import HealthImaging Image Sets and Download Image Frames using AWS SDKs
+
+## Overview
+
+This workflow shows how to use the AWS SDKs to import DICOM files into
+an AWS HealthImaging data store. It then shows how to download, decode and verify the image
+frames created by the DICOM import.
+
+Digital Imaging and Communications in Medicine (DICOM) is a technical standard for the digital storage and transmission of medical images and related information.
+
+## Scenario
+
+### Prerequisites
+
+For prerequisites, see the [README](../../README.md#Prerequisites) in the `python` folder.
+
+Install the packages required by these examples by running the following in a virtual environment:
+
+```
+python -m pip install -r requirements.txt
+```
+
+### Resources
+
+The workflow scenario steps deploy and clean up resources as needed.
+
+### Instructions
+
+Run the scenario at a command prompt in this folder with the following command:
+
+```
+python imaging_set_and_frames.py
+```
+
+### Workflow Steps
+
+This workflow runs as a command-line application prompting for user input.
+
+1. All the necessary resources are created from an AWS CloudFormation template.
+ 1. A HealthImaging data store.
+ 2. An Amazon Simple Storage Service (Amazon S3) input bucket for a DICOM import job.
+ 3. An Amazon S3 output bucket for a DICOM import job.
+ 4. An AWS Identity and Access Management (IAM) role with the appropriate permissions for a DICOM import job.
+
+![CloudFormation stack diagram](../../../../workflows/healthimaging_image_sets/.images/cfn_stack.png)
+
+2. The user chooses a DICOM study to copy from the [National Cancer Institute Imaging Data Commons (IDC) Collections](https://registry.opendata.aws/nci-imaging-data-commons/) public S3 bucket.
+3. The chosen study is copied to the user's input S3 bucket.
+
+![DICOM copy diagram](../../../../workflows/healthimaging_image_sets/.images/copy_dicom.png)
+
+4. A HealthImaging DICOM import job is run.
+
+![DICOM import diagram](../../../../workflows/healthimaging_image_sets/.images/dicom_import.png)
+
+5. The workflow retrieves the IDs for the HealthImaging image frames created by the DICOM import job.
+
+![Image frame ID retrieval diagram](../../../../workflows/healthimaging_image_sets/.images/get_image_frame_ids.png)
+
+6. The HealthImaging image frames are downloaded, decoded to a bitmap format, and verified using a CRC32 checksum.
+7. The created resources can then be deleted, if the user chooses.
+
+
+## Additional resources
+
+* [HealthImaging User Guide](https://docs.aws.amazon.com/healthimaging/latest/devguide/what-is.html)
+* [HealthImaging API Reference](https://docs.aws.amazon.com/healthimaging/latest/APIReference/Welcome.html)
+
+---
+
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+SPDX-License-Identifier: Apache-2.0
\ No newline at end of file
diff --git a/python/example_code/medical-imaging/imaging_set_and_frames_workflow/hello.py b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/hello.py
new file mode 100644
index 00000000000..d07e799b6b9
--- /dev/null
+++ b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/hello.py
@@ -0,0 +1,42 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# snippet-start:[python.example_code.medical-imaging.Hello]
+import logging
+import boto3
+from botocore.exceptions import ClientError
+
+logger = logging.getLogger(__name__)
+
+
+def hello_medical_imaging(medical_imaging_client):
+ """
+ Use the AWS SDK for Python (Boto3) to create an Amazon HealthImaging
+ client and list the data stores in your account.
+ This example uses the default settings specified in your shared credentials
+ and config files.
+
+ :param medical_imaging_client: A Boto3 Amazon HealthImaging Client object.
+ """
+ print("Hello, Amazon Health Imaging! Let's list some of your data stores:\n")
+ try:
+ paginator = medical_imaging_client.get_paginator("list_datastores")
+ page_iterator = paginator.paginate()
+ datastore_summaries = []
+ for page in page_iterator:
+ datastore_summaries.extend(page["datastoreSummaries"])
+ print("\tData Stores:")
+ for ds in datastore_summaries:
+ print(f"\t\tDatastore: {ds['datastoreName']} ID {ds['datastoreId']}")
+ except ClientError as err:
+ logger.error(
+ "Couldn't list data stores. Here's why: %s: %s",
+ err.response["Error"]["Code"],
+ err.response["Error"]["Message"],
+ )
+ raise
+
+
+if __name__ == "__main__":
+ hello_medical_imaging(boto3.client("medical-imaging"))
+# snippet-end:[python.example_code.medical-imaging.Hello]
diff --git a/python/example_code/medical-imaging/imaging_set_and_frames_workflow/imaging_set_and_frames.py b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/imaging_set_and_frames.py
new file mode 100644
index 00000000000..1313e709f89
--- /dev/null
+++ b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/imaging_set_and_frames.py
@@ -0,0 +1,395 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+# snippet-start:[python.example_code.medical-imaging.workflow]
+"""
+Purpose
+
+Shows how to use the AWS SDK for Python (Boto3) to manage and invoke AWS HealthImaging
+functions.
+"""
+
+import datetime
+import gzip
+import json
+import logging
+import random
+import time
+import sys
+import os
+
+import boto3
+from botocore.exceptions import ClientError
+from threading import Thread
+
+# Import the wrapper for the service functionality.
+from medicalimaging import MedicalImagingWrapper
+
+# Add relative path to include demo_tools in this code example without need for setup.
+sys.path.append("../../..")
+from demo_tools import demo_func
+import demo_tools.question as q
+
+logger = logging.getLogger(__name__)
+
+IDC_S3_BUCKET_NAME = "idc-open-data"
+
+IDC_IMAGE_CHOICES = [
+ {
+ "Description": "CT of chest (2 images)",
+ "Directory": "00029d25-fb18-4d42-aaa5-a0897d1ac8f7",
+ },
+ {
+ "Description": "CT of pelvis (57 images)",
+ "Directory": "00025d30-ef8f-4135-a35a-d83eff264fc1",
+ },
+ {
+ "Description": "MRI of head (192 images)",
+ "Directory": "0002d261-8a5d-4e63-8e2e-0cbfac87b904",
+ },
+ {
+ "Description": "MRI of breast (92 images)",
+ "Directory": "0002dd07-0b7f-4a68-a655-44461ca34096",
+ },
+]
+
+IMPORT_JOB_MANIFEST_FILE_NAME = "job-output-manifest.json"
+
+
+class MedicalImagingWorkflowScenario:
+ input_bucket_name = ""
+ output_bucket_name = ""
+ role_arn = ""
+ data_store_id = ""
+
+ def __init__(self, medical_imaging_wrapper, s3_client, cf_resource):
+ self.medical_imaging_wrapper = medical_imaging_wrapper
+ self.s3_client = s3_client
+ self.cf_resource = cf_resource
+
+ def run_scenario(self):
+ print("-" * 88)
+ print(
+ "\t\tWelcome to the AWS HealthImaging working with image sets and frames workflow."
+ )
+ print("-" * 88)
+
+ print(
+ """\
+ This workflow will import DICOM files into a HealthImaging data store.
+ DICOM® — Digital Imaging and Communications in Medicine — is the international
+ standard for medical images and related information.
+
+ The workflow will then download all the image frames created during the DICOM import and decode
+ the image frames from their HTJ2K format to a bitmap format.
+ The bitmaps will then be validated with a checksum to ensure they are correct.
+ This workflow requires a number of AWS resources to run.
+
+ It requires a HealthImaging data store, an Amazon Simple Storage Service (Amazon S3)
+ bucket for uploaded DICOM files, an Amazon S3 bucket for the output of a DICOM import, and
+ an AWS Identity and Access Management (IAM) role for importing the DICOM files into
+ the data store.
+
+ These resources are created using the provided AWS CloudFormation stack
+ which will be deployed now.
+ """
+ )
+ cf_stack = self.deploy()
+
+ print(
+ """\
+ This workflow uses DICOM files from the National Cancer Institute Imaging Data Commons (IDC)
+ Collections.
+
+ Here is the link to their website:
+ https://registry.opendata.aws/nci-imaging-data-commons/
+ We will use DICOM files stored in an S3 bucket managed by the IDC.
+
+ First one of the DICOM folders in the IDC collection must be copied to your
+ input S3 bucket.
+ """
+ )
+
+ print(
+ f"\t\tYou have the choice of one of the following {len(IDC_IMAGE_CHOICES)} folders to copy."
+ )
+
+ for index, idcChoice in enumerate(IDC_IMAGE_CHOICES):
+ print(f"\t\t{index + 1}. {idcChoice['Description']}")
+ choice = q.ask(
+ "\t\tWhich DICOM files do you want to import? ",
+ q.is_int,
+ q.in_range(1, len(IDC_IMAGE_CHOICES) + 1),
+ )
+
+ from_directory = IDC_IMAGE_CHOICES[choice - 1]["Directory"]
+ input_directory = "input"
+ output_directory = "output"
+
+ print(
+ f"\n\t\tThe files in the directory {from_directory} in the bucket {IDC_S3_BUCKET_NAME} will be copied "
+ )
+ print(
+            f"\t\tto the folder {input_directory}/{from_directory} in the bucket {self.input_bucket_name}."
+ )
+ q.ask("\t\tPress Enter to start the copy.")
+ self.copy_images(
+ IDC_S3_BUCKET_NAME, from_directory, self.input_bucket_name, input_directory
+ )
+
+ print(
+ f"\n\t\tNow the DICOM images will be imported into the datastore with ID {self.data_store_id}."
+ )
+ import_job_id = self.medical_imaging_wrapper.start_dicom_import_job(
+ self.data_store_id,
+ self.input_bucket_name,
+ input_directory,
+ self.output_bucket_name,
+ output_directory,
+ self.role_arn,
+ )
+ print(
+            f"\n\t\tThe DICOM files were successfully imported. The import job ID is {import_job_id}."
+ )
+
+ print(
+ f"""\
+ Information about the import job, including the IDs of the created image sets,
+ is located in a file named {IMPORT_JOB_MANIFEST_FILE_NAME}
+ This file is located in a folder specified by the import job's 'outputS3Uri'.
+ The 'outputS3Uri' is retrieved by calling the 'GetDICOMImportJob' action.
+ """
+ )
+
+ print(
+ f"""\
+ The image set IDs will be retrieved by downloading '{IMPORT_JOB_MANIFEST_FILE_NAME}'
+ file from the output S3 bucket.
+ """
+ )
+ q.ask("\t\tPress Enter to continue.")
+
+ image_sets = self.medical_imaging_wrapper.get_image_sets_for_dicom_import_job(
+ self.data_store_id, import_job_id
+ )
+
+ print("\t\tThe image sets created by this import job are:")
+ for image_set in image_sets:
+ print("\t\tImage set:", image_set)
+
+ print(
+ """\
+ If you would like information about how HealthImaging organizes image sets,
+ go to the following link.
+ https://docs.aws.amazon.com/healthimaging/latest/devguide/understanding-image-sets.html
+ """
+ )
+
+ q.ask("\t\tPress Enter to continue.")
+
+ print(
+ """\
+ Next this workflow will download all the image frames created in this import job.
+ The IDs of all the image frames in an image set are stored in the image set metadata.
+ The image set metadata will be downloaded and parsed for the image frame IDs.
+ """
+ )
+
+ q.ask("\t\tPress Enter to continue.")
+
+ out_dir = f"output/import_job_{import_job_id}"
+ os.makedirs(out_dir, exist_ok=True)
+
+ all_image_frame_ids = []
+ for image_set in image_sets:
+ image_frames = self.medical_imaging_wrapper.get_image_frames_for_image_set(
+ self.data_store_id, image_set, out_dir
+ )
+
+ all_image_frame_ids.extend(image_frames)
+
+ print(
+ f"\t\t{len(all_image_frame_ids)} image frames were created by this import job."
+ )
+
+ print(
+ """\
+ The image frames are encoded in the HTJ2K format. This example will convert
+ the image frames to bitmaps. The decoded images will be verified using
+ a CRC32 checksum retrieved from the image set metadata.
+ The OpenJPEG open-source library will be used for the conversion.
+ The following link contains information about HTJ2K decoding libraries.
+ https://docs.aws.amazon.com/healthimaging/latest/devguide/reference-htj2k.html
+ """
+ )
+
+ q.ask("\t\tPress Enter to download and convert the images.")
+
+ self.medical_imaging_wrapper.download_decode_and_check_image_frames(
+ self.data_store_id, all_image_frame_ids, out_dir
+ )
+
+ print(
+ f"""\
+ The image files were successfully decoded and validated.
+ The HTJ2K image files are located in the directory
+ {out_dir} in the working directory of this example.
+ """
+ )
+
+ print("\t\tThis concludes this workflow.")
+ if q.ask(
+ f"\t\tClean up resources created by the workflow? (y/n) ",
+ q.is_yesno,
+ ):
+ self.destroy(cf_stack)
+ print("\t\tRemoved resources created by the workflow.")
+ print("\t\tThanks for watching!")
+ print("-" * 88)
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.copy]
+ def copy_single_object(self, key, source_bucket, target_bucket, target_directory):
+ """
+ Copies a single object from a source to a target bucket.
+
+ :param key: The key of the object to copy.
+ :param source_bucket: The source bucket for the copy.
+ :param target_bucket: The target bucket for the copy.
+ :param target_directory: The target directory for the copy.
+ """
+ new_key = target_directory + "/" + key
+ copy_source = {"Bucket": source_bucket, "Key": key}
+ self.s3_client.copy_object(
+ CopySource=copy_source, Bucket=target_bucket, Key=new_key
+ )
+ print(f"\n\t\tCopying {key}.")
+
+ def copy_images(
+ self, source_bucket, source_directory, target_bucket, target_directory
+ ):
+ """
+ Copies the images from the source to the target bucket using multiple threads.
+
+ :param source_bucket: The source bucket for the images.
+ :param source_directory: Directory within the source bucket.
+ :param target_bucket: The target bucket for the images.
+ :param target_directory: Directory within the target bucket.
+ """
+
+ # Get list of all objects in source bucket.
+ list_response = self.s3_client.list_objects_v2(
+ Bucket=source_bucket, Prefix=source_directory
+ )
+ objs = list_response["Contents"]
+ keys = [obj["Key"] for obj in objs]
+
+ # Copy the objects in the bucket.
+ for key in keys:
+ self.copy_single_object(key, source_bucket, target_bucket, target_directory)
+
+ print("\t\tDone copying all objects.")
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.copy]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.deploy]
+ def deploy(self):
+ """
+ Deploys prerequisite resources used by the scenario. The resources are
+ defined in the associated `setup.yaml` AWS CloudFormation script and are deployed
+ as a CloudFormation stack, so they can be easily managed and destroyed.
+ """
+
+ print("\t\tLet's deploy the stack for resource creation.")
+ stack_name = q.ask("\t\tEnter a name for the stack: ", q.non_empty)
+
+ data_store_name = q.ask(
+ "\t\tEnter a name for the Health Imaging Data Store: ", q.non_empty
+ )
+
+ account_id = boto3.client("sts").get_caller_identity()["Account"]
+
+ with open(
+ "../../../../workflows/healthimaging_image_sets/resources/cfn_template.yaml"
+ ) as setup_file:
+ setup_template = setup_file.read()
+ print(f"\t\tCreating {stack_name}.")
+ stack = self.cf_resource.create_stack(
+ StackName=stack_name,
+ TemplateBody=setup_template,
+ Capabilities=["CAPABILITY_NAMED_IAM"],
+ Parameters=[
+ {
+ "ParameterKey": "datastoreName",
+ "ParameterValue": data_store_name,
+ },
+ {
+ "ParameterKey": "userAccountID",
+ "ParameterValue": account_id,
+ },
+ ],
+ )
+ print("\t\tWaiting for stack to deploy. This typically takes a minute or two.")
+ waiter = self.cf_resource.meta.client.get_waiter("stack_create_complete")
+ waiter.wait(StackName=stack.name)
+ stack.load()
+ print(f"\t\tStack status: {stack.stack_status}")
+
+ outputs_dictionary = {
+ output["OutputKey"]: output["OutputValue"] for output in stack.outputs
+ }
+ self.input_bucket_name = outputs_dictionary["BucketName"]
+ self.output_bucket_name = outputs_dictionary["BucketName"]
+ self.role_arn = outputs_dictionary["RoleArn"]
+ self.data_store_id = outputs_dictionary["DatastoreID"]
+ return stack
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.deploy]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.destroy]
+ def destroy(self, stack):
+ """
+ Destroys the resources managed by the CloudFormation stack, and the CloudFormation
+ stack itself.
+
+ :param stack: The CloudFormation stack that manages the example resources.
+ """
+
+ print(f"\t\tCleaning up resources and {stack.name}.")
+ data_store_id = None
+ for oput in stack.outputs:
+ if oput["OutputKey"] == "DatastoreID":
+ data_store_id = oput["OutputValue"]
+ if data_store_id is not None:
+ print(f"\t\tDeleting image sets in data store {data_store_id}.")
+ image_sets = self.medical_imaging_wrapper.search_image_sets(
+ data_store_id, {}
+ )
+ image_set_ids = [image_set["imageSetId"] for image_set in image_sets]
+
+ for image_set_id in image_set_ids:
+ self.medical_imaging_wrapper.delete_image_set(
+ data_store_id, image_set_id
+ )
+ print(f"\t\tDeleted image set with id : {image_set_id}")
+
+ print(f"\t\tDeleting {stack.name}.")
+ stack.delete()
+ print("\t\tWaiting for stack removal. This may take a few minutes.")
+ waiter = self.cf_resource.meta.client.get_waiter("stack_delete_complete")
+ waiter.wait(StackName=stack.name)
+ print("\t\tStack delete complete.")
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.destroy]
+
+
+if __name__ == "__main__":
+ try:
+ s3 = boto3.client("s3")
+ cf = boto3.resource("cloudformation")
+ medical_imaging_wrapper = MedicalImagingWrapper.from_client()
+
+ scenario = MedicalImagingWorkflowScenario(medical_imaging_wrapper, s3, cf)
+ scenario.run_scenario()
+ except Exception:
+ logging.exception("Something went wrong with the workflow.")
+# snippet-end:[python.example_code.medical-imaging.workflow]
diff --git a/python/example_code/medical-imaging/imaging_set_and_frames_workflow/medicalimaging.py b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/medicalimaging.py
new file mode 100644
index 00000000000..c4ebb5b0bfa
--- /dev/null
+++ b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/medicalimaging.py
@@ -0,0 +1,385 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+import logging
+import boto3
+import os
+import gzip
+import zlib
+import openjpeg
+import json
+import jmespath
+import time
+from botocore.exceptions import ClientError
+
+logger = logging.getLogger(__name__)
+
+
+# snippet-start:[python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.class]
+# snippet-start:[python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl]
+
+
+class MedicalImagingWrapper:
+ """Encapsulates Amazon HealthImaging functionality."""
+
+ def __init__(self, medical_imaging_client, s3_client):
+ """
+ :param medical_imaging_client: A Boto3 Amazon MedicalImaging client.
+ :param s3_client: A Boto3 S3 client.
+ """
+ self.medical_imaging_client = medical_imaging_client
+ self.s3_client = s3_client
+
+ @classmethod
+ def from_client(cls):
+ medical_imaging_client = boto3.client("medical-imaging")
+ s3_client = boto3.client("s3")
+ return cls(medical_imaging_client, s3_client)
+
+ # snippet-end:[python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.decl]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.GetImageSetMetadata]
+ def get_image_set_metadata(
+ self, metadata_file, datastore_id, image_set_id, version_id=None
+ ):
+ """
+ Get the metadata of an image set.
+
+ :param metadata_file: The file to store the JSON gzipped metadata.
+ :param datastore_id: The ID of the data store.
+ :param image_set_id: The ID of the image set.
+ :param version_id: The version of the image set.
+ """
+
+ try:
+ if version_id:
+ image_set_metadata = self.medical_imaging_client.get_image_set_metadata(
+ imageSetId=image_set_id,
+ datastoreId=datastore_id,
+ versionId=version_id,
+ )
+ else:
+ image_set_metadata = self.medical_imaging_client.get_image_set_metadata(
+ imageSetId=image_set_id, datastoreId=datastore_id
+ )
+ with open(metadata_file, "wb") as f:
+ for chunk in image_set_metadata["imageSetMetadataBlob"].iter_chunks():
+ if chunk:
+ f.write(chunk)
+
+ except ClientError as err:
+ logger.error(
+ "Couldn't get image metadata. Here's why: %s: %s",
+ err.response["Error"]["Code"],
+ err.response["Error"]["Message"],
+ )
+ raise
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.GetImageSetMetadata]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.StartDICOMImportJob]
+ def start_dicom_import_job(
+ self,
+ data_store_id,
+ input_bucket_name,
+ input_directory,
+ output_bucket_name,
+ output_directory,
+ role_arn,
+ ):
+ """
+ Routine which starts a HealthImaging import job.
+
+ :param data_store_id: The HealthImaging data store ID.
+ :param input_bucket_name: The name of the Amazon S3 bucket containing the DICOM files.
+ :param input_directory: The directory in the S3 bucket containing the DICOM files.
+ :param output_bucket_name: The name of the S3 bucket for the output.
+ :param output_directory: The directory in the S3 bucket to store the output.
+ :param role_arn: The ARN of the IAM role with permissions for the import.
+ :return: The job ID of the import.
+ """
+
+ input_uri = f"s3://{input_bucket_name}/{input_directory}/"
+ output_uri = f"s3://{output_bucket_name}/{output_directory}/"
+ try:
+ job = self.medical_imaging_client.start_dicom_import_job(
+ jobName="examplejob",
+ datastoreId=data_store_id,
+ dataAccessRoleArn=role_arn,
+ inputS3Uri=input_uri,
+ outputS3Uri=output_uri,
+ )
+ except ClientError as err:
+ logger.error(
+ "Couldn't start DICOM import job. Here's why: %s: %s",
+ err.response["Error"]["Code"],
+ err.response["Error"]["Message"],
+ )
+ raise
+ else:
+ return job["jobId"]
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.StartDICOMImportJob]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.GetImageSetsForImportJob]
+ def get_image_sets_for_dicom_import_job(self, datastore_id, import_job_id):
+ """
+ Retrieves the image sets created for an import job.
+
+ :param datastore_id: The HealthImaging data store ID
+ :param import_job_id: The import job ID
+ :return: List of image set IDs
+ """
+
+ import_job = self.medical_imaging_client.get_dicom_import_job(
+ datastoreId=datastore_id, jobId=import_job_id
+ )
+
+ output_uri = import_job["jobProperties"]["outputS3Uri"]
+
+ bucket = output_uri.split("/")[2]
+ key = "/".join(output_uri.split("/")[3:])
+
+ # Try to get the manifest.
+        body, retries = None, 3
+        while retries > 0:
+            try:
+                obj = self.s3_client.get_object(
+                    Bucket=bucket, Key=key + "job-output-manifest.json"
+                )
+                body = obj["Body"]
+                break
+            except ClientError:
+                retries = retries - 1
+                time.sleep(3)
+        try:
+            data = json.load(body)
+            expression = jmespath.compile("jobSummary.imageSetsSummary[].imageSetId")
+            image_sets = expression.search(data)
+        except (AttributeError, json.decoder.JSONDecodeError):
+            image_sets = import_job["jobProperties"]
+
+ return image_sets
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.GetImageSetsForImportJob]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.SearchImageSets]
+ def search_image_sets(self, datastore_id, search_filter):
+ """
+ Search for image sets.
+
+ :param datastore_id: The ID of the data store.
+ :param search_filter: The search filter.
+ For example: {"filters" : [{ "operator": "EQUAL", "values": [{"DICOMPatientId": "3524578"}]}]}.
+ :return: The list of image sets.
+ """
+ try:
+ paginator = self.medical_imaging_client.get_paginator("search_image_sets")
+ page_iterator = paginator.paginate(
+ datastoreId=datastore_id, searchCriteria=search_filter
+ )
+ metadata_summaries = []
+ for page in page_iterator:
+ metadata_summaries.extend(page["imageSetsMetadataSummaries"])
+ except ClientError as err:
+ logger.error(
+ "Couldn't search image sets. Here's why: %s: %s",
+ err.response["Error"]["Code"],
+ err.response["Error"]["Message"],
+ )
+ raise
+ else:
+ return metadata_summaries
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.SearchImageSets]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.GetImageFrames]
+ def get_image_frames_for_image_set(self, datastore_id, image_set_id, out_directory):
+ """
+ Get the image frames for an image set.
+
+ :param datastore_id: The ID of the data store.
+ :param image_set_id: The ID of the image set.
+ :param out_directory: The directory to save the file.
+ :return: The image frames.
+ """
+ image_frames = []
+ file_name = os.path.join(out_directory, f"{image_set_id}_metadata.json.gzip")
+        file_name = os.path.normpath(file_name)
+ self.get_image_set_metadata(file_name, datastore_id, image_set_id)
+ try:
+ with gzip.open(file_name, "rb") as f_in:
+ doc = json.load(f_in)
+ instances = jmespath.search("Study.Series.*.Instances[].*[]", doc)
+ for instance in instances:
+ rescale_slope = jmespath.search("DICOM.RescaleSlope", instance)
+ rescale_intercept = jmespath.search("DICOM.RescaleIntercept", instance)
+ image_frames_json = jmespath.search("ImageFrames[][]", instance)
+ for image_frame in image_frames_json:
+ checksum_json = jmespath.search(
+ "max_by(PixelDataChecksumFromBaseToFullResolution, &Width)",
+ image_frame,
+ )
+ image_frame_info = {
+ "imageSetId": image_set_id,
+ "imageFrameId": image_frame["ID"],
+ "rescaleIntercept": rescale_intercept,
+ "rescaleSlope": rescale_slope,
+ "minPixelValue": image_frame["MinPixelValue"],
+ "maxPixelValue": image_frame["MaxPixelValue"],
+ "fullResolutionChecksum": checksum_json["Checksum"],
+ }
+ image_frames.append(image_frame_info)
+ return image_frames
+ except TypeError:
+            return []
+ except ClientError as err:
+ logger.error(
+ "Couldn't get image frames for image set. Here's why: %s: %s",
+ err.response["Error"]["Code"],
+ err.response["Error"]["Message"],
+ )
+ raise
+
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.GetImageFrames]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.GetImageSet]
+ def get_image_set(self, datastore_id, image_set_id, version_id=None):
+ """
+ Get the properties of an image set.
+
+ :param datastore_id: The ID of the data store.
+ :param image_set_id: The ID of the image set.
+ :param version_id: The optional version of the image set.
+ :return: The image set properties.
+ """
+ try:
+ if version_id:
+ image_set = self.medical_imaging_client.get_image_set(
+ imageSetId=image_set_id,
+ datastoreId=datastore_id,
+ versionId=version_id,
+ )
+ else:
+ image_set = self.medical_imaging_client.get_image_set(
+ imageSetId=image_set_id, datastoreId=datastore_id
+ )
+ except ClientError as err:
+ logger.error(
+ "Couldn't get image set. Here's why: %s: %s",
+ err.response["Error"]["Code"],
+ err.response["Error"]["Message"],
+ )
+ raise
+ else:
+ return image_set
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.GetImageSet]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.downloadAndCheck]
+ def download_decode_and_check_image_frames(
+ self, data_store_id, image_frames, out_directory
+ ):
+ """
+ Downloads image frames, decodes them, and uses the checksum to validate
+ the decoded images.
+
+ :param data_store_id: The HealthImaging data store ID.
+ :param image_frames: A list of dicts containing image frame information.
+ :param out_directory: A directory for the downloaded images.
+ :return: True if the function succeeded; otherwise, False.
+ """
+ total_result = True
+ for image_frame in image_frames:
+ image_file_path = f"{out_directory}/image_{image_frame['imageFrameId']}.jph"
+ self.get_pixel_data(
+ image_file_path,
+ data_store_id,
+ image_frame["imageSetId"],
+ image_frame["imageFrameId"],
+ )
+
+ image_array = self.jph_image_to_opj_bitmap(image_file_path)
+ crc32_checksum = image_frame["fullResolutionChecksum"]
+ # Verify checksum.
+ crc32_calculated = zlib.crc32(image_array)
+ image_result = crc32_checksum == crc32_calculated
+ print(
+                f"\t\tImage checksum verified for {image_frame['imageFrameId']}: {image_result}"
+ )
+ total_result = total_result and image_result
+ return total_result
+
+ @staticmethod
+ def jph_image_to_opj_bitmap(jph_file):
+ """
+ Decode the image to a bitmap using an OPENJPEG library.
+ :param jph_file: The file to decode.
+ :return: The decoded bitmap as an array.
+ """
+ # Use format 2 for the JPH file.
+ params = openjpeg.utils.get_parameters(jph_file, 2)
+ print(f"\n\t\tImage parameters for {jph_file}: \n\t\t{params}")
+
+ image_array = openjpeg.utils.decode(jph_file, 2)
+
+ return image_array
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.downloadAndCheck]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.GetPixelData]
+ def get_pixel_data(
+ self, file_path_to_write, datastore_id, image_set_id, image_frame_id
+ ):
+ """
+ Get an image frame's pixel data.
+
+ :param file_path_to_write: The path to write the image frame's HTJ2K encoded pixel data.
+ :param datastore_id: The ID of the data store.
+ :param image_set_id: The ID of the image set.
+ :param image_frame_id: The ID of the image frame.
+ """
+ try:
+ image_frame = self.medical_imaging_client.get_image_frame(
+ datastoreId=datastore_id,
+ imageSetId=image_set_id,
+ imageFrameInformation={"imageFrameId": image_frame_id},
+ )
+ with open(file_path_to_write, "wb") as f:
+ for chunk in image_frame["imageFrameBlob"].iter_chunks():
+ f.write(chunk)
+ except ClientError as err:
+ logger.error(
+ "Couldn't get image frame. Here's why: %s: %s",
+ err.response["Error"]["Code"],
+ err.response["Error"]["Message"],
+ )
+ raise
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.GetPixelData]
+
+ # snippet-start:[python.example_code.medical-imaging.workflow.DeleteImageSet]
+ def delete_image_set(self, datastore_id, image_set_id):
+ """
+ Delete an image set.
+
+ :param datastore_id: The ID of the data store.
+ :param image_set_id: The ID of the image set.
+ """
+ try:
+            self.medical_imaging_client.delete_image_set(
+ imageSetId=image_set_id, datastoreId=datastore_id
+ )
+ except ClientError as err:
+ logger.error(
+ "Couldn't delete image set. Here's why: %s: %s",
+ err.response["Error"]["Code"],
+ err.response["Error"]["Message"],
+ )
+ raise
+
+ # snippet-end:[python.example_code.medical-imaging.workflow.DeleteImageSet]
+
+
+# snippet-end:[python.example_code.medical-imaging.MedicalImagingWorkflowWrapper.class]
diff --git a/python/example_code/medical-imaging/imaging_set_and_frames_workflow/requirements.txt b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/requirements.txt
new file mode 100644
index 00000000000..60d1e837561
--- /dev/null
+++ b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/requirements.txt
@@ -0,0 +1,4 @@
+boto3>=1.26.79
+pytest>=7.2.1
+requests>=2.28.2
+botocore~=1.31.30
diff --git a/python/example_code/medical-imaging/imaging_set_and_frames_workflow/test/conftest.py b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/test/conftest.py
new file mode 100644
index 00000000000..b11d1929fb3
--- /dev/null
+++ b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/test/conftest.py
@@ -0,0 +1,13 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+Contains common test fixtures used to run AWS HealthImaging
+tests.
+"""
+
+import sys
+
+# This is needed so Python can find test_tools on the path.
+sys.path.append("../../..")
+from test_tools.fixtures.common import *
diff --git a/python/example_code/medical-imaging/imaging_set_and_frames_workflow/test/test_imaging_set_and_frames_workflow.py b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/test/test_imaging_set_and_frames_workflow.py
new file mode 100644
index 00000000000..c0f92d508af
--- /dev/null
+++ b/python/example_code/medical-imaging/imaging_set_and_frames_workflow/test/test_imaging_set_and_frames_workflow.py
@@ -0,0 +1,244 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+Tests for imaging_set_and_frames workflow.
+"""
+
+import boto3
+from botocore.exceptions import ClientError
+import pytest
+import os
+
+from medicalimaging import MedicalImagingWrapper
+from imaging_set_and_frames import MedicalImagingWorkflowScenario
+
+
+@pytest.mark.skip(
+ reason="Skip until shared resources are part of the Docker environment."
+)
+@pytest.mark.integ
+def test_run_imaging_set_and_frames_scenario_integ(input_mocker, capsys):
+ s3 = boto3.client("s3")
+ cf = boto3.resource("cloudformation")
+ scenario = MedicalImagingWorkflowScenario(
+ MedicalImagingWrapper.from_client(), s3, cf
+ )
+
+ input_mocker.mock_answers(
+ [
+ "stacktest0", # Stack name.
+ "storetest0", # Datastore name.
+ 1, # Select folder.
+ "", # Press enter.
+ "", # Press enter.
+ "", # Press enter.
+ "", # Press enter.
+ "", # Press enter.
+ "y", # Cleanup.
+ ]
+ )
+
+ scenario.run_scenario()
+
+ capt = capsys.readouterr()
+ assert "Thanks for watching!" in capt.out
+
+
+@pytest.mark.parametrize("error_code", [None, "TestException"])
+def test_get_image_set_metadata(make_stubber, error_code):
+ medical_imaging_client = boto3.client("medical-imaging")
+ medical_imaging_stubber = make_stubber(medical_imaging_client)
+ s3_client = boto3.client("s3")
+ wrapper = MedicalImagingWrapper(medical_imaging_client, s3_client)
+ datastore_id = "abcdedf1234567890abcdef123456789"
+ image_set_id = "cccccc1234567890abcdef123456789"
+ test_file = "med-imag-test_file_1234.gzip"
+ medical_imaging_stubber.stub_get_image_set_metadata(
+ datastore_id, image_set_id, error_code=error_code
+ )
+
+ if error_code is None:
+ wrapper.get_image_set_metadata(test_file, datastore_id, image_set_id)
+ assert os.path.exists(test_file)
+ os.remove(test_file)
+
+ else:
+ with pytest.raises(ClientError) as exc_info:
+ wrapper.get_image_set_metadata(test_file, datastore_id, image_set_id)
+ assert exc_info.value.response["Error"]["Code"] == error_code
+
+
+@pytest.mark.parametrize("error_code", [None, "TestException"])
+def test_start_dicom_import_job(make_stubber, error_code):
+ medical_imaging_client = boto3.client("medical-imaging")
+ medical_imaging_stubber = make_stubber(medical_imaging_client)
+ s3_client = boto3.client("s3")
+ wrapper = MedicalImagingWrapper(medical_imaging_client, s3_client)
+ job_id = "cccccc1234567890abcdef123456789"
+ job_name = "examplejob"
+ datastore_id = "abcdedf1234567890abcdef123456789"
+ role_arn = "arn:aws:iam::111111111111:role/dicom_import"
+ input_bucket_name = "healthimaging-source"
+ input_directory = "input"
+ output_bucket_name = "healthimaging-destination"
+ output_directory = "output"
+ input_uri = f"s3://{input_bucket_name}/{input_directory}/"
+ output_uri = f"s3://{output_bucket_name}/{output_directory}/"
+
+ medical_imaging_stubber.stub_start_dicom_import_job(
+ job_name,
+ datastore_id,
+ role_arn,
+ input_uri,
+ output_uri,
+ job_id,
+ error_code=error_code,
+ )
+
+ if error_code is None:
+ result = wrapper.start_dicom_import_job(
+ datastore_id,
+ input_bucket_name,
+ input_directory,
+ output_bucket_name,
+ output_directory,
+ role_arn,
+ )
+ assert result == job_id
+ else:
+ with pytest.raises(ClientError) as exc_info:
+ wrapper.start_dicom_import_job(
+ datastore_id,
+ input_bucket_name,
+ input_directory,
+ output_bucket_name,
+ output_directory,
+ role_arn,
+ )
+ assert exc_info.value.response["Error"]["Code"] == error_code
+
+
+@pytest.mark.parametrize("error_code", [None, "TestException"])
+def test_get_image_sets_for_dicom_import_job(make_stubber, error_code):
+ medical_imaging_client = boto3.client("medical-imaging")
+ s3_client = boto3.client("s3")
+ medical_imaging_stubber = make_stubber(medical_imaging_client)
+ s3_stubber = make_stubber(s3_client)
+ wrapper = MedicalImagingWrapper(medical_imaging_client, s3_client)
+ datastore_id = "abcdedf1234567890abcdef123456789"
+ job_id = "cccccc1234567890abcdef123456789"
+ job_status = "TESTING"
+ bucket = "healthimaging-destination"
+ key = "CRStudy/job-output-manifest.json"
+ test_content = b"Test object body"
+
+ medical_imaging_stubber.stub_get_dicom_import_job(
+ job_id, datastore_id, job_status, error_code=error_code
+ )
+
+ if error_code is None:
+ s3_stubber.stub_get_object(bucket, key, test_content, error_code=error_code)
+ result = wrapper.get_image_sets_for_dicom_import_job(datastore_id, job_id)
+ assert result["jobStatus"] == job_status
+ else:
+ with pytest.raises(ClientError) as exc_info:
+ wrapper.get_image_sets_for_dicom_import_job(datastore_id, job_id)
+ assert exc_info.value.response["Error"]["Code"] == error_code
+
+
+@pytest.mark.parametrize("error_code", [None, "TestException"])
+def test_search_image_sets(make_stubber, error_code):
+ medical_imaging_client = boto3.client("medical-imaging")
+ medical_imaging_stubber = make_stubber(medical_imaging_client)
+ s3_client = boto3.client("s3")
+ wrapper = MedicalImagingWrapper(medical_imaging_client, s3_client)
+ datastore_id = "abcdedf1234567890abcdef123456789"
+ search_filter = {
+ "filters": [
+ {
+ "values": [
+ {"createdAt": "2023-09-13T14:13:39.302000-04:00"},
+ {"createdAt": "2023-09-13T14:13:39.302000-04:00"},
+ ],
+ "operator": "BETWEEN",
+ }
+ ]
+ }
+ medical_imaging_stubber.stub_search_image_sets(
+ datastore_id, search_filter, error_code=error_code
+ )
+
+ if error_code is None:
+ wrapper.search_image_sets(datastore_id, search_filter)
+
+ else:
+ with pytest.raises(ClientError) as exc_info:
+ wrapper.search_image_sets(datastore_id, search_filter)
+ assert exc_info.value.response["Error"]["Code"] == error_code
+
+
+@pytest.mark.parametrize("error_code", [None, "TestException"])
+def test_get_image_frames_for_image_set(make_stubber, error_code):
+ medical_imaging_client = boto3.client("medical-imaging")
+ medical_imaging_stubber = make_stubber(medical_imaging_client)
+ s3_client = boto3.client("s3")
+ wrapper = MedicalImagingWrapper(medical_imaging_client, s3_client)
+ datastore_id = "abcdedf1234567890abcdef123456789"
+ image_set_id = "cccccc1234567890abcdef123456789"
+ directory = "output"
+
+ if error_code is None:
+ medical_imaging_stubber.stub_get_image_set_metadata(
+ datastore_id, image_set_id, error_code=error_code
+ )
+ wrapper.get_image_frames_for_image_set(datastore_id, image_set_id, directory)
+
+
+@pytest.mark.parametrize("error_code", [None, "TestException"])
+def test_get_pixel_data(make_stubber, error_code):
+ medical_imaging_client = boto3.client("medical-imaging")
+ medical_imaging_stubber = make_stubber(medical_imaging_client)
+ s3_client = boto3.client("s3")
+ wrapper = MedicalImagingWrapper(medical_imaging_client, s3_client)
+ datastore_id = "abcdedf1234567890abcdef123456789"
+ image_set_id = "cccccc1234567890abcdef123456789"
+ image_frame_id = "cccccc1234567890abcdef123456789"
+ test_file = "med-imag-test_file_789654.jph"
+ medical_imaging_stubber.stub_get_pixel_data(
+ datastore_id, image_set_id, image_frame_id, error_code=error_code
+ )
+
+ if error_code is None:
+ wrapper.get_pixel_data(test_file, datastore_id, image_set_id, image_frame_id)
+ assert os.path.exists(test_file)
+ os.remove(test_file)
+
+ else:
+ with pytest.raises(ClientError) as exc_info:
+ wrapper.get_pixel_data(
+ test_file, datastore_id, image_set_id, image_frame_id
+ )
+ assert exc_info.value.response["Error"]["Code"] == error_code
+
+
+@pytest.mark.parametrize("error_code", [None, "TestException"])
+def test_delete_image_set(make_stubber, error_code):
+ medical_imaging_client = boto3.client("medical-imaging")
+ medical_imaging_stubber = make_stubber(medical_imaging_client)
+ s3_client = boto3.client("s3")
+ wrapper = MedicalImagingWrapper(medical_imaging_client, s3_client)
+ datastore_id = "abcdedf1234567890abcdef123456789"
+ image_set_id = "cccccc1234567890abcdef123456789"
+
+ medical_imaging_stubber.stub_delete_image_set(
+ datastore_id, image_set_id, error_code=error_code
+ )
+
+ if error_code is None:
+ wrapper.delete_image_set(datastore_id, image_set_id)
+
+ else:
+ with pytest.raises(ClientError) as exc_info:
+ wrapper.delete_image_set(datastore_id, image_set_id)
+ assert exc_info.value.response["Error"]["Code"] == error_code
diff --git a/python/example_code/medical-imaging/medical_imaging_basics.py b/python/example_code/medical-imaging/medical_imaging_basics.py
index e4f57820dd1..1555bc81f9a 100644
--- a/python/example_code/medical-imaging/medical_imaging_basics.py
+++ b/python/example_code/medical-imaging/medical_imaging_basics.py
@@ -554,60 +554,25 @@ def list_tags_for_resource(self, resource_arn):
# snippet-end:[python.example_code.medical-imaging.ListTagsForResource]
- def usage_demo(self, source_s3_uri, dest_s3_uri, data_access_role_arn):
- data_store_name = f"python_usage_demo_data_store_{random.randint(0, 200000)}"
-
- data_store_id = self.create_datastore(data_store_name)
- print(f"Data store created with id : {data_store_id}")
-
- while True:
- time.sleep(1)
- datastore_properties = self.get_datastore_properties(data_store_id)
- datastore_status = datastore_properties["datastoreStatus"]
- print(f'data store status: "{datastore_status}"')
- if datastore_status == "ACTIVE":
- break
- elif datastore_status == "CREATE_FAILED":
- raise Exception("Create datastore job failed")
-
- datastores = self.list_datastores()
- print(f"datastores : {datastores}")
-
- job_name = "python_usage_demo_job"
- job_id = self.start_dicom_import_job(
- job_name, data_store_id, data_access_role_arn, source_s3_uri, dest_s3_uri
- )
- print(f"Started import job with id: {job_id}")
-
- while True:
- time.sleep(1)
- job = self.get_dicom_import_job(data_store_id, job_id)
- job_status = job["jobStatus"]
- print(f'Status of import job : "{job_status}"')
- if job_status == "COMPLETED":
- break
- elif job_status == "FAILED":
- raise Exception("DICOM import job failed")
-
- import_jobs = self.list_dicom_import_jobs(data_store_id)
- print(import_jobs)
- for job in import_jobs:
- print(job)
-
- # Search with EQUAL operator..
- # snippet-start:[python.example_code.medical-imaging.SearchImageSets.use_case1]
- filter = {
+ def search_imagesets_demo(self, data_store_id):
+ # Replace these values with your own.
+ patient_id = "123456"
+ series_instance_uid = "1.1.123.123456.1.12.1.1234567890.1234.12345678.123"
+ # Search with EQUAL operator.
+ # snippet-start:[python.example_code.medical-imaging.SearchImageSets.use_case1]
+ search_filter = {
"filters": [
- {"operator": "EQUAL", "values": [{"DICOMPatientId": "3524578"}]}
+ {"operator": "EQUAL", "values": [{"DICOMPatientId": patient_id}]}
]
}
- image_sets = self.search_image_sets(data_store_id, filter)
+ image_sets = self.search_image_sets(data_store_id, search_filter)
+ print(f"Image sets found with EQUAL operator\n{image_sets}")
# snippet-end:[python.example_code.medical-imaging.SearchImageSets.use_case1]
# Search with BETWEEN operator using DICOMStudyDate and DICOMStudyTime.
# snippet-start:[python.example_code.medical-imaging.SearchImageSets.use_case2]
- filter = {
+ search_filter = {
"filters": [
{
"operator": "BETWEEN",
@@ -629,12 +594,15 @@ def usage_demo(self, source_s3_uri, dest_s3_uri, data_access_role_arn):
]
}
- image_sets = self.search_image_sets(data_store_id, filter)
+ image_sets = self.search_image_sets(data_store_id, search_filter)
+ print(
+ f"Image sets found with BETWEEN operator using DICOMStudyDate and DICOMStudyTime\n{image_sets}"
+ )
# snippet-end:[python.example_code.medical-imaging.SearchImageSets.use_case2]
# Search with BETWEEN operator using createdAt. Time studies were previously persisted.
# snippet-start:[python.example_code.medical-imaging.SearchImageSets.use_case3]
- filter = {
+ search_filter = {
"filters": [
{
"values": [
@@ -653,9 +621,93 @@ def usage_demo(self, source_s3_uri, dest_s3_uri, data_access_role_arn):
]
}
- image_sets = self.search_image_sets(data_store_id, filter)
+ recent_image_sets = self.search_image_sets(data_store_id, search_filter)
+ print(
+        f"Image sets found with BETWEEN operator using createdAt\n{recent_image_sets}"
+ )
# snippet-end:[python.example_code.medical-imaging.SearchImageSets.use_case3]
+ # Search with EQUAL operator on DICOMSeriesInstanceUID and BETWEEN on updatedAt and sort response in ASC
+ # order on updatedAt field.
+ # snippet-start:[python.example_code.medical-imaging.SearchImageSets.use_case4]
+ search_filter = {
+ "filters": [
+ {
+ "values": [
+ {
+ "updatedAt": datetime.datetime(
+ 2021, 8, 4, 14, 49, 54, 429000
+ )
+ },
+ {
+ "updatedAt": datetime.datetime.now()
+ + datetime.timedelta(days=1)
+ },
+ ],
+ "operator": "BETWEEN",
+ },
+ {
+ "values": [{"DICOMSeriesInstanceUID": series_instance_uid}],
+ "operator": "EQUAL",
+ },
+ ],
+ "sort": {
+ "sortOrder": "ASC",
+ "sortField": "updatedAt",
+ },
+ }
+
+ image_sets = self.search_image_sets(data_store_id, search_filter)
+ print(
+ "Image sets found with EQUAL operator on DICOMSeriesInstanceUID and BETWEEN on updatedAt and"
+ )
+ print(f"sort response in ASC order on updatedAt field\n{image_sets}")
+ # snippet-end:[python.example_code.medical-imaging.SearchImageSets.use_case4]
+
+ return recent_image_sets
+
+ def usage_demo(self, source_s3_uri, dest_s3_uri, data_access_role_arn):
+ data_store_name = f"python_usage_demo_data_store_{random.randint(0, 200000)}"
+
+ data_store_id = self.create_datastore(data_store_name)
+ print(f"Data store created with id : {data_store_id}")
+
+ while True:
+ time.sleep(1)
+ datastore_properties = self.get_datastore_properties(data_store_id)
+ datastore_status = datastore_properties["datastoreStatus"]
+ print(f'data store status: "{datastore_status}"')
+ if datastore_status == "ACTIVE":
+ break
+ elif datastore_status == "CREATE_FAILED":
+ raise Exception("Create datastore job failed")
+
+ datastores = self.list_datastores()
+ print(f"datastores : {datastores}")
+
+ job_name = "python_usage_demo_job"
+ job_id = self.start_dicom_import_job(
+ job_name, data_store_id, data_access_role_arn, source_s3_uri, dest_s3_uri
+ )
+ print(f"Started import job with id: {job_id}")
+
+ while True:
+ time.sleep(1)
+ job = self.get_dicom_import_job(data_store_id, job_id)
+ job_status = job["jobStatus"]
+ print(f'Status of import job : "{job_status}"')
+ if job_status == "COMPLETED":
+ break
+ elif job_status == "FAILED":
+ raise Exception("DICOM import job failed")
+
+ import_jobs = self.list_dicom_import_jobs(data_store_id)
+ print(import_jobs)
+ for job in import_jobs:
+ print(job)
+
+ image_sets = self.search_imagesets_demo(data_store_id)
+
image_set_ids = [image_set["imageSetId"] for image_set in image_sets]
for image_set in image_sets:
print(image_set)
diff --git a/python/example_code/medical-imaging/requirements.txt b/python/example_code/medical-imaging/requirements.txt
index 6a14b87b4d1..3ef6d7037a9 100644
--- a/python/example_code/medical-imaging/requirements.txt
+++ b/python/example_code/medical-imaging/requirements.txt
@@ -1,5 +1,5 @@
-boto3>=1.26.79
+boto3>=1.34.78
pytest>=7.2.1
requests>=2.28.2
openjphpy>=0.1.0
-botocore~=1.31.30
\ No newline at end of file
+botocore>=1.34.78
diff --git a/python/test_tools/medical_imaging_stubber.py b/python/test_tools/medical_imaging_stubber.py
index 971d58db23a..031b2423d8e 100644
--- a/python/test_tools/medical_imaging_stubber.py
+++ b/python/test_tools/medical_imaging_stubber.py
@@ -8,6 +8,7 @@
from test_tools.example_stubber import ExampleStubber
import botocore
import io
+import gzip
class MedicalImagingStubber(ExampleStubber):
@@ -183,10 +184,15 @@ def stub_get_image_set(
def stub_get_image_set_metadata(self, datastore_id, image_set_id, error_code=None):
expected_params = {"datastoreId": datastore_id, "imageSetId": image_set_id}
- data_string = b"akdelfaldkflakdflkajs"
- stream = botocore.response.StreamingBody(
- io.BytesIO(data_string), len(data_string)
- )
+ data_string = b'"{data: akdelfaldkflakdflkajs}"'
+
+ gzip_stream = io.BytesIO()
+ with gzip.open(gzip_stream, "wb") as f:
+ f.write(data_string)
+ gzip_stream.seek(0)
+
+        stream = botocore.response.StreamingBody(gzip_stream, len(gzip_stream.getvalue()))
+
response = {
"contentType": " text/plain",
"contentEncoding": "gzip",