From 9cf74d261262cfe701afd64e3c91641e091c97fc Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 6 Mar 2023 23:13:20 -0600 Subject: [PATCH 001/806] remove unnecessary install block from CMakeLists.txt --- src/server/CMakeLists.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/server/CMakeLists.txt b/src/server/CMakeLists.txt index 7d59bafb9..449b64f10 100644 --- a/src/server/CMakeLists.txt +++ b/src/server/CMakeLists.txt @@ -54,11 +54,11 @@ add_executable(pdc_server.exe ${PDC_SOURCE_DIR}/src/api/pdc_analysis/pdc_hist_pkg.c ) -install( - TARGETS - pdc_server.exe - DESTINATION ${PDC_INSTALL_BIN_DIR} -) +#install( +# TARGETS +# pdc_server.exe +# DESTINATION ${PDC_INSTALL_BIN_DIR} +#) if(NOT ${PDC_INSTALL_BIN_DIR} MATCHES ${PROJECT_BINARY_DIR}/bin) install( From fe1f8b44995bc0dabd3b957e1032c2da26f56fdd Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 20 Mar 2023 11:10:40 -0500 Subject: [PATCH 002/806] update output --- src/tests/kvtag_add_get_scale.c | 42 ++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index b59125d46..e7e7657e1 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -58,6 +58,19 @@ assign_work_to_rank(int rank, int size, int nwork, int *my_count, int *my_start) return 1; } + +uint64_t +atoui64(char *arg) { + char *endptr; + uint64_t num = strtoull(arg, &endptr, 10); + + if (*endptr != '\0') { + printf("Invalid input: %s\n", arg); + return 1; + } + return num; +} + void print_usage(char *name) { @@ -69,8 +82,9 @@ main(int argc, char *argv[]) { pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t * obj_ids; - int n_obj, n_add_tag, n_query, my_obj, my_obj_s, my_add_tag, my_query, my_add_tag_s, my_query_s; - int proc_num, my_rank, i, v; + uint64_t n_obj, n_add_tag, n_query, my_obj, my_obj_s, my_add_tag, my_query, my_add_tag_s, my_query_s; + int proc_num, my_rank + uint64_t i, v; char obj_name[128]; double stime, total_time; pdc_kvtag_t kvtag; @@ -86,9 +100,9 @@ main(int argc, char *argv[]) print_usage(argv[0]); goto done; } - n_obj = atoi(argv[1]); - n_add_tag = atoi(argv[2]); - n_query = atoi(argv[3]); + n_obj = atoui64(argv[1]); + n_add_tag = atoui64(argv[2]); + n_query = atoui64(argv[3]); if (n_add_tag > n_obj || n_query > n_obj) { if (my_rank == 0) @@ -101,7 +115,7 @@ main(int argc, char *argv[]) assign_work_to_rank(my_rank, proc_num, n_obj, &my_obj, &my_obj_s); if (my_rank == 0) - printf("Create %d obj, %d tags, query %d\n", my_obj, my_add_tag, my_query); + printf("Create %llu obj, %llu tags, query %llu\n", my_obj, my_add_tag, my_query); // create a pdc pdc = PDCinit("pdc"); @@ -124,14 +138,14 @@ main(int argc, char *argv[]) // Create a number of objects, add at least one tag to that object obj_ids = (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); for (i = 0; i < my_obj; i++) { - sprintf(obj_name, "obj%d", my_obj_s + i); + sprintf(obj_name, "obj%llu", my_obj_s + i); obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); if (obj_ids[i] <= 0) printf("Fail to create object @ line %d!\n", __LINE__); } if (my_rank == 0) - printf("Created %d objects\n", n_obj); + printf("Created %llu objects\n", n_obj); // Add tags kvtag.name = "Group"; @@ -145,7 +159,7 @@ main(int argc, char *argv[]) for (i = 0; i < my_add_tag; i++) { v = i + my_add_tag_s; if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.size) < 0) - printf("fail to add a kvtag to o%d\n", i + my_obj_s); + printf("fail to add a kvtag to o%llu\n", 
i + my_obj_s); } #ifdef ENABLE_MPI @@ -153,7 +167,7 @@ main(int argc, char *argv[]) total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to add tags to %d objects: %.4f\n", n_add_tag, total_time); + printf("Total time to add tags to %llu objects: %.4f\n", n_add_tag, total_time); values = (pdc_kvtag_t **)calloc(my_query, sizeof(pdc_kvtag_t *)); @@ -163,7 +177,7 @@ main(int argc, char *argv[]) #endif for (i = 0; i < my_query; i++) { if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_size) < 0) - printf("fail to get a kvtag from o%d\n", i + my_query_s); + printf("fail to get a kvtag from o%llu\n", i + my_query_s); } #ifdef ENABLE_MPI @@ -171,13 +185,13 @@ main(int argc, char *argv[]) total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to retrieve tags from %d objects: %.4f\n", n_query, total_time); + printf("Total time to retrieve tags from %llu objects: %.4f\n", n_query, total_time); fflush(stdout); for (i = 0; i < my_query; i++) { if (*(int *)(values[i]->value) != i + my_add_tag_s) - printf("Error with retrieved tag from o%d\n", i + my_query_s); + printf("Error with retrieved tag from o%llu\n", i + my_query_s); PDC_free_kvtag(&values[i]); } @@ -186,7 +200,7 @@ main(int argc, char *argv[]) // close first object for (i = 0; i < my_obj; i++) { if (PDCobj_close(obj_ids[i]) < 0) - printf("fail to close object o%d\n", i + my_obj_s); + printf("fail to close object o%llu\n", i + my_obj_s); } // close a container From 596a4b88904077ef29e123202c714cbc9fcc5a7f Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 20 Mar 2023 12:52:57 -0500 Subject: [PATCH 003/806] Revert "update output" This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd. --- src/tests/kvtag_add_get_scale.c | 42 +++++++++++---------------------- 1 file changed, 14 insertions(+), 28 deletions(-) diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index e7e7657e1..b59125d46 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -58,19 +58,6 @@ assign_work_to_rank(int rank, int size, int nwork, int *my_count, int *my_start) return 1; } - -uint64_t -atoui64(char *arg) { - char *endptr; - uint64_t num = strtoull(arg, &endptr, 10); - - if (*endptr != '\0') { - printf("Invalid input: %s\n", arg); - return 1; - } - return num; -} - void print_usage(char *name) { @@ -82,9 +69,8 @@ main(int argc, char *argv[]) { pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t * obj_ids; - uint64_t n_obj, n_add_tag, n_query, my_obj, my_obj_s, my_add_tag, my_query, my_add_tag_s, my_query_s; - int proc_num, my_rank - uint64_t i, v; + int n_obj, n_add_tag, n_query, my_obj, my_obj_s, my_add_tag, my_query, my_add_tag_s, my_query_s; + int proc_num, my_rank, i, v; char obj_name[128]; double stime, total_time; pdc_kvtag_t kvtag; @@ -100,9 +86,9 @@ main(int argc, char *argv[]) print_usage(argv[0]); goto done; } - n_obj = atoui64(argv[1]); - n_add_tag = atoui64(argv[2]); - n_query = atoui64(argv[3]); + n_obj = atoi(argv[1]); + n_add_tag = atoi(argv[2]); + n_query = atoi(argv[3]); if (n_add_tag > n_obj || n_query > n_obj) { if (my_rank == 0) @@ -115,7 +101,7 @@ main(int argc, char *argv[]) assign_work_to_rank(my_rank, proc_num, n_obj, &my_obj, &my_obj_s); if (my_rank == 0) - printf("Create %llu obj, %llu tags, query %llu\n", my_obj, my_add_tag, my_query); + printf("Create %d obj, %d tags, query %d\n", my_obj, my_add_tag, my_query); // create a pdc pdc = PDCinit("pdc"); @@ -138,14 +124,14 @@ main(int argc, char *argv[]) // Create 
a number of objects, add at least one tag to that object obj_ids = (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); for (i = 0; i < my_obj; i++) { - sprintf(obj_name, "obj%llu", my_obj_s + i); + sprintf(obj_name, "obj%d", my_obj_s + i); obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); if (obj_ids[i] <= 0) printf("Fail to create object @ line %d!\n", __LINE__); } if (my_rank == 0) - printf("Created %llu objects\n", n_obj); + printf("Created %d objects\n", n_obj); // Add tags kvtag.name = "Group"; @@ -159,7 +145,7 @@ main(int argc, char *argv[]) for (i = 0; i < my_add_tag; i++) { v = i + my_add_tag_s; if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.size) < 0) - printf("fail to add a kvtag to o%llu\n", i + my_obj_s); + printf("fail to add a kvtag to o%d\n", i + my_obj_s); } #ifdef ENABLE_MPI @@ -167,7 +153,7 @@ main(int argc, char *argv[]) total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to add tags to %llu objects: %.4f\n", n_add_tag, total_time); + printf("Total time to add tags to %d objects: %.4f\n", n_add_tag, total_time); values = (pdc_kvtag_t **)calloc(my_query, sizeof(pdc_kvtag_t *)); @@ -177,7 +163,7 @@ main(int argc, char *argv[]) #endif for (i = 0; i < my_query; i++) { if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_size) < 0) - printf("fail to get a kvtag from o%llu\n", i + my_query_s); + printf("fail to get a kvtag from o%d\n", i + my_query_s); } #ifdef ENABLE_MPI @@ -185,13 +171,13 @@ main(int argc, char *argv[]) total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to retrieve tags from %llu objects: %.4f\n", n_query, total_time); + printf("Total time to retrieve tags from %d objects: %.4f\n", n_query, total_time); fflush(stdout); for (i = 0; i < my_query; i++) { if (*(int *)(values[i]->value) != i + my_add_tag_s) - printf("Error with retrieved tag from o%llu\n", i + my_query_s); + printf("Error with retrieved tag from o%d\n", i + my_query_s); PDC_free_kvtag(&values[i]); } @@ -200,7 +186,7 @@ main(int argc, char *argv[]) // close first object for (i = 0; i < my_obj; i++) { if (PDCobj_close(obj_ids[i]) < 0) - printf("fail to close object o%llu\n", i + my_obj_s); + printf("fail to close object o%d\n", i + my_obj_s); } // close a container From 041d368d9aa3d7a0624ad5959941a99a2ed7bfa2 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 20 Mar 2023 13:07:10 -0500 Subject: [PATCH 004/806] build kvtag_add_get_scale --- src/tests/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index 965a39fed..aae44e8f2 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -63,7 +63,7 @@ set(PROGRAMS # data_server_meta_test # kvtag_add_get # kvtag_get -# kvtag_add_get_scale + kvtag_add_get_scale # kvtag_query # kvtag_query_scale # obj_transformation From b14da3521f0404958e6e34e6485f29e595d802b1 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 20 Mar 2023 13:40:39 -0500 Subject: [PATCH 005/806] comment off free --- src/tests/kvtag_add_get_scale.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index b59125d46..464ab45b2 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -175,12 +175,12 @@ main(int argc, char *argv[]) fflush(stdout); - for (i = 0; i < my_query; i++) { - if (*(int *)(values[i]->value) != i + my_add_tag_s) - printf("Error with retrieved tag from o%d\n", i + 
my_query_s); + // for (i = 0; i < my_query; i++) { + // if (*(int *)(values[i]->value) != i + my_add_tag_s) + // printf("Error with retrieved tag from o%d\n", i + my_query_s); - PDC_free_kvtag(&values[i]); - } + // PDC_free_kvtag(&values[i]); + // } free(values); // close first object From 31d15313ed566218957d83c357d309d882eb1a4d Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 20 Mar 2023 14:06:28 -0500 Subject: [PATCH 006/806] update code --- src/tests/kvtag_add_get_scale.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index 464ab45b2..7f7745a7c 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -175,12 +175,12 @@ main(int argc, char *argv[]) fflush(stdout); - // for (i = 0; i < my_query; i++) { - // if (*(int *)(values[i]->value) != i + my_add_tag_s) - // printf("Error with retrieved tag from o%d\n", i + my_query_s); + for (i = 0; i < my_query; i++) { + if (*(int *)(values[i]->value) != i + my_add_tag_s) + printf("Error with retrieved tag from o%d\n", i + my_query_s); - // PDC_free_kvtag(&values[i]); - // } + // PDC_free_kvtag(&values[i]); + } free(values); // close first object From d81440c870c1ca7dc773dcbddcc718852467ed21 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 22 Mar 2023 23:45:53 -0500 Subject: [PATCH 007/806] 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. moving work assigning block downwards right before creating objects \n 4. everything is tested working --- src/tests/kvtag_scale_add_get.c | 230 ++++++++++++++++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 src/tests/kvtag_scale_add_get.c diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c new file mode 100644 index 000000000..1493a7d6d --- /dev/null +++ b/src/tests/kvtag_scale_add_get.c @@ -0,0 +1,230 @@ +/* + * Copyright Notice for + * Proactive Data Containers (PDC) Software Library and Utilities + * ----------------------------------------------------------------------------- + + *** Copyright Notice *** + + * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the + * University of California, through Lawrence Berkeley National Laboratory, + * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF + * Group (subject to receipt of any required approvals from the U.S. Dept. of + * Energy). All rights reserved. + + * If you have questions about your rights to use or distribute this software, + * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. + + * NOTICE. This Software was developed under funding from the U.S. Department of + * Energy and the U.S. Government consequently retains certain rights. As such, the + * U.S. Government has been granted for itself and others acting on its behalf a + * paid-up, nonexclusive, irrevocable, worldwide license in the Software to + * reproduce, distribute copies to the public, prepare derivative works, and + * perform publicly and display publicly, and to permit other to do so. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "pdc.h" +#include "pdc_client_connect.h" + +uint64_t +atoui64(char *arg) { + char *endptr; + uint64_t num = strtoull(arg, &endptr, 10); + + if (*endptr != '\0') { + printf("Invalid input: %s\n", arg); + return 1; + } + return num; +} + +int +assign_work_to_rank(int rank, int size, uint64_t nwork, uint64_t *my_count, uint64_t *my_start) +{ + if (rank > size || my_count == NULL || my_start == NULL) { + printf("assign_work_to_rank(): Error with input!\n"); + return -1; + } + if (nwork < size) { + if (rank < nwork) + *my_count = 1; + else + *my_count = 0; + (*my_start) = rank * (*my_count); + } + else { + (*my_count) = nwork / size; + (*my_start) = rank * (*my_count); + + // Last few ranks may have extra work + if (rank >= size - nwork % size) { + (*my_count)++; + (*my_start) += (rank - (size - nwork % size)); + } + } + + return 1; +} + +void +print_usage(char *name) +{ + printf("%s n_obj n_add_tag n_query\n", name); +} + +int +main(int argc, char *argv[]) +{ + pdcid_t pdc, cont_prop, cont, obj_prop; + pdcid_t *obj_ids; + uint64_t n_obj, n_obj_incr, my_obj, my_obj_s; + uint64_t n_add_tag, my_add_tag, my_add_tag_s; + uint64_t n_query, my_query, my_query_s; + uint64_t i, v; + int proc_num, my_rank; + char obj_name[128]; + char tag_name[128]; + double stime, total_time; + pdc_kvtag_t kvtag; + void **values; + size_t value_size; +#ifdef ENABLE_MPI + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &proc_num); + MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); +#endif + if (argc != 5) { + if (my_rank == 0) + print_usage(argv[0]); + goto done; + } + n_obj = atoui64(argv[1]); + n_obj_incr = atoui64(argv[2]); + n_add_tag = atoui64(argv[3]); + n_query = atoui64(argv[4]); + + if (n_add_tag > n_obj || n_query > n_obj) { + if (my_rank == 0) + printf("n_add_tag or n_query larger than n_obj! 
Exiting...\n"); + goto done; + } + + if (my_rank == 0) + printf("Create %d obj, %d tags, query %d\n", my_obj, my_add_tag, my_query); + + // create a pdc + pdc = PDCinit("pdc"); + + // create a container property + cont_prop = PDCprop_create(PDC_CONT_CREATE, pdc); + if (cont_prop <= 0) + printf("Fail to create container property @ line %d!\n", __LINE__); + + // create a container + cont = PDCcont_create("c1", cont_prop); + if (cont <= 0) + printf("Fail to create container @ line %d!\n", __LINE__); + + // create an object property + obj_prop = PDCprop_create(PDC_OBJ_CREATE, pdc); + if (obj_prop <= 0) + printf("Fail to create object property @ line %d!\n", __LINE__); + + assign_work_to_rank(my_rank, proc_num, n_add_tag, &my_add_tag, &my_add_tag_s); + assign_work_to_rank(my_rank, proc_num, n_query, &my_query, &my_query_s); + assign_work_to_rank(my_rank, proc_num, n_obj, &my_obj, &my_obj_s); + + // Create a number of objects, add at least one tag to that object + obj_ids = (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); + for (i = 0; i < my_obj; i++) { + sprintf(obj_name, "obj%d", my_obj_s + i); + obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); + if (obj_ids[i] <= 0) + printf("Fail to create object @ line %d!\n", __LINE__); + } + + if (my_rank == 0) + printf("Created %d objects\n", n_obj); + + // Add tags + kvtag.name = "Group"; + kvtag.value = (void *)&v; + kvtag.size = sizeof(uint64_t); + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + stime = MPI_Wtime(); +#endif + for (i = 0; i < my_add_tag; i++) { + v = i + my_add_tag_s; + if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.size) < 0) + printf("fail to add a kvtag to o%d\n", i + my_obj_s); + } + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + total_time = MPI_Wtime() - stime; +#endif + if (my_rank == 0) + printf("Total time to add tags to %d objects: %.4f\n", n_add_tag, total_time); + + values = (void **)calloc(my_query, sizeof(void *)); + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + stime = MPI_Wtime(); +#endif + for (i = 0; i < my_query; i++) { + if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_size) < 0) + printf("fail to get a kvtag from o%d\n", i + my_query_s); + } + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + total_time = MPI_Wtime() - stime; +#endif + if (my_rank == 0) + printf("Total time to retrieve tags from %d objects: %.4f\n", n_query, total_time); + + fflush(stdout); + + for (i = 0; i < my_query; i++) { + if (*(int *)(values[i]) != i + my_add_tag_s) + printf("Error with retrieved tag from o%d\n", i + my_query_s); + // PDC_free_kvtag(&values[i]); + } + free(values); + + // close first object + for (i = 0; i < my_obj; i++) { + if (PDCobj_close(obj_ids[i]) < 0) + printf("fail to close object o%d\n", i + my_obj_s); + } + + // close a container + if (PDCcont_close(cont) < 0) + printf("fail to close container c1\n"); + + // close a container property + if (PDCprop_close(obj_prop) < 0) + printf("Fail to close property @ line %d\n", __LINE__); + + if (PDCprop_close(cont_prop) < 0) + printf("Fail to close property @ line %d\n", __LINE__); + + // close pdc + if (PDCclose(pdc) < 0) + printf("fail to close PDC\n"); + +done: +#ifdef ENABLE_MPI + MPI_Finalize(); +#endif + + return 0; +} From fb8cbf70e58e3f63d7f0f1bf9c2bf66b8067c906 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 23 Mar 2023 00:40:50 -0500 Subject: [PATCH 008/806] do while loop added, tested with 1m object and works --- src/tests/kvtag_scale_add_get.c | 144 ++++++++++++++++---------------- 1 file changed, 74 
insertions(+), 70 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 1493a7d6d..4497392db 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -32,8 +32,9 @@ #include "pdc_client_connect.h" uint64_t -atoui64(char *arg) { - char *endptr; +atoui64(char *arg) +{ + char *endptr; uint64_t num = strtoull(arg, &endptr, 10); if (*endptr != '\0') { @@ -74,48 +75,43 @@ assign_work_to_rank(int rank, int size, uint64_t nwork, uint64_t *my_count, uint void print_usage(char *name) { - printf("%s n_obj n_add_tag n_query\n", name); + printf("%s n_obj n_obj_incr\n", name); } int main(int argc, char *argv[]) { - pdcid_t pdc, cont_prop, cont, obj_prop; - pdcid_t *obj_ids; - uint64_t n_obj, n_obj_incr, my_obj, my_obj_s; - uint64_t n_add_tag, my_add_tag, my_add_tag_s; - uint64_t n_query, my_query, my_query_s; - uint64_t i, v; - int proc_num, my_rank; - char obj_name[128]; - char tag_name[128]; - double stime, total_time; - pdc_kvtag_t kvtag; - void **values; - size_t value_size; + pdcid_t pdc, cont_prop, cont, obj_prop; + pdcid_t *obj_ids; + uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; + uint64_t i, v; + int proc_num, my_rank; + char obj_name[128]; + char tag_name[128]; + double stime, total_time; + void **values; + size_t value_size; #ifdef ENABLE_MPI MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &proc_num); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); #endif - if (argc != 5) { + if (argc != 3) { if (my_rank == 0) print_usage(argv[0]); goto done; } n_obj = atoui64(argv[1]); n_obj_incr = atoui64(argv[2]); - n_add_tag = atoui64(argv[3]); - n_query = atoui64(argv[4]); - if (n_add_tag > n_obj || n_query > n_obj) { + if (n_obj_incr > n_obj) { if (my_rank == 0) - printf("n_add_tag or n_query larger than n_obj! Exiting...\n"); + printf("n_obj_incr cannot be larger than n_obj! 
Exiting...\n"); goto done; } if (my_rank == 0) - printf("Create %d obj, %d tags, query %d\n", my_obj, my_add_tag, my_query); + printf("Create %llu obj, %llu tags, query %llu\n", my_obj, my_obj, my_obj); // create a pdc pdc = PDCinit("pdc"); @@ -135,75 +131,83 @@ main(int argc, char *argv[]) if (obj_prop <= 0) printf("Fail to create object property @ line %d!\n", __LINE__); - assign_work_to_rank(my_rank, proc_num, n_add_tag, &my_add_tag, &my_add_tag_s); - assign_work_to_rank(my_rank, proc_num, n_query, &my_query, &my_query_s); - assign_work_to_rank(my_rank, proc_num, n_obj, &my_obj, &my_obj_s); + curr_total_obj = 0; // Create a number of objects, add at least one tag to that object - obj_ids = (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); - for (i = 0; i < my_obj; i++) { - sprintf(obj_name, "obj%d", my_obj_s + i); - obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); - if (obj_ids[i] <= 0) - printf("Fail to create object @ line %d!\n", __LINE__); - } + obj_ids = (pdcid_t *)calloc(n_obj, sizeof(pdcid_t)); - if (my_rank == 0) - printf("Created %d objects\n", n_obj); + do { + assign_work_to_rank(my_rank, proc_num, n_obj_incr, &my_obj, &my_obj_s); + + for (i = 0; i < my_obj; i++) { + v = my_obj_s + i + curr_total_obj; + sprintf(obj_name, "obj%llu", v); + obj_ids[v] = PDCobj_create(cont, obj_name, obj_prop); + if (obj_ids[v] <= 0) + printf("Fail to create object @ line %d!\n", __LINE__); + } - // Add tags - kvtag.name = "Group"; - kvtag.value = (void *)&v; - kvtag.size = sizeof(uint64_t); + curr_total_obj += n_obj_incr; + + if (my_rank == 0) + printf("Created %llu objects in total now.\n", curr_total_obj); #ifdef ENABLE_MPI - MPI_Barrier(MPI_COMM_WORLD); - stime = MPI_Wtime(); + MPI_Barrier(MPI_COMM_WORLD); + stime = MPI_Wtime(); #endif - for (i = 0; i < my_add_tag; i++) { - v = i + my_add_tag_s; - if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.size) < 0) - printf("fail to add a kvtag to o%d\n", i + my_obj_s); - } + for (i = 0; i < my_obj; i++) { + v = i + my_obj_s + curr_total_obj - n_obj_incr; + sprintf(tag_name, "tag%llu", v); + if (PDCobj_put_tag(obj_ids[v], tag_name, (void *)&v, sizeof(uint64_t)) < 0) + printf("fail to add a kvtag to o%llu\n", i + my_obj_s); + } #ifdef ENABLE_MPI - MPI_Barrier(MPI_COMM_WORLD); - total_time = MPI_Wtime() - stime; + MPI_Barrier(MPI_COMM_WORLD); + total_time = MPI_Wtime() - stime; #endif - if (my_rank == 0) - printf("Total time to add tags to %d objects: %.4f\n", n_add_tag, total_time); + if (my_rank == 0) + printf("Total time to add tags to %llu objects: %.4f\n", my_obj, total_time); - values = (void **)calloc(my_query, sizeof(void *)); + values = (void **)calloc(my_obj, sizeof(void *)); #ifdef ENABLE_MPI - MPI_Barrier(MPI_COMM_WORLD); - stime = MPI_Wtime(); + MPI_Barrier(MPI_COMM_WORLD); + stime = MPI_Wtime(); #endif - for (i = 0; i < my_query; i++) { - if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_size) < 0) - printf("fail to get a kvtag from o%d\n", i + my_query_s); - } + for (i = 0; i < my_obj; i++) { + v = i + my_obj_s + curr_total_obj - n_obj_incr; + sprintf(tag_name, "tag%llu", v); + if (PDCobj_get_tag(obj_ids[v], tag_name, (void *)&values[i], (void *)&value_size) < 0) + printf("fail to get a kvtag from o%llu\n", v); + } #ifdef ENABLE_MPI - MPI_Barrier(MPI_COMM_WORLD); - total_time = MPI_Wtime() - stime; + MPI_Barrier(MPI_COMM_WORLD); + total_time = MPI_Wtime() - stime; #endif - if (my_rank == 0) - printf("Total time to retrieve tags from %d objects: %.4f\n", n_query, total_time); + if (my_rank == 0) + 
printf("Total time to retrieve tags from %llu objects: %.4f\n", my_obj, total_time); - fflush(stdout); + fflush(stdout); - for (i = 0; i < my_query; i++) { - if (*(int *)(values[i]) != i + my_add_tag_s) - printf("Error with retrieved tag from o%d\n", i + my_query_s); - // PDC_free_kvtag(&values[i]); - } - free(values); + for (i = 0; i < my_obj; i++) { + v = i + my_obj_s + curr_total_obj - n_obj_incr; + if (*(int *)(values[i]) != v) + printf("Error with retrieved tag from o%llu\n", v); + free(values[i]); + } + free(values); + + } while (curr_total_obj < n_obj); // close first object - for (i = 0; i < my_obj; i++) { - if (PDCobj_close(obj_ids[i]) < 0) - printf("fail to close object o%d\n", i + my_obj_s); + for (i = 0; i < n_obj; i++) { + if (obj_ids[i] > 0) { + if (PDCobj_close(obj_ids[i]) < 0) + printf("fail to close object o%llu\n", i); + } } // close a container From 5073c722b95b9c5e9c92a7128b8f3405710c95e4 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 23 Mar 2023 02:17:43 -0500 Subject: [PATCH 009/806] 1m objects test works, 10m object test fail as the original also fails --- src/tests/kvtag_scale_add_get.c | 109 +++++++++++++++++++++++--------- 1 file changed, 79 insertions(+), 30 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 4497392db..2215a6ab4 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -81,16 +81,18 @@ print_usage(char *name) int main(int argc, char *argv[]) { - pdcid_t pdc, cont_prop, cont, obj_prop; - pdcid_t *obj_ids; - uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; - uint64_t i, v; - int proc_num, my_rank; - char obj_name[128]; - char tag_name[128]; - double stime, total_time; - void **values; - size_t value_size; + pdcid_t pdc, cont_prop, cont, obj_prop; + pdcid_t *obj_ids; + uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; + uint64_t i, v; + int proc_num, my_rank; + char obj_name[128]; + char tag_name[128]; + double stime, total_time; + void **values; + size_t value_size; + obj_handle *oh; + struct pdc_obj_info *info; #ifdef ENABLE_MPI MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &proc_num); @@ -111,7 +113,7 @@ main(int argc, char *argv[]) } if (my_rank == 0) - printf("Create %llu obj, %llu tags, query %llu\n", my_obj, my_obj, my_obj); + printf("Create %llu obj, %llu tags, query %llu\n", n_obj, n_obj, n_obj); // create a pdc pdc = PDCinit("pdc"); @@ -133,33 +135,49 @@ main(int argc, char *argv[]) curr_total_obj = 0; + if (my_rank == 0) + printf("create obj_ids array\n"); // Create a number of objects, add at least one tag to that object - obj_ids = (pdcid_t *)calloc(n_obj, sizeof(pdcid_t)); + assign_work_to_rank(my_rank, proc_num, n_obj_incr, &my_obj, &my_obj_s); + obj_ids = (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); + values = (void **)calloc(my_obj, sizeof(void *)); + sprintf(tag_name, "tag%d", 2); do { - assign_work_to_rank(my_rank, proc_num, n_obj_incr, &my_obj, &my_obj_s); + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + stime = MPI_Wtime(); +#endif + + if (my_rank == 0) + printf("starting creating %llu objects... 
\n", my_obj); for (i = 0; i < my_obj; i++) { v = my_obj_s + i + curr_total_obj; sprintf(obj_name, "obj%llu", v); - obj_ids[v] = PDCobj_create(cont, obj_name, obj_prop); - if (obj_ids[v] <= 0) + obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); + if (obj_ids[i] <= 0) printf("Fail to create object @ line %d!\n", __LINE__); } curr_total_obj += n_obj_incr; +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + total_time = MPI_Wtime() - stime; +#endif if (my_rank == 0) - printf("Created %llu objects in total now.\n", curr_total_obj); + printf("Created %llu objects in total now in %.4f seconds.\n", curr_total_obj, total_time); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif + for (i = 0; i < my_obj; i++) { v = i + my_obj_s + curr_total_obj - n_obj_incr; - sprintf(tag_name, "tag%llu", v); - if (PDCobj_put_tag(obj_ids[v], tag_name, (void *)&v, sizeof(uint64_t)) < 0) + if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)&v, sizeof(uint64_t)) < 0) printf("fail to add a kvtag to o%llu\n", i + my_obj_s); } @@ -170,16 +188,13 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("Total time to add tags to %llu objects: %.4f\n", my_obj, total_time); - values = (void **)calloc(my_obj, sizeof(void *)); - #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif for (i = 0; i < my_obj; i++) { v = i + my_obj_s + curr_total_obj - n_obj_incr; - sprintf(tag_name, "tag%llu", v); - if (PDCobj_get_tag(obj_ids[v], tag_name, (void *)&values[i], (void *)&value_size) < 0) + if (PDCobj_get_tag(obj_ids[i], tag_name, (void **)&values[i], (void *)&value_size) < 0) printf("fail to get a kvtag from o%llu\n", v); } @@ -196,19 +211,53 @@ main(int argc, char *argv[]) v = i + my_obj_s + curr_total_obj - n_obj_incr; if (*(int *)(values[i]) != v) printf("Error with retrieved tag from o%llu\n", v); - free(values[i]); + // free(values[i]); } - free(values); - - } while (curr_total_obj < n_obj); + // free(values); - // close first object - for (i = 0; i < n_obj; i++) { - if (obj_ids[i] > 0) { + // close objects + for (i = 0; i < my_obj; i++) { + v = i + my_obj_s + curr_total_obj - n_obj_incr; if (PDCobj_close(obj_ids[i]) < 0) - printf("fail to close object o%llu\n", i); + printf("fail to close object o%llu\n", v); } + + } while (curr_total_obj < n_obj); + + for (i = 0; i < my_obj; i++) { + free(values[i]); } + free(obj_ids); + free(values); + + // oh = PDCobj_iter_start(cont); + + // while (!PDCobj_iter_null(oh)) { + // info = PDCobj_iter_get_info(oh); + // info-> + // if (info->obj_pt->type != PDC_DOUBLE) { + // printf("Type is not properly inherited from object property.\n"); + // ret_value = 1; + // } + // if (info->obj_pt->ndim != ndim) { + // printf("Number of dimensions is not properly inherited from object property.\n"); + // ret_value = 1; + // } + // if (info->obj_pt->dims[0] != dims[0]) { + // printf("First dimension is not properly inherited from object property.\n"); + // ret_value = 1; + // } + // if (info->obj_pt->dims[1] != dims[1]) { + // printf("Second dimension is not properly inherited from object property.\n"); + // ret_value = 1; + // } + // if (info->obj_pt->dims[2] != dims[2]) { + // printf("Third dimension is not properly inherited from object property.\n"); + // ret_value = 1; + // } + + // oh = PDCobj_iter_next(oh, cont); + // } // close a container if (PDCcont_close(cont) < 0) From c73cb087d6ef3a695f17dbd027193844535e9313 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 23 Mar 2023 10:20:29 -0500 Subject: [PATCH 010/806] add new executable to test set --- 
src/tests/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index d0f319dd1..45eb40990 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -66,6 +66,7 @@ set(PROGRAMS kvtag_add_get_scale # kvtag_query # kvtag_query_scale + kvtag_scale_add_get # obj_transformation region_transfer_query region_transfer From d8d6b014e518bc9e5ca45a69255435658bb4110e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 23 Mar 2023 13:52:20 -0500 Subject: [PATCH 011/806] enlarge PDC_SERVER_ID_INTERVAL --- src/server/include/pdc_client_server_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/include/pdc_client_server_common.h b/src/server/include/pdc_client_server_common.h index 11cb394c5..f30586833 100644 --- a/src/server/include/pdc_client_server_common.h +++ b/src/server/include/pdc_client_server_common.h @@ -69,7 +69,7 @@ hg_thread_mutex_t meta_obj_map_mutex_g; #define ADDR_MAX 512 #define DIM_MAX 4 #define TAG_LEN_MAX 2048 -#define PDC_SERVER_ID_INTERVEL 1000000 +#define PDC_SERVER_ID_INTERVEL 1000000000 #define PDC_SERVER_MAX_PROC_PER_NODE 32 #define PDC_SERIALIZE_MAX_SIZE 256 #define PDC_MAX_CORE_PER_NODE 68 // Cori KNL has 68 cores per node, Haswell 32 From fb74b5405276dababae3274be0eac6736918595a Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 24 Mar 2023 10:32:56 -0500 Subject: [PATCH 012/806] update code --- src/tests/kvtag_add_get_scale.c | 2 +- src/tests/kvtag_scale_add_get.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index b9a3a03ee..29d2b894a 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -81,7 +81,7 @@ main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &proc_num); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); #endif - if (argc != 4) { + if (argc < 4) { if (my_rank == 0) print_usage(argv[0]); goto done; diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 2215a6ab4..d054a1a18 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -98,7 +98,7 @@ main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &proc_num); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); #endif - if (argc != 3) { + if (argc < 3) { if (my_rank == 0) print_usage(argv[0]); goto done; From 3bc4ec11231bd1753c48911cb9e142ada639d5b7 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 24 Mar 2023 13:29:02 -0500 Subject: [PATCH 013/806] update console args --- src/tests/kvtag_query_scale.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tests/kvtag_query_scale.c b/src/tests/kvtag_query_scale.c index 58d276e55..80ca2a07c 100644 --- a/src/tests/kvtag_query_scale.c +++ b/src/tests/kvtag_query_scale.c @@ -70,7 +70,7 @@ main(int argc, char *argv[]) pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t * obj_ids; int n_obj, n_add_tag, my_obj, my_obj_s, my_add_tag, my_add_tag_s; - int proc_num, my_rank, i, v, iter; + int proc_num, my_rank, i, v, iter, round; char obj_name[128]; double stime, total_time; pdc_kvtag_t kvtag; @@ -83,14 +83,14 @@ main(int argc, char *argv[]) MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); #endif - if (argc != 2) { + if (argc < 3) { if (my_rank == 0) print_usage(argv[0]); goto done; } n_obj = atoi(argv[1]); + round = atoi(argv[2]); n_add_tag = n_obj / 100; - int round = 7; // create a pdc pdc = PDCinit("pdc"); From 61c26fb04fa7a3707cc13a690349ab67f9cd2744 Mon Sep 17 00:00:00 2001 From: 
Wei Zhang Date: Fri, 24 Mar 2023 13:43:40 -0500 Subject: [PATCH 014/806] add p search test --- src/tests/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index 45eb40990..27cb1a3f3 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -65,7 +65,7 @@ set(PROGRAMS # kvtag_get kvtag_add_get_scale # kvtag_query -# kvtag_query_scale + kvtag_query_scale kvtag_scale_add_get # obj_transformation region_transfer_query From 74738344958be8092222232db392e640c524553d Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 30 Mar 2023 11:52:12 -0500 Subject: [PATCH 015/806] add console arg for changing number of attributes per object --- src/tests/kvtag_scale_add_get.c | 44 ++++++++------------------------- 1 file changed, 10 insertions(+), 34 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index d054a1a18..6717928f5 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -84,7 +84,8 @@ main(int argc, char *argv[]) pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t *obj_ids; uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; - uint64_t i, v; + uint64_t n_attr; + uint64_t i, j, v; int proc_num, my_rank; char obj_name[128]; char tag_name[128]; @@ -98,13 +99,14 @@ main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &proc_num); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); #endif - if (argc < 3) { + if (argc < 4) { if (my_rank == 0) print_usage(argv[0]); goto done; } n_obj = atoui64(argv[1]); n_obj_incr = atoui64(argv[2]); + n_attr = atoui64(argv[3]); if (n_obj_incr > n_obj) { if (my_rank == 0) @@ -193,9 +195,12 @@ main(int argc, char *argv[]) stime = MPI_Wtime(); #endif for (i = 0; i < my_obj; i++) { - v = i + my_obj_s + curr_total_obj - n_obj_incr; - if (PDCobj_get_tag(obj_ids[i], tag_name, (void **)&values[i], (void *)&value_size) < 0) - printf("fail to get a kvtag from o%llu\n", v); + for (j = 0; j < n_attr; j++) { + v = i + my_obj_s + curr_total_obj - n_obj_incr; + sprintf(tag_name, "tag%llu.%llu", j, v); + if (PDCobj_get_tag(obj_ids[i], tag_name, (void **)&values[i], (void *)&value_size) < 0) + printf("fail to get a kvtag from o%llu\n", v); + } } #ifdef ENABLE_MPI @@ -230,35 +235,6 @@ main(int argc, char *argv[]) free(obj_ids); free(values); - // oh = PDCobj_iter_start(cont); - - // while (!PDCobj_iter_null(oh)) { - // info = PDCobj_iter_get_info(oh); - // info-> - // if (info->obj_pt->type != PDC_DOUBLE) { - // printf("Type is not properly inherited from object property.\n"); - // ret_value = 1; - // } - // if (info->obj_pt->ndim != ndim) { - // printf("Number of dimensions is not properly inherited from object property.\n"); - // ret_value = 1; - // } - // if (info->obj_pt->dims[0] != dims[0]) { - // printf("First dimension is not properly inherited from object property.\n"); - // ret_value = 1; - // } - // if (info->obj_pt->dims[1] != dims[1]) { - // printf("Second dimension is not properly inherited from object property.\n"); - // ret_value = 1; - // } - // if (info->obj_pt->dims[2] != dims[2]) { - // printf("Third dimension is not properly inherited from object property.\n"); - // ret_value = 1; - // } - - // oh = PDCobj_iter_next(oh, cont); - // } - // close a container if (PDCcont_close(cont) < 0) printf("fail to close container c1\n"); From e6f811072a151f26520c82ed1fb55d1bbcd0ec1e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 30 Mar 2023 21:59:01 -0500 Subject: [PATCH 016/806] free allocated memory --- 
src/tests/kvtag_scale_add_get.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 6717928f5..c67aebaf7 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -143,15 +143,13 @@ main(int argc, char *argv[]) assign_work_to_rank(my_rank, proc_num, n_obj_incr, &my_obj, &my_obj_s); obj_ids = (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); - values = (void **)calloc(my_obj, sizeof(void *)); - sprintf(tag_name, "tag%d", 2); - do { + do { + values = (void **)calloc(my_obj, sizeof(void *)); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif - if (my_rank == 0) printf("starting creating %llu objects... \n", my_obj); @@ -216,9 +214,9 @@ main(int argc, char *argv[]) v = i + my_obj_s + curr_total_obj - n_obj_incr; if (*(int *)(values[i]) != v) printf("Error with retrieved tag from o%llu\n", v); - // free(values[i]); + free(values[i]); } - // free(values); + free(values); // close objects for (i = 0; i < my_obj; i++) { @@ -229,11 +227,12 @@ main(int argc, char *argv[]) } while (curr_total_obj < n_obj); - for (i = 0; i < my_obj; i++) { - free(values[i]); - } + // for (i = 0; i < my_obj; i++) { + // free(values[i]); + // } + // free(values); free(obj_ids); - free(values); + // close a container if (PDCcont_close(cont) < 0) From 9d3e73d53f9c9e917cc067ce411753f0a3979f80 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 31 Mar 2023 10:48:08 -0500 Subject: [PATCH 017/806] fix query count issue --- src/tests/kvtag_scale_add_get.c | 36 ++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index c67aebaf7..91424efb7 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -84,12 +84,13 @@ main(int argc, char *argv[]) pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t *obj_ids; uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; - uint64_t n_attr; - uint64_t i, j, v; - int proc_num, my_rank; + uint64_t n_attr, n_query; + uint64_t i, j, k, v; + int proc_num, my_rank, attr_value; char obj_name[128]; char tag_name[128]; double stime, total_time; + int *value_to_add; void **values; size_t value_size; obj_handle *oh; @@ -99,7 +100,7 @@ main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &proc_num); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); #endif - if (argc < 4) { + if (argc < 6) { if (my_rank == 0) print_usage(argv[0]); goto done; @@ -107,6 +108,8 @@ main(int argc, char *argv[]) n_obj = atoui64(argv[1]); n_obj_incr = atoui64(argv[2]); n_attr = atoui64(argv[3]); + n_attr_len = atoui64(argv[4]); + n_query = atoui64(argv[5]); if (n_obj_incr > n_obj) { if (my_rank == 0) @@ -145,7 +148,7 @@ main(int argc, char *argv[]) obj_ids = (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); do { - values = (void **)calloc(my_obj, sizeof(void *)); + #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); @@ -177,8 +180,13 @@ main(int argc, char *argv[]) for (i = 0; i < my_obj; i++) { v = i + my_obj_s + curr_total_obj - n_obj_incr; - if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)&v, sizeof(uint64_t)) < 0) - printf("fail to add a kvtag to o%llu\n", i + my_obj_s); + for (j = 0; j < n_attr; j++) { + printf("print tag name before add"); + fflush(stdout); + sprintf(tag_name, "tag%llu.%llu", j, v); + if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)&v, sizeof(uint64_t)) < 0) + printf("fail to add a kvtag to o%llu\n", i + 
my_obj_s); + } } #ifdef ENABLE_MPI @@ -188,17 +196,18 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("Total time to add tags to %llu objects: %.4f\n", my_obj, total_time); + values = (void **)calloc(my_obj, sizeof(void *)); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif for (i = 0; i < my_obj; i++) { - for (j = 0; j < n_attr; j++) { - v = i + my_obj_s + curr_total_obj - n_obj_incr; - sprintf(tag_name, "tag%llu.%llu", j, v); - if (PDCobj_get_tag(obj_ids[i], tag_name, (void **)&values[i], (void *)&value_size) < 0) - printf("fail to get a kvtag from o%llu\n", v); - } + v = i + my_obj_s + curr_total_obj - n_obj_incr; + printf("print tag name before query"); + fflush(stdout); + sprintf(tag_name, "tag%llu.%llu", j, v); + if (PDCobj_get_tag(obj_ids[i], tag_name, (void **)&values[i], (void *)&value_size) < 0) + printf("fail to get a kvtag from o%llu\n", v); } #ifdef ENABLE_MPI @@ -232,7 +241,6 @@ main(int argc, char *argv[]) // } // free(values); free(obj_ids); - // close a container if (PDCcont_close(cont) < 0) From a1697ae5700429dc3d73f82736dbf7bec61629ad Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 31 Mar 2023 10:49:26 -0500 Subject: [PATCH 018/806] fix attr length definition --- src/tests/kvtag_scale_add_get.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 91424efb7..94e508650 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -84,7 +84,7 @@ main(int argc, char *argv[]) pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t *obj_ids; uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; - uint64_t n_attr, n_query; + uint64_t n_attr, n_attr_len, n_query; uint64_t i, j, k, v; int proc_num, my_rank, attr_value; char obj_name[128]; From d48a43981848f66394ef09e1f6baa21fab0e5799 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 31 Mar 2023 13:26:33 -0500 Subject: [PATCH 019/806] code refactored --- src/tests/kvtag_scale_add_get.c | 286 +++++++++++++++++++++++++------- 1 file changed, 222 insertions(+), 64 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 94e508650..a2ee774df 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -44,6 +44,22 @@ atoui64(char *arg) return num; } +/** + * Assigns a portion of work to a specific rank in a parallel processing environment. + * + * This function calculates the start and count of work items that should be + * assigned to a specific rank based on the total number of work items (nwork) and + * the total number of ranks (size) in the environment. + * + * @param rank The rank of the process for which the work is being assigned (0-indexed). + * @param size The total number of ranks in the parallel processing environment. + * @param nwork The total number of work items to be distributed among the ranks. + * @param my_count A pointer to a uint64_t variable that will store the number of work items assigned to the + * rank. + * @param my_start A pointer to a uint64_t variable that will store the starting index of the work items + * assigned to the rank. + * @return 0 if the function executes successfully, non-zero if an error occurs. 
+ */ int assign_work_to_rank(int rank, int size, uint64_t nwork, uint64_t *my_count, uint64_t *my_start) { @@ -68,14 +84,195 @@ assign_work_to_rank(int rank, int size, uint64_t nwork, uint64_t *my_count, uint (*my_start) += (rank - (size - nwork % size)); } } + return 0; +} - return 1; +char ** +gen_strings(int n_strings, int string_len) +{ + srand(time(NULL)); // seed the random number generator with the current time + + char **str = malloc(n_strings * sizeof(char *)); // allocate memory for the array of strings + + for (int i = 0; i < n_strings; i++) { + str[i] = malloc((string_len + 1) * sizeof(char)); // allocate memory for each string in the array + for (int j = 0; j < string_len; j++) { + str[i][j] = 'a' + rand() % 26; // generate a random lowercase letter + } + str[i][string_len] = '\0'; // terminate the string with a null character + } + return str; // return the array of strings } +/** + * @brief Prints the usage instructions for the given program. + * This function displays the correct usage and command-line options for the program + * identified by the name parameter. It is typically called when the user provides + * incorrect or insufficient arguments. + * @param name A pointer to a character string representing the name of the program. + */ void print_usage(char *name) { - printf("%s n_obj n_obj_incr\n", name); + printf("Usage: %s \n\n", name); + + printf(" n_obj : The total number of objects (positive integer).\n"); + printf(" n_obj_incr : The increment in the number of objects per step (positive integer).\n"); + printf(" n_attr : The number of attributes per object (positive integer).\n"); + printf(" n_attr_len : The length of each attribute (positive integer).\n"); + printf(" n_query : The number of queries to be performed (positive integer).\n\n"); + + printf("Example:\n"); + printf(" %s 100 10 5 20 50\n\n", name); +} + +/** + * Initializes a test environment for the PDC by creating a specified number of objects in a container + * and returning their object IDs. + * + * @param my_rank The rank of the current process in the MPI communicator. + * @param proc_num The total number of processes in the MPI communicator. + * @param n_obj_incr Pointer to the number of objects to be created in each iteration. + * @param my_obj Pointer to the number of objects assigned to the current process. + * @param my_obj_s Pointer to the starting object index assigned to the current process. + * @param obj_prop Pointer to the object property ID to be used for creating objects. + * + * @return obj_ids Pointer to an array of object IDs for the created objects. 
+ */ +pdcid_t * +init_test(int my_rank, int proc_num, uint64_t *n_obj_incr, uint64_t *my_obj, uint64_t *my_obj_s, + pdcid_t *obj_prop) +{ + // create a pdc + pdc = PDCinit("pdc"); + + // create a container property + cont_prop = PDCprop_create(PDC_CONT_CREATE, pdc); + if (cont_prop <= 0) + printf("Fail to create container property @ line %d!\n", __LINE__); + + // create a container + cont = PDCcont_create("c1", cont_prop); + if (cont <= 0) + printf("Fail to create container @ line %d!\n", __LINE__); + + // create an object property + *obj_prop = PDCprop_create(PDC_OBJ_CREATE, pdc); + if (obj_prop <= 0) + printf("Fail to create object property @ line %d!\n", __LINE__); + + curr_total_obj = 0; + + if (my_rank == 0) + printf("create obj_ids array\n"); + // Create a number of objects, add at least one tag to that object + assign_work_to_rank(my_rank, proc_num, n_obj_incr, &my_obj, &my_obj_s); + + return (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); +} + +/** + * Creates a specified number of objects in a container and stores their object IDs in the obj_ids array. + * + * @param my_obj The number of objects assigned to the current process. + * @param my_obj_s The starting object index assigned to the current process. + * @param curr_total_obj The current total number of objects. + * @param cont The container ID in which to create the objects. + * @param obj_prop The object property ID to be used for creating objects. + * @param obj_ids Pointer to an array of object IDs for the created objects. + */ +void +create_object(uint64_t my_obj, uint64_t my_obj_s, uint64_t curr_total_obj, pdcid_t cont, pdcid_t obj_prop, + pdcid_t *obj_ids) +{ + uint64_t i, v; + char obj_name[128]; + + for (i = 0; i < my_obj; i++) { + v = my_obj_s + i + curr_total_obj; + sprintf(obj_name, "obj%llu", v); + obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); + if (obj_ids[i] <= 0) + printf("Fail to create object @ line %d!\n", __LINE__); + } +} + +/** + * Adds n_attr tags to each object in the obj_ids array. + * + * @param my_obj The number of objects assigned to the current process. + * @param my_obj_s The starting object index assigned to the current process. + * @param curr_total_obj The current total number of objects. + * @param n_obj_incr The number of objects to be created in each iteration. + * @param n_attr The number of attributes (tags) to add to each object. + * @param obj_ids Pointer to an array of object IDs for the objects to add tags. + */ +void +add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t curr_total_obj, uint64_t n_obj_incr, uint64_t n_attr, + char **tag_values, uint64_t tag_value_len, pdcid_t *obj_ids) +{ + uint64_t i, j, v; + char tag_name[128]; + + for (i = 0; i < my_obj; i++) { + v = i + my_obj_s + curr_total_obj - n_obj_incr; + for (j = 0; j < n_attr; j++) { + sprintf(tag_name, "tag%llu.%llu", v, j); + if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)&tag_values[j], tag_value_len * sizeof(char)) < + 0) + printf("fail to add a kvtag to o%llu\n", i + my_obj_s); + } + } +} + +/** + * Queries n_attr tags from the object specified by the object ID. + * + * @param obj_id The ID of the object to retrieve tags for. + * @param obj_name_v logical object ID for object name. + * @param n_attr The number of tags to retrieve. + * @param tag_values An array of pointers to store the tag values. + * @param value_size An array to store the size of each tag value. 
+ */ +void +get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, int n_attr, char **tag_values, uint64_t *value_size) +{ + uint64_t i, v; + char tag_name[128]; + + for (i = 0; i < n_attr; i++) { + sprintf(tag_name, "tag%llu.%llu", obj_name_v, i); + if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[i], (void *)&value_size[i]) < 0) + printf("fail to get a kvtag from o%llu\n", v); + } +} + +/** + * Sends a specified number of queries to retrieve the values of the specified tags for a set of objects. + * + * @param my_obj_s The size of the current object name. + * @param curr_total_obj The current total number of objects. + * @param n_obj_incr The number of objects to increment by. + * @param n_query The number of queries to send. + * @param n_attr The number of tags to retrieve for each object. + * @param obj_ids An array of object IDs to retrieve tags for. + * @param tag_values An array of pointers to store the tag values for all queries and objects. + * The caller is responsible for allocating memory for the array and the individual + * pointers within it. + * @param value_size An array to store the size of each tag value. + * The caller is responsible for allocating memory for the array. + */ +void +send_queries(uint64_t my_obj_s, uint64_t curr_total_obj, uint64_t n_obj_incr, int n_query, uint64_t n_attr, + pdcid_t *obj_ids, char **tag_values, uint64_t *value_size) +{ + uint64_t i, v; + char tag_name[128]; + + for (i = 0; i < n_query; i++) { + v = i + my_obj_s + curr_total_obj - n_obj_incr; + get_object_tags(obj_ids[i], v, n_attr, &tag_values[i * n_attr], &value_size[i * n_attr]); + } } int @@ -91,8 +288,8 @@ main(int argc, char *argv[]) char tag_name[128]; double stime, total_time; int *value_to_add; - void **values; - size_t value_size; + char **query_rst_cache; + uint64_t *value_size; obj_handle *oh; struct pdc_obj_info *info; #ifdef ENABLE_MPI @@ -120,32 +317,8 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("Create %llu obj, %llu tags, query %llu\n", n_obj, n_obj, n_obj); - // create a pdc - pdc = PDCinit("pdc"); - - // create a container property - cont_prop = PDCprop_create(PDC_CONT_CREATE, pdc); - if (cont_prop <= 0) - printf("Fail to create container property @ line %d!\n", __LINE__); - - // create a container - cont = PDCcont_create("c1", cont_prop); - if (cont <= 0) - printf("Fail to create container @ line %d!\n", __LINE__); - - // create an object property - obj_prop = PDCprop_create(PDC_OBJ_CREATE, pdc); - if (obj_prop <= 0) - printf("Fail to create object property @ line %d!\n", __LINE__); - - curr_total_obj = 0; - - if (my_rank == 0) - printf("create obj_ids array\n"); - // Create a number of objects, add at least one tag to that object - assign_work_to_rank(my_rank, proc_num, n_obj_incr, &my_obj, &my_obj_s); - - obj_ids = (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); + // making necessary preparation for the test. + obj_ids = init_test(my_rank, proc_num, &n_obj_incr, &my_obj, &my_obj_s, &obj_prop); do { @@ -156,14 +329,10 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("starting creating %llu objects... \n", my_obj); - for (i = 0; i < my_obj; i++) { - v = my_obj_s + i + curr_total_obj; - sprintf(obj_name, "obj%llu", v); - obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); - if (obj_ids[i] <= 0) - printf("Fail to create object @ line %d!\n", __LINE__); - } - + // creating objects. Here, my_obj and my_obj_s has been calculated for each process based on + // n_obj_incr. 
+ create_object(my_obj, my_obj_s, curr_total_obj, cont, obj_prop, obj_ids); + // therefore, after 'create_objects' function, we should add 'curr_total_obj' by 'n_obj_incr'. curr_total_obj += n_obj_incr; #ifdef ENABLE_MPI @@ -173,21 +342,14 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("Created %llu objects in total now in %.4f seconds.\n", curr_total_obj, total_time); + char **tag_values = gen_strings(n_attr, n_attr_len); + #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif - for (i = 0; i < my_obj; i++) { - v = i + my_obj_s + curr_total_obj - n_obj_incr; - for (j = 0; j < n_attr; j++) { - printf("print tag name before add"); - fflush(stdout); - sprintf(tag_name, "tag%llu.%llu", j, v); - if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)&v, sizeof(uint64_t)) < 0) - printf("fail to add a kvtag to o%llu\n", i + my_obj_s); - } - } + add_n_tags(my_obj, my_obj_s, curr_total_obj, n_obj_incr, n_attr, tag_values, obj_ids); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); @@ -196,19 +358,15 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("Total time to add tags to %llu objects: %.4f\n", my_obj, total_time); - values = (void **)calloc(my_obj, sizeof(void *)); + query_rst_cache = (char **)malloc(n_query * n_attr * sizeof(char *)); + value_size = malloc(n_query * n_attr * sizeof(uint64_t)); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif - for (i = 0; i < my_obj; i++) { - v = i + my_obj_s + curr_total_obj - n_obj_incr; - printf("print tag name before query"); - fflush(stdout); - sprintf(tag_name, "tag%llu.%llu", j, v); - if (PDCobj_get_tag(obj_ids[i], tag_name, (void **)&values[i], (void *)&value_size) < 0) - printf("fail to get a kvtag from o%llu\n", v); - } + + send_queries(my_obj_s, curr_total_obj, n_obj_incr, n_query, n_attr, obj_ids, query_rst_cache, + value_size); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); @@ -219,13 +377,13 @@ main(int argc, char *argv[]) fflush(stdout); - for (i = 0; i < my_obj; i++) { - v = i + my_obj_s + curr_total_obj - n_obj_incr; - if (*(int *)(values[i]) != v) - printf("Error with retrieved tag from o%llu\n", v); - free(values[i]); - } - free(values); + // for (i = 0; i < my_obj * n_attr; i++) { + // v = i + my_obj_s + curr_total_obj - n_obj_incr; + // if (*(int *)(values[i]) != v) + // printf("Error with retrieved tag from o%llu\n", v); + // free(values[i]); + // } + // free(values); // close objects for (i = 0; i < my_obj; i++) { From b5bdf6b351052736004d7fcb947a202f79a8dabd Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 31 Mar 2023 13:43:28 -0500 Subject: [PATCH 020/806] code refactored --- src/tests/kvtag_scale_add_get.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index a2ee774df..86d871326 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -143,6 +143,8 @@ pdcid_t * init_test(int my_rank, int proc_num, uint64_t *n_obj_incr, uint64_t *my_obj, uint64_t *my_obj_s, pdcid_t *obj_prop) { + + pdcid_t pdc, cont_prop, cont; // create a pdc pdc = PDCinit("pdc"); @@ -161,12 +163,11 @@ init_test(int my_rank, int proc_num, uint64_t *n_obj_incr, uint64_t *my_obj, uin if (obj_prop <= 0) printf("Fail to create object property @ line %d!\n", __LINE__); - curr_total_obj = 0; - if (my_rank == 0) printf("create obj_ids array\n"); + // Create a number of objects, add at least one tag to that object - assign_work_to_rank(my_rank, proc_num, n_obj_incr, &my_obj, &my_obj_s); + 
assign_work_to_rank(my_rank, proc_num, n_obj_incr, my_obj, my_obj_s); return (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); } @@ -278,7 +279,7 @@ send_queries(uint64_t my_obj_s, uint64_t curr_total_obj, uint64_t n_obj_incr, in int main(int argc, char *argv[]) { - pdcid_t pdc, cont_prop, cont, obj_prop; + pdcid_t obj_prop; pdcid_t *obj_ids; uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; uint64_t n_attr, n_attr_len, n_query; @@ -316,6 +317,10 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("Create %llu obj, %llu tags, query %llu\n", n_obj, n_obj, n_obj); + + curr_total_obj = 0; + + // making necessary preparation for the test. obj_ids = init_test(my_rank, proc_num, &n_obj_incr, &my_obj, &my_obj_s, &obj_prop); From 4e6a6ce7d9a8c4a2531d1d6e67c1f9facf0aca58 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 31 Mar 2023 13:50:23 -0500 Subject: [PATCH 021/806] code refactored --- src/tests/kvtag_scale_add_get.c | 53 +++++++++++++++++---------------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 86d871326..286a59d4c 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -141,26 +141,24 @@ print_usage(char *name) */ pdcid_t * init_test(int my_rank, int proc_num, uint64_t *n_obj_incr, uint64_t *my_obj, uint64_t *my_obj_s, - pdcid_t *obj_prop) + pdcid_t *obj_prop, pdcid_t *pdc, pdcid_t *cont_prop, pdcid_t *cont) { - - pdcid_t pdc, cont_prop, cont; // create a pdc - pdc = PDCinit("pdc"); + *pdc = PDCinit("pdc"); // create a container property - cont_prop = PDCprop_create(PDC_CONT_CREATE, pdc); - if (cont_prop <= 0) + *cont_prop = PDCprop_create(PDC_CONT_CREATE, *pdc); + if (*cont_prop <= 0) printf("Fail to create container property @ line %d!\n", __LINE__); // create a container - cont = PDCcont_create("c1", cont_prop); - if (cont <= 0) + *cont = PDCcont_create("c1", *cont_prop); + if (*cont <= 0) printf("Fail to create container @ line %d!\n", __LINE__); // create an object property - *obj_prop = PDCprop_create(PDC_OBJ_CREATE, pdc); - if (obj_prop <= 0) + *obj_prop = PDCprop_create(PDC_OBJ_CREATE, *pdc); + if (*obj_prop <= 0) printf("Fail to create object property @ line %d!\n", __LINE__); if (my_rank == 0) @@ -276,10 +274,27 @@ send_queries(uint64_t my_obj_s, uint64_t curr_total_obj, uint64_t n_obj_incr, in } } +void closePDC(pdcid_t pdc, pdcid_t cont_prop, pdcid_t cont, pdcid_t obj_prop){ + // close a container + if (PDCcont_close(cont) < 0) + printf("fail to close container c1\n"); + + // close a container property + if (PDCprop_close(obj_prop) < 0) + printf("Fail to close property @ line %d\n", __LINE__); + + if (PDCprop_close(cont_prop) < 0) + printf("Fail to close property @ line %d\n", __LINE__); + + // close pdc + if (PDCclose(pdc) < 0) + printf("fail to close PDC\n"); +} + int main(int argc, char *argv[]) { - pdcid_t obj_prop; + pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t *obj_ids; uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; uint64_t n_attr, n_attr_len, n_query; @@ -405,20 +420,8 @@ main(int argc, char *argv[]) // free(values); free(obj_ids); - // close a container - if (PDCcont_close(cont) < 0) - printf("fail to close container c1\n"); - - // close a container property - if (PDCprop_close(obj_prop) < 0) - printf("Fail to close property @ line %d\n", __LINE__); - - if (PDCprop_close(cont_prop) < 0) - printf("Fail to close property @ line %d\n", __LINE__); - - // close pdc - if (PDCclose(pdc) < 0) - printf("fail to close PDC\n"); + 
closePDC(pdc, cont_prop, cont, obj_prop); + done: #ifdef ENABLE_MPI From e98ed0f327144e321ca7ed4efe3aa36d1857be1f Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 31 Mar 2023 13:57:49 -0500 Subject: [PATCH 022/806] code refactored --- src/tests/kvtag_scale_add_get.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 286a59d4c..bfc806084 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -140,7 +140,7 @@ print_usage(char *name) * @return obj_ids Pointer to an array of object IDs for the created objects. */ pdcid_t * -init_test(int my_rank, int proc_num, uint64_t *n_obj_incr, uint64_t *my_obj, uint64_t *my_obj_s, +init_test(int my_rank, int proc_num, uint64_t n_obj_incr, uint64_t *my_obj, uint64_t *my_obj_s, pdcid_t *obj_prop, pdcid_t *pdc, pdcid_t *cont_prop, pdcid_t *cont) { // create a pdc @@ -167,7 +167,7 @@ init_test(int my_rank, int proc_num, uint64_t *n_obj_incr, uint64_t *my_obj, uin // Create a number of objects, add at least one tag to that object assign_work_to_rank(my_rank, proc_num, n_obj_incr, my_obj, my_obj_s); - return (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); + return (pdcid_t *)calloc(*my_obj, sizeof(pdcid_t)); } /** @@ -338,7 +338,8 @@ main(int argc, char *argv[]) // making necessary preparation for the test. - obj_ids = init_test(my_rank, proc_num, &n_obj_incr, &my_obj, &my_obj_s, &obj_prop); + + obj_ids = init_test(my_rank, proc_num, n_obj_incr, &my_obj, &my_obj_s, &obj_prop, &pdc, &cont_prop, &cont); do { From 9e7f11ff9c73edb6d947d8f9551252efd5b0c679 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 31 Mar 2023 13:59:18 -0500 Subject: [PATCH 023/806] code refactored --- src/tests/kvtag_scale_add_get.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index bfc806084..a23f8eaa0 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -370,7 +370,7 @@ main(int argc, char *argv[]) stime = MPI_Wtime(); #endif - add_n_tags(my_obj, my_obj_s, curr_total_obj, n_obj_incr, n_attr, tag_values, obj_ids); + add_n_tags(my_obj, my_obj_s, curr_total_obj, n_obj_incr, n_attr, tag_values, n_attr_len, obj_ids); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); From d0e0b841ccac03d33f88a08ffddb02e7c80df2c3 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sat, 1 Apr 2023 18:21:01 -0500 Subject: [PATCH 024/806] code refactored --- src/tests/kvtag_scale_add_get.c | 118 ++++++++++++++++++-------------- 1 file changed, 65 insertions(+), 53 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index a23f8eaa0..233900400 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -132,16 +132,20 @@ print_usage(char *name) * * @param my_rank The rank of the current process in the MPI communicator. * @param proc_num The total number of processes in the MPI communicator. - * @param n_obj_incr Pointer to the number of objects to be created in each iteration. + * @param n_obj_incr The number of objects to be created in each iteration. + * @param n_query The number of queries to be issued in each iteration. * @param my_obj Pointer to the number of objects assigned to the current process. * @param my_obj_s Pointer to the starting object index assigned to the current process. + * @param my_query Pointer to the number of queries assigned to the current process. 
+ * @param my_query_s Pointer to the starting query index assigned to the current process. * @param obj_prop Pointer to the object property ID to be used for creating objects. * * @return obj_ids Pointer to an array of object IDs for the created objects. */ pdcid_t * -init_test(int my_rank, int proc_num, uint64_t n_obj_incr, uint64_t *my_obj, uint64_t *my_obj_s, - pdcid_t *obj_prop, pdcid_t *pdc, pdcid_t *cont_prop, pdcid_t *cont) +init_test(int my_rank, int proc_num, uint64_t n_obj_incr, uint64_t n_query, uint64_t *my_obj, + uint64_t *my_obj_s, uint64_t *my_query, uint64_t *my_query_s, pdcid_t *obj_prop, pdcid_t *pdc, + pdcid_t *cont_prop, pdcid_t *cont) { // create a pdc *pdc = PDCinit("pdc"); @@ -166,6 +170,7 @@ init_test(int my_rank, int proc_num, uint64_t n_obj_incr, uint64_t *my_obj, uint // Create a number of objects, add at least one tag to that object assign_work_to_rank(my_rank, proc_num, n_obj_incr, my_obj, my_obj_s); + assign_work_to_rank(my_rank, proc_num, n_query, my_query, my_query_s); return (pdcid_t *)calloc(*my_obj, sizeof(pdcid_t)); } @@ -175,20 +180,18 @@ init_test(int my_rank, int proc_num, uint64_t n_obj_incr, uint64_t *my_obj, uint * * @param my_obj The number of objects assigned to the current process. * @param my_obj_s The starting object index assigned to the current process. - * @param curr_total_obj The current total number of objects. * @param cont The container ID in which to create the objects. * @param obj_prop The object property ID to be used for creating objects. * @param obj_ids Pointer to an array of object IDs for the created objects. */ void -create_object(uint64_t my_obj, uint64_t my_obj_s, uint64_t curr_total_obj, pdcid_t cont, pdcid_t obj_prop, - pdcid_t *obj_ids) +create_object(uint64_t my_obj, uint64_t my_obj_s, pdcid_t cont, pdcid_t obj_prop, pdcid_t *obj_ids) { uint64_t i, v; char obj_name[128]; for (i = 0; i < my_obj; i++) { - v = my_obj_s + i + curr_total_obj; + v = i + my_obj_s; sprintf(obj_name, "obj%llu", v); obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); if (obj_ids[i] <= 0) @@ -201,20 +204,20 @@ create_object(uint64_t my_obj, uint64_t my_obj_s, uint64_t curr_total_obj, pdcid * * @param my_obj The number of objects assigned to the current process. * @param my_obj_s The starting object index assigned to the current process. - * @param curr_total_obj The current total number of objects. - * @param n_obj_incr The number of objects to be created in each iteration. * @param n_attr The number of attributes (tags) to add to each object. + * @param tag_values: Array of pointers to strings containing the tag values + * @param tag_value_len: 64-bit unsigned integer representing the length of the tag values in bytes * @param obj_ids Pointer to an array of object IDs for the objects to add tags. 
*/ void -add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t curr_total_obj, uint64_t n_obj_incr, uint64_t n_attr, - char **tag_values, uint64_t tag_value_len, pdcid_t *obj_ids) +add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_values, uint64_t tag_value_len, + pdcid_t *obj_ids) { uint64_t i, j, v; char tag_name[128]; for (i = 0; i < my_obj; i++) { - v = i + my_obj_s + curr_total_obj - n_obj_incr; + v = i + my_obj_s; for (j = 0; j < n_attr; j++) { sprintf(tag_name, "tag%llu.%llu", v, j); if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)&tag_values[j], tag_value_len * sizeof(char)) < @@ -234,7 +237,7 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t curr_total_obj, uint64_t * @param value_size An array to store the size of each tag value. */ void -get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, int n_attr, char **tag_values, uint64_t *value_size) +get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, int n_attr, void **tag_values, uint64_t *value_size) { uint64_t i, v; char tag_name[128]; @@ -262,19 +265,44 @@ get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, int n_attr, char **tag_valu * The caller is responsible for allocating memory for the array. */ void -send_queries(uint64_t my_obj_s, uint64_t curr_total_obj, uint64_t n_obj_incr, int n_query, uint64_t n_attr, - pdcid_t *obj_ids, char **tag_values, uint64_t *value_size) +send_queries(uint64_t my_obj_s, int n_query, uint64_t n_attr, pdcid_t *obj_ids, void **tag_values, + uint64_t *value_size) { uint64_t i, v; - char tag_name[128]; for (i = 0; i < n_query; i++) { - v = i + my_obj_s + curr_total_obj - n_obj_incr; + v = i + my_obj_s; get_object_tags(obj_ids[i], v, n_attr, &tag_values[i * n_attr], &value_size[i * n_attr]); } } -void closePDC(pdcid_t pdc, pdcid_t cont_prop, pdcid_t cont, pdcid_t obj_prop){ +void +check_and_release_query_result(uint64_t n_query, uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_values, void **values, pdcid_t *obj_ids) +{ + uint64_t i, j, v; + + for (i = 0; i < n_query; i++) { + v = i + my_obj_s; + for (j = 0; j < n_attr; j++) { + char *query_rst = (char *)values[j + i * n_attr]; + if (strcmp(query_rst, tag_values[j])!=0) { + printf("Error with retrieved tag from o%llu. Expected %s, Found %s \n", v, tag_values[j], query_rst); + } + free(values[j + i * n_attr]); + } + } + free(values); + // close objects + for (i = 0; i < my_obj; i++) { + v = i + my_obj_s; + if (PDCobj_close(obj_ids[i]) < 0) + printf("fail to close object o%llu\n", v); + } +} + +void +closePDC(pdcid_t pdc, pdcid_t cont_prop, pdcid_t cont, pdcid_t obj_prop) +{ // close a container if (PDCcont_close(cont) < 0) printf("fail to close container c1\n"); @@ -297,7 +325,7 @@ main(int argc, char *argv[]) pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t *obj_ids; uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; - uint64_t n_attr, n_attr_len, n_query; + uint64_t n_attr, n_attr_len, n_query, my_query, my_query_s; uint64_t i, j, k, v; int proc_num, my_rank, attr_value; char obj_name[128]; @@ -332,14 +360,14 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("Create %llu obj, %llu tags, query %llu\n", n_obj, n_obj, n_obj); - + curr_total_obj = 0; + // making necessary preparation for the test. - + obj_ids = init_test(my_rank, proc_num, n_obj_incr, n_query, &my_obj, &my_obj_s, &my_query, &my_query_s, + &obj_prop, &pdc, &cont_prop, &cont); - // making necessary preparation for the test. 
- - obj_ids = init_test(my_rank, proc_num, n_obj_incr, &my_obj, &my_obj_s, &obj_prop, &pdc, &cont_prop, &cont); + char **tag_values = gen_strings(n_attr, n_attr_len); do { @@ -352,7 +380,7 @@ main(int argc, char *argv[]) // creating objects. Here, my_obj and my_obj_s has been calculated for each process based on // n_obj_incr. - create_object(my_obj, my_obj_s, curr_total_obj, cont, obj_prop, obj_ids); + create_object(my_obj, my_obj_s, cont, obj_prop, obj_ids); // therefore, after 'create_objects' function, we should add 'curr_total_obj' by 'n_obj_incr'. curr_total_obj += n_obj_incr; @@ -363,66 +391,50 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("Created %llu objects in total now in %.4f seconds.\n", curr_total_obj, total_time); - char **tag_values = gen_strings(n_attr, n_attr_len); - #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif - add_n_tags(my_obj, my_obj_s, curr_total_obj, n_obj_incr, n_attr, tag_values, n_attr_len, obj_ids); + add_n_tags(my_obj, my_obj_s, n_attr, tag_values, n_attr_len, obj_ids); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to add tags to %llu objects: %.4f\n", my_obj, total_time); + printf("Total time to add tags to %llu objects: %.4f\n", curr_total_obj, total_time); - query_rst_cache = (char **)malloc(n_query * n_attr * sizeof(char *)); - value_size = malloc(n_query * n_attr * sizeof(uint64_t)); + query_rst_cache = (void **)malloc(my_query * n_attr * sizeof(void *)); + value_size = malloc(my_query * n_attr * sizeof(uint64_t)); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif - send_queries(my_obj_s, curr_total_obj, n_obj_incr, n_query, n_attr, obj_ids, query_rst_cache, - value_size); + send_queries(my_obj_s, my_query, n_attr, obj_ids, query_rst_cache, value_size); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to retrieve tags from %llu objects: %.4f\n", my_obj, total_time); + printf("Total time to retrieve tags from %llu objects: %.4f\n", curr_total_obj, total_time); + check_and_release_query_result(my_query, my_obj, my_obj_s, n_attr, tag_values, query_rst_cache, obj_ids); fflush(stdout); - // for (i = 0; i < my_obj * n_attr; i++) { - // v = i + my_obj_s + curr_total_obj - n_obj_incr; - // if (*(int *)(values[i]) != v) - // printf("Error with retrieved tag from o%llu\n", v); - // free(values[i]); - // } - // free(values); - - // close objects - for (i = 0; i < my_obj; i++) { - v = i + my_obj_s + curr_total_obj - n_obj_incr; - if (PDCobj_close(obj_ids[i]) < 0) - printf("fail to close object o%llu\n", v); - } + my_obj_s += n_obj_incr; } while (curr_total_obj < n_obj); - // for (i = 0; i < my_obj; i++) { - // free(values[i]); - // } - // free(values); + for (i = 0; i < n_attr; i++) { + free(tag_values[i]); + } + free(tag_values); free(obj_ids); closePDC(pdc, cont_prop, cont, obj_prop); - done: #ifdef ENABLE_MPI From 6651d9207de7e581a6c71f22371e4b75c6935493 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sat, 1 Apr 2023 18:23:08 -0500 Subject: [PATCH 025/806] fix data type --- src/tests/kvtag_scale_add_get.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 233900400..dc5a413db 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -332,7 +332,7 @@ main(int argc, char *argv[]) char tag_name[128]; double stime, total_time; int 
*value_to_add; - char **query_rst_cache; + void **query_rst_cache; uint64_t *value_size; obj_handle *oh; struct pdc_obj_info *info; From 8963964d2c6df4bd08c8ecf1bbcd18cd3f4f3fe5 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 2 Apr 2023 08:22:23 -0500 Subject: [PATCH 026/806] fix data type --- src/tests/kvtag_scale_add_get.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index dc5a413db..e6c64a568 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -220,8 +220,7 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value v = i + my_obj_s; for (j = 0; j < n_attr; j++) { sprintf(tag_name, "tag%llu.%llu", v, j); - if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)&tag_values[j], tag_value_len * sizeof(char)) < - 0) + if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)&tag_values[j], tag_value_len + 1) < 0) printf("fail to add a kvtag to o%llu\n", i + my_obj_s); } } @@ -277,7 +276,8 @@ send_queries(uint64_t my_obj_s, int n_query, uint64_t n_attr, pdcid_t *obj_ids, } void -check_and_release_query_result(uint64_t n_query, uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_values, void **values, pdcid_t *obj_ids) +check_and_release_query_result(uint64_t n_query, uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, + char **tag_values, void **values, pdcid_t *obj_ids) { uint64_t i, j, v; @@ -285,8 +285,9 @@ check_and_release_query_result(uint64_t n_query, uint64_t my_obj, uint64_t my_ob v = i + my_obj_s; for (j = 0; j < n_attr; j++) { char *query_rst = (char *)values[j + i * n_attr]; - if (strcmp(query_rst, tag_values[j])!=0) { - printf("Error with retrieved tag from o%llu. Expected %s, Found %s \n", v, tag_values[j], query_rst); + if (strcmp(query_rst, tag_values[j]) != 0) { + printf("Error with retrieved tag from o%llu. 
Expected %s, Found %s \n", v, tag_values[j], + query_rst); } free(values[j + i * n_attr]); } @@ -421,7 +422,8 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("Total time to retrieve tags from %llu objects: %.4f\n", curr_total_obj, total_time); - check_and_release_query_result(my_query, my_obj, my_obj_s, n_attr, tag_values, query_rst_cache, obj_ids); + check_and_release_query_result(my_query, my_obj, my_obj_s, n_attr, tag_values, query_rst_cache, + obj_ids); fflush(stdout); my_obj_s += n_obj_incr; From 339d3e40870dc35f74a5682333513fd610c9eae9 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 2 Apr 2023 08:27:31 -0500 Subject: [PATCH 027/806] fix data type --- src/tests/kvtag_scale_add_get.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index e6c64a568..c9de1543c 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -220,7 +220,7 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value v = i + my_obj_s; for (j = 0; j < n_attr; j++) { sprintf(tag_name, "tag%llu.%llu", v, j); - if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)&tag_values[j], tag_value_len + 1) < 0) + if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)tag_values[j], tag_value_len + 1) < 0) printf("fail to add a kvtag to o%llu\n", i + my_obj_s); } } From ff146cafd6011e9ce894654c8687e8d0523284b6 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 2 Apr 2023 09:10:13 -0500 Subject: [PATCH 028/806] add client side statistics --- src/tests/kvtag_scale_add_get.c | 39 ++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index c9de1543c..81087ebd7 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -327,11 +327,13 @@ main(int argc, char *argv[]) pdcid_t *obj_ids; uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; uint64_t n_attr, n_attr_len, n_query, my_query, my_query_s; + uint64_t n_servers, n_clients; uint64_t i, j, k, v; int proc_num, my_rank, attr_value; char obj_name[128]; char tag_name[128]; - double stime, total_time; + double stime, step_elapse, total_object_time, total_tag_time, total_query_time; + uint64_t total_object_count, total_tag_count, total_query_count; int *value_to_add; void **query_rst_cache; uint64_t *value_size; @@ -342,7 +344,7 @@ main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &proc_num); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); #endif - if (argc < 6) { + if (argc < 7) { if (my_rank == 0) print_usage(argv[0]); goto done; @@ -352,6 +354,8 @@ main(int argc, char *argv[]) n_attr = atoui64(argv[3]); n_attr_len = atoui64(argv[4]); n_query = atoui64(argv[5]); + n_servers = atoui64(argv[6]); + n_clients = proc_num; if (n_obj_incr > n_obj) { if (my_rank == 0) @@ -370,6 +374,9 @@ main(int argc, char *argv[]) char **tag_values = gen_strings(n_attr, n_attr_len); + total_time = 0; + k = 1; + do { #ifdef ENABLE_MPI @@ -387,10 +394,13 @@ main(int argc, char *argv[]) #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); - total_time = MPI_Wtime() - stime; + step_elapse = MPI_Wtime() - stime; + total_object_time += step_elapse; + total_object_count += n_obj_incr; #endif if (my_rank == 0) - printf("Created %llu objects in total now in %.4f seconds.\n", curr_total_obj, total_time); + printf("Iteration %llu : Objects: %llu , Time: %.4f sec. 
Object throughput in this iteration: %.4f .\n", k, n_obj_incr, step_elapse, ((double)n_obj_incr)/step_elapse); + printf("Overall %llu : Objects: %llu , Time: %.4f sec. Overall object throughput: %.4f .\n", k, total_object_count, total_object_time, ((double)total_object_count)/total_object_time); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); @@ -401,10 +411,13 @@ main(int argc, char *argv[]) #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); - total_time = MPI_Wtime() - stime; + step_elapse = MPI_Wtime() - stime; + total_tag_time += step_elapse; + total_tag_count += n_obj_incr * n_attr; #endif if (my_rank == 0) - printf("Total time to add tags to %llu objects: %.4f\n", curr_total_obj, total_time); + printf("Iteration %llu : Tags: %llu , Time: %.4f sec. Tag throughput in this iteration: %.4f .\n", k, n_obj_incr * n_attr, step_elapse, (double)(n_obj_incr * n_attr)/step_elapse); + printf("Overall %llu : Tags: %llu , Time: %.4f sec. Overall tag throughput: %.4f .\n", k, total_tag_count, total_tag_time, ((double)total_tag_count)/total_tag_time); query_rst_cache = (void **)malloc(my_query * n_attr * sizeof(void *)); value_size = malloc(my_query * n_attr * sizeof(uint64_t)); @@ -417,19 +430,29 @@ main(int argc, char *argv[]) #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); - total_time = MPI_Wtime() - stime; + step_elapse = MPI_Wtime() - stime; + total_query_time += step_elapse; + total_query_count += n_query * n_attr; #endif if (my_rank == 0) - printf("Total time to retrieve tags from %llu objects: %.4f\n", curr_total_obj, total_time); + printf("Iteration %llu : Queries: %llu , Time: %.4f sec. Query throughput in this iteration: %.4f .\n", k, n_query * n_attr, step_elapse, (double)(n_query * n_attr)/step_elapse); + printf("Overall %llu : Queries: %llu , Time: %.4f sec. 
Overall query throughput: %.4f .\n", k, total_query_count, total_query_time, ((double)total_query_count)/total_query_time); check_and_release_query_result(my_query, my_obj, my_obj_s, n_attr, tag_values, query_rst_cache, obj_ids); fflush(stdout); my_obj_s += n_obj_incr; + k++; } while (curr_total_obj < n_obj); + if (my_rank == 0): + printf("Final Report: \n"); + printf("Servers: %llu , Clients: %llu , C/S ratio: %.4f \n", n_servers, n_clients, (double)n_clients/(double)n_servers); + printf("Iterations: %llu , Objects: %llu , Tags/Object: %llu , Queries/Iteration: %llu , \n", k, curr_total_obj, n_attr, n_query); + printf("Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", (double)curr_total_obj/total_object_time, (double)(curr_total_obj*n_attr)/total_tag_time, (double)(total_query_count * n_attr)/total_query_time); + for (i = 0; i < n_attr; i++) { free(tag_values[i]); } From 33e3fdf6f8e55fac2fb8c26e8171da0532d91b24 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 2 Apr 2023 09:18:44 -0500 Subject: [PATCH 029/806] add client side statistics --- src/tests/kvtag_scale_add_get.c | 47 +++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 81087ebd7..01a9abcc3 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -325,15 +325,15 @@ main(int argc, char *argv[]) { pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t *obj_ids; - uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj; + uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj=0; uint64_t n_attr, n_attr_len, n_query, my_query, my_query_s; uint64_t n_servers, n_clients; uint64_t i, j, k, v; int proc_num, my_rank, attr_value; char obj_name[128]; char tag_name[128]; - double stime, step_elapse, total_object_time, total_tag_time, total_query_time; - uint64_t total_object_count, total_tag_count, total_query_count; + double stime=0.0, step_elapse=0.0, total_object_time=0.0, total_tag_time=0.0, total_query_time=0.0; + uint64_t total_object_count=0, total_tag_count=0, total_query_count=0; int *value_to_add; void **query_rst_cache; uint64_t *value_size; @@ -366,7 +366,6 @@ main(int argc, char *argv[]) if (my_rank == 0) printf("Create %llu obj, %llu tags, query %llu\n", n_obj, n_obj, n_obj); - curr_total_obj = 0; // making necessary preparation for the test. obj_ids = init_test(my_rank, proc_num, n_obj_incr, n_query, &my_obj, &my_obj_s, &my_query, &my_query_s, @@ -374,8 +373,7 @@ main(int argc, char *argv[]) char **tag_values = gen_strings(n_attr, n_attr_len); - total_time = 0; - k = 1; + k = 1; do { @@ -399,8 +397,12 @@ main(int argc, char *argv[]) total_object_count += n_obj_incr; #endif if (my_rank == 0) - printf("Iteration %llu : Objects: %llu , Time: %.4f sec. Object throughput in this iteration: %.4f .\n", k, n_obj_incr, step_elapse, ((double)n_obj_incr)/step_elapse); - printf("Overall %llu : Objects: %llu , Time: %.4f sec. Overall object throughput: %.4f .\n", k, total_object_count, total_object_time, ((double)total_object_count)/total_object_time); + printf("Iteration %llu : Objects: %llu , Time: %.4f sec. Object throughput in this iteration: " + "%.4f .\n", + k, n_obj_incr, step_elapse, ((double)n_obj_incr) / step_elapse); + printf( + "Overall %llu : Objects: %llu , Time: %.4f sec. 
Overall object throughput: %.4f .\n", + k, total_object_count, total_object_time, ((double)total_object_count) / total_object_time); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); @@ -416,8 +418,10 @@ main(int argc, char *argv[]) total_tag_count += n_obj_incr * n_attr; #endif if (my_rank == 0) - printf("Iteration %llu : Tags: %llu , Time: %.4f sec. Tag throughput in this iteration: %.4f .\n", k, n_obj_incr * n_attr, step_elapse, (double)(n_obj_incr * n_attr)/step_elapse); - printf("Overall %llu : Tags: %llu , Time: %.4f sec. Overall tag throughput: %.4f .\n", k, total_tag_count, total_tag_time, ((double)total_tag_count)/total_tag_time); + printf("Iteration %llu : Tags: %llu , Time: %.4f sec. Tag throughput in this iteration: %.4f .\n", + k, n_obj_incr * n_attr, step_elapse, (double)(n_obj_incr * n_attr) / step_elapse); + printf("Overall %llu : Tags: %llu , Time: %.4f sec. Overall tag throughput: %.4f .\n", k, + total_tag_count, total_tag_time, ((double)total_tag_count) / total_tag_time); query_rst_cache = (void **)malloc(my_query * n_attr * sizeof(void *)); value_size = malloc(my_query * n_attr * sizeof(uint64_t)); @@ -435,8 +439,12 @@ main(int argc, char *argv[]) total_query_count += n_query * n_attr; #endif if (my_rank == 0) - printf("Iteration %llu : Queries: %llu , Time: %.4f sec. Query throughput in this iteration: %.4f .\n", k, n_query * n_attr, step_elapse, (double)(n_query * n_attr)/step_elapse); - printf("Overall %llu : Queries: %llu , Time: %.4f sec. Overall query throughput: %.4f .\n", k, total_query_count, total_query_time, ((double)total_query_count)/total_query_time); + printf("Iteration %llu : Queries: %llu , Time: %.4f sec. Query throughput in this iteration: " + "%.4f .\n", + k, n_query * n_attr, step_elapse, (double)(n_query * n_attr) / step_elapse); + printf( + "Overall %llu : Queries: %llu , Time: %.4f sec. 
Overall query throughput: %.4f .\n", + k, total_query_count, total_query_time, ((double)total_query_count) / total_query_time); check_and_release_query_result(my_query, my_obj, my_obj_s, n_attr, tag_values, query_rst_cache, obj_ids); @@ -447,12 +455,17 @@ main(int argc, char *argv[]) } while (curr_total_obj < n_obj); - if (my_rank == 0): + if (my_rank == 0) { printf("Final Report: \n"); - printf("Servers: %llu , Clients: %llu , C/S ratio: %.4f \n", n_servers, n_clients, (double)n_clients/(double)n_servers); - printf("Iterations: %llu , Objects: %llu , Tags/Object: %llu , Queries/Iteration: %llu , \n", k, curr_total_obj, n_attr, n_query); - printf("Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", (double)curr_total_obj/total_object_time, (double)(curr_total_obj*n_attr)/total_tag_time, (double)(total_query_count * n_attr)/total_query_time); - + printf("Servers: %llu , Clients: %llu , C/S ratio: %.4f \n", n_servers, n_clients, + (double)n_clients / (double)n_servers); + printf("Iterations: %llu , Objects: %llu , Tags/Object: %llu , Queries/Iteration: %llu , \n", k, + curr_total_obj, n_attr, n_query); + printf("Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", + (double)curr_total_obj / total_object_time, (double)(curr_total_obj * n_attr) / total_tag_time, + (double)(total_query_count * n_attr) / total_query_time); + } + for (i = 0; i < n_attr; i++) { free(tag_values[i]); } From 150a7369dde017fce41970cff7ed8c8e28f6b5a3 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 2 Apr 2023 09:25:01 -0500 Subject: [PATCH 030/806] fix format --- src/tests/kvtag_scale_add_get.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 01a9abcc3..5d5e52f0e 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -396,7 +396,7 @@ main(int argc, char *argv[]) total_object_time += step_elapse; total_object_count += n_obj_incr; #endif - if (my_rank == 0) + if (my_rank == 0){ printf("Iteration %llu : Objects: %llu , Time: %.4f sec. Object throughput in this iteration: " "%.4f .\n", k, n_obj_incr, step_elapse, ((double)n_obj_incr) / step_elapse); @@ -404,6 +404,8 @@ main(int argc, char *argv[]) "Overall %llu : Objects: %llu , Time: %.4f sec. Overall object throughput: %.4f .\n", k, total_object_count, total_object_time, ((double)total_object_count) / total_object_time); + } + #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); @@ -417,11 +419,13 @@ main(int argc, char *argv[]) total_tag_time += step_elapse; total_tag_count += n_obj_incr * n_attr; #endif - if (my_rank == 0) + if (my_rank == 0){ printf("Iteration %llu : Tags: %llu , Time: %.4f sec. Tag throughput in this iteration: %.4f .\n", k, n_obj_incr * n_attr, step_elapse, (double)(n_obj_incr * n_attr) / step_elapse); printf("Overall %llu : Tags: %llu , Time: %.4f sec. Overall tag throughput: %.4f .\n", k, total_tag_count, total_tag_time, ((double)total_tag_count) / total_tag_time); + } + query_rst_cache = (void **)malloc(my_query * n_attr * sizeof(void *)); value_size = malloc(my_query * n_attr * sizeof(uint64_t)); @@ -438,13 +442,14 @@ main(int argc, char *argv[]) total_query_time += step_elapse; total_query_count += n_query * n_attr; #endif - if (my_rank == 0) + if (my_rank == 0) { printf("Iteration %llu : Queries: %llu , Time: %.4f sec. 
Query throughput in this iteration: " "%.4f .\n", k, n_query * n_attr, step_elapse, (double)(n_query * n_attr) / step_elapse); printf( "Overall %llu : Queries: %llu , Time: %.4f sec. Overall query throughput: %.4f .\n", k, total_query_count, total_query_time, ((double)total_query_count) / total_query_time); + } check_and_release_query_result(my_query, my_obj, my_obj_s, n_attr, tag_values, query_rst_cache, obj_ids); From 1e6d53bf1584e7f2e3d6c02d8d1e038a982d2072 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 3 Apr 2023 09:44:18 -0500 Subject: [PATCH 031/806] clang formatter --- src/tests/kvtag_scale_add_get.c | 56 ++++++++++++++++----------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 5d5e52f0e..00060319a 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -325,15 +325,16 @@ main(int argc, char *argv[]) { pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t *obj_ids; - uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj=0; + uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj = 0; uint64_t n_attr, n_attr_len, n_query, my_query, my_query_s; uint64_t n_servers, n_clients; uint64_t i, j, k, v; int proc_num, my_rank, attr_value; char obj_name[128]; char tag_name[128]; - double stime=0.0, step_elapse=0.0, total_object_time=0.0, total_tag_time=0.0, total_query_time=0.0; - uint64_t total_object_count=0, total_tag_count=0, total_query_count=0; + double stime = 0.0, step_elapse = 0.0; + double total_object_time = 0.0, total_tag_time = 0.0, total_query_time = 0.0; + uint64_t total_object_count = 0, total_tag_count = 0, total_query_count = 0; int *value_to_add; void **query_rst_cache; uint64_t *value_size; @@ -373,7 +374,7 @@ main(int argc, char *argv[]) char **tag_values = gen_strings(n_attr, n_attr_len); - k = 1; + k = 1; do { @@ -381,9 +382,6 @@ main(int argc, char *argv[]) MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif - if (my_rank == 0) - printf("starting creating %llu objects... \n", my_obj); - // creating objects. Here, my_obj and my_obj_s has been calculated for each process based on // n_obj_incr. create_object(my_obj, my_obj_s, cont, obj_prop, obj_ids); @@ -396,16 +394,16 @@ main(int argc, char *argv[]) total_object_time += step_elapse; total_object_count += n_obj_incr; #endif - if (my_rank == 0){ - printf("Iteration %llu : Objects: %llu , Time: %.4f sec. Object throughput in this iteration: " + if (my_rank == 0) { + printf("Iteration %4llu : Objects: %6llu , Time: %.4f sec. Object throughput in this iteration: " "%.4f .\n", k, n_obj_incr, step_elapse, ((double)n_obj_incr) / step_elapse); - printf( - "Overall %llu : Objects: %llu , Time: %.4f sec. Overall object throughput: %.4f .\n", - k, total_object_count, total_object_time, ((double)total_object_count) / total_object_time); - + printf("Overall %4llu : Objects: %6llu , Time: %.4f sec. Overall object throughput: " + "%.4f .\n", + k, total_object_count, total_object_time, + ((double)total_object_count) / total_object_time); } - + #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); @@ -419,13 +417,14 @@ main(int argc, char *argv[]) total_tag_time += step_elapse; total_tag_count += n_obj_incr * n_attr; #endif - if (my_rank == 0){ - printf("Iteration %llu : Tags: %llu , Time: %.4f sec. Tag throughput in this iteration: %.4f .\n", - k, n_obj_incr * n_attr, step_elapse, (double)(n_obj_incr * n_attr) / step_elapse); - printf("Overall %llu : Tags: %llu , Time: %.4f sec. 
Overall tag throughput: %.4f .\n", k, - total_tag_count, total_tag_time, ((double)total_tag_count) / total_tag_time); + if (my_rank == 0) { + printf( + "Iteration %4llu : Tags: %6llu , Time: %.4f sec. Tag throughput in this iteration: %.4f .\n", + k, n_obj_incr * n_attr, step_elapse, (double)(n_obj_incr * n_attr) / step_elapse); + printf( + "Overall %4llu : Tags: %6llu , Time: %.4f sec. Overall tag throughput: %.4f .\n", + k, total_tag_count, total_tag_time, ((double)total_tag_count) / total_tag_time); } - query_rst_cache = (void **)malloc(my_query * n_attr * sizeof(void *)); value_size = malloc(my_query * n_attr * sizeof(uint64_t)); @@ -443,12 +442,12 @@ main(int argc, char *argv[]) total_query_count += n_query * n_attr; #endif if (my_rank == 0) { - printf("Iteration %llu : Queries: %llu , Time: %.4f sec. Query throughput in this iteration: " + printf("Iteration %4llu : Queries: %6llu , Time: %.4f sec. Query throughput in this iteration: " "%.4f .\n", k, n_query * n_attr, step_elapse, (double)(n_query * n_attr) / step_elapse); - printf( - "Overall %llu : Queries: %llu , Time: %.4f sec. Overall query throughput: %.4f .\n", - k, total_query_count, total_query_time, ((double)total_query_count) / total_query_time); + printf("Overall %4llu : Queries: %6llu , Time: %.4f sec. Overall query throughput: " + "%.4f .\n", + k, total_query_count, total_query_time, ((double)total_query_count) / total_query_time); } check_and_release_query_result(my_query, my_obj, my_obj_s, n_attr, tag_values, query_rst_cache, @@ -462,11 +461,12 @@ main(int argc, char *argv[]) if (my_rank == 0) { printf("Final Report: \n"); - printf("Servers: %llu , Clients: %llu , C/S ratio: %.4f \n", n_servers, n_clients, + printf("[Final Report 1] Servers: %llu , Clients: %llu , C/S ratio: %.4f \n", n_servers, n_clients, (double)n_clients / (double)n_servers); - printf("Iterations: %llu , Objects: %llu , Tags/Object: %llu , Queries/Iteration: %llu , \n", k, - curr_total_obj, n_attr, n_query); - printf("Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", + printf("[Final Report 2] Iterations: %llu , Objects: %llu , Tags/Object: %llu , Queries/Iteration: " + "%llu , \n", + k, curr_total_obj, n_attr, n_query); + printf("[Final Report 3] Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", (double)curr_total_obj / total_object_time, (double)(curr_total_obj * n_attr) / total_tag_time, (double)(total_query_count * n_attr) / total_query_time); } From 84e7675a58bf4050c682ab89a46002fcdf609f2e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 3 Apr 2023 10:01:53 -0500 Subject: [PATCH 032/806] update CMake --- CMakeLists.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 447ef9924..e54244a42 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -433,6 +433,8 @@ configure_file( ${PDC_BINARY_DIR}/CMakeFiles/pdc-config-version.cmake @ONLY ) + + install( FILES ${PDC_BINARY_DIR}/CMakeFiles/pdc-config-version.cmake @@ -444,10 +446,14 @@ install( DIRECTORY ${PDC_BINARY_DIR}/bin + FILES_MATCHING PATTERN "*" DESTINATION ${PDC_INSTALL_DATA_DIR}/test ) +# set(ADD_EXE_PERMISSION_CMD "chmod +x ${}/test/*") +# add_custom_command(TARGET ${PROJECT_NAME} POST_INSTALL COMMAND ${add_permission_cmd}) + #install( # FILES From 54a9419df00a8ec89617935c3bfd5ea0466e5bd3 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 3 Apr 2023 10:04:24 -0500 Subject: [PATCH 033/806] update CMake --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/CMakeLists.txt b/CMakeLists.txt index e54244a42..0633ac6bd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -445,10 +445,10 @@ install( install( DIRECTORY ${PDC_BINARY_DIR}/bin - - FILES_MATCHING PATTERN "*" DESTINATION ${PDC_INSTALL_DATA_DIR}/test + FILES_MATCHING PATTERN "*" + PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE ) # set(ADD_EXE_PERMISSION_CMD "chmod +x ${}/test/*") From 3a8f63e329301ceb154a6f73df71151cc0896082 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 3 Apr 2023 10:08:32 -0500 Subject: [PATCH 034/806] update CMake --- CMakeLists.txt | 4 ---- 1 file changed, 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0633ac6bd..7a1db4d08 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -451,10 +451,6 @@ install( PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE ) -# set(ADD_EXE_PERMISSION_CMD "chmod +x ${}/test/*") -# add_custom_command(TARGET ${PROJECT_NAME} POST_INSTALL COMMAND ${add_permission_cmd}) - - #install( # FILES # ${PDC_BINARY_DIR}/bin/pdc_server.exe From dfa1e50c08a7ff856979809d0fb35e1e095d4b49 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 3 Apr 2023 10:24:35 -0500 Subject: [PATCH 035/806] free allocated memory properly --- src/tests/kvtag_add_get_scale.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index 29d2b894a..95a32591a 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -178,7 +178,7 @@ main(int argc, char *argv[]) for (i = 0; i < my_query; i++) { if (*(int *)(values[i]) != i + my_add_tag_s) printf("Error with retrieved tag from o%d\n", i + my_query_s); - // PDC_free_kvtag(&values[i]); + free(values[i]); } free(values); From c6c0be6fc7149ea3a89eb0217a6a8d28e991f00e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 3 Apr 2023 13:42:47 -0500 Subject: [PATCH 036/806] clang format --- src/api/pdc_client_connect.c | 2 +- src/tests/kvtag_scale_add_get.c | 12 ++++++------ src/utils/pdc_region_utils.c | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index 83cba609f..22e98f751 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -1195,7 +1195,7 @@ PDC_Client_mercury_init(hg_class_t **hg_class, hg_context_t **hg_context, int po init_info.na_init_info.progress_mode = NA_NO_BLOCK; // busy mode #endif -//#ifndef PDC_HAS_CRAY_DRC +// #ifndef PDC_HAS_CRAY_DRC #ifdef PDC_HAS_SHARED_SERVER init_info.auto_sm = HG_TRUE; #endif diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_scale_add_get.c index 00060319a..5586f4b6c 100644 --- a/src/tests/kvtag_scale_add_get.c +++ b/src/tests/kvtag_scale_add_get.c @@ -34,7 +34,7 @@ uint64_t atoui64(char *arg) { - char *endptr; + char * endptr; uint64_t num = strtoull(arg, &endptr, 10); if (*endptr != '\0') { @@ -324,7 +324,7 @@ int main(int argc, char *argv[]) { pdcid_t pdc, cont_prop, cont, obj_prop; - pdcid_t *obj_ids; + pdcid_t * obj_ids; uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj = 0; uint64_t n_attr, n_attr_len, n_query, my_query, my_query_s; uint64_t n_servers, n_clients; @@ -335,10 +335,10 @@ main(int argc, char *argv[]) double stime = 0.0, step_elapse = 0.0; double total_object_time = 0.0, total_tag_time = 0.0, total_query_time = 0.0; uint64_t total_object_count = 0, total_tag_count = 0, total_query_count = 0; - int *value_to_add; - void 
**query_rst_cache; - uint64_t *value_size; - obj_handle *oh; + int * value_to_add; + void ** query_rst_cache; + uint64_t * value_size; + obj_handle * oh; struct pdc_obj_info *info; #ifdef ENABLE_MPI MPI_Init(&argc, &argv); diff --git a/src/utils/pdc_region_utils.c b/src/utils/pdc_region_utils.c index a81dd9b9f..3f360056e 100644 --- a/src/utils/pdc_region_utils.c +++ b/src/utils/pdc_region_utils.c @@ -33,7 +33,7 @@ PDC_region_overlap_detect(int ndim, uint64_t *offset1, uint64_t *size1, uint64_t for (i = 0; i < ndim; ++i) { output_offset[0][i] = offset2[i] < offset1[i] ? offset1[i] : offset2[i]; output_size[0][i] = ((offset2[i] + size2[i] < offset1[i] + size1[i]) ? (offset2[i] + size2[i]) - : (offset1[i] + size1[i])) - + : (offset1[i] + size1[i])) - output_offset[0][i]; } From cb6f440324273cf06174a6fece38d46151818468 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 3 Apr 2023 15:13:41 -0500 Subject: [PATCH 037/806] clang format --- .github/workflows/clang-format-check.yml | 2 +- .github/workflows/clang-format-fix.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/clang-format-check.yml b/.github/workflows/clang-format-check.yml index 52e4d0613..7f90f5fb2 100644 --- a/.github/workflows/clang-format-check.yml +++ b/.github/workflows/clang-format-check.yml @@ -13,6 +13,6 @@ jobs: with: source: '.' extensions: 'c,h,cpp,hpp' - clangFormatVersion: 10 + clangFormatVersion: 11 style: file # exclude: './config' diff --git a/.github/workflows/clang-format-fix.yml b/.github/workflows/clang-format-fix.yml index 9145f65ca..ac3592185 100644 --- a/.github/workflows/clang-format-fix.yml +++ b/.github/workflows/clang-format-fix.yml @@ -14,7 +14,7 @@ jobs: with: source: '.' extensions: 'c,h,cpp,hpp' - clangFormatVersion: 10 + clangFormatVersion: 11 inplace: True style: file # exclude: './config ' From 74d72670c4a02642d40cb643ddec6a8ae0b2561e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 3 Apr 2023 15:32:44 -0500 Subject: [PATCH 038/806] clang-format-10 --- .github/workflows/clang-format-check.yml | 2 +- .github/workflows/clang-format-fix.yml | 2 +- src/utils/pdc_region_utils.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/clang-format-check.yml b/.github/workflows/clang-format-check.yml index 7f90f5fb2..52e4d0613 100644 --- a/.github/workflows/clang-format-check.yml +++ b/.github/workflows/clang-format-check.yml @@ -13,6 +13,6 @@ jobs: with: source: '.' extensions: 'c,h,cpp,hpp' - clangFormatVersion: 11 + clangFormatVersion: 10 style: file # exclude: './config' diff --git a/.github/workflows/clang-format-fix.yml b/.github/workflows/clang-format-fix.yml index ac3592185..9145f65ca 100644 --- a/.github/workflows/clang-format-fix.yml +++ b/.github/workflows/clang-format-fix.yml @@ -14,7 +14,7 @@ jobs: with: source: '.' extensions: 'c,h,cpp,hpp' - clangFormatVersion: 11 + clangFormatVersion: 10 inplace: True style: file # exclude: './config ' diff --git a/src/utils/pdc_region_utils.c b/src/utils/pdc_region_utils.c index 3f360056e..a81dd9b9f 100644 --- a/src/utils/pdc_region_utils.c +++ b/src/utils/pdc_region_utils.c @@ -33,7 +33,7 @@ PDC_region_overlap_detect(int ndim, uint64_t *offset1, uint64_t *size1, uint64_t for (i = 0; i < ndim; ++i) { output_offset[0][i] = offset2[i] < offset1[i] ? offset1[i] : offset2[i]; output_size[0][i] = ((offset2[i] + size2[i] < offset1[i] + size1[i]) ? 
(offset2[i] + size2[i]) - : (offset1[i] + size1[i])) - + : (offset1[i] + size1[i])) - output_offset[0][i]; } From 25196ed9dc350936b1d2ea12890842b982fb2227 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 3 Apr 2023 15:38:17 -0500 Subject: [PATCH 039/806] change file name --- src/tests/CMakeLists.txt | 1 + src/tests/{kvtag_scale_add_get.c => kvtag_add_get_benchmark.c} | 0 2 files changed, 1 insertion(+) rename src/tests/{kvtag_scale_add_get.c => kvtag_add_get_benchmark.c} (100%) diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index 27cb1a3f3..e524881e8 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -63,6 +63,7 @@ set(PROGRAMS # data_server_meta_test kvtag_add_get # kvtag_get + kvtag_add_get_benchmark kvtag_add_get_scale # kvtag_query kvtag_query_scale diff --git a/src/tests/kvtag_scale_add_get.c b/src/tests/kvtag_add_get_benchmark.c similarity index 100% rename from src/tests/kvtag_scale_add_get.c rename to src/tests/kvtag_add_get_benchmark.c From 50d10014b42b997811d8e55dfa1bad4619d7ea86 Mon Sep 17 00:00:00 2001 From: zhangwei217245 Date: Mon, 3 Apr 2023 20:05:19 -0700 Subject: [PATCH 040/806] address review comments --- src/tests/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index e524881e8..499421880 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -67,7 +67,6 @@ set(PROGRAMS kvtag_add_get_scale # kvtag_query kvtag_query_scale - kvtag_scale_add_get # obj_transformation region_transfer_query region_transfer From 8cdccdaa25091fc81f73552ba11523defe82829f Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 4 Apr 2023 13:35:49 -0500 Subject: [PATCH 041/806] update llsm importer --- src/server/pdc_server.c | 4 ++-- tools/CMakeLists.txt | 13 +++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/src/server/pdc_server.c b/src/server/pdc_server.c index 9b5428a5c..526658bd0 100644 --- a/src/server/pdc_server.c +++ b/src/server/pdc_server.c @@ -61,8 +61,8 @@ #include #endif -#define PDC_CHECKPOINT_INTERVAL 200 -#define PDC_CHECKPOINT_MIN_INTERVAL_SEC 300 +#define PDC_CHECKPOINT_INTERVAL 1000 +#define PDC_CHECKPOINT_MIN_INTERVAL_SEC 1800 // Global debug variable to control debug printfs int is_debug_g = 0; diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index f9915b221..e2a017bd4 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -51,8 +51,21 @@ option(USE_SYSTEM_HDF5 "Use system-installed HDF5." ON) endif() endif() +option(USE_SYSTEM_OPENMP "Use system-installed OpenMP." 
ON) +if(USE_SYSTEM_OPENMP) + find_package(OpenMP REQUIRED) + + if(OPENMP_FOUND) + set(OPENMP_LIBRARIES "${OpenMP_C_LIBRARIES}") + else() + message(FATAL_ERROR "OpenMP not found") + endif() +endif() + + add_definitions(-DENABLE_MPI=1) add_library(cjson cjson/cJSON.c) +add_subdirectory(llsm) set(PROGRAMS pdc_ls From 6ebb5a7fc54d81821c93982c3c096b2a0d8db979 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 4 Apr 2023 13:37:13 -0500 Subject: [PATCH 042/806] update llsm importer --- tools/llsm/CMakeLists.txt | 5 + tools/llsm/parallelReadTiff.c | 762 ++++++++++++++++++++++++++++++++++ tools/llsm/parallelReadTiff.h | 0 tools/llsm_importer.c | 14 + 4 files changed, 781 insertions(+) create mode 100644 tools/llsm/CMakeLists.txt create mode 100644 tools/llsm/parallelReadTiff.c create mode 100644 tools/llsm/parallelReadTiff.h create mode 100644 tools/llsm_importer.c diff --git a/tools/llsm/CMakeLists.txt b/tools/llsm/CMakeLists.txt new file mode 100644 index 000000000..a0ea9404d --- /dev/null +++ b/tools/llsm/CMakeLists.txt @@ -0,0 +1,5 @@ + +add_library(llsm_tiff parallelReadTiff.c) +target_compile_options(llsm_tiff PRIVATE ${OpenMP_C_FLAGS}) +target_include_directories(llsm_tiff PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) +target_link_libraries(llsm_tiff PUBLIC ${OpenMP_C_LIBRARIES}) \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c new file mode 100644 index 000000000..fb4283e6a --- /dev/null +++ b/tools/llsm/parallelReadTiff.c @@ -0,0 +1,762 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "tiffio.h" +#include "omp.h" +#include "mex.h" +//mex -v COPTIMFLAGS="-O3 -DNDEBUG" CFLAGS='$CFLAGS -O3 -fopenmp' LDFLAGS='$LDFLAGS -O3 -fopenmp' '-I/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' '-L/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' -ltiff parallelReadTiff.c +//mex COMPFLAGS='$COMPFLAGS /openmp' '-IC:\Program Files (x86)\tiff\include\' '-LC:\Program Files (x86)\tiff\lib\' -ltiffd.lib C:\Users\Matt\Documents\parallelTiff\main.cpp + +//libtiff 4.4.0 +//mex -v COPTIMFLAGS="-O3 -DNDEBUG" LDOPTIMFLAGS="-O3 -DNDEBUG" CFLAGS='$CFLAGS -O3 -fopenmp' LDFLAGS='$LDFLAGS -O3 -fopenmp' '-I/clusterfs/fiona/matthewmueller/software/tiff-4.4.0/include' '-L/clusterfs/fiona/matthewmueller/software/tiff-4.4.0/lib' -ltiff parallelReadTiff.c + +// Handle the tilde character in filenames on Linux/Mac +#ifndef _WIN32 +#include +char* expandTilde(char* path) { + wordexp_t expPath; + wordexp(path, &expPath, 0); + return expPath.we_wordv[0]; +} +#endif + +void DummyHandler(const char* module, const char* fmt, va_list ap) +{ + // ignore errors and warnings +} + +// Backup method in case there are errors reading strips +void readTiffParallelBak(uint64_t x, uint64_t y, uint64_t z, const char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint8_t flipXY){ + int32_t numWorkers = omp_get_max_threads(); + int32_t batchSize = (z-1)/numWorkers+1; + uint64_t bytes = bits/8; + + int32_t w; + #pragma omp parallel for + for(w = 0; w < numWorkers; w++){ + + TIFF* tif = TIFFOpen(fileName, "r"); + if(!tif) mexErrMsgIdAndTxt("tiff:threadError","Thread %d: File \"%s\" cannot be opened\n",w,fileName); + + void* buffer = malloc(x*bytes); + for(int64_t dir = startSlice+(w*batchSize); dir < startSlice+((w+1)*batchSize); dir++){ + if(dir>=z+startSlice) break; + + int counter = 0; + while(!TIFFSetDirectory(tif, (uint64_t)dir) && counter<3){ + printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. 
Try %d\n",w,fileName,dir,counter+1); + counter++; + } + + for (int64_t i = 0; i < y; i++) + { + TIFFReadScanline(tif, buffer, i, 0); + if(!flipXY){ + memcpy(tiff+((i*x)*bytes),buffer,x*bytes); + continue; + } + //loading the data into a buffer + switch(bits){ + case 8: + // Map Values to flip x and y for MATLAB + for(int64_t j = 0; j < x; j++){ + ((uint8_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint8_t*)buffer)[j]; + } + break; + case 16: + // Map Values to flip x and y for MATLAB + for(int64_t j = 0; j < x; j++){ + ((uint16_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint16_t*)buffer)[j]; + } + break; + case 32: + // Map Values to flip x and y for MATLAB + for(int64_t j = 0; j < x; j++){ + ((float*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((float*)buffer)[j]; + } + break; + case 64: + // Map Values to flip x and y for MATLAB + for(int64_t j = 0; j < x; j++){ + ((double*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((double*)buffer)[j]; + } + break; + } + } + } + free(buffer); + TIFFClose(tif); + } +} + +void readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint64_t stripSize, uint8_t flipXY){ + int32_t numWorkers = omp_get_max_threads(); + int32_t batchSize = (z-1)/numWorkers+1; + uint64_t bytes = bits/8; + + uint16_t compressed = 1; + TIFF* tif = TIFFOpen(fileName, "r"); + TIFFGetField(tif, TIFFTAG_COMPRESSION, &compressed); + + + + + int32_t w; + uint8_t errBak = 0; + uint8_t err = 0; + char errString[10000]; + if(compressed > 1 || z < 32768){ + TIFFClose(tif); + #pragma omp parallel for + for(w = 0; w < numWorkers; w++){ + + uint8_t outCounter = 0; + TIFF* tif = TIFFOpen(fileName, "r"); + while(!tif){ + tif = TIFFOpen(fileName, "r"); + if(outCounter == 3){ + #pragma omp critical + { + err = 1; + sprintf(errString,"Thread %d: File \"%s\" cannot be opened\n",w,fileName); + } + continue; + } + outCounter++; + } + + void* buffer = malloc(x*stripSize*bytes); + for(int64_t dir = startSlice+(w*batchSize); dir < startSlice+((w+1)*batchSize); dir++){ + if(dir>=z+startSlice || err) break; + + uint8_t counter = 0; + while(!TIFFSetDirectory(tif, (uint64_t)dir) && counter<3){ + counter++; + if(counter == 3){ + #pragma omp critical + { + err = 1; + sprintf(errString,"Thread %d: File \"%s\" cannot be opened\n",w,fileName); + } + } + } + if(err) break; + for (int64_t i = 0; i*stripSize < y; i++) + { + + //loading the data into a buffer + int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize*x*bytes); + if(cBytes < 0){ + #pragma omp critical + { + errBak = 1; + err = 1; + sprintf(errString,"Thread %d: Strip %ld cannot be read\n",w,i); + } + break; + } + if(!flipXY){ + memcpy(tiff+((i*stripSize*x)*bytes),buffer,cBytes); + continue; + } + switch(bits){ + case 8: + // Map Values to flip x and y for MATLAB + for(int64_t k = 0; k < stripSize; k++){ + if((k+(i*stripSize)) >= y) break; + for(int64_t j = 0; j < x; j++){ + ((uint8_t*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((uint8_t*)buffer)[j+(k*x)]; + } + } + break; + case 16: + // Map Values to flip x and y for MATLAB + for(int64_t k = 0; k < stripSize; k++){ + if((k+(i*stripSize)) >= y) break; + for(int64_t j = 0; j < x; j++){ + ((uint16_t*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((uint16_t*)buffer)[j+(k*x)]; + } + } + break; + case 32: + // Map Values to flip x and y for MATLAB + for(int64_t k = 0; k < stripSize; k++){ + if((k+(i*stripSize)) >= y) break; + for(int64_t j = 0; j < x; j++){ + 
((float*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((float*)buffer)[j+(k*x)]; + } + } + break; + case 64: + // Map Values to flip x and y for MATLAB + for(int64_t k = 0; k < stripSize; k++){ + if((k+(i*stripSize)) >= y) break; + for(int64_t j = 0; j < x; j++){ + ((double*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((double*)buffer)[j+(k*x)]; + } + } + break; + } + } + } + free(buffer); + TIFFClose(tif); + } + } + else{ + uint64_t stripsPerDir = (uint64_t)ceil((double)y/(double)stripSize); + #ifdef _WIN32 + int fd = open(fileName,O_RDONLY | O_BINARY); + #else + int fd = open(fileName,O_RDONLY); + #endif + if(fd == -1) mexErrMsgIdAndTxt("disk:threadError","File \"%s\" cannot be opened from Disk\n",fileName); + + if(!tif) mexErrMsgIdAndTxt("tiff:threadError","File \"%s\" cannot be opened\n",fileName); + uint64_t offset = 0; + uint64_t* offsets = NULL; + TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); + uint64_t* byteCounts = NULL; + TIFFGetField(tif, TIFFTAG_STRIPBYTECOUNTS, &byteCounts); + if(!offsets || !byteCounts) mexErrMsgIdAndTxt("tiff:threadError","Could not get offsets or byte counts from the tiff file\n"); + offset = offsets[0]; + uint64_t fOffset = offsets[stripsPerDir-1]+byteCounts[stripsPerDir-1]; + uint64_t zSize = fOffset-offset; + TIFFSetDirectory(tif,1); + TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); + uint64_t gap = offsets[0]-fOffset; + + lseek(fd, offset, SEEK_SET); + + + TIFFClose(tif); + uint64_t curr = 0; + uint64_t bytesRead = 0; + // TESTING + // Not sure if we will need to read in chunks like for ImageJ + for(uint64_t i = 0; i < z; i++){ + bytesRead = read(fd,tiff+curr,zSize); + curr += bytesRead; + lseek(fd,gap,SEEK_CUR); + } + close(fd); + uint64_t size = x*y*z*(bits/8); + void* tiffC = malloc(size); + memcpy(tiffC,tiff,size); + #pragma omp parallel for + for(uint64_t k = 0; k < z; k++){ + for(uint64_t j = 0; j < x; j++){ + for(uint64_t i = 0; i < y; i++){ + switch(bits){ + case 8: + ((uint8_t*)tiff)[i+(j*y)+(k*x*y)] = ((uint8_t*)tiffC)[j+(i*x)+(k*x*y)]; + break; + case 16: + ((uint16_t*)tiff)[i+(j*y)+(k*x*y)] = ((uint16_t*)tiffC)[j+(i*x)+(k*x*y)]; + break; + case 32: + ((float*)tiff)[i+(j*y)+(k*x*y)] = ((float*)tiffC)[j+(i*x)+(k*x*y)]; + break; + case 64: + ((double*)tiff)[i+(j*y)+(k*x*y)] = ((double*)tiffC)[j+(i*x)+(k*x*y)]; + break; + } + } + } + } + free(tiffC); + } + if(err){ + if(errBak) readTiffParallelBak(x, y, z, fileName, tiff, bits, startSlice, flipXY); + else mexErrMsgIdAndTxt("tiff:threadError",errString); + } +} + +// Backup method in case there are errors reading strips +void readTiffParallel2DBak(uint64_t x, uint64_t y, uint64_t z, const char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint8_t flipXY){ + int32_t numWorkers = omp_get_max_threads(); + int32_t batchSize = (y-1)/numWorkers+1; + uint64_t bytes = bits/8; + + int32_t w; + #pragma omp parallel for + for(w = 0; w < numWorkers; w++){ + + TIFF* tif = TIFFOpen(fileName, "r"); + if(!tif) mexErrMsgIdAndTxt("tiff:threadError","Thread %d: File \"%s\" cannot be opened\n",w,fileName); + + void* buffer = malloc(x*bytes); + for(int64_t dir = startSlice+(w*batchSize); dir < startSlice+((w+1)*batchSize); dir++){ + if(dir>=z+startSlice) break; + + int counter = 0; + while(!TIFFSetDirectory(tif, (uint64_t)0) && counter<3){ + printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. 
Try %d\n",w,fileName,dir,counter+1); + counter++; + } + + for (int64_t i = (w*batchSize); i < ((w+1)*batchSize); i++) + { + if(i >= y) break; + TIFFReadScanline(tif, buffer, i, 0); + if(!flipXY){ + memcpy(tiff+((i*x)*bytes),buffer,x*bytes); + continue; + } + //loading the data into a buffer + switch(bits){ + case 8: + // Map Values to flip x and y for MATLAB + for(int64_t j = 0; j < x; j++){ + ((uint8_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint8_t*)buffer)[j]; + } + break; + case 16: + // Map Values to flip x and y for MATLAB + for(int64_t j = 0; j < x; j++){ + ((uint16_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint16_t*)buffer)[j]; + } + break; + case 32: + // Map Values to flip x and y for MATLAB + for(int64_t j = 0; j < x; j++){ + ((float*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((float*)buffer)[j]; + } + break; + case 64: + // Map Values to flip x and y for MATLAB + for(int64_t j = 0; j < x; j++){ + ((double*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((double*)buffer)[j]; + } + break; + } + } + } + free(buffer); + TIFFClose(tif); + } +} + +void readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint64_t stripSize, uint8_t flipXY){ + int32_t numWorkers = omp_get_max_threads(); + uint64_t stripsPerDir = (uint64_t)ceil((double)y/(double)stripSize); + int32_t batchSize = (stripsPerDir-1)/numWorkers+1; + uint64_t bytes = bits/8; + + int32_t w; + uint8_t err = 0; + uint8_t errBak = 0; + char errString[10000]; + + + #pragma omp parallel for + for(w = 0; w < numWorkers; w++){ + + uint8_t outCounter = 0; + TIFF* tif = TIFFOpen(fileName, "r"); + while(!tif){ + tif = TIFFOpen(fileName, "r"); + if(outCounter == 3){ + #pragma omp critical + { + err = 1; + sprintf(errString,"Thread %d: File \"%s\" cannot be opened\n",w,fileName); + } + continue; + } + outCounter++; + } + + void* buffer = malloc(x*stripSize*bytes); + + + uint8_t counter = 0; + while(!TIFFSetDirectory(tif, 0) && counter<3){ + printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. 
Try %d\n",w,fileName,0,counter+1); + counter++; + if(counter == 3){ + #pragma omp critical + { + err = 1; + sprintf(errString,"Thread %d: File \"%s\" cannot be opened\n",w,fileName); + } + } + } + for (int64_t i = (w*batchSize); i < (w+1)*batchSize; i++) + { + if(i*stripSize >= y || err) break; + //loading the data into a buffer + int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize*x*bytes); + if(cBytes < 0){ + #pragma omp critical + { + errBak = 1; + err = 1; + sprintf(errString,"Thread %d: Strip %ld cannot be read\n",w,i); + } + break; + } + if(!flipXY){ + memcpy(tiff+((i*stripSize*x)*bytes),buffer,cBytes); + continue; + } + switch(bits){ + case 8: + // Map Values to flip x and y for MATLAB + for(int64_t k = 0; k < stripSize; k++){ + if((k+(i*stripSize)) >= y) break; + for(int64_t j = 0; j < x; j++){ + ((uint8_t*)tiff)[((j*y)+(k+(i*stripSize)))] = ((uint8_t*)buffer)[j+(k*x)]; + } + } + break; + case 16: + // Map Values to flip x and y for MATLAB + for(int64_t k = 0; k < stripSize; k++){ + if((k+(i*stripSize)) >= y) break; + for(int64_t j = 0; j < x; j++){ + ((uint16_t*)tiff)[((j*y)+(k+(i*stripSize)))] = ((uint16_t*)buffer)[j+(k*x)]; + } + } + break; + case 32: + // Map Values to flip x and y for MATLAB + for(int64_t k = 0; k < stripSize; k++){ + if((k+(i*stripSize)) >= y) break; + for(int64_t j = 0; j < x; j++){ + ((float*)tiff)[((j*y)+(k+(i*stripSize)))] = ((float*)buffer)[j+(k*x)]; + } + } + break; + case 64: + // Map Values to flip x and y for MATLAB + for(int64_t k = 0; k < stripSize; k++){ + if((k+(i*stripSize)) >= y) break; + for(int64_t j = 0; j < x; j++){ + ((double*)tiff)[((j*y)+(k+(i*stripSize)))] = ((double*)buffer)[j+(k*x)]; + } + } + break; + } + } + free(buffer); + TIFFClose(tif); + } + + if(err) { + if(errBak) readTiffParallel2DBak(x, y, z, fileName, tiff, bits, startSlice, flipXY); + else mexErrMsgIdAndTxt("tiff:threadError",errString); + } +} + +// Reading images saved by ImageJ +void readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint64_t stripSize, uint8_t flipXY){ + #ifdef _WIN32 + int fd = open(fileName,O_RDONLY | O_BINARY); + #else + int fd = open(fileName,O_RDONLY); + #endif + TIFF* tif = TIFFOpen(fileName, "r"); + if(!tif) mexErrMsgIdAndTxt("tiff:threadError","File \"%s\" cannot be opened\n",fileName); + uint64_t offset = 0; + uint64_t* offsets = NULL; + TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); + if(offsets) offset = offsets[0]; + + TIFFClose(tif); + lseek(fd, offset, SEEK_SET); + uint64_t bytes = bits/8; + //#pragma omp parallel for + /* + for(uint64_t i = 0; i < z; i++){ + uint64_t cOffset = x*y*bytes*i; + //pread(fd,tiff+cOffset,x*y*bytes,offset+cOffset); + read(fd,tiff+cOffset,x*y*bytes); + }*/ + uint64_t chunk = 0; + uint64_t tBytes = x*y*z*bytes; + uint64_t bytesRead; + uint64_t rBytes = tBytes; + if(tBytes < INT_MAX) bytesRead = read(fd,tiff,tBytes); + else{ + while(chunk < tBytes){ + rBytes = tBytes-chunk; + if(rBytes > INT_MAX) bytesRead = read(fd,tiff+chunk,INT_MAX); + else bytesRead = read(fd,tiff+chunk,rBytes); + chunk += bytesRead; + } + } + close(fd); + // Swap endianess for types greater than 8 bits + // TODO: May need to change later because we may not always need to swap + if(bits > 8){ + #pragma omp parallel for + for(uint64_t i = 0; i < x*y*z; i++){ + switch(bits){ + case 16: + //((uint16_t*)tiff)[i] = ((((uint16_t*)tiff)[i] & 0xff) >> 8) | (((uint16_t*)tiff)[i] << 8); + //((uint16_t*)tiff)[i] = bswap_16(((uint16_t*)tiff)[i]); + 
((uint16_t*)tiff)[i] = ((((uint16_t*)tiff)[i] << 8) & 0xff00) | ((((uint16_t*)tiff)[i] >> 8) & 0x00ff); + break; + case 32: + //((num & 0xff000000) >> 24) | ((num & 0x00ff0000) >> 8) | ((num & 0x0000ff00) << 8) | (num << 24) + //((float*)tiff)[i] = bswap_32(((float*)tiff)[i]); + ((uint32_t*)tiff)[i] = ((((uint32_t*)tiff)[i] << 24) & 0xff000000 ) | + ((((uint32_t*)tiff)[i] << 8) & 0x00ff0000 ) | + ((((uint32_t*)tiff)[i] >> 8) & 0x0000ff00 ) | + ((((uint32_t*)tiff)[i] >> 24) & 0x000000ff ); + break; + case 64: + //((double*)tiff)[i] = bswap_64(((double*)tiff)[i]); + ((uint64_t*)tiff)[i] = ( (((uint64_t*)tiff)[i] << 56) & 0xff00000000000000UL ) | + ( (((uint64_t*)tiff)[i] << 40) & 0x00ff000000000000UL ) | + ( (((uint64_t*)tiff)[i] << 24) & 0x0000ff0000000000UL ) | + ( (((uint64_t*)tiff)[i] << 8) & 0x000000ff00000000UL ) | + ( (((uint64_t*)tiff)[i] >> 8) & 0x00000000ff000000UL ) | + ( (((uint64_t*)tiff)[i] >> 24) & 0x0000000000ff0000UL ) | + ( (((uint64_t*)tiff)[i] >> 40) & 0x000000000000ff00UL ) | + ( (((uint64_t*)tiff)[i] >> 56) & 0x00000000000000ffUL ); + break; + } + + } + } + // Find a way to do this in-place without making a copy + if(flipXY){ + uint64_t size = x*y*z*(bits/8); + void* tiffC = malloc(size); + memcpy(tiffC,tiff,size); + #pragma omp parallel for + for(uint64_t k = 0; k < z; k++){ + for(uint64_t j = 0; j < x; j++){ + for(uint64_t i = 0; i < y; i++){ + switch(bits){ + case 8: + ((uint8_t*)tiff)[i+(j*y)+(k*x*y)] = ((uint8_t*)tiffC)[j+(i*x)+(k*x*y)]; + break; + case 16: + ((uint16_t*)tiff)[i+(j*y)+(k*x*y)] = ((uint16_t*)tiffC)[j+(i*x)+(k*x*y)]; + break; + case 32: + ((float*)tiff)[i+(j*y)+(k*x*y)] = ((float*)tiffC)[j+(i*x)+(k*x*y)]; + break; + case 64: + ((double*)tiff)[i+(j*y)+(k*x*y)] = ((double*)tiffC)[j+(i*x)+(k*x*y)]; + break; + } + } + } + } + free(tiffC); + } +} + +uint8_t isImageJIm(const char* fileName){ + TIFF* tif = TIFFOpen(fileName, "r"); + if(!tif) return 0; + char* tiffDesc = NULL; + if(TIFFGetField(tif, TIFFTAG_IMAGEDESCRIPTION, &tiffDesc)){ + if(strstr(tiffDesc, "ImageJ")){ + return 1; + } + } + return 0; +} + +uint64_t imageJImGetZ(const char* fileName){ + TIFF* tif = TIFFOpen(fileName, "r"); + if(!tif) return 0; + char* tiffDesc = NULL; + if(TIFFGetField(tif, TIFFTAG_IMAGEDESCRIPTION, &tiffDesc)){ + if(strstr(tiffDesc, "ImageJ")){ + char* nZ = strstr(tiffDesc,"images="); + if(nZ){ + nZ+=7; + char* temp; + return strtol(nZ,&temp,10); + } + } + } + return 0; +} + +void mexFunction(int nlhs, mxArray *plhs[], + int nrhs, const mxArray *prhs[]) +{ + // Check if the fileName is a char array or matlab style + char* fileName = NULL; + if(!mxIsClass(prhs[0], "string")){ + if(!mxIsChar(prhs[0])) mexErrMsgIdAndTxt("tiff:inputError","The first argument must be a string"); + fileName = mxArrayToString(prhs[0]); + } + else{ + mxArray* mString[1]; + mxArray* mCharA[1]; + + // Convert string to char array + mString[0] = mxDuplicateArray(prhs[0]); + mexCallMATLAB(1, mCharA, 1, mString, "char"); + fileName = mxArrayToString(mCharA[0]); + } + + // Handle the tilde character in filenames on Linux/Mac + #ifndef _WIN32 + if(strchr(fileName,'~')) fileName = expandTilde(fileName); + #endif + + uint8_t flipXY = 1; + //uint8_t flipXY = 0; + + + //if(nrhs > 2){ + // flipXY = (uint8_t)*(mxGetPr(prhs[2])); + //} + + + TIFFSetWarningHandler(DummyHandler); + TIFF* tif = TIFFOpen(fileName, "r"); + if(!tif) mexErrMsgIdAndTxt("tiff:inputError","File \"%s\" cannot be opened",fileName); + + uint64_t x = 1,y = 1,z = 1,bits = 1, startSlice = 0; + TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &x); + 
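// x is the image width; y (image length) is read next, and z is resolved below from the directory count, the requested slice range, or the ImageJ metadata +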
TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &y); + + if(nrhs == 1){ + uint16_t s = 0, m = 0, t = 1; + while(TIFFSetDirectory(tif,t)){ + s = t; + t *= 8; + if(s > t){ + t = 65535; + printf("Number of slices > 32768\n"); + break; + } + } + while(s != t){ + m = (s+t+1)/2; + if(TIFFSetDirectory(tif,m)){ + s = m; + } + else{ + if(m > 0) t = m-1; + else t = m; + } + } + z = s+1; + } + else{ + if(mxGetN(prhs[1]) != 2){ + mexErrMsgIdAndTxt("tiff:inputError","Input range is not 2"); + } + else{ + startSlice = (uint64_t)*(mxGetPr(prhs[1]))-1; + z = (uint64_t)*((mxGetPr(prhs[1])+1))-startSlice; + if (!TIFFSetDirectory(tif,startSlice+z-1) || !TIFFSetDirectory(tif,startSlice)){ + mexErrMsgIdAndTxt("tiff:rangeOutOfBound","Range is out of bounds"); + } + } + } + + TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits); + uint64_t stripSize = 1; + TIFFGetField(tif, TIFFTAG_ROWSPERSTRIP, &stripSize); + TIFFClose(tif); + + uint8_t imageJIm = 0; + if(isImageJIm(fileName)){ + imageJIm = 1; + uint64_t tempZ = imageJImGetZ(fileName); + if(tempZ) z = tempZ; + } + + uint64_t dim[3]; + dim[0] = y; + dim[1] = x; + dim[2] = z; + + + + // Case for ImageJ + if(imageJIm){ + if(bits == 8){ + plhs[0] = mxCreateNumericArray(3,dim,mxUINT8_CLASS, mxREAL); + uint8_t* tiff = (uint8_t*)mxGetPr(plhs[0]); + readTiffParallelImageJ(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else if(bits == 16){ + plhs[0] = mxCreateNumericArray(3,dim,mxUINT16_CLASS, mxREAL); + uint16_t* tiff = (uint16_t*)mxGetPr(plhs[0]); + readTiffParallelImageJ(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else if(bits == 32){ + plhs[0] = mxCreateNumericArray(3,dim,mxSINGLE_CLASS, mxREAL); + float* tiff = (float*)mxGetPr(plhs[0]); + readTiffParallelImageJ(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else if(bits == 64){ + plhs[0] = mxCreateNumericArray(3,dim,mxDOUBLE_CLASS, mxREAL); + double* tiff = (double*)mxGetPr(plhs[0]); + readTiffParallelImageJ(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else{ + mexErrMsgIdAndTxt("tiff:dataTypeError","Data type not suppported"); + } + } + // Case for 2D + else if(z <= 1){ + if(bits == 8){ + plhs[0] = mxCreateNumericArray(3,dim,mxUINT8_CLASS, mxREAL); + uint8_t* tiff = (uint8_t*)mxGetPr(plhs[0]); + readTiffParallel2D(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else if(bits == 16){ + plhs[0] = mxCreateNumericArray(3,dim,mxUINT16_CLASS, mxREAL); + uint16_t* tiff = (uint16_t*)mxGetPr(plhs[0]); + readTiffParallel2D(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else if(bits == 32){ + plhs[0] = mxCreateNumericArray(3,dim,mxSINGLE_CLASS, mxREAL); + float* tiff = (float*)mxGetPr(plhs[0]); + readTiffParallel2D(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else if(bits == 64){ + plhs[0] = mxCreateNumericArray(3,dim,mxDOUBLE_CLASS, mxREAL); + double* tiff = (double*)mxGetPr(plhs[0]); + readTiffParallel2D(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else{ + mexErrMsgIdAndTxt("tiff:dataTypeError","Data type not suppported"); + } + } + // Case for 3D + else{ + if(bits == 8){ + plhs[0] = mxCreateNumericArray(3,dim,mxUINT8_CLASS, mxREAL); + uint8_t* tiff = (uint8_t*)mxGetPr(plhs[0]); + readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else if(bits == 16){ + plhs[0] = mxCreateNumericArray(3,dim,mxUINT16_CLASS, mxREAL); + uint16_t* tiff = (uint16_t*)mxGetPr(plhs[0]); + 
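// fill the preallocated mxUINT16 buffer in place; readTiffParallel picks strip decoding or a raw block read based on compression and slice count +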
readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else if(bits == 32){ + plhs[0] = mxCreateNumericArray(3,dim,mxSINGLE_CLASS, mxREAL); + float* tiff = (float*)mxGetPr(plhs[0]); + readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else if(bits == 64){ + plhs[0] = mxCreateNumericArray(3,dim,mxDOUBLE_CLASS, mxREAL); + double* tiff = (double*)mxGetPr(plhs[0]); + readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); + } + else{ + mexErrMsgIdAndTxt("tiff:dataTypeError","Data type not suppported"); + } + } +} \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.h b/tools/llsm/parallelReadTiff.h new file mode 100644 index 000000000..e69de29bb diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c new file mode 100644 index 000000000..34912b3f0 --- /dev/null +++ b/tools/llsm_importer.c @@ -0,0 +1,14 @@ +#include +#include +#include + +#define ENABLE_MPI 1 + +#ifdef ENABLE_MPI +#include "mpi.h" +#endif + +#include "pdc.h" +#include "pdc_client_server_common.h" +#include "pdc_client_connect.h" + From 8dfd43f6b5174d4d1d7eed0b77c945cb6ca50d13 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 4 Apr 2023 13:39:36 -0500 Subject: [PATCH 043/806] update server checkpoint intervals --- src/server/pdc_server.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server/pdc_server.c b/src/server/pdc_server.c index 9b5428a5c..f51d00591 100644 --- a/src/server/pdc_server.c +++ b/src/server/pdc_server.c @@ -61,8 +61,8 @@ #include #endif -#define PDC_CHECKPOINT_INTERVAL 200 -#define PDC_CHECKPOINT_MIN_INTERVAL_SEC 300 +#define PDC_CHECKPOINT_INTERVAL 2000 +#define PDC_CHECKPOINT_MIN_INTERVAL_SEC 3600 // Global debug variable to control debug printfs int is_debug_g = 0; From 752aea828a7e8b44f87bb1cf462977c2810adcd2 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 4 Apr 2023 14:07:13 -0500 Subject: [PATCH 044/806] update gitignore --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index f706f8e1b..74675f7ef 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,7 @@ *.pdf src/install + +.vscode + +build \ No newline at end of file From ef7278d8dc11587c7ab064553a6f6709a53cb27b Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 4 Apr 2023 21:18:36 -0500 Subject: [PATCH 045/806] adding job scripts --- scripts/kv_exact/cori/clean.sh | 8 +++ scripts/kv_exact/cori/gen_scripts.sh | 25 +++++++++ scripts/kv_exact/cori/submit.sh | 78 ++++++++++++++++++++++++++++ scripts/kv_exact/cori/template.sh | 67 ++++++++++++++++++++++++ 4 files changed, 178 insertions(+) create mode 100644 scripts/kv_exact/cori/clean.sh create mode 100644 scripts/kv_exact/cori/gen_scripts.sh create mode 100644 scripts/kv_exact/cori/submit.sh create mode 100644 scripts/kv_exact/cori/template.sh diff --git a/scripts/kv_exact/cori/clean.sh b/scripts/kv_exact/cori/clean.sh new file mode 100644 index 000000000..9df3999a6 --- /dev/null +++ b/scripts/kv_exact/cori/clean.sh @@ -0,0 +1,8 @@ +#!/bin/bash +MAX_NODE=512 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + + rm -rf $i/* + +done diff --git a/scripts/kv_exact/cori/gen_scripts.sh b/scripts/kv_exact/cori/gen_scripts.sh new file mode 100644 index 000000000..54606da2b --- /dev/null +++ b/scripts/kv_exact/cori/gen_scripts.sh @@ -0,0 +1,25 @@ +#!/bin/bash +N_THREAD=NO +MAX_NODE=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + mkdir -p $i + for (( j = 1; j <= $MAX_ATTR; j*=4 )); do + for 
(( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do + JOBNAME=kvtag_bench_${i}_${j}_${k} + TARGET=./$i/$JOBNAME.sh + cp template.sh $TARGET + sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET + sed -i "s/NODENUM/${i}/g" $TARGET + sed -i "s/ATTRNUM/${j}/g" $TARGET + sed -i "s/ATTRLEN/${k}/g" $TARGET + if [[ "$i" -gt "16" ]]; then + sed -i "s/REG//g" $TARGET + else + sed -i "s/DBG//g" $TARGET + fi + done + done +done diff --git a/scripts/kv_exact/cori/submit.sh b/scripts/kv_exact/cori/submit.sh new file mode 100644 index 000000000..2ca6badf8 --- /dev/null +++ b/scripts/kv_exact/cori/submit.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# MIN_PROC=4 +# MAX_PROC=128 +MIN_PROC=1 +MAX_PROC=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +curdir=$(pwd) + +first_submit=1 + +for (( i = 1; i <= $MAX_PROC; i*=2 )); do + mkdir -p $i + for (( j = 1; j <= $MAX_ATTR; j*=4 )); do + for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do + JOBNAME=kvtag_bench_${i}_${j}_${k} + TARGET=./$i/JOBNAME.sh + + njob=`squeue -u $USER | grep kvtag_bench | wc -l` + echo $njob + while [ $njob -ge 4 ] + do + sleeptime=$[ ( $RANDOM % 1000 ) ] + sleep $sleeptime + njob=`squeue -u $USER | grep kvtag_bench | wc -l` + echo $njob + done + + if [[ $first_submit == 1 ]]; then + # Submit first job w/o dependency + echo "Submitting $TARGET" + job=`sbatch $TARGET` + first_submit=0 + else + echo "Submitting $TARGET after ${job: -8}" + job=`sbatch -d afterany:${job: -8} $TARGET` + fi + + sleeptime=$[ ( $RANDOM % 5 ) ] + sleep $sleeptime + done + done +done + + +# for (( j = $MIN_PROC; j <= $MAX_PROC ; j*=2 )); do + +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# while [ $njob -ge 4 ] +# do +# sleeptime=$[ ( $RANDOM % 1000 ) ] +# sleep $sleeptime +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# done + + +# cd $curdir/$j +# for filename in ./*.sh ; do + +# if [[ $first_submit == 1 ]]; then +# # Submit first job w/o dependency +# echo "Submitting $filename" +# job=`sbatch $filename` +# first_submit=0 +# else +# echo "Submitting $filename after ${job: -8}" +# job=`sbatch -d afterany:${job: -8} $filename` +# fi + +# sleeptime=$[ ( $RANDOM % 5 ) ] +# sleep $sleeptime + +# done +# done diff --git a/scripts/kv_exact/cori/template.sh b/scripts/kv_exact/cori/template.sh new file mode 100644 index 000000000..a1da6fda8 --- /dev/null +++ b/scripts/kv_exact/cori/template.sh @@ -0,0 +1,67 @@ +#!/bin/bash -l + +#REGSBATCH -p regular +#DBGSBATCH -p debug +#SBATCH -N NODENUM +#REGSBATCH -t 4:00:00 +#DBGSBATCH -t 0:30:00 +#SBATCH --gres=craynetwork:2 +#SBATCH -L SCRATCH +#SBATCH -C haswell +#SBATCH -J JOBNAME +#SBATCH -A m2621 +#SBATCH -o o%j.JOBNAME.out +#SBATCH -e o%j.JOBNAME.out + + +# export PDC_DEBUG=0 + +export PDC_TMPDIR=/global/cfs/cdirs/m2621/wzhang5/cori/install/pdc/conf + +rm -rf $PDC_TMPDIR/* + +REPEAT=1 + +N_NODE=NODENUM +NCLIENT=31 + +export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE +mkdir -p $PDC_TMPDIR + +let TOTALPROC=$NCLIENT*$N_NODE + +EXECPATH=/global/cfs/cdirs/m2621/wzhang5/cori/install/pdc/share/test/bin +SERVER=$EXECPATH/pdc_server.exe +CLIENT=$EXECPATH/kvtag_scale_add_get +CLOSE=$EXECPATH/close_server + +chmod +x $EXECPATH/* + +MAX_OBJ_COUNT=$((1024*1024*1024)) +OBJ_INCR=$((MAX_OBJ_COUNT/1024)) +ATTR_COUNT=ATTRNUM +ATTR_LENGTH=ATTRLEN +QUERY_COUNT=$((1024*1024)) + +date + +echo "" +echo "=============" +echo "$i Init server" +echo "=============" +srun -N $N_NODE -n $N_NODE -c 2 --mem=100000 --cpu_bind=cores --gres=craynetwork:1 --overlap $SERVER & +sleep 5 + + +echo "============================================" +echo "KVTAGS with $N_NODE nodes" +echo 
"============================================" +srun -N $N_NODE -n $TOTALPROC -c 2 --mem=100000 --cpu_bind=cores --gres=craynetwork:1 --overlap $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE + +echo "" +echo "=================" +echo "$i Closing server" +echo "=================" +srun -N 1 -n 1 -c 2 --mem=25600 --gres=craynetwork:1 --cpu_bind=cores --overlap $CLOSE + +date From d34ce8b30b2562fc5c0336bf4c249c7ce7b5b6f9 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 13:15:10 -0500 Subject: [PATCH 046/806] adding one debugging msg --- scripts/kv_exact/cori/clean.sh | 2 +- src/api/pdc_client_connect.c | 3 +++ src/tests/kvtag_add_get_benchmark.c | 2 +- src/utils/include/pdc_private.h | 1 + 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/scripts/kv_exact/cori/clean.sh b/scripts/kv_exact/cori/clean.sh index 9df3999a6..d6476962e 100644 --- a/scripts/kv_exact/cori/clean.sh +++ b/scripts/kv_exact/cori/clean.sh @@ -5,4 +5,4 @@ for (( i = 1; i <= $MAX_NODE; i*=2 )); do rm -rf $i/* -done +done \ No newline at end of file diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index 0dde1b5a6..ed59e565f 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -6960,6 +6960,9 @@ PDC_add_kvtag(pdcid_t obj_id, pdc_kvtag_t *kvtag, int is_cont) in.hash_value = PDC_get_hash_by_name(cont_prop->cont_info_pub->name); } + //TODO: delete this line after debugging. + printf("PDC_add_kvtag::in.obj_id = %llu \n ", in.obj_id); + server_id = PDC_get_server_by_obj_id(meta_id, pdc_server_num_g); // Debug statistics for counting number of messages sent to each server. diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 5586f4b6c..fa8b1eca1 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -221,7 +221,7 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value for (j = 0; j < n_attr; j++) { sprintf(tag_name, "tag%llu.%llu", v, j); if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)tag_values[j], tag_value_len + 1) < 0) - printf("fail to add a kvtag to o%llu\n", i + my_obj_s); + printf("fail to add a kvtag to o%llu\n", v); } } } diff --git a/src/utils/include/pdc_private.h b/src/utils/include/pdc_private.h index 5f5a2712f..b6ca3bcfd 100644 --- a/src/utils/include/pdc_private.h +++ b/src/utils/include/pdc_private.h @@ -28,6 +28,7 @@ #include "pdc_config.h" #include "pdc_public.h" #include +// #include /* gettimeofday() */ /****************************/ /* Library Private Typedefs */ From d78533e3cc12898a23a2769c96990d5e2de20e92 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 14:56:00 -0500 Subject: [PATCH 047/806] update container creation to collective mode for debugging purpose --- src/tests/kvtag_add_get_benchmark.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index fa8b1eca1..f0e7e7110 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -156,7 +156,7 @@ init_test(int my_rank, int proc_num, uint64_t n_obj_incr, uint64_t n_query, uint printf("Fail to create container property @ line %d!\n", __LINE__); // create a container - *cont = PDCcont_create("c1", *cont_prop); + *cont = PDCcont_create_col("c1", *cont_prop); if (*cont <= 0) printf("Fail to create container @ line %d!\n", __LINE__); From 334efc4748145da09e13a7549e35c286f2af1ef5 Mon Sep 17 00:00:00 2001 From: Wei 
Zhang Date: Wed, 5 Apr 2023 15:04:24 -0500 Subject: [PATCH 048/806] update container creation to collective mode for debugging purpose --- src/api/pdc_client_connect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index ed59e565f..897379e77 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -6961,7 +6961,7 @@ PDC_add_kvtag(pdcid_t obj_id, pdc_kvtag_t *kvtag, int is_cont) } //TODO: delete this line after debugging. - printf("PDC_add_kvtag::in.obj_id = %llu \n ", in.obj_id); + printf("==CLIENT[%d]: PDC_add_kvtag::in.obj_id = %llu \n ", pdc_client_mpi_rank_g, in.obj_id); server_id = PDC_get_server_by_obj_id(meta_id, pdc_server_num_g); From c704bc6b4f1581ef8353d69f009edf393378e3b7 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 15:37:20 -0500 Subject: [PATCH 049/806] update container creation to collective mode for debugging purpose --- src/server/pdc_server_metadata.c | 2 ++ src/tests/kvtag_add_get_benchmark.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/server/pdc_server_metadata.c b/src/server/pdc_server_metadata.c index 7253a8f1c..e2411fc2d 100644 --- a/src/server/pdc_server_metadata.c +++ b/src/server/pdc_server_metadata.c @@ -2555,6 +2555,8 @@ PDC_Server_add_kvtag(metadata_add_kvtag_in_t *in, metadata_add_tag_out_t *out) hash_key = in->hash_value; obj_id = in->obj_id; + printf("==SERVER[%d]: PDC_add_kvtag::in.obj_id = %llu \n ", pdc_server_rank_g, in.obj_id); + #ifdef ENABLE_MULTITHREAD // Obtain lock for hash table unlocked = 0; diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index f0e7e7110..fa8b1eca1 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -156,7 +156,7 @@ init_test(int my_rank, int proc_num, uint64_t n_obj_incr, uint64_t n_query, uint printf("Fail to create container property @ line %d!\n", __LINE__); // create a container - *cont = PDCcont_create_col("c1", *cont_prop); + *cont = PDCcont_create("c1", *cont_prop); if (*cont <= 0) printf("Fail to create container @ line %d!\n", __LINE__); From 0139db71ad2e679c6717c9844663531791e1323f Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 15:39:35 -0500 Subject: [PATCH 050/806] update container creation to collective mode for debugging purpose --- src/server/pdc_server_metadata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/pdc_server_metadata.c b/src/server/pdc_server_metadata.c index e2411fc2d..79cd22dfd 100644 --- a/src/server/pdc_server_metadata.c +++ b/src/server/pdc_server_metadata.c @@ -2555,7 +2555,7 @@ PDC_Server_add_kvtag(metadata_add_kvtag_in_t *in, metadata_add_tag_out_t *out) hash_key = in->hash_value; obj_id = in->obj_id; - printf("==SERVER[%d]: PDC_add_kvtag::in.obj_id = %llu \n ", pdc_server_rank_g, in.obj_id); + printf("==SERVER[%d]: PDC_add_kvtag::in.obj_id = %llu \n ", pdc_server_rank_g, obj_id); #ifdef ENABLE_MULTITHREAD // Obtain lock for hash table From d0b47a62a0a03a0dda16d6a6b0f7c20e580f94b7 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 21:33:13 -0500 Subject: [PATCH 051/806] update output for uint64_t --- scripts/kv_exact/cori/clean.sh | 8 --- scripts/kv_exact/cori/gen_scripts.sh | 25 --------- scripts/kv_exact/cori/submit.sh | 78 ---------------------------- scripts/kv_exact/cori/template.sh | 67 ------------------------ src/tests/kvtag_add_get_benchmark.c | 53 ++++++++++--------- 5 files changed, 29 insertions(+), 
202 deletions(-) delete mode 100644 scripts/kv_exact/cori/clean.sh delete mode 100644 scripts/kv_exact/cori/gen_scripts.sh delete mode 100644 scripts/kv_exact/cori/submit.sh delete mode 100644 scripts/kv_exact/cori/template.sh diff --git a/scripts/kv_exact/cori/clean.sh b/scripts/kv_exact/cori/clean.sh deleted file mode 100644 index d6476962e..000000000 --- a/scripts/kv_exact/cori/clean.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -MAX_NODE=512 - -for (( i = 1; i <= $MAX_NODE; i*=2 )); do - - rm -rf $i/* - -done \ No newline at end of file diff --git a/scripts/kv_exact/cori/gen_scripts.sh b/scripts/kv_exact/cori/gen_scripts.sh deleted file mode 100644 index 54606da2b..000000000 --- a/scripts/kv_exact/cori/gen_scripts.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -N_THREAD=NO -MAX_NODE=512 -MAX_ATTR=1024 -MAX_ATTRLEN=1000 - -for (( i = 1; i <= $MAX_NODE; i*=2 )); do - mkdir -p $i - for (( j = 1; j <= $MAX_ATTR; j*=4 )); do - for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do - JOBNAME=kvtag_bench_${i}_${j}_${k} - TARGET=./$i/$JOBNAME.sh - cp template.sh $TARGET - sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET - sed -i "s/NODENUM/${i}/g" $TARGET - sed -i "s/ATTRNUM/${j}/g" $TARGET - sed -i "s/ATTRLEN/${k}/g" $TARGET - if [[ "$i" -gt "16" ]]; then - sed -i "s/REG//g" $TARGET - else - sed -i "s/DBG//g" $TARGET - fi - done - done -done diff --git a/scripts/kv_exact/cori/submit.sh b/scripts/kv_exact/cori/submit.sh deleted file mode 100644 index 2ca6badf8..000000000 --- a/scripts/kv_exact/cori/submit.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -# MIN_PROC=4 -# MAX_PROC=128 -MIN_PROC=1 -MAX_PROC=512 -MAX_ATTR=1024 -MAX_ATTRLEN=1000 - -curdir=$(pwd) - -first_submit=1 - -for (( i = 1; i <= $MAX_PROC; i*=2 )); do - mkdir -p $i - for (( j = 1; j <= $MAX_ATTR; j*=4 )); do - for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do - JOBNAME=kvtag_bench_${i}_${j}_${k} - TARGET=./$i/JOBNAME.sh - - njob=`squeue -u $USER | grep kvtag_bench | wc -l` - echo $njob - while [ $njob -ge 4 ] - do - sleeptime=$[ ( $RANDOM % 1000 ) ] - sleep $sleeptime - njob=`squeue -u $USER | grep kvtag_bench | wc -l` - echo $njob - done - - if [[ $first_submit == 1 ]]; then - # Submit first job w/o dependency - echo "Submitting $TARGET" - job=`sbatch $TARGET` - first_submit=0 - else - echo "Submitting $TARGET after ${job: -8}" - job=`sbatch -d afterany:${job: -8} $TARGET` - fi - - sleeptime=$[ ( $RANDOM % 5 ) ] - sleep $sleeptime - done - done -done - - -# for (( j = $MIN_PROC; j <= $MAX_PROC ; j*=2 )); do - -# njob=`squeue -u $USER | grep vpic | wc -l` -# echo $njob -# while [ $njob -ge 4 ] -# do -# sleeptime=$[ ( $RANDOM % 1000 ) ] -# sleep $sleeptime -# njob=`squeue -u $USER | grep vpic | wc -l` -# echo $njob -# done - - -# cd $curdir/$j -# for filename in ./*.sh ; do - -# if [[ $first_submit == 1 ]]; then -# # Submit first job w/o dependency -# echo "Submitting $filename" -# job=`sbatch $filename` -# first_submit=0 -# else -# echo "Submitting $filename after ${job: -8}" -# job=`sbatch -d afterany:${job: -8} $filename` -# fi - -# sleeptime=$[ ( $RANDOM % 5 ) ] -# sleep $sleeptime - -# done -# done diff --git a/scripts/kv_exact/cori/template.sh b/scripts/kv_exact/cori/template.sh deleted file mode 100644 index a1da6fda8..000000000 --- a/scripts/kv_exact/cori/template.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash -l - -#REGSBATCH -p regular -#DBGSBATCH -p debug -#SBATCH -N NODENUM -#REGSBATCH -t 4:00:00 -#DBGSBATCH -t 0:30:00 -#SBATCH --gres=craynetwork:2 -#SBATCH -L SCRATCH -#SBATCH -C haswell -#SBATCH -J JOBNAME -#SBATCH -A m2621 
-#SBATCH -o o%j.JOBNAME.out -#SBATCH -e o%j.JOBNAME.out - - -# export PDC_DEBUG=0 - -export PDC_TMPDIR=/global/cfs/cdirs/m2621/wzhang5/cori/install/pdc/conf - -rm -rf $PDC_TMPDIR/* - -REPEAT=1 - -N_NODE=NODENUM -NCLIENT=31 - -export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE -mkdir -p $PDC_TMPDIR - -let TOTALPROC=$NCLIENT*$N_NODE - -EXECPATH=/global/cfs/cdirs/m2621/wzhang5/cori/install/pdc/share/test/bin -SERVER=$EXECPATH/pdc_server.exe -CLIENT=$EXECPATH/kvtag_scale_add_get -CLOSE=$EXECPATH/close_server - -chmod +x $EXECPATH/* - -MAX_OBJ_COUNT=$((1024*1024*1024)) -OBJ_INCR=$((MAX_OBJ_COUNT/1024)) -ATTR_COUNT=ATTRNUM -ATTR_LENGTH=ATTRLEN -QUERY_COUNT=$((1024*1024)) - -date - -echo "" -echo "=============" -echo "$i Init server" -echo "=============" -srun -N $N_NODE -n $N_NODE -c 2 --mem=100000 --cpu_bind=cores --gres=craynetwork:1 --overlap $SERVER & -sleep 5 - - -echo "============================================" -echo "KVTAGS with $N_NODE nodes" -echo "============================================" -srun -N $N_NODE -n $TOTALPROC -c 2 --mem=100000 --cpu_bind=cores --gres=craynetwork:1 --overlap $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE - -echo "" -echo "=================" -echo "$i Closing server" -echo "=================" -srun -N 1 -n 1 -c 2 --mem=25600 --gres=craynetwork:1 --cpu_bind=cores --overlap $CLOSE - -date diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index fa8b1eca1..bce854644 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -61,7 +61,7 @@ atoui64(char *arg) * @return 0 if the function executes successfully, non-zero if an error occurs. */ int -assign_work_to_rank(int rank, int size, uint64_t nwork, uint64_t *my_count, uint64_t *my_start) +assign_work_to_rank(uint64_t rank, uint64_t size, uint64_t nwork, uint64_t *my_count, uint64_t *my_start) { if (rank > size || my_count == NULL || my_start == NULL) { printf("assign_work_to_rank(): Error with input!\n"); @@ -169,8 +169,8 @@ init_test(int my_rank, int proc_num, uint64_t n_obj_incr, uint64_t n_query, uint printf("create obj_ids array\n"); // Create a number of objects, add at least one tag to that object - assign_work_to_rank(my_rank, proc_num, n_obj_incr, my_obj, my_obj_s); - assign_work_to_rank(my_rank, proc_num, n_query, my_query, my_query_s); + assign_work_to_rank((uint64_t)my_rank, (uint64_t)proc_num, n_obj_incr, my_obj, my_obj_s); + assign_work_to_rank((uint64_t)my_rank, (uint64_t)proc_num, n_query, my_query, my_query_s); return (pdcid_t *)calloc(*my_obj, sizeof(pdcid_t)); } @@ -219,9 +219,9 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value for (i = 0; i < my_obj; i++) { v = i + my_obj_s; for (j = 0; j < n_attr; j++) { - sprintf(tag_name, "tag%llu.%llu", v, j); + sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", v, i); if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)tag_values[j], tag_value_len + 1) < 0) - printf("fail to add a kvtag to o%llu\n", v); + printf("fail to add a kvtag to o%" PRIu64 "\n", v); } } } @@ -242,9 +242,9 @@ get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, int n_attr, void **tag_valu char tag_name[128]; for (i = 0; i < n_attr; i++) { - sprintf(tag_name, "tag%llu.%llu", obj_name_v, i); + sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", obj_name_v, i); if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[i], (void *)&value_size[i]) < 0) - printf("fail to get a kvtag from o%llu\n", v); + printf("fail to get a kvtag from o%" PRIu64 "\n", v); } } @@ 
-286,8 +286,8 @@ check_and_release_query_result(uint64_t n_query, uint64_t my_obj, uint64_t my_ob for (j = 0; j < n_attr; j++) { char *query_rst = (char *)values[j + i * n_attr]; if (strcmp(query_rst, tag_values[j]) != 0) { - printf("Error with retrieved tag from o%llu. Expected %s, Found %s \n", v, tag_values[j], - query_rst); + printf("Error with retrieved tag from o%" PRIu64 ". Expected %s, Found %s \n", v, + tag_values[j], query_rst); } free(values[j + i * n_attr]); } @@ -297,7 +297,7 @@ check_and_release_query_result(uint64_t n_query, uint64_t my_obj, uint64_t my_ob for (i = 0; i < my_obj; i++) { v = i + my_obj_s; if (PDCobj_close(obj_ids[i]) < 0) - printf("fail to close object o%llu\n", v); + printf("fail to close object o%" PRIu64 "\n", v); } } @@ -395,10 +395,12 @@ main(int argc, char *argv[]) total_object_count += n_obj_incr; #endif if (my_rank == 0) { - printf("Iteration %4llu : Objects: %6llu , Time: %.4f sec. Object throughput in this iteration: " + printf("Iteration %" PRIu64 " : Objects: %" PRIu64 + " , Time: %.4f sec. Object throughput in this iteration: " "%.4f .\n", k, n_obj_incr, step_elapse, ((double)n_obj_incr) / step_elapse); - printf("Overall %4llu : Objects: %6llu , Time: %.4f sec. Overall object throughput: " + printf("Overall %" PRIu64 " : Objects: %" PRIu64 + " , Time: %.4f sec. Overall object throughput: " "%.4f .\n", k, total_object_count, total_object_time, ((double)total_object_count) / total_object_time); @@ -418,12 +420,12 @@ main(int argc, char *argv[]) total_tag_count += n_obj_incr * n_attr; #endif if (my_rank == 0) { - printf( - "Iteration %4llu : Tags: %6llu , Time: %.4f sec. Tag throughput in this iteration: %.4f .\n", - k, n_obj_incr * n_attr, step_elapse, (double)(n_obj_incr * n_attr) / step_elapse); - printf( - "Overall %4llu : Tags: %6llu , Time: %.4f sec. Overall tag throughput: %.4f .\n", - k, total_tag_count, total_tag_time, ((double)total_tag_count) / total_tag_time); + printf("Iteration %" PRIu64 " : Tags: %" PRIu64 + " , Time: %.4f sec. Tag throughput in this iteration: %.4f .\n", + k, n_obj_incr * n_attr, step_elapse, (double)(n_obj_incr * n_attr) / step_elapse); + printf("Overall %" PRIu64 " : Tags: %" PRIu64 + " , Time: %.4f sec. Overall tag throughput: %.4f .\n", + k, total_tag_count, total_tag_time, ((double)total_tag_count) / total_tag_time); } query_rst_cache = (void **)malloc(my_query * n_attr * sizeof(void *)); @@ -442,10 +444,12 @@ main(int argc, char *argv[]) total_query_count += n_query * n_attr; #endif if (my_rank == 0) { - printf("Iteration %4llu : Queries: %6llu , Time: %.4f sec. Query throughput in this iteration: " + printf("Iteration %" PRIu64 " : Queries: %" PRIu64 + " , Time: %.4f sec. Query throughput in this iteration: " "%.4f .\n", k, n_query * n_attr, step_elapse, (double)(n_query * n_attr) / step_elapse); - printf("Overall %4llu : Queries: %6llu , Time: %.4f sec. Overall query throughput: " + printf("Overall %" PRIu64 " : Queries: %" PRIu64 + " , Time: %.4f sec. 
Overall query throughput: " "%.4f .\n", k, total_query_count, total_query_time, ((double)total_query_count) / total_query_time); } @@ -461,10 +465,11 @@ main(int argc, char *argv[]) if (my_rank == 0) { printf("Final Report: \n"); - printf("[Final Report 1] Servers: %llu , Clients: %llu , C/S ratio: %.4f \n", n_servers, n_clients, - (double)n_clients / (double)n_servers); - printf("[Final Report 2] Iterations: %llu , Objects: %llu , Tags/Object: %llu , Queries/Iteration: " - "%llu , \n", + printf("[Final Report 1] Servers: %" PRIu64 " , Clients: %" PRIu64 " , C/S ratio: %.4f \n", n_servers, + n_clients, (double)n_clients / (double)n_servers); + printf("[Final Report 2] Iterations: %" PRIu64 " , Objects: %" PRIu64 " , Tags/Object: %" PRIu64 + " , Queries/Iteration: " + "%" PRIu64 " , \n", k, curr_total_obj, n_attr, n_query); printf("[Final Report 3] Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", (double)curr_total_obj / total_object_time, (double)(curr_total_obj * n_attr) / total_tag_time, From 70ffe9daa55cbd1d1e46ce4df32b7030193461fd Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 21:33:34 -0500 Subject: [PATCH 052/806] add scripts --- scripts/kvtag_add_get_benchmark/cori/clean.sh | 8 ++ .../cori/gen_scripts.sh | 25 ++++++ .../kvtag_add_get_benchmark/cori/submit.sh | 78 +++++++++++++++++++ .../kvtag_add_get_benchmark/cori/template.sh | 68 ++++++++++++++++ scripts/kvtag_add_get_scale/cori/clean.sh | 8 ++ .../kvtag_add_get_scale/cori/gen_scripts.sh | 25 ++++++ scripts/kvtag_add_get_scale/cori/submit.sh | 78 +++++++++++++++++++ scripts/kvtag_add_get_scale/cori/template.sh | 68 ++++++++++++++++ .../kvtag_add_get_scale/perlmutter/clean.sh | 8 ++ .../perlmutter/gen_scripts.sh | 25 ++++++ .../kvtag_add_get_scale/perlmutter/submit.sh | 78 +++++++++++++++++++ .../perlmutter/template.sh | 69 ++++++++++++++++ 12 files changed, 538 insertions(+) create mode 100644 scripts/kvtag_add_get_benchmark/cori/clean.sh create mode 100644 scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh create mode 100644 scripts/kvtag_add_get_benchmark/cori/submit.sh create mode 100644 scripts/kvtag_add_get_benchmark/cori/template.sh create mode 100644 scripts/kvtag_add_get_scale/cori/clean.sh create mode 100644 scripts/kvtag_add_get_scale/cori/gen_scripts.sh create mode 100644 scripts/kvtag_add_get_scale/cori/submit.sh create mode 100644 scripts/kvtag_add_get_scale/cori/template.sh create mode 100644 scripts/kvtag_add_get_scale/perlmutter/clean.sh create mode 100644 scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh create mode 100644 scripts/kvtag_add_get_scale/perlmutter/submit.sh create mode 100644 scripts/kvtag_add_get_scale/perlmutter/template.sh diff --git a/scripts/kvtag_add_get_benchmark/cori/clean.sh b/scripts/kvtag_add_get_benchmark/cori/clean.sh new file mode 100644 index 000000000..d6476962e --- /dev/null +++ b/scripts/kvtag_add_get_benchmark/cori/clean.sh @@ -0,0 +1,8 @@ +#!/bin/bash +MAX_NODE=512 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + + rm -rf $i/* + +done \ No newline at end of file diff --git a/scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh b/scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh new file mode 100644 index 000000000..54606da2b --- /dev/null +++ b/scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh @@ -0,0 +1,25 @@ +#!/bin/bash +N_THREAD=NO +MAX_NODE=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + mkdir -p $i + for (( j = 1; j <= $MAX_ATTR; j*=4 )); do + for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do 
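+      # one job script per (node count i, attribute count j, attribute length k) combination, instantiated from template.sh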
+ JOBNAME=kvtag_bench_${i}_${j}_${k} + TARGET=./$i/$JOBNAME.sh + cp template.sh $TARGET + sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET + sed -i "s/NODENUM/${i}/g" $TARGET + sed -i "s/ATTRNUM/${j}/g" $TARGET + sed -i "s/ATTRLEN/${k}/g" $TARGET + if [[ "$i" -gt "16" ]]; then + sed -i "s/REG//g" $TARGET + else + sed -i "s/DBG//g" $TARGET + fi + done + done +done diff --git a/scripts/kvtag_add_get_benchmark/cori/submit.sh b/scripts/kvtag_add_get_benchmark/cori/submit.sh new file mode 100644 index 000000000..2ca6badf8 --- /dev/null +++ b/scripts/kvtag_add_get_benchmark/cori/submit.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# MIN_PROC=4 +# MAX_PROC=128 +MIN_PROC=1 +MAX_PROC=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +curdir=$(pwd) + +first_submit=1 + +for (( i = 1; i <= $MAX_PROC; i*=2 )); do + mkdir -p $i + for (( j = 1; j <= $MAX_ATTR; j*=4 )); do + for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do + JOBNAME=kvtag_bench_${i}_${j}_${k} + TARGET=./$i/JOBNAME.sh + + njob=`squeue -u $USER | grep kvtag_bench | wc -l` + echo $njob + while [ $njob -ge 4 ] + do + sleeptime=$[ ( $RANDOM % 1000 ) ] + sleep $sleeptime + njob=`squeue -u $USER | grep kvtag_bench | wc -l` + echo $njob + done + + if [[ $first_submit == 1 ]]; then + # Submit first job w/o dependency + echo "Submitting $TARGET" + job=`sbatch $TARGET` + first_submit=0 + else + echo "Submitting $TARGET after ${job: -8}" + job=`sbatch -d afterany:${job: -8} $TARGET` + fi + + sleeptime=$[ ( $RANDOM % 5 ) ] + sleep $sleeptime + done + done +done + + +# for (( j = $MIN_PROC; j <= $MAX_PROC ; j*=2 )); do + +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# while [ $njob -ge 4 ] +# do +# sleeptime=$[ ( $RANDOM % 1000 ) ] +# sleep $sleeptime +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# done + + +# cd $curdir/$j +# for filename in ./*.sh ; do + +# if [[ $first_submit == 1 ]]; then +# # Submit first job w/o dependency +# echo "Submitting $filename" +# job=`sbatch $filename` +# first_submit=0 +# else +# echo "Submitting $filename after ${job: -8}" +# job=`sbatch -d afterany:${job: -8} $filename` +# fi + +# sleeptime=$[ ( $RANDOM % 5 ) ] +# sleep $sleeptime + +# done +# done diff --git a/scripts/kvtag_add_get_benchmark/cori/template.sh b/scripts/kvtag_add_get_benchmark/cori/template.sh new file mode 100644 index 000000000..73eb670c5 --- /dev/null +++ b/scripts/kvtag_add_get_benchmark/cori/template.sh @@ -0,0 +1,68 @@ +#!/bin/bash -l + +#REGSBATCH -p regular +#DBGSBATCH -p debug +#SBATCH -N NODENUM +#REGSBATCH -t 4:00:00 +#DBGSBATCH -t 0:30:00 +#SBATCH --gres=craynetwork:2 +#SBATCH -L SCRATCH +#SBATCH -C haswell +#SBATCH -J JOBNAME +#SBATCH -A m2621 +#SBATCH -o o%j.JOBNAME.out +#SBATCH -e o%j.JOBNAME.out + + +# export PDC_DEBUG=0 + +export PDC_TMPDIR=/global/cscratch1/sd/wzhang5/data/pdc/conf + +rm -rf $PDC_TMPDIR/* + +REPEAT=1 + +N_NODE=NODENUM +NCLIENT=31 + +export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE +mkdir -p $PDC_TMPDIR + +let TOTALPROC=$NCLIENT*$N_NODE + +EXECPATH=/global/cfs/cdirs/m2621/wzhang5/cori/install/pdc/share/test/bin +SERVER=$EXECPATH/pdc_server.exe +CLIENT=$EXECPATH/kvtag_add_get_benchmark +CLOSE=$EXECPATH/close_server + +chmod +x $EXECPATH/* + +MAX_OBJ_COUNT=$((1024*1024)) +OBJ_INCR=$((MAX_OBJ_COUNT/1024)) +ATTR_COUNT=ATTRNUM +ATTR_LENGTH=ATTRLEN +QUERY_COUNT=$((1024)) + +date + + +echo "" +echo "=============" +echo "$i Init server" +echo "=============" +srun -N $N_NODE -n $N_NODE -c 2 --mem=100000 --cpu_bind=cores --gres=craynetwork:1 --overlap stdbuf -i0 -o0 -e0 $SERVER & +sleep 5 + + +echo 
"============================================" +echo "KVTAGS with $N_NODE nodes" +echo "============================================" +srun -N $N_NODE -n $TOTALPROC -c 2 --mem=100000 --cpu_bind=cores --gres=craynetwork:1 --overlap stdbuf -i0 -o0 -e0 $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE + +echo "" +echo "=================" +echo "$i Closing server" +echo "=================" +srun -N 1 -n 1 -c 2 --mem=25600 --gres=craynetwork:1 --cpu_bind=cores --overlap stdbuf -i0 -o0 -e0 $CLOSE + +date diff --git a/scripts/kvtag_add_get_scale/cori/clean.sh b/scripts/kvtag_add_get_scale/cori/clean.sh new file mode 100644 index 000000000..d6476962e --- /dev/null +++ b/scripts/kvtag_add_get_scale/cori/clean.sh @@ -0,0 +1,8 @@ +#!/bin/bash +MAX_NODE=512 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + + rm -rf $i/* + +done \ No newline at end of file diff --git a/scripts/kvtag_add_get_scale/cori/gen_scripts.sh b/scripts/kvtag_add_get_scale/cori/gen_scripts.sh new file mode 100644 index 000000000..54606da2b --- /dev/null +++ b/scripts/kvtag_add_get_scale/cori/gen_scripts.sh @@ -0,0 +1,25 @@ +#!/bin/bash +N_THREAD=NO +MAX_NODE=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + mkdir -p $i + for (( j = 1; j <= $MAX_ATTR; j*=4 )); do + for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do + JOBNAME=kvtag_bench_${i}_${j}_${k} + TARGET=./$i/$JOBNAME.sh + cp template.sh $TARGET + sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET + sed -i "s/NODENUM/${i}/g" $TARGET + sed -i "s/ATTRNUM/${j}/g" $TARGET + sed -i "s/ATTRLEN/${k}/g" $TARGET + if [[ "$i" -gt "16" ]]; then + sed -i "s/REG//g" $TARGET + else + sed -i "s/DBG//g" $TARGET + fi + done + done +done diff --git a/scripts/kvtag_add_get_scale/cori/submit.sh b/scripts/kvtag_add_get_scale/cori/submit.sh new file mode 100644 index 000000000..2ca6badf8 --- /dev/null +++ b/scripts/kvtag_add_get_scale/cori/submit.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# MIN_PROC=4 +# MAX_PROC=128 +MIN_PROC=1 +MAX_PROC=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +curdir=$(pwd) + +first_submit=1 + +for (( i = 1; i <= $MAX_PROC; i*=2 )); do + mkdir -p $i + for (( j = 1; j <= $MAX_ATTR; j*=4 )); do + for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do + JOBNAME=kvtag_bench_${i}_${j}_${k} + TARGET=./$i/JOBNAME.sh + + njob=`squeue -u $USER | grep kvtag_bench | wc -l` + echo $njob + while [ $njob -ge 4 ] + do + sleeptime=$[ ( $RANDOM % 1000 ) ] + sleep $sleeptime + njob=`squeue -u $USER | grep kvtag_bench | wc -l` + echo $njob + done + + if [[ $first_submit == 1 ]]; then + # Submit first job w/o dependency + echo "Submitting $TARGET" + job=`sbatch $TARGET` + first_submit=0 + else + echo "Submitting $TARGET after ${job: -8}" + job=`sbatch -d afterany:${job: -8} $TARGET` + fi + + sleeptime=$[ ( $RANDOM % 5 ) ] + sleep $sleeptime + done + done +done + + +# for (( j = $MIN_PROC; j <= $MAX_PROC ; j*=2 )); do + +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# while [ $njob -ge 4 ] +# do +# sleeptime=$[ ( $RANDOM % 1000 ) ] +# sleep $sleeptime +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# done + + +# cd $curdir/$j +# for filename in ./*.sh ; do + +# if [[ $first_submit == 1 ]]; then +# # Submit first job w/o dependency +# echo "Submitting $filename" +# job=`sbatch $filename` +# first_submit=0 +# else +# echo "Submitting $filename after ${job: -8}" +# job=`sbatch -d afterany:${job: -8} $filename` +# fi + +# sleeptime=$[ ( $RANDOM % 5 ) ] +# sleep $sleeptime + +# done +# done diff --git 
a/scripts/kvtag_add_get_scale/cori/template.sh b/scripts/kvtag_add_get_scale/cori/template.sh new file mode 100644 index 000000000..73eb670c5 --- /dev/null +++ b/scripts/kvtag_add_get_scale/cori/template.sh @@ -0,0 +1,68 @@ +#!/bin/bash -l + +#REGSBATCH -p regular +#DBGSBATCH -p debug +#SBATCH -N NODENUM +#REGSBATCH -t 4:00:00 +#DBGSBATCH -t 0:30:00 +#SBATCH --gres=craynetwork:2 +#SBATCH -L SCRATCH +#SBATCH -C haswell +#SBATCH -J JOBNAME +#SBATCH -A m2621 +#SBATCH -o o%j.JOBNAME.out +#SBATCH -e o%j.JOBNAME.out + + +# export PDC_DEBUG=0 + +export PDC_TMPDIR=/global/cscratch1/sd/wzhang5/data/pdc/conf + +rm -rf $PDC_TMPDIR/* + +REPEAT=1 + +N_NODE=NODENUM +NCLIENT=31 + +export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE +mkdir -p $PDC_TMPDIR + +let TOTALPROC=$NCLIENT*$N_NODE + +EXECPATH=/global/cfs/cdirs/m2621/wzhang5/cori/install/pdc/share/test/bin +SERVER=$EXECPATH/pdc_server.exe +CLIENT=$EXECPATH/kvtag_add_get_benchmark +CLOSE=$EXECPATH/close_server + +chmod +x $EXECPATH/* + +MAX_OBJ_COUNT=$((1024*1024)) +OBJ_INCR=$((MAX_OBJ_COUNT/1024)) +ATTR_COUNT=ATTRNUM +ATTR_LENGTH=ATTRLEN +QUERY_COUNT=$((1024)) + +date + + +echo "" +echo "=============" +echo "$i Init server" +echo "=============" +srun -N $N_NODE -n $N_NODE -c 2 --mem=100000 --cpu_bind=cores --gres=craynetwork:1 --overlap stdbuf -i0 -o0 -e0 $SERVER & +sleep 5 + + +echo "============================================" +echo "KVTAGS with $N_NODE nodes" +echo "============================================" +srun -N $N_NODE -n $TOTALPROC -c 2 --mem=100000 --cpu_bind=cores --gres=craynetwork:1 --overlap stdbuf -i0 -o0 -e0 $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE + +echo "" +echo "=================" +echo "$i Closing server" +echo "=================" +srun -N 1 -n 1 -c 2 --mem=25600 --gres=craynetwork:1 --cpu_bind=cores --overlap stdbuf -i0 -o0 -e0 $CLOSE + +date diff --git a/scripts/kvtag_add_get_scale/perlmutter/clean.sh b/scripts/kvtag_add_get_scale/perlmutter/clean.sh new file mode 100644 index 000000000..d6476962e --- /dev/null +++ b/scripts/kvtag_add_get_scale/perlmutter/clean.sh @@ -0,0 +1,8 @@ +#!/bin/bash +MAX_NODE=512 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + + rm -rf $i/* + +done \ No newline at end of file diff --git a/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh b/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh new file mode 100644 index 000000000..54606da2b --- /dev/null +++ b/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh @@ -0,0 +1,25 @@ +#!/bin/bash +N_THREAD=NO +MAX_NODE=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + mkdir -p $i + for (( j = 1; j <= $MAX_ATTR; j*=4 )); do + for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do + JOBNAME=kvtag_bench_${i}_${j}_${k} + TARGET=./$i/$JOBNAME.sh + cp template.sh $TARGET + sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET + sed -i "s/NODENUM/${i}/g" $TARGET + sed -i "s/ATTRNUM/${j}/g" $TARGET + sed -i "s/ATTRLEN/${k}/g" $TARGET + if [[ "$i" -gt "16" ]]; then + sed -i "s/REG//g" $TARGET + else + sed -i "s/DBG//g" $TARGET + fi + done + done +done diff --git a/scripts/kvtag_add_get_scale/perlmutter/submit.sh b/scripts/kvtag_add_get_scale/perlmutter/submit.sh new file mode 100644 index 000000000..2ca6badf8 --- /dev/null +++ b/scripts/kvtag_add_get_scale/perlmutter/submit.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# MIN_PROC=4 +# MAX_PROC=128 +MIN_PROC=1 +MAX_PROC=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +curdir=$(pwd) + +first_submit=1 + +for (( i = 1; i <= $MAX_PROC; i*=2 )); do + mkdir -p $i + for (( j = 1; j <= 
$MAX_ATTR; j*=4 )); do + for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do + JOBNAME=kvtag_bench_${i}_${j}_${k} + TARGET=./$i/JOBNAME.sh + + njob=`squeue -u $USER | grep kvtag_bench | wc -l` + echo $njob + while [ $njob -ge 4 ] + do + sleeptime=$[ ( $RANDOM % 1000 ) ] + sleep $sleeptime + njob=`squeue -u $USER | grep kvtag_bench | wc -l` + echo $njob + done + + if [[ $first_submit == 1 ]]; then + # Submit first job w/o dependency + echo "Submitting $TARGET" + job=`sbatch $TARGET` + first_submit=0 + else + echo "Submitting $TARGET after ${job: -8}" + job=`sbatch -d afterany:${job: -8} $TARGET` + fi + + sleeptime=$[ ( $RANDOM % 5 ) ] + sleep $sleeptime + done + done +done + + +# for (( j = $MIN_PROC; j <= $MAX_PROC ; j*=2 )); do + +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# while [ $njob -ge 4 ] +# do +# sleeptime=$[ ( $RANDOM % 1000 ) ] +# sleep $sleeptime +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# done + + +# cd $curdir/$j +# for filename in ./*.sh ; do + +# if [[ $first_submit == 1 ]]; then +# # Submit first job w/o dependency +# echo "Submitting $filename" +# job=`sbatch $filename` +# first_submit=0 +# else +# echo "Submitting $filename after ${job: -8}" +# job=`sbatch -d afterany:${job: -8} $filename` +# fi + +# sleeptime=$[ ( $RANDOM % 5 ) ] +# sleep $sleeptime + +# done +# done diff --git a/scripts/kvtag_add_get_scale/perlmutter/template.sh b/scripts/kvtag_add_get_scale/perlmutter/template.sh new file mode 100644 index 000000000..56fa7cd02 --- /dev/null +++ b/scripts/kvtag_add_get_scale/perlmutter/template.sh @@ -0,0 +1,69 @@ +#!/bin/bash -l + +#REGSBATCH -p regular +#DBGSBATCH -p debug +#SBATCH -N NODENUM +#REGSBATCH -t 4:00:00 +#DBGSBATCH -t 0:30:00 +#SBATCH --gres=craynetwork:2 +#SBATCH -L SCRATCH +#SBATCH -C haswell +#SBATCH -J JOBNAME +#SBATCH -A m2621 +#SBATCH -o o%j.JOBNAME.out +#SBATCH -e o%j.JOBNAME.out + + +# export PDC_DEBUG=0 + +export PDC_TMPDIR=/global/cscratch1/sd/wzhang5/data/pdc/conf + +rm -rf $PDC_TMPDIR/* + +REPEAT=1 + +N_NODE=NODENUM +NCLIENT=31 + +export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE +mkdir -p $PDC_TMPDIR + +let TOTALPROC=$NCLIENT*$N_NODE + +EXECPATH=/global/cfs/cdirs/m2621/wzhang5/cori/install/pdc/share/test/bin +SERVER=$EXECPATH/pdc_server.exe +CLIENT=$EXECPATH/kvtag_add_get_benchmark +CLOSE=$EXECPATH/close_server + +chmod +x $EXECPATH/* + +NUM_OBJ= +MAX_OBJ_COUNT=$((1024*1024)) +OBJ_INCR=$((MAX_OBJ_COUNT/1024)) +ATTR_COUNT=ATTRNUM +ATTR_LENGTH=ATTRLEN +QUERY_COUNT=$((1024)) + +date + + +echo "" +echo "=============" +echo "$i Init server" +echo "=============" +srun -N $N_NODE -n $N_NODE -c 2 --mem=128000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $SERVER & +sleep 5 + + +echo "============================================" +echo "KVTAGS with $N_NODE nodes" +echo "============================================" +srun -N $N_NODE -n $TOTALPROC -c 2 --mem=256000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE + +echo "" +echo "=================" +echo "$i Closing server" +echo "=================" +srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLOSE + +date From 90736ab646022f7d934c17f6678b0cd6924fd1d9 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 21:42:34 -0500 Subject: [PATCH 053/806] update output for uint64_t --- src/tests/kvtag_add_get_benchmark.c | 30 +++++++++++++---------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 
bce854644..9f39293e6 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -192,7 +192,7 @@ create_object(uint64_t my_obj, uint64_t my_obj_s, pdcid_t cont, pdcid_t obj_prop for (i = 0; i < my_obj; i++) { v = i + my_obj_s; - sprintf(obj_name, "obj%llu", v); + sprintf(obj_name, "obj%" PRIu64 "", v); obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); if (obj_ids[i] <= 0) printf("Fail to create object @ line %d!\n", __LINE__); @@ -236,15 +236,15 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value * @param value_size An array to store the size of each tag value. */ void -get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, int n_attr, void **tag_values, uint64_t *value_size) +get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, uint64_t n_attr, void **tag_values, uint64_t *value_size) { - uint64_t i, v; + uint64_t i; char tag_name[128]; for (i = 0; i < n_attr; i++) { sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", obj_name_v, i); if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[i], (void *)&value_size[i]) < 0) - printf("fail to get a kvtag from o%" PRIu64 "\n", v); + printf("fail to get a kvtag from o%" PRIu64 "\n", obj_name_v); } } @@ -264,7 +264,7 @@ get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, int n_attr, void **tag_valu * The caller is responsible for allocating memory for the array. */ void -send_queries(uint64_t my_obj_s, int n_query, uint64_t n_attr, pdcid_t *obj_ids, void **tag_values, +send_queries(uint64_t my_obj_s, uint64_t n_query, uint64_t n_attr, pdcid_t *obj_ids, void **tag_values, uint64_t *value_size) { uint64_t i, v; @@ -325,21 +325,17 @@ main(int argc, char *argv[]) { pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t * obj_ids; - uint64_t n_obj, n_obj_incr, my_obj, my_obj_s, curr_total_obj = 0; + uint64_t n_obj, n_obj_incr, my_obj, my_obj_s; uint64_t n_attr, n_attr_len, n_query, my_query, my_query_s; uint64_t n_servers, n_clients; - uint64_t i, j, k, v; - int proc_num, my_rank, attr_value; - char obj_name[128]; - char tag_name[128]; + uint64_t i, k; + int proc_num, my_rank; double stime = 0.0, step_elapse = 0.0; double total_object_time = 0.0, total_tag_time = 0.0, total_query_time = 0.0; uint64_t total_object_count = 0, total_tag_count = 0, total_query_count = 0; - int * value_to_add; void ** query_rst_cache; uint64_t * value_size; - obj_handle * oh; - struct pdc_obj_info *info; + #ifdef ENABLE_MPI MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &proc_num); @@ -365,7 +361,7 @@ main(int argc, char *argv[]) } if (my_rank == 0) - printf("Create %llu obj, %llu tags, query %llu\n", n_obj, n_obj, n_obj); + printf("Create %" PRIu64 " obj, %" PRIu64 " tags, query %" PRIu64 "\n", n_obj, n_attr, n_query); // making necessary preparation for the test. @@ -386,14 +382,14 @@ main(int argc, char *argv[]) // n_obj_incr. create_object(my_obj, my_obj_s, cont, obj_prop, obj_ids); // therefore, after 'create_objects' function, we should add 'curr_total_obj' by 'n_obj_incr'. - curr_total_obj += n_obj_incr; #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); step_elapse = MPI_Wtime() - stime; total_object_time += step_elapse; - total_object_count += n_obj_incr; #endif + total_object_count += n_obj_incr; + if (my_rank == 0) { printf("Iteration %" PRIu64 " : Objects: %" PRIu64 " , Time: %.4f sec. 
Object throughput in this iteration: " @@ -461,7 +457,7 @@ main(int argc, char *argv[]) my_obj_s += n_obj_incr; k++; - } while (curr_total_obj < n_obj); + } while (total_object_count < n_obj); if (my_rank == 0) { printf("Final Report: \n"); From ac3236967243a0143b4e828f157ed5d29af2e276 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 21:45:08 -0500 Subject: [PATCH 054/806] update output for uint64_t --- src/tests/kvtag_add_get_benchmark.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 9f39293e6..225d22d92 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -466,9 +466,9 @@ main(int argc, char *argv[]) printf("[Final Report 2] Iterations: %" PRIu64 " , Objects: %" PRIu64 " , Tags/Object: %" PRIu64 " , Queries/Iteration: " "%" PRIu64 " , \n", - k, curr_total_obj, n_attr, n_query); + k, total_object_count, n_attr, n_query); printf("[Final Report 3] Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", - (double)curr_total_obj / total_object_time, (double)(curr_total_obj * n_attr) / total_tag_time, + (double)total_object_count / total_object_time, (double)(total_object_count * n_attr) / total_tag_time, (double)(total_query_count * n_attr) / total_query_time); } From 94d8633081ab0130f490924760809c1d9de9d4b3 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 22:03:47 -0500 Subject: [PATCH 055/806] update output for uint64_t --- .../kvtag_add_get_benchmark/cori/template.sh | 6 ++--- scripts/kvtag_add_get_scale/cori/template.sh | 18 ++++++------- .../perlmutter/template.sh | 26 +++++++------------ 3 files changed, 21 insertions(+), 29 deletions(-) diff --git a/scripts/kvtag_add_get_benchmark/cori/template.sh b/scripts/kvtag_add_get_benchmark/cori/template.sh index 73eb670c5..dd3216c9c 100644 --- a/scripts/kvtag_add_get_benchmark/cori/template.sh +++ b/scripts/kvtag_add_get_benchmark/cori/template.sh @@ -1,7 +1,7 @@ #!/bin/bash -l -#REGSBATCH -p regular -#DBGSBATCH -p debug +#REGSBATCH -q regular +#DBGSBATCH -q debug #SBATCH -N NODENUM #REGSBATCH -t 4:00:00 #DBGSBATCH -t 0:30:00 @@ -16,7 +16,7 @@ # export PDC_DEBUG=0 -export PDC_TMPDIR=/global/cscratch1/sd/wzhang5/data/pdc/conf +export PDC_TMPDIR=$SCRATCH/data/pdc/conf rm -rf $PDC_TMPDIR/* diff --git a/scripts/kvtag_add_get_scale/cori/template.sh b/scripts/kvtag_add_get_scale/cori/template.sh index 73eb670c5..402c008ae 100644 --- a/scripts/kvtag_add_get_scale/cori/template.sh +++ b/scripts/kvtag_add_get_scale/cori/template.sh @@ -1,7 +1,7 @@ #!/bin/bash -l -#REGSBATCH -p regular -#DBGSBATCH -p debug +#REGSBATCH -q regular +#DBGSBATCH -q debug #SBATCH -N NODENUM #REGSBATCH -t 4:00:00 #DBGSBATCH -t 0:30:00 @@ -16,7 +16,7 @@ # export PDC_DEBUG=0 -export PDC_TMPDIR=/global/cscratch1/sd/wzhang5/data/pdc/conf +export PDC_TMPDIR=$SCRATCH/data/pdc/conf rm -rf $PDC_TMPDIR/* @@ -32,16 +32,14 @@ let TOTALPROC=$NCLIENT*$N_NODE EXECPATH=/global/cfs/cdirs/m2621/wzhang5/cori/install/pdc/share/test/bin SERVER=$EXECPATH/pdc_server.exe -CLIENT=$EXECPATH/kvtag_add_get_benchmark +CLIENT=$EXECPATH/kvtag_add_get_scale CLOSE=$EXECPATH/close_server chmod +x $EXECPATH/* -MAX_OBJ_COUNT=$((1024*1024)) -OBJ_INCR=$((MAX_OBJ_COUNT/1024)) -ATTR_COUNT=ATTRNUM -ATTR_LENGTH=ATTRLEN -QUERY_COUNT=$((1024)) +NUM_OBJ=$((1024*1024)) +NUM_TAGS=$NUM_OBJ +NUM_QUERY=$((NUM_OBJ)) date @@ -57,7 +55,7 @@ sleep 5 echo "============================================" echo "KVTAGS with $N_NODE nodes" echo 
"============================================" -srun -N $N_NODE -n $TOTALPROC -c 2 --mem=100000 --cpu_bind=cores --gres=craynetwork:1 --overlap stdbuf -i0 -o0 -e0 $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE +srun -N $N_NODE -n $TOTALPROC -c 2 --mem=100000 --cpu_bind=cores --gres=craynetwork:1 --overlap stdbuf -i0 -o0 -e0 $CLIENT $NUM_OBJ $NUM_TAGS $NUM_QUERY echo "" echo "=================" diff --git a/scripts/kvtag_add_get_scale/perlmutter/template.sh b/scripts/kvtag_add_get_scale/perlmutter/template.sh index 56fa7cd02..423d59752 100644 --- a/scripts/kvtag_add_get_scale/perlmutter/template.sh +++ b/scripts/kvtag_add_get_scale/perlmutter/template.sh @@ -1,22 +1,19 @@ #!/bin/bash -l -#REGSBATCH -p regular -#DBGSBATCH -p debug +#REGSBATCH -q regular +#DBGSBATCH -q debug #SBATCH -N NODENUM #REGSBATCH -t 4:00:00 #DBGSBATCH -t 0:30:00 -#SBATCH --gres=craynetwork:2 -#SBATCH -L SCRATCH -#SBATCH -C haswell -#SBATCH -J JOBNAME +#SBATCH -C cpu +#SBATCH -J JOBNAME #SBATCH -A m2621 #SBATCH -o o%j.JOBNAME.out #SBATCH -e o%j.JOBNAME.out - # export PDC_DEBUG=0 -export PDC_TMPDIR=/global/cscratch1/sd/wzhang5/data/pdc/conf +export PDC_TMPDIR=$SCRATCH/data/pdc/conf rm -rf $PDC_TMPDIR/* @@ -32,17 +29,14 @@ let TOTALPROC=$NCLIENT*$N_NODE EXECPATH=/global/cfs/cdirs/m2621/wzhang5/cori/install/pdc/share/test/bin SERVER=$EXECPATH/pdc_server.exe -CLIENT=$EXECPATH/kvtag_add_get_benchmark +CLIENT=$EXECPATH/kvtag_add_get_scale CLOSE=$EXECPATH/close_server chmod +x $EXECPATH/* -NUM_OBJ= -MAX_OBJ_COUNT=$((1024*1024)) -OBJ_INCR=$((MAX_OBJ_COUNT/1024)) -ATTR_COUNT=ATTRNUM -ATTR_LENGTH=ATTRLEN -QUERY_COUNT=$((1024)) +NUM_OBJ=$((1024*1024)) +NUM_TAGS=$NUM_OBJ +NUM_QUERY=$((NUM_OBJ)) date @@ -58,7 +52,7 @@ sleep 5 echo "============================================" echo "KVTAGS with $N_NODE nodes" echo "============================================" -srun -N $N_NODE -n $TOTALPROC -c 2 --mem=256000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE +srun -N $N_NODE -n $TOTALPROC -c 2 --mem=256000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLIENT $NUM_OBJ $NUM_TAGS $NUM_QUERY echo "" echo "=================" From 653375bf592bad451da71ebb2bef66e94fb761c6 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 22:09:27 -0500 Subject: [PATCH 056/806] update scripts --- scripts/kvtag_add_get_benchmark/cori/clean.sh | 0 scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh | 0 scripts/kvtag_add_get_benchmark/cori/submit.sh | 0 scripts/kvtag_add_get_benchmark/cori/template.sh | 0 scripts/kvtag_add_get_scale/cori/clean.sh | 0 scripts/kvtag_add_get_scale/cori/gen_scripts.sh | 0 scripts/kvtag_add_get_scale/cori/submit.sh | 0 scripts/kvtag_add_get_scale/cori/template.sh | 0 scripts/kvtag_add_get_scale/perlmutter/clean.sh | 0 scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh | 0 scripts/kvtag_add_get_scale/perlmutter/submit.sh | 0 scripts/kvtag_add_get_scale/perlmutter/template.sh | 0 12 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 scripts/kvtag_add_get_benchmark/cori/clean.sh mode change 100644 => 100755 scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh mode change 100644 => 100755 scripts/kvtag_add_get_benchmark/cori/submit.sh mode change 100644 => 100755 scripts/kvtag_add_get_benchmark/cori/template.sh mode change 100644 => 100755 scripts/kvtag_add_get_scale/cori/clean.sh mode change 100644 => 100755 scripts/kvtag_add_get_scale/cori/gen_scripts.sh mode change 100644 => 100755 
scripts/kvtag_add_get_scale/cori/submit.sh mode change 100644 => 100755 scripts/kvtag_add_get_scale/cori/template.sh mode change 100644 => 100755 scripts/kvtag_add_get_scale/perlmutter/clean.sh mode change 100644 => 100755 scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh mode change 100644 => 100755 scripts/kvtag_add_get_scale/perlmutter/submit.sh mode change 100644 => 100755 scripts/kvtag_add_get_scale/perlmutter/template.sh diff --git a/scripts/kvtag_add_get_benchmark/cori/clean.sh b/scripts/kvtag_add_get_benchmark/cori/clean.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh b/scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_benchmark/cori/submit.sh b/scripts/kvtag_add_get_benchmark/cori/submit.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_benchmark/cori/template.sh b/scripts/kvtag_add_get_benchmark/cori/template.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_scale/cori/clean.sh b/scripts/kvtag_add_get_scale/cori/clean.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_scale/cori/gen_scripts.sh b/scripts/kvtag_add_get_scale/cori/gen_scripts.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_scale/cori/submit.sh b/scripts/kvtag_add_get_scale/cori/submit.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_scale/cori/template.sh b/scripts/kvtag_add_get_scale/cori/template.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_scale/perlmutter/clean.sh b/scripts/kvtag_add_get_scale/perlmutter/clean.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh b/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_scale/perlmutter/submit.sh b/scripts/kvtag_add_get_scale/perlmutter/submit.sh old mode 100644 new mode 100755 diff --git a/scripts/kvtag_add_get_scale/perlmutter/template.sh b/scripts/kvtag_add_get_scale/perlmutter/template.sh old mode 100644 new mode 100755 From ebe99ef248ec9bd1078ddff515be90b011b5ff58 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 22:29:19 -0500 Subject: [PATCH 057/806] update scripts --- scripts/kvtag_add_get_scale/cori/template.sh | 2 +- scripts/kvtag_add_get_scale/perlmutter/template.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/kvtag_add_get_scale/cori/template.sh b/scripts/kvtag_add_get_scale/cori/template.sh index 402c008ae..40a4dca34 100755 --- a/scripts/kvtag_add_get_scale/cori/template.sh +++ b/scripts/kvtag_add_get_scale/cori/template.sh @@ -37,7 +37,7 @@ CLOSE=$EXECPATH/close_server chmod +x $EXECPATH/* -NUM_OBJ=$((1024*1024)) +NUM_OBJ=$((1024*1024*100)) NUM_TAGS=$NUM_OBJ NUM_QUERY=$((NUM_OBJ)) diff --git a/scripts/kvtag_add_get_scale/perlmutter/template.sh b/scripts/kvtag_add_get_scale/perlmutter/template.sh index 423d59752..8b424ef88 100755 --- a/scripts/kvtag_add_get_scale/perlmutter/template.sh +++ b/scripts/kvtag_add_get_scale/perlmutter/template.sh @@ -27,14 +27,14 @@ mkdir -p $PDC_TMPDIR let TOTALPROC=$NCLIENT*$N_NODE -EXECPATH=/global/cfs/cdirs/m2621/wzhang5/cori/install/pdc/share/test/bin +EXECPATH=/global/cfs/cdirs/m2621/wzhang5/perlmutter/install/pdc/share/test/bin SERVER=$EXECPATH/pdc_server.exe CLIENT=$EXECPATH/kvtag_add_get_scale CLOSE=$EXECPATH/close_server chmod +x $EXECPATH/* -NUM_OBJ=$((1024*1024)) +NUM_OBJ=$((1024*1024*100)) 
NUM_TAGS=$NUM_OBJ NUM_QUERY=$((NUM_OBJ)) From aaa0b84632e4031ffd056ecc459b610b72da7df8 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 22:32:48 -0500 Subject: [PATCH 058/806] delete debugging message --- src/api/pdc_client_connect.c | 2 +- src/server/pdc_server_metadata.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index 897379e77..457001079 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -6961,7 +6961,7 @@ PDC_add_kvtag(pdcid_t obj_id, pdc_kvtag_t *kvtag, int is_cont) } //TODO: delete this line after debugging. - printf("==CLIENT[%d]: PDC_add_kvtag::in.obj_id = %llu \n ", pdc_client_mpi_rank_g, in.obj_id); + // printf("==CLIENT[%d]: PDC_add_kvtag::in.obj_id = %llu \n ", pdc_client_mpi_rank_g, in.obj_id); server_id = PDC_get_server_by_obj_id(meta_id, pdc_server_num_g); diff --git a/src/server/pdc_server_metadata.c b/src/server/pdc_server_metadata.c index 79cd22dfd..2331df3b1 100644 --- a/src/server/pdc_server_metadata.c +++ b/src/server/pdc_server_metadata.c @@ -2555,7 +2555,7 @@ PDC_Server_add_kvtag(metadata_add_kvtag_in_t *in, metadata_add_tag_out_t *out) hash_key = in->hash_value; obj_id = in->obj_id; - printf("==SERVER[%d]: PDC_add_kvtag::in.obj_id = %llu \n ", pdc_server_rank_g, obj_id); + // printf("==SERVER[%d]: PDC_add_kvtag::in.obj_id = %llu \n ", pdc_server_rank_g, obj_id); #ifdef ENABLE_MULTITHREAD // Obtain lock for hash table From ed0587870adf6886e5ed84d051967470e1589269 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 22:37:22 -0500 Subject: [PATCH 059/806] make Cmake to publish scripts directory --- CMakeLists.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3c661eaa9..0922d44c9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -455,6 +455,17 @@ install( PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE ) + +install( + DIRECTORY + ${PDC_SOURCE_DIR}/scripts + DESTINATION + ${PDC_BINARY_DIR} + FILES_MATCHING PATTERN "*.sh" + PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE +) + + #install( # FILES # ${PDC_BINARY_DIR}/bin/pdc_server.exe From 0f3db861fbfed610925fec6fe6770f7a48a5ccd3 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 22:42:53 -0500 Subject: [PATCH 060/806] make Cmake to publish scripts directory --- CMakeLists.txt | 2 +- scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh | 2 +- scripts/kvtag_add_get_scale/cori/gen_scripts.sh | 2 +- scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0922d44c9..446f1d024 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -460,7 +460,7 @@ install( DIRECTORY ${PDC_SOURCE_DIR}/scripts DESTINATION - ${PDC_BINARY_DIR} + ${PDC_INSTALL_DATA_DIR} FILES_MATCHING PATTERN "*.sh" PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE ) diff --git a/scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh b/scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh index 54606da2b..62eb1a2b2 100755 --- a/scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh +++ b/scripts/kvtag_add_get_benchmark/cori/gen_scripts.sh @@ -9,7 +9,7 @@ for (( i = 1; i <= $MAX_NODE; i*=2 )); do for (( j = 1; j <= $MAX_ATTR; j*=4 )); do for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do JOBNAME=kvtag_bench_${i}_${j}_${k} - 
TARGET=./$i/$JOBNAME.sh + TARGET=./$i/$JOBNAME.sbatch cp template.sh $TARGET sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET sed -i "s/NODENUM/${i}/g" $TARGET diff --git a/scripts/kvtag_add_get_scale/cori/gen_scripts.sh b/scripts/kvtag_add_get_scale/cori/gen_scripts.sh index 54606da2b..62eb1a2b2 100755 --- a/scripts/kvtag_add_get_scale/cori/gen_scripts.sh +++ b/scripts/kvtag_add_get_scale/cori/gen_scripts.sh @@ -9,7 +9,7 @@ for (( i = 1; i <= $MAX_NODE; i*=2 )); do for (( j = 1; j <= $MAX_ATTR; j*=4 )); do for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do JOBNAME=kvtag_bench_${i}_${j}_${k} - TARGET=./$i/$JOBNAME.sh + TARGET=./$i/$JOBNAME.sbatch cp template.sh $TARGET sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET sed -i "s/NODENUM/${i}/g" $TARGET diff --git a/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh b/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh index 54606da2b..62eb1a2b2 100755 --- a/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh +++ b/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh @@ -9,7 +9,7 @@ for (( i = 1; i <= $MAX_NODE; i*=2 )); do for (( j = 1; j <= $MAX_ATTR; j*=4 )); do for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do JOBNAME=kvtag_bench_${i}_${j}_${k} - TARGET=./$i/$JOBNAME.sh + TARGET=./$i/$JOBNAME.sbatch cp template.sh $TARGET sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET sed -i "s/NODENUM/${i}/g" $TARGET From 5552ad06c0a4ba1326730c9f6b94c122a289f6db Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 22:45:48 -0500 Subject: [PATCH 061/806] make Cmake to publish scripts directory --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 446f1d024..2e353dbc2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -460,7 +460,7 @@ install( DIRECTORY ${PDC_SOURCE_DIR}/scripts DESTINATION - ${PDC_INSTALL_DATA_DIR} + ${CMAKE_INSTALL_PREFIX} FILES_MATCHING PATTERN "*.sh" PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE ) From 0e63480cc3ba72f88c6596b3ebf5b588ebab1002 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 22:47:44 -0500 Subject: [PATCH 062/806] make Cmake to publish scripts directory --- .../perlmutter/clean.sh | 8 ++ .../perlmutter/gen_scripts.sh | 25 ++++++ .../perlmutter/submit.sh | 78 +++++++++++++++++++ .../perlmutter/template.sh | 66 ++++++++++++++++ 4 files changed, 177 insertions(+) create mode 100755 scripts/kvtag_add_get_benchmark/perlmutter/clean.sh create mode 100755 scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh create mode 100755 scripts/kvtag_add_get_benchmark/perlmutter/submit.sh create mode 100755 scripts/kvtag_add_get_benchmark/perlmutter/template.sh diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/clean.sh b/scripts/kvtag_add_get_benchmark/perlmutter/clean.sh new file mode 100755 index 000000000..d6476962e --- /dev/null +++ b/scripts/kvtag_add_get_benchmark/perlmutter/clean.sh @@ -0,0 +1,8 @@ +#!/bin/bash +MAX_NODE=512 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + + rm -rf $i/* + +done \ No newline at end of file diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh b/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh new file mode 100755 index 000000000..62eb1a2b2 --- /dev/null +++ b/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh @@ -0,0 +1,25 @@ +#!/bin/bash +N_THREAD=NO +MAX_NODE=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + mkdir -p $i + for (( j = 1; j <= $MAX_ATTR; j*=4 )); do + for (( k = 100; k <= $MAX_ATTRLEN; 
k*=10 )); do + JOBNAME=kvtag_bench_${i}_${j}_${k} + TARGET=./$i/$JOBNAME.sbatch + cp template.sh $TARGET + sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET + sed -i "s/NODENUM/${i}/g" $TARGET + sed -i "s/ATTRNUM/${j}/g" $TARGET + sed -i "s/ATTRLEN/${k}/g" $TARGET + if [[ "$i" -gt "16" ]]; then + sed -i "s/REG//g" $TARGET + else + sed -i "s/DBG//g" $TARGET + fi + done + done +done diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/submit.sh b/scripts/kvtag_add_get_benchmark/perlmutter/submit.sh new file mode 100755 index 000000000..2ca6badf8 --- /dev/null +++ b/scripts/kvtag_add_get_benchmark/perlmutter/submit.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# MIN_PROC=4 +# MAX_PROC=128 +MIN_PROC=1 +MAX_PROC=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +curdir=$(pwd) + +first_submit=1 + +for (( i = 1; i <= $MAX_PROC; i*=2 )); do + mkdir -p $i + for (( j = 1; j <= $MAX_ATTR; j*=4 )); do + for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do + JOBNAME=kvtag_bench_${i}_${j}_${k} + TARGET=./$i/JOBNAME.sh + + njob=`squeue -u $USER | grep kvtag_bench | wc -l` + echo $njob + while [ $njob -ge 4 ] + do + sleeptime=$[ ( $RANDOM % 1000 ) ] + sleep $sleeptime + njob=`squeue -u $USER | grep kvtag_bench | wc -l` + echo $njob + done + + if [[ $first_submit == 1 ]]; then + # Submit first job w/o dependency + echo "Submitting $TARGET" + job=`sbatch $TARGET` + first_submit=0 + else + echo "Submitting $TARGET after ${job: -8}" + job=`sbatch -d afterany:${job: -8} $TARGET` + fi + + sleeptime=$[ ( $RANDOM % 5 ) ] + sleep $sleeptime + done + done +done + + +# for (( j = $MIN_PROC; j <= $MAX_PROC ; j*=2 )); do + +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# while [ $njob -ge 4 ] +# do +# sleeptime=$[ ( $RANDOM % 1000 ) ] +# sleep $sleeptime +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# done + + +# cd $curdir/$j +# for filename in ./*.sh ; do + +# if [[ $first_submit == 1 ]]; then +# # Submit first job w/o dependency +# echo "Submitting $filename" +# job=`sbatch $filename` +# first_submit=0 +# else +# echo "Submitting $filename after ${job: -8}" +# job=`sbatch -d afterany:${job: -8} $filename` +# fi + +# sleeptime=$[ ( $RANDOM % 5 ) ] +# sleep $sleeptime + +# done +# done diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh new file mode 100755 index 000000000..fc17ed525 --- /dev/null +++ b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh @@ -0,0 +1,66 @@ +#!/bin/bash -l + +#REGSBATCH -q regular +#DBGSBATCH -q debug +#SBATCH -N NODENUM +#REGSBATCH -t 4:00:00 +#DBGSBATCH -t 0:30:00 +#SBATCH -C cpu +#SBATCH -J JOBNAME +#SBATCH -A m2621 +#SBATCH -o o%j.JOBNAME.out +#SBATCH -e o%j.JOBNAME.out + + +# export PDC_DEBUG=0 + +export PDC_TMPDIR=$SCRATCH/data/pdc/conf + +rm -rf $PDC_TMPDIR/* + +REPEAT=1 + +N_NODE=NODENUM +NCLIENT=31 + +export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE +mkdir -p $PDC_TMPDIR + +let TOTALPROC=$NCLIENT*$N_NODE + +EXECPATH=/global/cfs/cdirs/m2621/wzhang5/perlmutter/install/pdc/share/test/bin +SERVER=$EXECPATH/pdc_server.exe +CLIENT=$EXECPATH/kvtag_add_get_benchmark +CLOSE=$EXECPATH/close_server + +chmod +x $EXECPATH/* + +MAX_OBJ_COUNT=$((1024*1024)) +OBJ_INCR=$((MAX_OBJ_COUNT/1024)) +ATTR_COUNT=ATTRNUM +ATTR_LENGTH=ATTRLEN +QUERY_COUNT=$((1024)) + +date + + +echo "" +echo "=============" +echo "$i Init server" +echo "=============" +srun -N $N_NODE -n $N_NODE -c 2 --mem=100000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $SERVER & +sleep 5 + + +echo "============================================" +echo "KVTAGS with $N_NODE nodes" 
+echo "============================================" +srun -N $N_NODE -n $TOTALPROC -c 2 --mem=100000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE + +echo "" +echo "=================" +echo "$i Closing server" +echo "=================" +srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLOSE + +date From fd745f8092d5295a42c0f4556a585a4a86717638 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 23:16:47 -0500 Subject: [PATCH 063/806] make Cmake to publish scripts directory --- src/tests/kvtag_add_get_benchmark.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 225d22d92..d3550847a 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -430,7 +430,11 @@ main(int argc, char *argv[]) MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif - + if (my_rank == 0) { + printf("send queries\n"); + fflush(stdout); + } + send_queries(my_obj_s, my_query, n_attr, obj_ids, query_rst_cache, value_size); #ifdef ENABLE_MPI From 53e0a156839837c2e08b82aa15a9567a9878f3ac Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 23:24:35 -0500 Subject: [PATCH 064/806] update tag names --- src/tests/kvtag_add_get_benchmark.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index d3550847a..fbfd8ca7a 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -242,7 +242,7 @@ get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, uint64_t n_attr, void **tag char tag_name[128]; for (i = 0; i < n_attr; i++) { - sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", obj_name_v, i); + sprintf(tag_name, "tag%llu.%llu", obj_name_v, i); if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[i], (void *)&value_size[i]) < 0) printf("fail to get a kvtag from o%" PRIu64 "\n", obj_name_v); } From 4e6eb2700fa3b987720ee621efcc768ce4362026 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 5 Apr 2023 23:27:51 -0500 Subject: [PATCH 065/806] update tag names --- src/tests/kvtag_add_get_benchmark.c | 35 +++++++++++++---------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index fbfd8ca7a..7ac9c5c09 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -242,7 +242,7 @@ get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, uint64_t n_attr, void **tag char tag_name[128]; for (i = 0; i < n_attr; i++) { - sprintf(tag_name, "tag%llu.%llu", obj_name_v, i); + sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", obj_name_v, i); if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[i], (void *)&value_size[i]) < 0) printf("fail to get a kvtag from o%" PRIu64 "\n", obj_name_v); } @@ -323,18 +323,18 @@ closePDC(pdcid_t pdc, pdcid_t cont_prop, pdcid_t cont, pdcid_t obj_prop) int main(int argc, char *argv[]) { - pdcid_t pdc, cont_prop, cont, obj_prop; - pdcid_t * obj_ids; - uint64_t n_obj, n_obj_incr, my_obj, my_obj_s; - uint64_t n_attr, n_attr_len, n_query, my_query, my_query_s; - uint64_t n_servers, n_clients; - uint64_t i, k; - int proc_num, my_rank; - double stime = 0.0, step_elapse = 0.0; - double total_object_time = 0.0, total_tag_time = 0.0, total_query_time = 0.0; - uint64_t total_object_count = 0, total_tag_count = 0, total_query_count = 0; - 
void ** query_rst_cache; - uint64_t * value_size; + pdcid_t pdc, cont_prop, cont, obj_prop; + pdcid_t * obj_ids; + uint64_t n_obj, n_obj_incr, my_obj, my_obj_s; + uint64_t n_attr, n_attr_len, n_query, my_query, my_query_s; + uint64_t n_servers, n_clients; + uint64_t i, k; + int proc_num, my_rank; + double stime = 0.0, step_elapse = 0.0; + double total_object_time = 0.0, total_tag_time = 0.0, total_query_time = 0.0; + uint64_t total_object_count = 0, total_tag_count = 0, total_query_count = 0; + void ** query_rst_cache; + uint64_t *value_size; #ifdef ENABLE_MPI MPI_Init(&argc, &argv); @@ -430,11 +430,7 @@ main(int argc, char *argv[]) MPI_Barrier(MPI_COMM_WORLD); stime = MPI_Wtime(); #endif - if (my_rank == 0) { - printf("send queries\n"); - fflush(stdout); - } - + send_queries(my_obj_s, my_query, n_attr, obj_ids, query_rst_cache, value_size); #ifdef ENABLE_MPI @@ -472,7 +468,8 @@ main(int argc, char *argv[]) "%" PRIu64 " , \n", k, total_object_count, n_attr, n_query); printf("[Final Report 3] Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", - (double)total_object_count / total_object_time, (double)(total_object_count * n_attr) / total_tag_time, + (double)total_object_count / total_object_time, + (double)(total_object_count * n_attr) / total_tag_time, (double)(total_query_count * n_attr) / total_query_time); } From 71a6a9a587f41c3a5aaca3073847735bfdcc2e39 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 6 Apr 2023 00:40:56 -0500 Subject: [PATCH 066/806] update query startingpos --- src/tests/kvtag_add_get_benchmark.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 7ac9c5c09..b96b981f5 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -431,7 +431,7 @@ main(int argc, char *argv[]) stime = MPI_Wtime(); #endif - send_queries(my_obj_s, my_query, n_attr, obj_ids, query_rst_cache, value_size); + send_queries(my_query_s, my_query, n_attr, obj_ids, query_rst_cache, value_size); #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); From fed2e60adcd837ebf673c3bd2c6f6d13c81db23b Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 6 Apr 2023 09:47:25 -0500 Subject: [PATCH 067/806] update query startingpos --- src/tests/kvtag_add_get_benchmark.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index b96b981f5..1b5f26815 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -188,7 +188,7 @@ void create_object(uint64_t my_obj, uint64_t my_obj_s, pdcid_t cont, pdcid_t obj_prop, pdcid_t *obj_ids) { uint64_t i, v; - char obj_name[128]; + char obj_name[256]; for (i = 0; i < my_obj; i++) { v = i + my_obj_s; @@ -214,12 +214,12 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value pdcid_t *obj_ids) { uint64_t i, j, v; - char tag_name[128]; + char tag_name[256]; for (i = 0; i < my_obj; i++) { v = i + my_obj_s; for (j = 0; j < n_attr; j++) { - sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", v, i); + sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", v, j); if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)tag_values[j], tag_value_len + 1) < 0) printf("fail to add a kvtag to o%" PRIu64 "\n", v); } @@ -239,7 +239,7 @@ void get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, uint64_t n_attr, void **tag_values, uint64_t *value_size) { uint64_t i; - char tag_name[128]; + char 
tag_name[256]; for (i = 0; i < n_attr; i++) { sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", obj_name_v, i); @@ -267,10 +267,16 @@ void send_queries(uint64_t my_obj_s, uint64_t n_query, uint64_t n_attr, pdcid_t *obj_ids, void **tag_values, uint64_t *value_size) { - uint64_t i, v; + uint64_t i, j, v; + char tag_name[128]; for (i = 0; i < n_query; i++) { v = i + my_obj_s; + // for (j = 0; j < n_attr; j++) { + // sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", v, j); + // if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[j], (void *)&value_size[j]) < 0) + // printf("fail to get a kvtag from o%" PRIu64 "\n", v); + // } get_object_tags(obj_ids[i], v, n_attr, &tag_values[i * n_attr], &value_size[i * n_attr]); } } @@ -360,6 +366,13 @@ main(int argc, char *argv[]) goto done; } + if (n_query > n_obj_incr) { + if (my_rank == 0) { + printf("n_query cannot be larger than n_obj_incr! Exiting...\n"); + } + goto done; + } + if (my_rank == 0) printf("Create %" PRIu64 " obj, %" PRIu64 " tags, query %" PRIu64 "\n", n_obj, n_attr, n_query); @@ -455,6 +468,7 @@ main(int argc, char *argv[]) fflush(stdout); my_obj_s += n_obj_incr; + my_query_s += n_obj_incr; k++; } while (total_object_count < n_obj); From 591e5c7c8a436ece86712298261330d276d7ae40 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 6 Apr 2023 11:16:13 -0500 Subject: [PATCH 068/806] update job scripts --- scripts/kvtag_add_get_benchmark/cori/template.sh | 4 ++-- scripts/kvtag_add_get_benchmark/perlmutter/template.sh | 4 ++-- src/tests/kvtag_add_get_benchmark.c | 8 +------- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/scripts/kvtag_add_get_benchmark/cori/template.sh b/scripts/kvtag_add_get_benchmark/cori/template.sh index dd3216c9c..1db2938f3 100755 --- a/scripts/kvtag_add_get_benchmark/cori/template.sh +++ b/scripts/kvtag_add_get_benchmark/cori/template.sh @@ -37,11 +37,11 @@ CLOSE=$EXECPATH/close_server chmod +x $EXECPATH/* -MAX_OBJ_COUNT=$((1024*1024)) +MAX_OBJ_COUNT=$((1024*1024*1024)) OBJ_INCR=$((MAX_OBJ_COUNT/1024)) ATTR_COUNT=ATTRNUM ATTR_LENGTH=ATTRLEN -QUERY_COUNT=$((1024)) +QUERY_COUNT=$((OBJ_INCR)) date diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh index fc17ed525..9c69b9872 100755 --- a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh +++ b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh @@ -35,11 +35,11 @@ CLOSE=$EXECPATH/close_server chmod +x $EXECPATH/* -MAX_OBJ_COUNT=$((1024*1024)) +MAX_OBJ_COUNT=$((1024*1024*1024)) OBJ_INCR=$((MAX_OBJ_COUNT/1024)) ATTR_COUNT=ATTRNUM ATTR_LENGTH=ATTRLEN -QUERY_COUNT=$((1024)) +QUERY_COUNT=$((OBJ_INCR)) date diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 1b5f26815..c0801135e 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -267,16 +267,10 @@ void send_queries(uint64_t my_obj_s, uint64_t n_query, uint64_t n_attr, pdcid_t *obj_ids, void **tag_values, uint64_t *value_size) { - uint64_t i, j, v; - char tag_name[128]; + uint64_t i, v; for (i = 0; i < n_query; i++) { v = i + my_obj_s; - // for (j = 0; j < n_attr; j++) { - // sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", v, j); - // if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[j], (void *)&value_size[j]) < 0) - // printf("fail to get a kvtag from o%" PRIu64 "\n", v); - // } get_object_tags(obj_ids[i], v, n_attr, &tag_values[i * n_attr], &value_size[i * n_attr]); } } From 9b88faec1a7fecae7a585e8fae87ba132a0da25c Mon Sep 17 00:00:00 2001 
From: Wei Zhang Date: Thu, 6 Apr 2023 16:20:15 -0500 Subject: [PATCH 069/806] add progressive timing for kvtag_add_get_scale --- src/tests/kvtag_add_get_scale.c | 60 +++++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 3 deletions(-) diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index 95a32591a..28f2352ea 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -70,9 +70,10 @@ main(int argc, char *argv[]) pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t * obj_ids; int n_obj, n_add_tag, n_query, my_obj, my_obj_s, my_add_tag, my_query, my_add_tag_s, my_query_s; + int obj_10percent = 0, tag_10percent = 0, query_10percent = 0; int proc_num, my_rank, i, v; char obj_name[128]; - double stime, total_time; + double stime, total_time, percent_time; pdc_kvtag_t kvtag; void ** values; size_t value_size; @@ -100,6 +101,10 @@ main(int argc, char *argv[]) assign_work_to_rank(my_rank, proc_num, n_query, &my_query, &my_query_s); assign_work_to_rank(my_rank, proc_num, n_obj, &my_obj, &my_obj_s); + obj_10percent = my_obj / 10; + tag_10percent = my_add_tag / 10; + query_10percent = my_query / 10; + if (my_rank == 0) printf("Create %d obj, %d tags, query %d\n", my_obj, my_add_tag, my_query); @@ -123,15 +128,40 @@ main(int argc, char *argv[]) // Create a number of objects, add at least one tag to that object obj_ids = (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + stime = MPI_Wtime(); +#endif + for (i = 0; i < my_obj; i++) { sprintf(obj_name, "obj%d", my_obj_s + i); obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); if (obj_ids[i] <= 0) printf("Fail to create object @ line %d!\n", __LINE__); + + if (i % obj_10percent == 0) { +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + percent_time = MPI_Wtime() - stime; + if (my_rank == 0) { + int current_percentage = i / obj_10percent; + int estimated_current_object_number = n_obj / 100 * current_percentage; + double tps = estimated_current_object_number / percent_time; + printf("[OBJ PROGRESS %d%% ] %d objects, %.2f seconds, TPS: %.4f", current_percentage, + estimated_current_object_number, percent_time, tps); + } +#endif + } } +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + total_time = MPI_Wtime() - stime; +#endif + if (my_rank == 0) - printf("Created %d objects\n", n_obj); + printf("Total time to create %d objects: %.4f\n\n", n_obj, total_time); // Add tags kvtag.name = "Group"; @@ -146,6 +176,18 @@ main(int argc, char *argv[]) v = i + my_add_tag_s; if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.size) < 0) printf("fail to add a kvtag to o%d\n", i + my_obj_s); + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + percent_time = MPI_Wtime() - stime; + if (my_rank == 0) { + int current_percentage = i / tag_10percent; + int estimated_current_tag_number = n_obj / 100 * current_percentage; + double tps = estimated_current_tag_number / percent_time; + printf("[TAG PROGRESS %d%% ] %d tags, %.2f seconds, TPS: %.4f", current_percentage, + estimated_current_tag_number, percent_time, tps); + } +#endif } #ifdef ENABLE_MPI @@ -164,6 +206,18 @@ main(int argc, char *argv[]) for (i = 0; i < my_query; i++) { if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_size) < 0) printf("fail to get a kvtag from o%d\n", i + my_query_s); + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + percent_time = MPI_Wtime() - stime; + if (my_rank == 0) { + int current_percentage = i / query_10percent; + int 
estimated_current_query_number = n_obj / 100 * current_percentage; + double tps = estimated_current_query_number / percent_time; + printf("[QRY PROGRESS %d%% ] %d queries, %.2f seconds, TPS: %.4f", current_percentage, + estimated_current_query_number, percent_time, tps); + } +#endif } #ifdef ENABLE_MPI @@ -171,7 +225,7 @@ main(int argc, char *argv[]) total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to retrieve tags from %d objects: %.4f\n", n_query, total_time); + printf("Total time to retrieve 1 tag from %d objects: %.4f\n", n_query, total_time); fflush(stdout); From c20956d01ff6023080abb5af24fc8d9f93d93f7b Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 6 Apr 2023 16:38:28 -0500 Subject: [PATCH 070/806] fix iteration count in final report --- src/tests/kvtag_add_get_benchmark.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index c0801135e..27e0836ad 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -474,7 +474,7 @@ main(int argc, char *argv[]) printf("[Final Report 2] Iterations: %" PRIu64 " , Objects: %" PRIu64 " , Tags/Object: %" PRIu64 " , Queries/Iteration: " "%" PRIu64 " , \n", - k, total_object_count, n_attr, n_query); + k-1, total_object_count, n_attr, n_query); printf("[Final Report 3] Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", (double)total_object_count / total_object_time, (double)(total_object_count * n_attr) / total_tag_time, From eab118697b3a0258605d24ce6d575d7a26c0a193 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 7 Apr 2023 13:02:37 -0500 Subject: [PATCH 071/806] update job scripts and benckmark program --- .../kvtag_add_get_scale/cori/gen_scripts.sh | 26 ++++------ scripts/kvtag_add_get_scale/cori/submit.sh | 48 +++++++++---------- .../perlmutter/gen_scripts.sh | 26 ++++------ .../kvtag_add_get_scale/perlmutter/submit.sh | 48 +++++++++---------- src/tests/kvtag_add_get_benchmark.c | 10 ++-- 5 files changed, 69 insertions(+), 89 deletions(-) diff --git a/scripts/kvtag_add_get_scale/cori/gen_scripts.sh b/scripts/kvtag_add_get_scale/cori/gen_scripts.sh index 62eb1a2b2..e8ee7b11d 100755 --- a/scripts/kvtag_add_get_scale/cori/gen_scripts.sh +++ b/scripts/kvtag_add_get_scale/cori/gen_scripts.sh @@ -6,20 +6,14 @@ MAX_ATTRLEN=1000 for (( i = 1; i <= $MAX_NODE; i*=2 )); do mkdir -p $i - for (( j = 1; j <= $MAX_ATTR; j*=4 )); do - for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do - JOBNAME=kvtag_bench_${i}_${j}_${k} - TARGET=./$i/$JOBNAME.sbatch - cp template.sh $TARGET - sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET - sed -i "s/NODENUM/${i}/g" $TARGET - sed -i "s/ATTRNUM/${j}/g" $TARGET - sed -i "s/ATTRLEN/${k}/g" $TARGET - if [[ "$i" -gt "16" ]]; then - sed -i "s/REG//g" $TARGET - else - sed -i "s/DBG//g" $TARGET - fi - done - done + JOBNAME=kvtag_scale${i} + TARGET=./$i/$JOBNAME.sbatch + cp template.sh $TARGET + sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET + sed -i "s/NODENUM/${i}/g" $TARGET + if [[ "$i" -gt "16" ]]; then + sed -i "s/REG//g" $TARGET + else + sed -i "s/DBG//g" $TARGET + fi done diff --git a/scripts/kvtag_add_get_scale/cori/submit.sh b/scripts/kvtag_add_get_scale/cori/submit.sh index 2ca6badf8..192522e31 100755 --- a/scripts/kvtag_add_get_scale/cori/submit.sh +++ b/scripts/kvtag_add_get_scale/cori/submit.sh @@ -13,35 +13,31 @@ first_submit=1 for (( i = 1; i <= $MAX_PROC; i*=2 )); do mkdir -p $i - for (( j = 1; j <= $MAX_ATTR; j*=4 )); do - for (( k = 100; k <= 
$MAX_ATTRLEN; k*=10 )); do - JOBNAME=kvtag_bench_${i}_${j}_${k} - TARGET=./$i/JOBNAME.sh + JOBNAME=kvtag_scale_${i} + TARGET=./$i/JOBNAME.sh - njob=`squeue -u $USER | grep kvtag_bench | wc -l` - echo $njob - while [ $njob -ge 4 ] - do - sleeptime=$[ ( $RANDOM % 1000 ) ] - sleep $sleeptime - njob=`squeue -u $USER | grep kvtag_bench | wc -l` - echo $njob - done + njob=`squeue -u $USER | grep kvtag_scale | wc -l` + echo $njob + while [ $njob -ge 4 ] + do + sleeptime=$[ ( $RANDOM % 1000 ) ] + sleep $sleeptime + njob=`squeue -u $USER | grep kvtag_scale | wc -l` + echo $njob + done - if [[ $first_submit == 1 ]]; then - # Submit first job w/o dependency - echo "Submitting $TARGET" - job=`sbatch $TARGET` - first_submit=0 - else - echo "Submitting $TARGET after ${job: -8}" - job=`sbatch -d afterany:${job: -8} $TARGET` - fi + if [[ $first_submit == 1 ]]; then + # Submit first job w/o dependency + echo "Submitting $TARGET" + job=`sbatch $TARGET` + first_submit=0 + else + echo "Submitting $TARGET after ${job: -8}" + job=`sbatch -d afterany:${job: -8} $TARGET` + fi - sleeptime=$[ ( $RANDOM % 5 ) ] - sleep $sleeptime - done - done + sleeptime=$[ ( $RANDOM % 5 ) ] + sleep $sleeptime done diff --git a/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh b/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh index 62eb1a2b2..4af771821 100755 --- a/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh +++ b/scripts/kvtag_add_get_scale/perlmutter/gen_scripts.sh @@ -6,20 +6,14 @@ MAX_ATTRLEN=1000 for (( i = 1; i <= $MAX_NODE; i*=2 )); do mkdir -p $i - for (( j = 1; j <= $MAX_ATTR; j*=4 )); do - for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do - JOBNAME=kvtag_bench_${i}_${j}_${k} - TARGET=./$i/$JOBNAME.sbatch - cp template.sh $TARGET - sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET - sed -i "s/NODENUM/${i}/g" $TARGET - sed -i "s/ATTRNUM/${j}/g" $TARGET - sed -i "s/ATTRLEN/${k}/g" $TARGET - if [[ "$i" -gt "16" ]]; then - sed -i "s/REG//g" $TARGET - else - sed -i "s/DBG//g" $TARGET - fi - done - done + JOBNAME=kvtag_scale_${i} + TARGET=./$i/$JOBNAME.sbatch + cp template.sh $TARGET + sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET + sed -i "s/NODENUM/${i}/g" $TARGET + if [[ "$i" -gt "16" ]]; then + sed -i "s/REG//g" $TARGET + else + sed -i "s/DBG//g" $TARGET + fi done diff --git a/scripts/kvtag_add_get_scale/perlmutter/submit.sh b/scripts/kvtag_add_get_scale/perlmutter/submit.sh index 2ca6badf8..192522e31 100755 --- a/scripts/kvtag_add_get_scale/perlmutter/submit.sh +++ b/scripts/kvtag_add_get_scale/perlmutter/submit.sh @@ -13,35 +13,31 @@ first_submit=1 for (( i = 1; i <= $MAX_PROC; i*=2 )); do mkdir -p $i - for (( j = 1; j <= $MAX_ATTR; j*=4 )); do - for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do - JOBNAME=kvtag_bench_${i}_${j}_${k} - TARGET=./$i/JOBNAME.sh + JOBNAME=kvtag_scale_${i} + TARGET=./$i/JOBNAME.sh - njob=`squeue -u $USER | grep kvtag_bench | wc -l` - echo $njob - while [ $njob -ge 4 ] - do - sleeptime=$[ ( $RANDOM % 1000 ) ] - sleep $sleeptime - njob=`squeue -u $USER | grep kvtag_bench | wc -l` - echo $njob - done + njob=`squeue -u $USER | grep kvtag_scale | wc -l` + echo $njob + while [ $njob -ge 4 ] + do + sleeptime=$[ ( $RANDOM % 1000 ) ] + sleep $sleeptime + njob=`squeue -u $USER | grep kvtag_scale | wc -l` + echo $njob + done - if [[ $first_submit == 1 ]]; then - # Submit first job w/o dependency - echo "Submitting $TARGET" - job=`sbatch $TARGET` - first_submit=0 - else - echo "Submitting $TARGET after ${job: -8}" - job=`sbatch -d afterany:${job: -8} $TARGET` - fi + if [[ $first_submit == 1 ]]; then + # 
Submit first job w/o dependency + echo "Submitting $TARGET" + job=`sbatch $TARGET` + first_submit=0 + else + echo "Submitting $TARGET after ${job: -8}" + job=`sbatch -d afterany:${job: -8} $TARGET` + fi - sleeptime=$[ ( $RANDOM % 5 ) ] - sleep $sleeptime - done - done + sleeptime=$[ ( $RANDOM % 5 ) ] + sleep $sleeptime done diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 27e0836ad..be332a265 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -328,7 +328,7 @@ main(int argc, char *argv[]) uint64_t n_obj, n_obj_incr, my_obj, my_obj_s; uint64_t n_attr, n_attr_len, n_query, my_query, my_query_s; uint64_t n_servers, n_clients; - uint64_t i, k; + uint64_t i = 0, k = 0; int proc_num, my_rank; double stime = 0.0, step_elapse = 0.0; double total_object_time = 0.0, total_tag_time = 0.0, total_query_time = 0.0; @@ -377,9 +377,9 @@ main(int argc, char *argv[]) char **tag_values = gen_strings(n_attr, n_attr_len); - k = 1; - do { + + k++; #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); @@ -463,7 +463,7 @@ main(int argc, char *argv[]) my_obj_s += n_obj_incr; my_query_s += n_obj_incr; - k++; + } while (total_object_count < n_obj); @@ -474,7 +474,7 @@ main(int argc, char *argv[]) printf("[Final Report 2] Iterations: %" PRIu64 " , Objects: %" PRIu64 " , Tags/Object: %" PRIu64 " , Queries/Iteration: " "%" PRIu64 " , \n", - k-1, total_object_count, n_attr, n_query); + k, total_object_count, n_attr, n_query); printf("[Final Report 3] Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", (double)total_object_count / total_object_time, (double)(total_object_count * n_attr) / total_tag_time, From fe93e8ec80d873c5fede2fc85f4eff2e5afc2c94 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 7 Apr 2023 13:15:05 -0500 Subject: [PATCH 072/806] update message format --- src/tests/kvtag_add_get_scale.c | 44 ++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index 28f2352ea..bfa949961 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -146,9 +146,9 @@ main(int argc, char *argv[]) percent_time = MPI_Wtime() - stime; if (my_rank == 0) { int current_percentage = i / obj_10percent; - int estimated_current_object_number = n_obj / 100 * current_percentage; + int estimated_current_object_number = n_obj / 1000 * current_percentage; double tps = estimated_current_object_number / percent_time; - printf("[OBJ PROGRESS %d%% ] %d objects, %.2f seconds, TPS: %.4f", current_percentage, + printf("[OBJ PROGRESS %d%% ] %d objects, %.2f seconds, TPS: %.4f \n", current_percentage, estimated_current_object_number, percent_time, tps); } #endif @@ -177,17 +177,19 @@ main(int argc, char *argv[]) if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.size) < 0) printf("fail to add a kvtag to o%d\n", i + my_obj_s); + if (i % tag_10percent == 0) { #ifdef ENABLE_MPI - MPI_Barrier(MPI_COMM_WORLD); - percent_time = MPI_Wtime() - stime; - if (my_rank == 0) { - int current_percentage = i / tag_10percent; - int estimated_current_tag_number = n_obj / 100 * current_percentage; - double tps = estimated_current_tag_number / percent_time; - printf("[TAG PROGRESS %d%% ] %d tags, %.2f seconds, TPS: %.4f", current_percentage, - estimated_current_tag_number, percent_time, tps); - } + MPI_Barrier(MPI_COMM_WORLD); + percent_time = MPI_Wtime() - stime; + if (my_rank == 0) { + int current_percentage = i / 
tag_10percent; + int estimated_current_tag_number = n_obj / 1000 * current_percentage; + double tps = estimated_current_tag_number / percent_time; + printf("[TAG PROGRESS %d%% ] %d tags, %.2f seconds, TPS: %.4f \n", current_percentage, + estimated_current_tag_number, percent_time, tps); + } #endif + } } #ifdef ENABLE_MPI @@ -207,17 +209,19 @@ main(int argc, char *argv[]) if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_size) < 0) printf("fail to get a kvtag from o%d\n", i + my_query_s); + if (i % query_10percent == 0) { #ifdef ENABLE_MPI - MPI_Barrier(MPI_COMM_WORLD); - percent_time = MPI_Wtime() - stime; - if (my_rank == 0) { - int current_percentage = i / query_10percent; - int estimated_current_query_number = n_obj / 100 * current_percentage; - double tps = estimated_current_query_number / percent_time; - printf("[QRY PROGRESS %d%% ] %d queries, %.2f seconds, TPS: %.4f", current_percentage, - estimated_current_query_number, percent_time, tps); - } + MPI_Barrier(MPI_COMM_WORLD); + percent_time = MPI_Wtime() - stime; + if (my_rank == 0) { + int current_percentage = i / query_10percent; + int estimated_current_query_number = n_obj / 1000 * current_percentage; + double tps = estimated_current_query_number / percent_time; + printf("[QRY PROGRESS %d%% ] %d queries, %.2f seconds, TPS: %.4f \n", current_percentage, + estimated_current_query_number, percent_time, tps); + } #endif + } } #ifdef ENABLE_MPI From 7f16ee0849cce6bd40dd0536bb360e880145a8c6 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 7 Apr 2023 13:19:46 -0500 Subject: [PATCH 073/806] update message format --- src/tests/kvtag_add_get_scale.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index bfa949961..dc939155f 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -145,8 +145,8 @@ main(int argc, char *argv[]) MPI_Barrier(MPI_COMM_WORLD); percent_time = MPI_Wtime() - stime; if (my_rank == 0) { - int current_percentage = i / obj_10percent; - int estimated_current_object_number = n_obj / 1000 * current_percentage; + int current_percentage = i / obj_10percent * 10; + int estimated_current_object_number = n_obj / 100 * current_percentage; double tps = estimated_current_object_number / percent_time; printf("[OBJ PROGRESS %d%% ] %d objects, %.2f seconds, TPS: %.4f \n", current_percentage, estimated_current_object_number, percent_time, tps); @@ -182,8 +182,8 @@ main(int argc, char *argv[]) MPI_Barrier(MPI_COMM_WORLD); percent_time = MPI_Wtime() - stime; if (my_rank == 0) { - int current_percentage = i / tag_10percent; - int estimated_current_tag_number = n_obj / 1000 * current_percentage; + int current_percentage = i / tag_10percent * 10; + int estimated_current_tag_number = n_obj / 100 * current_percentage; double tps = estimated_current_tag_number / percent_time; printf("[TAG PROGRESS %d%% ] %d tags, %.2f seconds, TPS: %.4f \n", current_percentage, estimated_current_tag_number, percent_time, tps); @@ -214,8 +214,8 @@ main(int argc, char *argv[]) MPI_Barrier(MPI_COMM_WORLD); percent_time = MPI_Wtime() - stime; if (my_rank == 0) { - int current_percentage = i / query_10percent; - int estimated_current_query_number = n_obj / 1000 * current_percentage; + int current_percentage = i / query_10percent * 10; + int estimated_current_query_number = n_obj / 100 * current_percentage; double tps = estimated_current_query_number / percent_time; printf("[QRY PROGRESS %d%% ] %d queries, %.2f 
seconds, TPS: %.4f \n", current_percentage, estimated_current_query_number, percent_time, tps); From 0adf28e685f25d9e0990d4b922fa4188c7c5f84d Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 7 Apr 2023 13:25:28 -0500 Subject: [PATCH 074/806] update message format --- src/tests/kvtag_add_get_scale.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index dc939155f..2bc5cc94c 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -70,7 +70,7 @@ main(int argc, char *argv[]) pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t * obj_ids; int n_obj, n_add_tag, n_query, my_obj, my_obj_s, my_add_tag, my_query, my_add_tag_s, my_query_s; - int obj_10percent = 0, tag_10percent = 0, query_10percent = 0; + int obj_1percent = 0, tag_1percent = 0, query_1percent = 0; int proc_num, my_rank, i, v; char obj_name[128]; double stime, total_time, percent_time; @@ -101,9 +101,9 @@ main(int argc, char *argv[]) assign_work_to_rank(my_rank, proc_num, n_query, &my_query, &my_query_s); assign_work_to_rank(my_rank, proc_num, n_obj, &my_obj, &my_obj_s); - obj_10percent = my_obj / 10; - tag_10percent = my_add_tag / 10; - query_10percent = my_query / 10; + obj_1percent = my_obj / 100; + tag_1percent = my_add_tag / 100; + query_1percent = my_query / 100; if (my_rank == 0) printf("Create %d obj, %d tags, query %d\n", my_obj, my_add_tag, my_query); @@ -140,12 +140,12 @@ main(int argc, char *argv[]) if (obj_ids[i] <= 0) printf("Fail to create object @ line %d!\n", __LINE__); - if (i % obj_10percent == 0) { + if (i > 0 && i % obj_1percent == 0) { #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); percent_time = MPI_Wtime() - stime; if (my_rank == 0) { - int current_percentage = i / obj_10percent * 10; + int current_percentage = i / obj_1percent; int estimated_current_object_number = n_obj / 100 * current_percentage; double tps = estimated_current_object_number / percent_time; printf("[OBJ PROGRESS %d%% ] %d objects, %.2f seconds, TPS: %.4f \n", current_percentage, @@ -161,7 +161,7 @@ main(int argc, char *argv[]) #endif if (my_rank == 0) - printf("Total time to create %d objects: %.4f\n\n", n_obj, total_time); + printf("Total time to create %d objects: %.4f , throughput %.4f \n", n_obj, total_time, n_obj / total_time); // Add tags kvtag.name = "Group"; @@ -177,16 +177,16 @@ main(int argc, char *argv[]) if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.size) < 0) printf("fail to add a kvtag to o%d\n", i + my_obj_s); - if (i % tag_10percent == 0) { + if (i % tag_1percent == 0) { #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); percent_time = MPI_Wtime() - stime; if (my_rank == 0) { - int current_percentage = i / tag_10percent * 10; + int current_percentage = i / tag_1percent; int estimated_current_tag_number = n_obj / 100 * current_percentage; double tps = estimated_current_tag_number / percent_time; printf("[TAG PROGRESS %d%% ] %d tags, %.2f seconds, TPS: %.4f \n", current_percentage, - estimated_current_tag_number, percent_time, tps); + estimated_current_tag_number, percent_time, tps); } #endif } @@ -197,7 +197,7 @@ main(int argc, char *argv[]) total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to add tags to %d objects: %.4f\n", n_add_tag, total_time); + printf("Total time to add tags to %d objects: %.4f , throughput %.4f \n", n_add_tag, total_time, n_add_tag / total_time); values = (void **)calloc(my_query, sizeof(void *)); @@ -209,16 +209,16 @@ 
main(int argc, char *argv[]) if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_size) < 0) printf("fail to get a kvtag from o%d\n", i + my_query_s); - if (i % query_10percent == 0) { + if (i % query_1percent == 0) { #ifdef ENABLE_MPI MPI_Barrier(MPI_COMM_WORLD); percent_time = MPI_Wtime() - stime; if (my_rank == 0) { - int current_percentage = i / query_10percent * 10; + int current_percentage = i / query_1percent; int estimated_current_query_number = n_obj / 100 * current_percentage; double tps = estimated_current_query_number / percent_time; printf("[QRY PROGRESS %d%% ] %d queries, %.2f seconds, TPS: %.4f \n", current_percentage, - estimated_current_query_number, percent_time, tps); + estimated_current_query_number, percent_time, tps); } #endif } @@ -229,7 +229,7 @@ main(int argc, char *argv[]) total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to retrieve 1 tag from %d objects: %.4f\n", n_query, total_time); + printf("Total time to retrieve 1 tag from %d objects: %.4f , throughput %.4f \n", n_query, total_time, n_query / total_time); fflush(stdout); From 8ebcf3b810b30d7488efe5b9f4aa868d4903c2b8 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 7 Apr 2023 13:28:02 -0500 Subject: [PATCH 075/806] update message format --- scripts/kvtag_add_get_scale/perlmutter/template.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/kvtag_add_get_scale/perlmutter/template.sh b/scripts/kvtag_add_get_scale/perlmutter/template.sh index 8b424ef88..d48cb29f6 100755 --- a/scripts/kvtag_add_get_scale/perlmutter/template.sh +++ b/scripts/kvtag_add_get_scale/perlmutter/template.sh @@ -34,7 +34,7 @@ CLOSE=$EXECPATH/close_server chmod +x $EXECPATH/* -NUM_OBJ=$((1024*1024*100)) +NUM_OBJ=$((1024*1024*1024)) NUM_TAGS=$NUM_OBJ NUM_QUERY=$((NUM_OBJ)) From 41875be40f38b3dabe148988f95e76d056edbf83 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 7 Apr 2023 14:09:32 -0500 Subject: [PATCH 076/806] clang format --- src/api/pdc_client_connect.c | 2 +- src/tests/kvtag_add_get_benchmark.c | 3 +-- src/tests/kvtag_add_get_scale.c | 9 ++++++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index 457001079..7293c6834 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -6960,7 +6960,7 @@ PDC_add_kvtag(pdcid_t obj_id, pdc_kvtag_t *kvtag, int is_cont) in.hash_value = PDC_get_hash_by_name(cont_prop->cont_info_pub->name); } - //TODO: delete this line after debugging. + // TODO: delete this line after debugging. 
// printf("==CLIENT[%d]: PDC_add_kvtag::in.obj_id = %llu \n ", pdc_client_mpi_rank_g, in.obj_id); server_id = PDC_get_server_by_obj_id(meta_id, pdc_server_num_g); diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index be332a265..7b719ec96 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -378,7 +378,7 @@ main(int argc, char *argv[]) char **tag_values = gen_strings(n_attr, n_attr_len); do { - + k++; #ifdef ENABLE_MPI @@ -463,7 +463,6 @@ main(int argc, char *argv[]) my_obj_s += n_obj_incr; my_query_s += n_obj_incr; - } while (total_object_count < n_obj); diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index 2bc5cc94c..40b895b98 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -161,7 +161,8 @@ main(int argc, char *argv[]) #endif if (my_rank == 0) - printf("Total time to create %d objects: %.4f , throughput %.4f \n", n_obj, total_time, n_obj / total_time); + printf("Total time to create %d objects: %.4f , throughput %.4f \n", n_obj, total_time, + n_obj / total_time); // Add tags kvtag.name = "Group"; @@ -197,7 +198,8 @@ main(int argc, char *argv[]) total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to add tags to %d objects: %.4f , throughput %.4f \n", n_add_tag, total_time, n_add_tag / total_time); + printf("Total time to add tags to %d objects: %.4f , throughput %.4f \n", n_add_tag, total_time, + n_add_tag / total_time); values = (void **)calloc(my_query, sizeof(void *)); @@ -229,7 +231,8 @@ main(int argc, char *argv[]) total_time = MPI_Wtime() - stime; #endif if (my_rank == 0) - printf("Total time to retrieve 1 tag from %d objects: %.4f , throughput %.4f \n", n_query, total_time, n_query / total_time); + printf("Total time to retrieve 1 tag from %d objects: %.4f , throughput %.4f \n", n_query, total_time, + n_query / total_time); fflush(stdout); From d294866eb085b4b6c3739b9585c40afa68ccae35 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 11:32:09 -0500 Subject: [PATCH 077/806] update job scripts --- scripts/kvtag_add_get_benchmark/perlmutter/template.sh | 7 ++++--- scripts/kvtag_add_get_scale/perlmutter/template.sh | 9 +++++---- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh index 9c69b9872..a7e29d7db 100755 --- a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh +++ b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh @@ -21,6 +21,7 @@ rm -rf $PDC_TMPDIR/* REPEAT=1 N_NODE=NODENUM +# NCLIENT=127 NCLIENT=31 export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE @@ -48,19 +49,19 @@ echo "" echo "=============" echo "$i Init server" echo "=============" -srun -N $N_NODE -n $N_NODE -c 2 --mem=100000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $SERVER & +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*4)) -c 2 --cpu_bind=cores $SERVER & sleep 5 echo "============================================" echo "KVTAGS with $N_NODE nodes" echo "============================================" -srun -N $N_NODE -n $TOTALPROC -c 2 --mem=100000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $TOTALPROC -c 2 --cpu_bind=cores $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE echo "" echo "=================" echo "$i Closing server" echo "=================" -srun -N 1 -n 1 -c 2 
--mem=25600 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLOSE +stdbuf -i0 -o0 -e0 srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores $CLOSE date diff --git a/scripts/kvtag_add_get_scale/perlmutter/template.sh b/scripts/kvtag_add_get_scale/perlmutter/template.sh index d48cb29f6..bec9b51e0 100755 --- a/scripts/kvtag_add_get_scale/perlmutter/template.sh +++ b/scripts/kvtag_add_get_scale/perlmutter/template.sh @@ -3,7 +3,7 @@ #REGSBATCH -q regular #DBGSBATCH -q debug #SBATCH -N NODENUM -#REGSBATCH -t 4:00:00 +#REGSBATCH -t 1:00:00 #DBGSBATCH -t 0:30:00 #SBATCH -C cpu #SBATCH -J JOBNAME @@ -21,6 +21,7 @@ REPEAT=1 N_NODE=NODENUM NCLIENT=31 +# NCLIENT=126 export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE mkdir -p $PDC_TMPDIR @@ -45,19 +46,19 @@ echo "" echo "=============" echo "$i Init server" echo "=============" -srun -N $N_NODE -n $N_NODE -c 2 --mem=128000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $SERVER & +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*4)) -c 2 --cpu_bind=cores $SERVER & sleep 5 echo "============================================" echo "KVTAGS with $N_NODE nodes" echo "============================================" -srun -N $N_NODE -n $TOTALPROC -c 2 --mem=256000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLIENT $NUM_OBJ $NUM_TAGS $NUM_QUERY +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $TOTALPROC -c 2 --cpu_bind=cores $CLIENT $NUM_OBJ $NUM_TAGS $NUM_QUERY echo "" echo "=================" echo "$i Closing server" echo "=================" -srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLOSE +stdbuf -i0 -o0 -e0 srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores $CLOSE date From 68d4dab7bde4864f9dda34d3610ec3fa1cb85965 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 13:39:57 -0500 Subject: [PATCH 078/806] comment off object/container close procedure in benchmark to save node hours --- .../perlmutter/template.sh | 2 +- .../kvtag_add_get_scale/perlmutter/template.sh | 2 +- src/tests/kvtag_add_get_benchmark.c | 15 ++++++++------- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh index a7e29d7db..7ea2e6149 100755 --- a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh +++ b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh @@ -49,7 +49,7 @@ echo "" echo "=============" echo "$i Init server" echo "=============" -stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*4)) -c 2 --cpu_bind=cores $SERVER & +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*1)) -c 2 --cpu_bind=cores $SERVER & sleep 5 diff --git a/scripts/kvtag_add_get_scale/perlmutter/template.sh b/scripts/kvtag_add_get_scale/perlmutter/template.sh index bec9b51e0..392d498b8 100755 --- a/scripts/kvtag_add_get_scale/perlmutter/template.sh +++ b/scripts/kvtag_add_get_scale/perlmutter/template.sh @@ -46,7 +46,7 @@ echo "" echo "=============" echo "$i Init server" echo "=============" -stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*4)) -c 2 --cpu_bind=cores $SERVER & +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*1)) -c 2 --cpu_bind=cores $SERVER & sleep 5 diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 7b719ec96..6c4457eb4 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -293,12 +293,12 @@ check_and_release_query_result(uint64_t n_query, uint64_t my_obj, uint64_t my_ob } } free(values); - // close objects - for (i = 0; i < my_obj; i++) { - v = i + my_obj_s; - if (PDCobj_close(obj_ids[i]) < 0) - printf("fail 
to close object o%" PRIu64 "\n", v); - } + // FIXME: close objects. This is currently commented off to save node hours for benchmarks. + // for (i = 0; i < my_obj; i++) { + // v = i + my_obj_s; + // if (PDCobj_close(obj_ids[i]) < 0) + // printf("fail to close object o%" PRIu64 "\n", v); + // } } void @@ -486,7 +486,8 @@ main(int argc, char *argv[]) free(tag_values); free(obj_ids); - closePDC(pdc, cont_prop, cont, obj_prop); + //FIXME: the following is currently commented off to reduce node hours taken by time-consuming resource releasing procedure. + // closePDC(pdc, cont_prop, cont, obj_prop); done: #ifdef ENABLE_MPI From bb6c87bac198c1dfbbffcf9c9929e368b68ccfde Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 13:43:11 -0500 Subject: [PATCH 079/806] change the max number of object to 1M --- scripts/kvtag_add_get_benchmark/perlmutter/template.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh index 7ea2e6149..a2ef04329 100755 --- a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh +++ b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh @@ -36,7 +36,7 @@ CLOSE=$EXECPATH/close_server chmod +x $EXECPATH/* -MAX_OBJ_COUNT=$((1024*1024*1024)) +MAX_OBJ_COUNT=$((1024*1024)) OBJ_INCR=$((MAX_OBJ_COUNT/1024)) ATTR_COUNT=ATTRNUM ATTR_LENGTH=ATTRLEN From b09b94ebfc5fc0af102e59ba52eed3383991161a Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 14:09:15 -0500 Subject: [PATCH 080/806] change the max length of attribute value --- scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh | 2 +- scripts/kvtag_add_get_benchmark/perlmutter/template.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh b/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh index 62eb1a2b2..9804bfee5 100755 --- a/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh +++ b/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh @@ -7,7 +7,7 @@ MAX_ATTRLEN=1000 for (( i = 1; i <= $MAX_NODE; i*=2 )); do mkdir -p $i for (( j = 1; j <= $MAX_ATTR; j*=4 )); do - for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do + for (( k = 100; k <= $MAX_ATTRLEN; k+=100 )); do JOBNAME=kvtag_bench_${i}_${j}_${k} TARGET=./$i/$JOBNAME.sbatch cp template.sh $TARGET diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh index a2ef04329..db06915c0 100755 --- a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh +++ b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh @@ -3,7 +3,7 @@ #REGSBATCH -q regular #DBGSBATCH -q debug #SBATCH -N NODENUM -#REGSBATCH -t 4:00:00 +#REGSBATCH -t 1:00:00 #DBGSBATCH -t 0:30:00 #SBATCH -C cpu #SBATCH -J JOBNAME From 2ef74178b0e0abf96567c6557f564e1f0da55cb1 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 14:10:41 -0500 Subject: [PATCH 081/806] change the max length of attribute value --- scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh b/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh index 9804bfee5..1bc285fe7 100755 --- a/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh +++ b/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh @@ -2,12 +2,12 @@ N_THREAD=NO MAX_NODE=512 MAX_ATTR=1024 -MAX_ATTRLEN=1000 +MAX_ATTRLEN=1000000 for 
(( i = 1; i <= $MAX_NODE; i*=2 )); do mkdir -p $i for (( j = 1; j <= $MAX_ATTR; j*=4 )); do - for (( k = 100; k <= $MAX_ATTRLEN; k+=100 )); do + for (( k = 100; k <= $MAX_ATTRLEN; k*=10 )); do JOBNAME=kvtag_bench_${i}_${j}_${k} TARGET=./$i/$JOBNAME.sbatch cp template.sh $TARGET From 2129f9655031310d6eccc6a1a3074f10686dc8df Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 23:16:11 -0500 Subject: [PATCH 082/806] llsm tiff import test --- tools/CMakeLists.txt | 9 +- tools/llsm/CMakeLists.txt | 5 - tools/llsm/parallelReadTiff.c | 1136 ++++++++++++++++++++------------- tools/llsm/parallelReadTiff.h | 22 + tools/llsm_importer.c | 47 ++ 5 files changed, 757 insertions(+), 462 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index e2a017bd4..9e4062959 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -54,7 +54,6 @@ endif() option(USE_SYSTEM_OPENMP "Use system-installed OpenMP." ON) if(USE_SYSTEM_OPENMP) find_package(OpenMP REQUIRED) - if(OPENMP_FOUND) set(OPENMP_LIBRARIES "${OpenMP_C_LIBRARIES}") else() @@ -65,12 +64,18 @@ endif() add_definitions(-DENABLE_MPI=1) add_library(cjson cjson/cJSON.c) -add_subdirectory(llsm) +add_library(llsm_tiff llsm/parallelReadTiff.c) +target_compile_options(llsm_tiff PRIVATE ${OpenMP_C_FLAGS}) +target_include_directories(llsm_tiff PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/llsm) +target_link_libraries(llsm_tiff PUBLIC ${OpenMP_C_LIBRARIES}) + +# add_subdirectory(llsm) set(PROGRAMS pdc_ls pdc_import pdc_export + llsm_importer ) foreach(program ${PROGRAMS}) diff --git a/tools/llsm/CMakeLists.txt b/tools/llsm/CMakeLists.txt index a0ea9404d..e69de29bb 100644 --- a/tools/llsm/CMakeLists.txt +++ b/tools/llsm/CMakeLists.txt @@ -1,5 +0,0 @@ - -add_library(llsm_tiff parallelReadTiff.c) -target_compile_options(llsm_tiff PRIVATE ${OpenMP_C_FLAGS}) -target_include_directories(llsm_tiff PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) -target_link_libraries(llsm_tiff PUBLIC ${OpenMP_C_LIBRARIES}) \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index fb4283e6a..ef417ee60 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -1,91 +1,76 @@ -#include -#include -#include -#include -#include -#include -#include - -#include "tiffio.h" -#include "omp.h" -#include "mex.h" -//mex -v COPTIMFLAGS="-O3 -DNDEBUG" CFLAGS='$CFLAGS -O3 -fopenmp' LDFLAGS='$LDFLAGS -O3 -fopenmp' '-I/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' '-L/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' -ltiff parallelReadTiff.c -//mex COMPFLAGS='$COMPFLAGS /openmp' '-IC:\Program Files (x86)\tiff\include\' '-LC:\Program Files (x86)\tiff\lib\' -ltiffd.lib C:\Users\Matt\Documents\parallelTiff\main.cpp - -//libtiff 4.4.0 -//mex -v COPTIMFLAGS="-O3 -DNDEBUG" LDOPTIMFLAGS="-O3 -DNDEBUG" CFLAGS='$CFLAGS -O3 -fopenmp' LDFLAGS='$LDFLAGS -O3 -fopenmp' '-I/clusterfs/fiona/matthewmueller/software/tiff-4.4.0/include' '-L/clusterfs/fiona/matthewmueller/software/tiff-4.4.0/lib' -ltiff parallelReadTiff.c - -// Handle the tilde character in filenames on Linux/Mac -#ifndef _WIN32 -#include -char* expandTilde(char* path) { - wordexp_t expPath; - wordexp(path, &expPath, 0); - return expPath.we_wordv[0]; -} -#endif +#include "parallelReadTiff.h" -void DummyHandler(const char* module, const char* fmt, va_list ap) +void +DummyHandler(const char *module, const char *fmt, va_list ap) { // ignore errors and warnings } // Backup method in case there are errors reading strips -void 
readTiffParallelBak(uint64_t x, uint64_t y, uint64_t z, const char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint8_t flipXY){ - int32_t numWorkers = omp_get_max_threads(); - int32_t batchSize = (z-1)/numWorkers+1; - uint64_t bytes = bits/8; +void +readTiffParallelBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void *tiff, uint64_t bits, + uint64_t startSlice, uint8_t flipXY) +{ + int32_t numWorkers = omp_get_max_threads(); + int32_t batchSize = (z - 1) / numWorkers + 1; + uint64_t bytes = bits / 8; int32_t w; - #pragma omp parallel for - for(w = 0; w < numWorkers; w++){ +#pragma omp parallel for + for (w = 0; w < numWorkers; w++) { - TIFF* tif = TIFFOpen(fileName, "r"); - if(!tif) mexErrMsgIdAndTxt("tiff:threadError","Thread %d: File \"%s\" cannot be opened\n",w,fileName); + TIFF *tif = TIFFOpen(fileName, "r"); + if (!tif) + printf("tiff:threadError", "Thread %d: File \"%s\" cannot be opened\n", w, fileName); - void* buffer = malloc(x*bytes); - for(int64_t dir = startSlice+(w*batchSize); dir < startSlice+((w+1)*batchSize); dir++){ - if(dir>=z+startSlice) break; + void *buffer = malloc(x * bytes); + for (int64_t dir = startSlice + (w * batchSize); dir < startSlice + ((w + 1) * batchSize); dir++) { + if (dir >= z + startSlice) + break; int counter = 0; - while(!TIFFSetDirectory(tif, (uint64_t)dir) && counter<3){ - printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. Try %d\n",w,fileName,dir,counter+1); + while (!TIFFSetDirectory(tif, (uint64_t)dir) && counter < 3) { + printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. Try %d\n", w, fileName, dir, + counter + 1); counter++; } - for (int64_t i = 0; i < y; i++) - { + for (int64_t i = 0; i < y; i++) { TIFFReadScanline(tif, buffer, i, 0); - if(!flipXY){ - memcpy(tiff+((i*x)*bytes),buffer,x*bytes); + if (!flipXY) { + memcpy(tiff + ((i * x) * bytes), buffer, x * bytes); continue; } - //loading the data into a buffer - switch(bits){ + // loading the data into a buffer + switch (bits) { case 8: // Map Values to flip x and y for MATLAB - for(int64_t j = 0; j < x; j++){ - ((uint8_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint8_t*)buffer)[j]; + for (int64_t j = 0; j < x; j++) { + ((uint8_t *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((uint8_t *)buffer)[j]; } - break; + break; case 16: // Map Values to flip x and y for MATLAB - for(int64_t j = 0; j < x; j++){ - ((uint16_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint16_t*)buffer)[j]; + for (int64_t j = 0; j < x; j++) { + ((uint16_t *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((uint16_t *)buffer)[j]; } - break; + break; case 32: // Map Values to flip x and y for MATLAB - for(int64_t j = 0; j < x; j++){ - ((float*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((float*)buffer)[j]; + for (int64_t j = 0; j < x; j++) { + ((float *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((float *)buffer)[j]; } - break; + break; case 64: // Map Values to flip x and y for MATLAB - for(int64_t j = 0; j < x; j++){ - ((double*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((double*)buffer)[j]; + for (int64_t j = 0; j < x; j++) { + ((double *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((double *)buffer)[j]; } - break; + break; } } } @@ -94,113 +79,127 @@ void readTiffParallelBak(uint64_t x, uint64_t y, uint64_t z, const char* fileNam } } -void readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint64_t stripSize, uint8_t flipXY){ - 
int32_t numWorkers = omp_get_max_threads(); - int32_t batchSize = (z-1)/numWorkers+1; - uint64_t bytes = bits/8; +void +readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void *tiff, uint64_t bits, + uint64_t startSlice, uint64_t stripSize, uint8_t flipXY) +{ + int32_t numWorkers = omp_get_max_threads(); + int32_t batchSize = (z - 1) / numWorkers + 1; + uint64_t bytes = bits / 8; uint16_t compressed = 1; - TIFF* tif = TIFFOpen(fileName, "r"); + TIFF * tif = TIFFOpen(fileName, "r"); TIFFGetField(tif, TIFFTAG_COMPRESSION, &compressed); - - - int32_t w; uint8_t errBak = 0; - uint8_t err = 0; - char errString[10000]; - if(compressed > 1 || z < 32768){ + uint8_t err = 0; + char errString[10000]; + if (compressed > 1 || z < 32768) { TIFFClose(tif); - #pragma omp parallel for - for(w = 0; w < numWorkers; w++){ +#pragma omp parallel for + for (w = 0; w < numWorkers; w++) { uint8_t outCounter = 0; - TIFF* tif = TIFFOpen(fileName, "r"); - while(!tif){ + TIFF * tif = TIFFOpen(fileName, "r"); + while (!tif) { tif = TIFFOpen(fileName, "r"); - if(outCounter == 3){ - #pragma omp critical + if (outCounter == 3) { +#pragma omp critical { err = 1; - sprintf(errString,"Thread %d: File \"%s\" cannot be opened\n",w,fileName); + sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); } continue; } outCounter++; } - void* buffer = malloc(x*stripSize*bytes); - for(int64_t dir = startSlice+(w*batchSize); dir < startSlice+((w+1)*batchSize); dir++){ - if(dir>=z+startSlice || err) break; + void *buffer = malloc(x * stripSize * bytes); + for (int64_t dir = startSlice + (w * batchSize); dir < startSlice + ((w + 1) * batchSize); + dir++) { + if (dir >= z + startSlice || err) + break; uint8_t counter = 0; - while(!TIFFSetDirectory(tif, (uint64_t)dir) && counter<3){ + while (!TIFFSetDirectory(tif, (uint64_t)dir) && counter < 3) { counter++; - if(counter == 3){ - #pragma omp critical + if (counter == 3) { +#pragma omp critical { err = 1; - sprintf(errString,"Thread %d: File \"%s\" cannot be opened\n",w,fileName); + sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); } } } - if(err) break; - for (int64_t i = 0; i*stripSize < y; i++) - { + if (err) + break; + for (int64_t i = 0; i * stripSize < y; i++) { - //loading the data into a buffer - int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize*x*bytes); - if(cBytes < 0){ - #pragma omp critical + // loading the data into a buffer + int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize * x * bytes); + if (cBytes < 0) { +#pragma omp critical { errBak = 1; - err = 1; - sprintf(errString,"Thread %d: Strip %ld cannot be read\n",w,i); + err = 1; + sprintf(errString, "Thread %d: Strip %ld cannot be read\n", w, i); } break; } - if(!flipXY){ - memcpy(tiff+((i*stripSize*x)*bytes),buffer,cBytes); + if (!flipXY) { + memcpy(tiff + ((i * stripSize * x) * bytes), buffer, cBytes); continue; } - switch(bits){ + switch (bits) { case 8: // Map Values to flip x and y for MATLAB - for(int64_t k = 0; k < stripSize; k++){ - if((k+(i*stripSize)) >= y) break; - for(int64_t j = 0; j < x; j++){ - ((uint8_t*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((uint8_t*)buffer)[j+(k*x)]; + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((uint8_t *)tiff)[((j * y) + (k + (i * stripSize))) + + ((dir - startSlice) * (x * y))] = + ((uint8_t *)buffer)[j + (k * x)]; } } - break; + break; case 16: // Map Values to flip x and y for MATLAB 
- for(int64_t k = 0; k < stripSize; k++){ - if((k+(i*stripSize)) >= y) break; - for(int64_t j = 0; j < x; j++){ - ((uint16_t*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((uint16_t*)buffer)[j+(k*x)]; + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((uint16_t *)tiff)[((j * y) + (k + (i * stripSize))) + + ((dir - startSlice) * (x * y))] = + ((uint16_t *)buffer)[j + (k * x)]; } } - break; + break; case 32: // Map Values to flip x and y for MATLAB - for(int64_t k = 0; k < stripSize; k++){ - if((k+(i*stripSize)) >= y) break; - for(int64_t j = 0; j < x; j++){ - ((float*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((float*)buffer)[j+(k*x)]; + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((float *)tiff)[((j * y) + (k + (i * stripSize))) + + ((dir - startSlice) * (x * y))] = + ((float *)buffer)[j + (k * x)]; } } - break; + break; case 64: // Map Values to flip x and y for MATLAB - for(int64_t k = 0; k < stripSize; k++){ - if((k+(i*stripSize)) >= y) break; - for(int64_t j = 0; j < x; j++){ - ((double*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((double*)buffer)[j+(k*x)]; + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((double *)tiff)[((j * y) + (k + (i * stripSize))) + + ((dir - startSlice) * (x * y))] = + ((double *)buffer)[j + (k * x)]; } } - break; + break; } } } @@ -208,62 +207,68 @@ void readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char* fileName, TIFFClose(tif); } } - else{ - uint64_t stripsPerDir = (uint64_t)ceil((double)y/(double)stripSize); - #ifdef _WIN32 - int fd = open(fileName,O_RDONLY | O_BINARY); - #else - int fd = open(fileName,O_RDONLY); - #endif - if(fd == -1) mexErrMsgIdAndTxt("disk:threadError","File \"%s\" cannot be opened from Disk\n",fileName); - - if(!tif) mexErrMsgIdAndTxt("tiff:threadError","File \"%s\" cannot be opened\n",fileName); - uint64_t offset = 0; - uint64_t* offsets = NULL; + else { + uint64_t stripsPerDir = (uint64_t)ceil((double)y / (double)stripSize); +#ifdef _WIN32 + int fd = open(fileName, O_RDONLY | O_BINARY); +#else + int fd = open(fileName, O_RDONLY); +#endif + if (fd == -1) + printf("disk:threadError", "File \"%s\" cannot be opened from Disk\n", fileName); + + if (!tif) + printf("tiff:threadError", "File \"%s\" cannot be opened\n", fileName); + uint64_t offset = 0; + uint64_t *offsets = NULL; TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); - uint64_t* byteCounts = NULL; + uint64_t *byteCounts = NULL; TIFFGetField(tif, TIFFTAG_STRIPBYTECOUNTS, &byteCounts); - if(!offsets || !byteCounts) mexErrMsgIdAndTxt("tiff:threadError","Could not get offsets or byte counts from the tiff file\n"); - offset = offsets[0]; - uint64_t fOffset = offsets[stripsPerDir-1]+byteCounts[stripsPerDir-1]; - uint64_t zSize = fOffset-offset; - TIFFSetDirectory(tif,1); + if (!offsets || !byteCounts) + printf("tiff:threadError", "Could not get offsets or byte counts from the tiff file\n"); + offset = offsets[0]; + uint64_t fOffset = offsets[stripsPerDir - 1] + byteCounts[stripsPerDir - 1]; + uint64_t zSize = fOffset - offset; + TIFFSetDirectory(tif, 1); TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); - uint64_t gap = offsets[0]-fOffset; - - lseek(fd, offset, SEEK_SET); + uint64_t gap = offsets[0] - fOffset; + lseek(fd, offset, SEEK_SET); TIFFClose(tif); - uint64_t curr = 0; + uint64_t curr = 
0; uint64_t bytesRead = 0; // TESTING // Not sure if we will need to read in chunks like for ImageJ - for(uint64_t i = 0; i < z; i++){ - bytesRead = read(fd,tiff+curr,zSize); + for (uint64_t i = 0; i < z; i++) { + bytesRead = read(fd, tiff + curr, zSize); curr += bytesRead; - lseek(fd,gap,SEEK_CUR); + lseek(fd, gap, SEEK_CUR); } close(fd); - uint64_t size = x*y*z*(bits/8); - void* tiffC = malloc(size); - memcpy(tiffC,tiff,size); - #pragma omp parallel for - for(uint64_t k = 0; k < z; k++){ - for(uint64_t j = 0; j < x; j++){ - for(uint64_t i = 0; i < y; i++){ - switch(bits){ + uint64_t size = x * y * z * (bits / 8); + void * tiffC = malloc(size); + memcpy(tiffC, tiff, size); +#pragma omp parallel for + for (uint64_t k = 0; k < z; k++) { + for (uint64_t j = 0; j < x; j++) { + for (uint64_t i = 0; i < y; i++) { + switch (bits) { case 8: - ((uint8_t*)tiff)[i+(j*y)+(k*x*y)] = ((uint8_t*)tiffC)[j+(i*x)+(k*x*y)]; + ((uint8_t *)tiff)[i + (j * y) + (k * x * y)] = + ((uint8_t *)tiffC)[j + (i * x) + (k * x * y)]; break; case 16: - ((uint16_t*)tiff)[i+(j*y)+(k*x*y)] = ((uint16_t*)tiffC)[j+(i*x)+(k*x*y)]; + ((uint16_t *)tiff)[i + (j * y) + (k * x * y)] = + ((uint16_t *)tiffC)[j + (i * x) + (k * x * y)]; break; case 32: - ((float*)tiff)[i+(j*y)+(k*x*y)] = ((float*)tiffC)[j+(i*x)+(k*x*y)]; + ((float *)tiff)[i + (j * y) + (k * x * y)] = + ((float *)tiffC)[j + (i * x) + (k * x * y)]; break; case 64: - ((double*)tiff)[i+(j*y)+(k*x*y)] = ((double*)tiffC)[j+(i*x)+(k*x*y)]; + ((double *)tiff)[i + (j * y) + (k * x * y)] = + ((double *)tiffC)[j + (i * x) + (k * x * y)]; break; } } @@ -271,69 +276,81 @@ void readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char* fileName, } free(tiffC); } - if(err){ - if(errBak) readTiffParallelBak(x, y, z, fileName, tiff, bits, startSlice, flipXY); - else mexErrMsgIdAndTxt("tiff:threadError",errString); + if (err) { + if (errBak) + readTiffParallelBak(x, y, z, fileName, tiff, bits, startSlice, flipXY); + else + printf("tiff:threadError", errString); } } // Backup method in case there are errors reading strips -void readTiffParallel2DBak(uint64_t x, uint64_t y, uint64_t z, const char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint8_t flipXY){ - int32_t numWorkers = omp_get_max_threads(); - int32_t batchSize = (y-1)/numWorkers+1; - uint64_t bytes = bits/8; +void +readTiffParallel2DBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void *tiff, uint64_t bits, + uint64_t startSlice, uint8_t flipXY) +{ + int32_t numWorkers = omp_get_max_threads(); + int32_t batchSize = (y - 1) / numWorkers + 1; + uint64_t bytes = bits / 8; int32_t w; - #pragma omp parallel for - for(w = 0; w < numWorkers; w++){ +#pragma omp parallel for + for (w = 0; w < numWorkers; w++) { - TIFF* tif = TIFFOpen(fileName, "r"); - if(!tif) mexErrMsgIdAndTxt("tiff:threadError","Thread %d: File \"%s\" cannot be opened\n",w,fileName); + TIFF *tif = TIFFOpen(fileName, "r"); + if (!tif) + printf("tiff:threadError", "Thread %d: File \"%s\" cannot be opened\n", w, fileName); - void* buffer = malloc(x*bytes); - for(int64_t dir = startSlice+(w*batchSize); dir < startSlice+((w+1)*batchSize); dir++){ - if(dir>=z+startSlice) break; + void *buffer = malloc(x * bytes); + for (int64_t dir = startSlice + (w * batchSize); dir < startSlice + ((w + 1) * batchSize); dir++) { + if (dir >= z + startSlice) + break; int counter = 0; - while(!TIFFSetDirectory(tif, (uint64_t)0) && counter<3){ - printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. 
Try %d\n",w,fileName,dir,counter+1); + while (!TIFFSetDirectory(tif, (uint64_t)0) && counter < 3) { + printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. Try %d\n", w, fileName, dir, + counter + 1); counter++; } - for (int64_t i = (w*batchSize); i < ((w+1)*batchSize); i++) - { - if(i >= y) break; + for (int64_t i = (w * batchSize); i < ((w + 1) * batchSize); i++) { + if (i >= y) + break; TIFFReadScanline(tif, buffer, i, 0); - if(!flipXY){ - memcpy(tiff+((i*x)*bytes),buffer,x*bytes); + if (!flipXY) { + memcpy(tiff + ((i * x) * bytes), buffer, x * bytes); continue; } - //loading the data into a buffer - switch(bits){ + // loading the data into a buffer + switch (bits) { case 8: // Map Values to flip x and y for MATLAB - for(int64_t j = 0; j < x; j++){ - ((uint8_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint8_t*)buffer)[j]; + for (int64_t j = 0; j < x; j++) { + ((uint8_t *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((uint8_t *)buffer)[j]; } - break; + break; case 16: // Map Values to flip x and y for MATLAB - for(int64_t j = 0; j < x; j++){ - ((uint16_t*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((uint16_t*)buffer)[j]; + for (int64_t j = 0; j < x; j++) { + ((uint16_t *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((uint16_t *)buffer)[j]; } - break; + break; case 32: // Map Values to flip x and y for MATLAB - for(int64_t j = 0; j < x; j++){ - ((float*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((float*)buffer)[j]; + for (int64_t j = 0; j < x; j++) { + ((float *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((float *)buffer)[j]; } - break; + break; case 64: // Map Values to flip x and y for MATLAB - for(int64_t j = 0; j < x; j++){ - ((double*)tiff)[((j*y)+i)+((dir-startSlice)*(x*y))] = ((double*)buffer)[j]; + for (int64_t j = 0; j < x; j++) { + ((double *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((double *)buffer)[j]; } - break; + break; } } } @@ -342,135 +359,152 @@ void readTiffParallel2DBak(uint64_t x, uint64_t y, uint64_t z, const char* fileN } } -void readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint64_t stripSize, uint8_t flipXY){ - int32_t numWorkers = omp_get_max_threads(); - uint64_t stripsPerDir = (uint64_t)ceil((double)y/(double)stripSize); - int32_t batchSize = (stripsPerDir-1)/numWorkers+1; - uint64_t bytes = bits/8; +void +readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void *tiff, uint64_t bits, + uint64_t startSlice, uint64_t stripSize, uint8_t flipXY) +{ + int32_t numWorkers = omp_get_max_threads(); + uint64_t stripsPerDir = (uint64_t)ceil((double)y / (double)stripSize); + int32_t batchSize = (stripsPerDir - 1) / numWorkers + 1; + uint64_t bytes = bits / 8; int32_t w; - uint8_t err = 0; + uint8_t err = 0; uint8_t errBak = 0; - char errString[10000]; - + char errString[10000]; - #pragma omp parallel for - for(w = 0; w < numWorkers; w++){ +#pragma omp parallel for + for (w = 0; w < numWorkers; w++) { uint8_t outCounter = 0; - TIFF* tif = TIFFOpen(fileName, "r"); - while(!tif){ + TIFF * tif = TIFFOpen(fileName, "r"); + while (!tif) { tif = TIFFOpen(fileName, "r"); - if(outCounter == 3){ - #pragma omp critical + if (outCounter == 3) { +#pragma omp critical { err = 1; - sprintf(errString,"Thread %d: File \"%s\" cannot be opened\n",w,fileName); + sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); } continue; } outCounter++; } - void* buffer = malloc(x*stripSize*bytes); - + 
void *buffer = malloc(x * stripSize * bytes); uint8_t counter = 0; - while(!TIFFSetDirectory(tif, 0) && counter<3){ - printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. Try %d\n",w,fileName,0,counter+1); + while (!TIFFSetDirectory(tif, 0) && counter < 3) { + printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. Try %d\n", w, fileName, 0, + counter + 1); counter++; - if(counter == 3){ - #pragma omp critical + if (counter == 3) { +#pragma omp critical { err = 1; - sprintf(errString,"Thread %d: File \"%s\" cannot be opened\n",w,fileName); + sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); } } } - for (int64_t i = (w*batchSize); i < (w+1)*batchSize; i++) - { - if(i*stripSize >= y || err) break; - //loading the data into a buffer - int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize*x*bytes); - if(cBytes < 0){ - #pragma omp critical + for (int64_t i = (w * batchSize); i < (w + 1) * batchSize; i++) { + if (i * stripSize >= y || err) + break; + // loading the data into a buffer + int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize * x * bytes); + if (cBytes < 0) { +#pragma omp critical { errBak = 1; - err = 1; - sprintf(errString,"Thread %d: Strip %ld cannot be read\n",w,i); + err = 1; + sprintf(errString, "Thread %d: Strip %ld cannot be read\n", w, i); } break; } - if(!flipXY){ - memcpy(tiff+((i*stripSize*x)*bytes),buffer,cBytes); + if (!flipXY) { + memcpy(tiff + ((i * stripSize * x) * bytes), buffer, cBytes); continue; } - switch(bits){ + switch (bits) { case 8: // Map Values to flip x and y for MATLAB - for(int64_t k = 0; k < stripSize; k++){ - if((k+(i*stripSize)) >= y) break; - for(int64_t j = 0; j < x; j++){ - ((uint8_t*)tiff)[((j*y)+(k+(i*stripSize)))] = ((uint8_t*)buffer)[j+(k*x)]; + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((uint8_t *)tiff)[((j * y) + (k + (i * stripSize)))] = + ((uint8_t *)buffer)[j + (k * x)]; } } - break; + break; case 16: // Map Values to flip x and y for MATLAB - for(int64_t k = 0; k < stripSize; k++){ - if((k+(i*stripSize)) >= y) break; - for(int64_t j = 0; j < x; j++){ - ((uint16_t*)tiff)[((j*y)+(k+(i*stripSize)))] = ((uint16_t*)buffer)[j+(k*x)]; + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((uint16_t *)tiff)[((j * y) + (k + (i * stripSize)))] = + ((uint16_t *)buffer)[j + (k * x)]; } } - break; + break; case 32: // Map Values to flip x and y for MATLAB - for(int64_t k = 0; k < stripSize; k++){ - if((k+(i*stripSize)) >= y) break; - for(int64_t j = 0; j < x; j++){ - ((float*)tiff)[((j*y)+(k+(i*stripSize)))] = ((float*)buffer)[j+(k*x)]; + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((float *)tiff)[((j * y) + (k + (i * stripSize)))] = + ((float *)buffer)[j + (k * x)]; } } - break; + break; case 64: // Map Values to flip x and y for MATLAB - for(int64_t k = 0; k < stripSize; k++){ - if((k+(i*stripSize)) >= y) break; - for(int64_t j = 0; j < x; j++){ - ((double*)tiff)[((j*y)+(k+(i*stripSize)))] = ((double*)buffer)[j+(k*x)]; + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((double *)tiff)[((j * y) + (k + (i * stripSize)))] = + ((double *)buffer)[j + (k * x)]; } } - break; + break; } } free(buffer); TIFFClose(tif); } - if(err) { - if(errBak) readTiffParallel2DBak(x, y, z, fileName, 
tiff, bits, startSlice, flipXY); - else mexErrMsgIdAndTxt("tiff:threadError",errString); + if (err) { + if (errBak) + readTiffParallel2DBak(x, y, z, fileName, tiff, bits, startSlice, flipXY); + else + printf("tiff:threadError", errString); } } // Reading images saved by ImageJ -void readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint64_t stripSize, uint8_t flipXY){ - #ifdef _WIN32 - int fd = open(fileName,O_RDONLY | O_BINARY); - #else - int fd = open(fileName,O_RDONLY); - #endif - TIFF* tif = TIFFOpen(fileName, "r"); - if(!tif) mexErrMsgIdAndTxt("tiff:threadError","File \"%s\" cannot be opened\n",fileName); - uint64_t offset = 0; - uint64_t* offsets = NULL; +void +readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void *tiff, uint64_t bits, + uint64_t startSlice, uint64_t stripSize, uint8_t flipXY) +{ +#ifdef _WIN32 + int fd = open(fileName, O_RDONLY | O_BINARY); +#else + int fd = open(fileName, O_RDONLY); +#endif + TIFF *tif = TIFFOpen(fileName, "r"); + if (!tif) + printf("tiff:threadError", "File \"%s\" cannot be opened\n", fileName); + uint64_t offset = 0; + uint64_t *offsets = NULL; TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); - if(offsets) offset = offsets[0]; + if (offsets) + offset = offsets[0]; TIFFClose(tif); lseek(fd, offset, SEEK_SET); - uint64_t bytes = bits/8; + uint64_t bytes = bits / 8; //#pragma omp parallel for /* for(uint64_t i = 0; i < z; i++){ @@ -478,75 +512,84 @@ void readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char* file //pread(fd,tiff+cOffset,x*y*bytes,offset+cOffset); read(fd,tiff+cOffset,x*y*bytes); }*/ - uint64_t chunk = 0; - uint64_t tBytes = x*y*z*bytes; + uint64_t chunk = 0; + uint64_t tBytes = x * y * z * bytes; uint64_t bytesRead; uint64_t rBytes = tBytes; - if(tBytes < INT_MAX) bytesRead = read(fd,tiff,tBytes); - else{ - while(chunk < tBytes){ - rBytes = tBytes-chunk; - if(rBytes > INT_MAX) bytesRead = read(fd,tiff+chunk,INT_MAX); - else bytesRead = read(fd,tiff+chunk,rBytes); + if (tBytes < INT_MAX) + bytesRead = read(fd, tiff, tBytes); + else { + while (chunk < tBytes) { + rBytes = tBytes - chunk; + if (rBytes > INT_MAX) + bytesRead = read(fd, tiff + chunk, INT_MAX); + else + bytesRead = read(fd, tiff + chunk, rBytes); chunk += bytesRead; } } close(fd); // Swap endianess for types greater than 8 bits // TODO: May need to change later because we may not always need to swap - if(bits > 8){ - #pragma omp parallel for - for(uint64_t i = 0; i < x*y*z; i++){ - switch(bits){ + if (bits > 8) { +#pragma omp parallel for + for (uint64_t i = 0; i < x * y * z; i++) { + switch (bits) { case 16: - //((uint16_t*)tiff)[i] = ((((uint16_t*)tiff)[i] & 0xff) >> 8) | (((uint16_t*)tiff)[i] << 8); + //((uint16_t*)tiff)[i] = ((((uint16_t*)tiff)[i] & 0xff) >> 8) | (((uint16_t*)tiff)[i] << + // 8); //((uint16_t*)tiff)[i] = bswap_16(((uint16_t*)tiff)[i]); - ((uint16_t*)tiff)[i] = ((((uint16_t*)tiff)[i] << 8) & 0xff00) | ((((uint16_t*)tiff)[i] >> 8) & 0x00ff); + ((uint16_t *)tiff)[i] = + ((((uint16_t *)tiff)[i] << 8) & 0xff00) | ((((uint16_t *)tiff)[i] >> 8) & 0x00ff); break; case 32: - //((num & 0xff000000) >> 24) | ((num & 0x00ff0000) >> 8) | ((num & 0x0000ff00) << 8) | (num << 24) + //((num & 0xff000000) >> 24) | ((num & 0x00ff0000) >> 8) | ((num & 0x0000ff00) << 8) | + //(num << 24) //((float*)tiff)[i] = bswap_32(((float*)tiff)[i]); - ((uint32_t*)tiff)[i] = ((((uint32_t*)tiff)[i] << 24) & 0xff000000 ) | - ((((uint32_t*)tiff)[i] << 8) & 
0x00ff0000 ) | - ((((uint32_t*)tiff)[i] >> 8) & 0x0000ff00 ) | - ((((uint32_t*)tiff)[i] >> 24) & 0x000000ff ); + ((uint32_t *)tiff)[i] = ((((uint32_t *)tiff)[i] << 24) & 0xff000000) | + ((((uint32_t *)tiff)[i] << 8) & 0x00ff0000) | + ((((uint32_t *)tiff)[i] >> 8) & 0x0000ff00) | + ((((uint32_t *)tiff)[i] >> 24) & 0x000000ff); break; case 64: //((double*)tiff)[i] = bswap_64(((double*)tiff)[i]); - ((uint64_t*)tiff)[i] = ( (((uint64_t*)tiff)[i] << 56) & 0xff00000000000000UL ) | - ( (((uint64_t*)tiff)[i] << 40) & 0x00ff000000000000UL ) | - ( (((uint64_t*)tiff)[i] << 24) & 0x0000ff0000000000UL ) | - ( (((uint64_t*)tiff)[i] << 8) & 0x000000ff00000000UL ) | - ( (((uint64_t*)tiff)[i] >> 8) & 0x00000000ff000000UL ) | - ( (((uint64_t*)tiff)[i] >> 24) & 0x0000000000ff0000UL ) | - ( (((uint64_t*)tiff)[i] >> 40) & 0x000000000000ff00UL ) | - ( (((uint64_t*)tiff)[i] >> 56) & 0x00000000000000ffUL ); + ((uint64_t *)tiff)[i] = ((((uint64_t *)tiff)[i] << 56) & 0xff00000000000000UL) | + ((((uint64_t *)tiff)[i] << 40) & 0x00ff000000000000UL) | + ((((uint64_t *)tiff)[i] << 24) & 0x0000ff0000000000UL) | + ((((uint64_t *)tiff)[i] << 8) & 0x000000ff00000000UL) | + ((((uint64_t *)tiff)[i] >> 8) & 0x00000000ff000000UL) | + ((((uint64_t *)tiff)[i] >> 24) & 0x0000000000ff0000UL) | + ((((uint64_t *)tiff)[i] >> 40) & 0x000000000000ff00UL) | + ((((uint64_t *)tiff)[i] >> 56) & 0x00000000000000ffUL); break; } - } } // Find a way to do this in-place without making a copy - if(flipXY){ - uint64_t size = x*y*z*(bits/8); - void* tiffC = malloc(size); - memcpy(tiffC,tiff,size); - #pragma omp parallel for - for(uint64_t k = 0; k < z; k++){ - for(uint64_t j = 0; j < x; j++){ - for(uint64_t i = 0; i < y; i++){ - switch(bits){ + if (flipXY) { + uint64_t size = x * y * z * (bits / 8); + void * tiffC = malloc(size); + memcpy(tiffC, tiff, size); +#pragma omp parallel for + for (uint64_t k = 0; k < z; k++) { + for (uint64_t j = 0; j < x; j++) { + for (uint64_t i = 0; i < y; i++) { + switch (bits) { case 8: - ((uint8_t*)tiff)[i+(j*y)+(k*x*y)] = ((uint8_t*)tiffC)[j+(i*x)+(k*x*y)]; + ((uint8_t *)tiff)[i + (j * y) + (k * x * y)] = + ((uint8_t *)tiffC)[j + (i * x) + (k * x * y)]; break; case 16: - ((uint16_t*)tiff)[i+(j*y)+(k*x*y)] = ((uint16_t*)tiffC)[j+(i*x)+(k*x*y)]; + ((uint16_t *)tiff)[i + (j * y) + (k * x * y)] = + ((uint16_t *)tiffC)[j + (i * x) + (k * x * y)]; break; case 32: - ((float*)tiff)[i+(j*y)+(k*x*y)] = ((float*)tiffC)[j+(i*x)+(k*x*y)]; + ((float *)tiff)[i + (j * y) + (k * x * y)] = + ((float *)tiffC)[j + (i * x) + (k * x * y)]; break; case 64: - ((double*)tiff)[i+(j*y)+(k*x*y)] = ((double*)tiffC)[j+(i*x)+(k*x*y)]; + ((double *)tiff)[i + (j * y) + (k * x * y)] = + ((double *)tiffC)[j + (i * x) + (k * x * y)]; break; } } @@ -556,207 +599,390 @@ void readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char* file } } -uint8_t isImageJIm(const char* fileName){ - TIFF* tif = TIFFOpen(fileName, "r"); - if(!tif) return 0; - char* tiffDesc = NULL; - if(TIFFGetField(tif, TIFFTAG_IMAGEDESCRIPTION, &tiffDesc)){ - if(strstr(tiffDesc, "ImageJ")){ +uint8_t +isImageJIm(TIFF *tif) +{ + if (!tif) + return 0; + char *tiffDesc = NULL; + if (TIFFGetField(tif, TIFFTAG_IMAGEDESCRIPTION, &tiffDesc)) { + if (strstr(tiffDesc, "ImageJ")) { return 1; } } return 0; } -uint64_t imageJImGetZ(const char* fileName){ - TIFF* tif = TIFFOpen(fileName, "r"); - if(!tif) return 0; - char* tiffDesc = NULL; - if(TIFFGetField(tif, TIFFTAG_IMAGEDESCRIPTION, &tiffDesc)){ - if(strstr(tiffDesc, "ImageJ")){ - char* nZ = strstr(tiffDesc,"images="); - if(nZ){ 
- nZ+=7; - char* temp; - return strtol(nZ,&temp,10); +uint64_t +imageJImGetZ(TIFF *tif) +{ + if (!tif) + return 0; + char *tiffDesc = NULL; + if (TIFFGetField(tif, TIFFTAG_IMAGEDESCRIPTION, &tiffDesc)) { + if (strstr(tiffDesc, "ImageJ")) { + char *nZ = strstr(tiffDesc, "images="); + if (nZ) { + nZ += 7; + char *temp; + return strtol(nZ, &temp, 10); } } } return 0; } -void mexFunction(int nlhs, mxArray *plhs[], - int nrhs, const mxArray *prhs[]) +void +get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, uint64_t *y, uint64_t *z, + uint64_t *bits, uint64_t *startSlice, uint64_t *stripSize, uint64_t *is_imageJ, + uint64_t *imageJ_Z) { - // Check if the fileName is a char array or matlab style - char* fileName = NULL; - if(!mxIsClass(prhs[0], "string")){ - if(!mxIsChar(prhs[0])) mexErrMsgIdAndTxt("tiff:inputError","The first argument must be a string"); - fileName = mxArrayToString(prhs[0]); - } - else{ - mxArray* mString[1]; - mxArray* mCharA[1]; - - // Convert string to char array - mString[0] = mxDuplicateArray(prhs[0]); - mexCallMATLAB(1, mCharA, 1, mString, "char"); - fileName = mxArrayToString(mCharA[0]); - } - - // Handle the tilde character in filenames on Linux/Mac - #ifndef _WIN32 - if(strchr(fileName,'~')) fileName = expandTilde(fileName); - #endif - - uint8_t flipXY = 1; - //uint8_t flipXY = 0; - - - //if(nrhs > 2){ - // flipXY = (uint8_t)*(mxGetPr(prhs[2])); - //} - - TIFFSetWarningHandler(DummyHandler); - TIFF* tif = TIFFOpen(fileName, "r"); - if(!tif) mexErrMsgIdAndTxt("tiff:inputError","File \"%s\" cannot be opened",fileName); + TIFF *tif = TIFFOpen(fileName, "r"); + if (!tif) + printf("tiff:inputError", "File \"%s\" cannot be opened", fileName); - uint64_t x = 1,y = 1,z = 1,bits = 1, startSlice = 0; - TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &x); - TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &y); + TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, x); + TIFFGetField(tif, TIFFTAG_IMAGELENGTH, y); - if(nrhs == 1){ + if (strip_range == NULL) { uint16_t s = 0, m = 0, t = 1; - while(TIFFSetDirectory(tif,t)){ + while (TIFFSetDirectory(tif, t)) { s = t; t *= 8; - if(s > t){ + if (s > t) { t = 65535; printf("Number of slices > 32768\n"); break; } } - while(s != t){ - m = (s+t+1)/2; - if(TIFFSetDirectory(tif,m)){ + while (s != t) { + m = (s + t + 1) / 2; + if (TIFFSetDirectory(tif, m)) { s = m; } - else{ - if(m > 0) t = m-1; - else t = m; + else { + if (m > 0) + t = m - 1; + else + t = m; } } - z = s+1; + z = s + 1; } - else{ - if(mxGetN(prhs[1]) != 2){ - mexErrMsgIdAndTxt("tiff:inputError","Input range is not 2"); - } - else{ - startSlice = (uint64_t)*(mxGetPr(prhs[1]))-1; - z = (uint64_t)*((mxGetPr(prhs[1])+1))-startSlice; - if (!TIFFSetDirectory(tif,startSlice+z-1) || !TIFFSetDirectory(tif,startSlice)){ - mexErrMsgIdAndTxt("tiff:rangeOutOfBound","Range is out of bounds"); + else { + if (strip_range->length != 2) { + printf("tiff:inputError", "Input range is not 2"); + } + else { + *startSlice = (uint64_t) * (strip_range->range) - 1; + *z = (uint64_t) * (strip_range->range + 1)) - startSlice; + if (!TIFFSetDirectory(tif, startSlice[0] + z[0] - 1) || !TIFFSetDirectory(tif, startSlice[0])) { + printf("tiff:rangeOutOfBound", "Range is out of bounds"); } } } - TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits); - uint64_t stripSize = 1; - TIFFGetField(tif, TIFFTAG_ROWSPERSTRIP, &stripSize); + *is_imageJ = isImageJIm(tif); + *imageJ_Z = imageJImGetZ(tif); + if (*is_imageJ) { + *is_imageJ = 1; + *imageJ_Z = imageJImGetZ(fileName); + if (*imageJ_Z) + *z = *imageJ_Z; + } + + 
TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, bits); + TIFFGetField(tif, TIFFTAG_ROWSPERSTRIP, stripSize); TIFFClose(tif); +} + +uint8_t *** +createU8Array(int ndim, size_t *dim) +{ + size_t i, j; + uint8_t ***array = (uint8_t ***)malloc(dim[0] * sizeof(uint8_t **)); + for (i = 0; i < dim[0]; i++) { + array[i] = (uint8_t **)malloc(dim[1] * sizeof(uint8_t *)); + for (j = 0; j < dim[1]; j++) { + array[i][j] = (uint8_t *)calloc(dim[2], sizeof(uint8_t)); + } + } + return array; +} - uint8_t imageJIm = 0; - if(isImageJIm(fileName)){ - imageJIm = 1; - uint64_t tempZ = imageJImGetZ(fileName); - if(tempZ) z = tempZ; +uint16_t *** +createU16Array(int ndim, size_t *dim) +{ + size_t i, j; + uint16_t ***array = (uint16_t ***)malloc(dim[0] * sizeof(uint16_t **)); + for (i = 0; i < dim[0]; i++) { + array[i] = (uint16_t **)malloc(dim[1] * sizeof(uint16_t *)); + for (j = 0; j < dim[1]; j++) { + array[i][j] = (uint16_t *)calloc(dim[2], sizeof(uint16_t)); + } } + return array; +} - uint64_t dim[3]; - dim[0] = y; - dim[1] = x; - dim[2] = z; +float *** +createFloatArray(int ndim, size_t *dim) +{ + size_t i, j; + float ***array = (float ***)malloc(dim[0] * sizeof(float **)); + for (i = 0; i < dim[0]; i++) { + array[i] = (float **)malloc(dim[1] * sizeof(float *)); + for (j = 0; j < dim[1]; j++) { + array[i][j] = (float *)calloc(dim[2], sizeof(float)); + } + } + return array; +} +double *** +createDoubleArray(int ndim, size_t *dim) +{ + size_t i, j; + double ***array = (double ***)malloc(dim[0] * sizeof(double **)); + for (i = 0; i < dim[0]; i++) { + array[i] = (double **)malloc(dim[1] * sizeof(double *)); + for (j = 0; j < dim[1]; j++) { + array[i][j] = (double *)calloc(dim[2], sizeof(double)); + } + } + return array; +} +void +_get_tiff_array(int bits, int ndim, size_t *dim) +{ + void *tiff = NULL; + if (bits == 8) { + tiff = (void *)createU8Array(ndim, dims); + } + else if (bits == 16) { + tiff = (void *)createU16Array(ndim, dims); + } + else if (bits == 32) { + tiff = (void *)createFloatArray(ndim, dims); + } + else if (bits == 64) { + tiff = (void *)createDoubleArray(ndim, dims); + } + return tiff; +} +void +_TIFF_load(char *fileName, uint64_t x, uint64_t y, uint64_t z, uint64_t bits, uint64_t startSlice, + uint64_t stripSize, uint8_t flipXY, int ndim, sizt_t *dims, void **tiff_ptr) +{ + if (tiff == NULL) { + printf("tiff:dataTypeError, Data type not suppported\n"); + } + *tiff_ptr = _get_tiff_array(bits, ndim, dims); // Case for ImageJ - if(imageJIm){ - if(bits == 8){ - plhs[0] = mxCreateNumericArray(3,dim,mxUINT8_CLASS, mxREAL); - uint8_t* tiff = (uint8_t*)mxGetPr(plhs[0]); - readTiffParallelImageJ(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else if(bits == 16){ - plhs[0] = mxCreateNumericArray(3,dim,mxUINT16_CLASS, mxREAL); - uint16_t* tiff = (uint16_t*)mxGetPr(plhs[0]); - readTiffParallelImageJ(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else if(bits == 32){ - plhs[0] = mxCreateNumericArray(3,dim,mxSINGLE_CLASS, mxREAL); - float* tiff = (float*)mxGetPr(plhs[0]); - readTiffParallelImageJ(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else if(bits == 64){ - plhs[0] = mxCreateNumericArray(3,dim,mxDOUBLE_CLASS, mxREAL); - double* tiff = (double*)mxGetPr(plhs[0]); - readTiffParallelImageJ(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else{ - mexErrMsgIdAndTxt("tiff:dataTypeError","Data type not suppported"); - } + if (imageJIm) { + readTiffParallelImageJ(x, y, z, fileName, *tiff_ptr, bits, startSlice, 
stripSize, flipXY); } // Case for 2D - else if(z <= 1){ - if(bits == 8){ - plhs[0] = mxCreateNumericArray(3,dim,mxUINT8_CLASS, mxREAL); - uint8_t* tiff = (uint8_t*)mxGetPr(plhs[0]); - readTiffParallel2D(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else if(bits == 16){ - plhs[0] = mxCreateNumericArray(3,dim,mxUINT16_CLASS, mxREAL); - uint16_t* tiff = (uint16_t*)mxGetPr(plhs[0]); - readTiffParallel2D(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else if(bits == 32){ - plhs[0] = mxCreateNumericArray(3,dim,mxSINGLE_CLASS, mxREAL); - float* tiff = (float*)mxGetPr(plhs[0]); - readTiffParallel2D(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else if(bits == 64){ - plhs[0] = mxCreateNumericArray(3,dim,mxDOUBLE_CLASS, mxREAL); - double* tiff = (double*)mxGetPr(plhs[0]); - readTiffParallel2D(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else{ - mexErrMsgIdAndTxt("tiff:dataTypeError","Data type not suppported"); - } + else if (z <= 1) { + readTiffParallel2D(x, y, z, fileName, *tiff_ptr, bits, startSlice, stripSize, flipXY); } // Case for 3D - else{ - if(bits == 8){ - plhs[0] = mxCreateNumericArray(3,dim,mxUINT8_CLASS, mxREAL); - uint8_t* tiff = (uint8_t*)mxGetPr(plhs[0]); - readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else if(bits == 16){ - plhs[0] = mxCreateNumericArray(3,dim,mxUINT16_CLASS, mxREAL); - uint16_t* tiff = (uint16_t*)mxGetPr(plhs[0]); - readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else if(bits == 32){ - plhs[0] = mxCreateNumericArray(3,dim,mxSINGLE_CLASS, mxREAL); - float* tiff = (float*)mxGetPr(plhs[0]); - readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else if(bits == 64){ - plhs[0] = mxCreateNumericArray(3,dim,mxDOUBLE_CLASS, mxREAL); - double* tiff = (double*)mxGetPr(plhs[0]); - readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize, flipXY); - } - else{ - mexErrMsgIdAndTxt("tiff:dataTypeError","Data type not suppported"); - } + else { + readTiffParallel(x, y, z, fileName, *tiff_ptr, bits, startSlice, stripSize, flipXY); } -} \ No newline at end of file +} + +void +parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tiff_range_t *strip_range) +{ + uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0, stripeSize = 0, is_imageJ = 0, imageJ_Z = 0; + + get_tif_info(file_name, strip_range, &x, &y, &z, &bits, &startSlice, &stripeSize, &is_imageJ, &imageJ_Z); + + int ndim = 3; + uint64_t dims[ndim]; + dims[0] = flipXY ? y : x; + dims[1] = flipXY ? 
x : y; + dims[2] = z; + + _TIFF_load(fileName, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, tiff_ptr); +} + +// void +// mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +// { +// // Check if the fileName is a char array or matlab style +// char *fileName = NULL; +// if (!mxIsClass(prhs[0], "string")) { +// if (!mxIsChar(prhs[0])) +// printf("tiff:inputError", "The first argument must be a string"); +// fileName = mxArrayToString(prhs[0]); +// } +// else { +// mxArray *mString[1]; +// mxArray *mCharA[1]; + +// // Convert string to char array +// mString[0] = mxDuplicateArray(prhs[0]); +// mexCallMATLAB(1, mCharA, 1, mString, "char"); +// fileName = mxArrayToString(mCharA[0]); +// } + +// // Handle the tilde character in filenames on Linux/Mac +// // #ifndef _WIN32 +// // if(strchr(fileName,'~')) fileName = expandTilde(fileName); +// // #endif + +// uint8_t flipXY = 1; +// // uint8_t flipXY = 0; + +// // if(nrhs > 2){ +// // flipXY = (uint8_t)*(mxGetPr(prhs[2])); +// //} + +// TIFFSetWarningHandler(DummyHandler); +// TIFF *tif = TIFFOpen(fileName, "r"); +// if (!tif) +// printf("tiff:inputError", "File \"%s\" cannot be opened", fileName); + +// uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0; +// TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &x); +// TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &y); + +// if (nrhs == 1) { +// uint16_t s = 0, m = 0, t = 1; +// while (TIFFSetDirectory(tif, t)) { +// s = t; +// t *= 8; +// if (s > t) { +// t = 65535; +// printf("Number of slices > 32768\n"); +// break; +// } +// } +// while (s != t) { +// m = (s + t + 1) / 2; +// if (TIFFSetDirectory(tif, m)) { +// s = m; +// } +// else { +// if (m > 0) +// t = m - 1; +// else +// t = m; +// } +// } +// z = s + 1; +// } +// else { +// if (mxGetN(prhs[1]) != 2) { +// printf("tiff:inputError", "Input range is not 2"); +// } +// else { +// startSlice = (uint64_t) * (mxGetPr(prhs[1])) - 1; +// z = (uint64_t) * ((mxGetPr(prhs[1]) + 1)) - startSlice; +// if (!TIFFSetDirectory(tif, startSlice + z - 1) || !TIFFSetDirectory(tif, startSlice)) { +// printf("tiff:rangeOutOfBound", "Range is out of bounds"); +// } +// } +// } + +// TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits); +// uint64_t stripSize = 1; +// TIFFGetField(tif, TIFFTAG_ROWSPERSTRIP, &stripSize); +// TIFFClose(tif); + +// uint8_t imageJIm = 0; +// if (isImageJIm(fileName)) { +// imageJIm = 1; +// uint64_t tempZ = imageJImGetZ(fileName); +// if (tempZ) +// z = tempZ; +// } + +// uint64_t dim[3]; +// dim[0] = y; +// dim[1] = x; +// dim[2] = z; + +// // Case for ImageJ +// if (imageJIm) { +// if (bits == 8) { +// plhs[0] = mxCreateNumericArray(3, dim, mxUINT8_CLASS, mxREAL); +// uint8_t *tiff = (uint8_t *)mxGetPr(plhs[0]); +// readTiffParallelImageJ(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else if (bits == 16) { +// plhs[0] = mxCreateNumericArray(3, dim, mxUINT16_CLASS, mxREAL); +// uint16_t *tiff = (uint16_t *)mxGetPr(plhs[0]); +// readTiffParallelImageJ(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else if (bits == 32) { +// plhs[0] = mxCreateNumericArray(3, dim, mxSINGLE_CLASS, mxREAL); +// float *tiff = (float *)mxGetPr(plhs[0]); +// readTiffParallelImageJ(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else if (bits == 64) { +// plhs[0] = mxCreateNumericArray(3, dim, mxDOUBLE_CLASS, mxREAL); +// double *tiff = (double *)mxGetPr(plhs[0]); +// readTiffParallelImageJ(x, y, z, fileName, (void *)tiff, bits, startSlice, 
stripSize, flipXY); +// } +// else { +// printf("tiff:dataTypeError", "Data type not suppported"); +// } +// } +// // Case for 2D +// else if (z <= 1) { +// if (bits == 8) { +// plhs[0] = mxCreateNumericArray(3, dim, mxUINT8_CLASS, mxREAL); +// uint8_t *tiff = (uint8_t *)mxGetPr(plhs[0]); +// readTiffParallel2D(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else if (bits == 16) { +// plhs[0] = mxCreateNumericArray(3, dim, mxUINT16_CLASS, mxREAL); +// uint16_t *tiff = (uint16_t *)mxGetPr(plhs[0]); +// readTiffParallel2D(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else if (bits == 32) { +// plhs[0] = mxCreateNumericArray(3, dim, mxSINGLE_CLASS, mxREAL); +// float *tiff = (float *)mxGetPr(plhs[0]); +// readTiffParallel2D(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else if (bits == 64) { +// plhs[0] = mxCreateNumericArray(3, dim, mxDOUBLE_CLASS, mxREAL); +// double *tiff = (double *)mxGetPr(plhs[0]); +// readTiffParallel2D(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else { +// printf("tiff:dataTypeError", "Data type not suppported"); +// } +// } +// // Case for 3D +// else { +// if (bits == 8) { +// plhs[0] = mxCreateNumericArray(3, dim, mxUINT8_CLASS, mxREAL); +// uint8_t *tiff = (uint8_t *)mxGetPr(plhs[0]); +// readTiffParallel(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else if (bits == 16) { +// plhs[0] = mxCreateNumericArray(3, dim, mxUINT16_CLASS, mxREAL); +// uint16_t *tiff = (uint16_t *)mxGetPr(plhs[0]); +// readTiffParallel(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else if (bits == 32) { +// plhs[0] = mxCreateNumericArray(3, dim, mxSINGLE_CLASS, mxREAL); +// float *tiff = (float *)mxGetPr(plhs[0]); +// readTiffParallel(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else if (bits == 64) { +// plhs[0] = mxCreateNumericArray(3, dim, mxDOUBLE_CLASS, mxREAL); +// double *tiff = (double *)mxGetPr(plhs[0]); +// readTiffParallel(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); +// } +// else { +// printf("tiff:dataTypeError", "Data type not suppported"); +// } +// } +// } \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.h b/tools/llsm/parallelReadTiff.h index e69de29bb..5745216e0 100644 --- a/tools/llsm/parallelReadTiff.h +++ b/tools/llsm/parallelReadTiff.h @@ -0,0 +1,22 @@ +#ifndef PARALLELREADTIFF_H +#define PARALLELREADTIFF_H + +#include +#include +#include +#include +#include +#include +#include + +#include "tiffio.h" +#include "omp.h" +// #include "commons/generic/pdc_generic.h" + +typedef struct { + uint64_t *range, size_t length +} parallel_tiff_range_t; + +void parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tiff_range_t *strip_range); + +#endif // PARALLELREADTIFF_H \ No newline at end of file diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 34912b3f0..f5099af5e 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -1,6 +1,7 @@ #include #include #include +#include #define ENABLE_MPI 1 @@ -12,3 +13,49 @@ #include "pdc_client_server_common.h" #include "pdc_client_connect.h" +#include "llsm/parallelReadTiff.h" + + +void +parse_console_args(int argc, char *argv[], char **file_name) +{ + int c; + + while ((c = getopt(argc, argv, "f:")) != -1) { + switch (c) { + case 'f': + *filename = optarg; + break; + case '?': + if (optopt == 'f') { + 
fprintf(stderr, "Option -%c requires an argument.\n", optopt); + } + else { + fprintf(stderr, "Unknown option: -%c\n", optopt); + } + return 1; + default: + abort(); + } + } +} + + + +int +main(int argc, char *argv[]) +{ + + char * file_name = NULL; + void *tiff = NULL; + + parse_console_args(argc, argv, &file_name); + + printf("Filename: %s\n", file_name ? file_name : "(none)"); + + parallel_TIFF_load(file_name, &tiff, 1, NULL); + + + + return 0; +} \ No newline at end of file From c5314848cc7896c2d3e343d6d54acc1a5483c52f Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 23:41:49 -0500 Subject: [PATCH 083/806] llsm tiff import test --- tools/llsm_importer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index f5099af5e..81bd85b80 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -2,6 +2,7 @@ #include #include #include +#include #define ENABLE_MPI 1 @@ -55,7 +56,6 @@ main(int argc, char *argv[]) parallel_TIFF_load(file_name, &tiff, 1, NULL); - return 0; } \ No newline at end of file From b19f722e48d5d1b9a6c93554ea2d8180bd91d53c Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 23:43:03 -0500 Subject: [PATCH 084/806] llsm tiff import test --- tools/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 9e4062959..163df9dde 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -82,5 +82,6 @@ foreach(program ${PROGRAMS}) add_executable(${program} ${program}.c) target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) target_link_libraries(${program} cjson) + target_link_libraries(${program} llsm_tiff) endforeach(program) From 0cd72e8678f3d58706426a57c2a308d12556b7d1 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 23:45:07 -0500 Subject: [PATCH 085/806] llsm tiff import test --- tools/llsm/parallelReadTiff.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/llsm/parallelReadTiff.h b/tools/llsm/parallelReadTiff.h index 5745216e0..02e0379a6 100644 --- a/tools/llsm/parallelReadTiff.h +++ b/tools/llsm/parallelReadTiff.h @@ -2,6 +2,7 @@ #define PARALLELREADTIFF_H #include +#include #include #include #include @@ -14,7 +15,8 @@ // #include "commons/generic/pdc_generic.h" typedef struct { - uint64_t *range, size_t length + uint64_t *range; + size_t length; } parallel_tiff_range_t; void parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tiff_range_t *strip_range); From 9670bc579b366ad3c988ee24b2177bc4efd0800e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 23:49:12 -0500 Subject: [PATCH 086/806] update code --- tools/llsm/parallelReadTiff.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index ef417ee60..43ad2f61f 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -668,7 +668,7 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u t = m; } } - z = s + 1; + *z = s + 1; } else { if (strip_range->length != 2) { @@ -676,7 +676,7 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u } else { *startSlice = (uint64_t) * (strip_range->range) - 1; - *z = (uint64_t) * (strip_range->range + 1)) - startSlice; + *z = (uint64_t) * (strip_range->range + 1) - startSlice; if (!TIFFSetDirectory(tif, startSlice[0] + z[0] - 1) || !TIFFSetDirectory(tif, startSlice[0])) { 
printf("tiff:rangeOutOfBound", "Range is out of bounds"); } @@ -687,7 +687,7 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u *imageJ_Z = imageJImGetZ(tif); if (*is_imageJ) { *is_imageJ = 1; - *imageJ_Z = imageJImGetZ(fileName); + *imageJ_Z = imageJImGetZ(tif); if (*imageJ_Z) *z = *imageJ_Z; } @@ -753,8 +753,8 @@ createDoubleArray(int ndim, size_t *dim) return array; } -void -_get_tiff_array(int bits, int ndim, size_t *dim) +void * +_get_tiff_array(int bits, int ndim, size_t *dims) { void *tiff = NULL; if (bits == 8) { @@ -774,7 +774,7 @@ _get_tiff_array(int bits, int ndim, size_t *dim) void _TIFF_load(char *fileName, uint64_t x, uint64_t y, uint64_t z, uint64_t bits, uint64_t startSlice, - uint64_t stripSize, uint8_t flipXY, int ndim, sizt_t *dims, void **tiff_ptr) + uint64_t stripSize, uint8_t flipXY, int ndim, size_t *dims, void **tiff_ptr) { if (tiff == NULL) { printf("tiff:dataTypeError, Data type not suppported\n"); @@ -799,7 +799,7 @@ parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tif { uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0, stripeSize = 0, is_imageJ = 0, imageJ_Z = 0; - get_tif_info(file_name, strip_range, &x, &y, &z, &bits, &startSlice, &stripeSize, &is_imageJ, &imageJ_Z); + get_tiff_info(file_name, strip_range, &x, &y, &z, &bits, &startSlice, &stripeSize, &is_imageJ, &imageJ_Z); int ndim = 3; uint64_t dims[ndim]; @@ -807,7 +807,7 @@ parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tif dims[1] = flipXY ? x : y; dims[2] = z; - _TIFF_load(fileName, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, tiff_ptr); + _TIFF_load(file_name, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, tiff_ptr); } // void From b2afd956c1035a20de4452db96cb5851fefeab55 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 23:52:19 -0500 Subject: [PATCH 087/806] update code --- tools/llsm/parallelReadTiff.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 43ad2f61f..2ab7f7333 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -773,15 +773,15 @@ _get_tiff_array(int bits, int ndim, size_t *dims) } void -_TIFF_load(char *fileName, uint64_t x, uint64_t y, uint64_t z, uint64_t bits, uint64_t startSlice, - uint64_t stripSize, uint8_t flipXY, int ndim, size_t *dims, void **tiff_ptr) +_TIFF_load(char *fileName, uint8_t isImageJIm, uint64_t x, uint64_t y, uint64_t z, uint64_t bits, + uint64_t startSlice, uint64_t stripSize, uint8_t flipXY, int ndim, size_t *dims, void **tiff_ptr) { - if (tiff == NULL) { + if (tiff_ptr == NULL) { printf("tiff:dataTypeError, Data type not suppported\n"); } *tiff_ptr = _get_tiff_array(bits, ndim, dims); // Case for ImageJ - if (imageJIm) { + if (isImageJIm) { readTiffParallelImageJ(x, y, z, fileName, *tiff_ptr, bits, startSlice, stripSize, flipXY); } // Case for 2D @@ -799,7 +799,7 @@ parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tif { uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0, stripeSize = 0, is_imageJ = 0, imageJ_Z = 0; - get_tiff_info(file_name, strip_range, &x, &y, &z, &bits, &startSlice, &stripeSize, &is_imageJ, &imageJ_Z); + get_tiff_info(fileName, strip_range, &x, &y, &z, &bits, &startSlice, &stripeSize, &is_imageJ, &imageJ_Z); int ndim = 3; uint64_t dims[ndim]; @@ -807,7 +807,7 @@ parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tif dims[1] = 
flipXY ? x : y; dims[2] = z; - _TIFF_load(file_name, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, tiff_ptr); + _TIFF_load(fileName, is_imageJ, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, tiff_ptr); } // void From 448383b2efdd513d2c53a0594d530efce9586c29 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 23:54:09 -0500 Subject: [PATCH 088/806] update code --- tools/llsm/parallelReadTiff.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 2ab7f7333..9cc97b17c 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -676,7 +676,7 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u } else { *startSlice = (uint64_t) * (strip_range->range) - 1; - *z = (uint64_t) * (strip_range->range + 1) - startSlice; + *z = (uint64_t) * (strip_range->range[1]) - startSlice; if (!TIFFSetDirectory(tif, startSlice[0] + z[0] - 1) || !TIFFSetDirectory(tif, startSlice[0])) { printf("tiff:rangeOutOfBound", "Range is out of bounds"); } @@ -882,7 +882,7 @@ parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tif // } // else { // startSlice = (uint64_t) * (mxGetPr(prhs[1])) - 1; -// z = (uint64_t) * ((mxGetPr(prhs[1]) + 1)) - startSlice; + // z = (uint64_t) * ((mxGetPr(prhs[1]) + 1)) - startSlice; // if (!TIFFSetDirectory(tif, startSlice + z - 1) || !TIFFSetDirectory(tif, startSlice)) { // printf("tiff:rangeOutOfBound", "Range is out of bounds"); // } From b79f950e71eb12d825fafb5240d4cc9a6ef437ac Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 23:55:25 -0500 Subject: [PATCH 089/806] update code --- tools/llsm/parallelReadTiff.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 9cc97b17c..35e53f331 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -675,8 +675,8 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u printf("tiff:inputError", "Input range is not 2"); } else { - *startSlice = (uint64_t) * (strip_range->range) - 1; - *z = (uint64_t) * (strip_range->range[1]) - startSlice; + *startSlice = (uint64_t) (*(strip_range->range)) - 1; + *z = (uint64_t) (*(strip_range->range + 1)) - startSlice; if (!TIFFSetDirectory(tif, startSlice[0] + z[0] - 1) || !TIFFSetDirectory(tif, startSlice[0])) { printf("tiff:rangeOutOfBound", "Range is out of bounds"); } From c2d5a3aea84b287104bc69400c2ae8450a05fe1d Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 23:57:10 -0500 Subject: [PATCH 090/806] update code --- tools/llsm/parallelReadTiff.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 35e53f331..1f0cedbbe 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -675,8 +675,8 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u printf("tiff:inputError", "Input range is not 2"); } else { - *startSlice = (uint64_t) (*(strip_range->range)) - 1; - *z = (uint64_t) (*(strip_range->range + 1)) - startSlice; + *startSlice = (uint64_t) ((strip_range->range[0])) - 1; + *z = (uint64_t) ((strip_range->range[1])) - startSlice; if (!TIFFSetDirectory(tif, startSlice[0] + z[0] - 1) || !TIFFSetDirectory(tif, startSlice[0])) { printf("tiff:rangeOutOfBound", "Range is out of bounds"); } From 
e18ebcf884cfb7bb43ec6d89f58f80724a16e5bc Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 13 Apr 2023 23:59:03 -0500 Subject: [PATCH 091/806] update code --- tools/llsm/parallelReadTiff.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 1f0cedbbe..a242eb3bc 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -675,8 +675,8 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u printf("tiff:inputError", "Input range is not 2"); } else { - *startSlice = (uint64_t) ((strip_range->range[0])) - 1; - *z = (uint64_t) ((strip_range->range[1])) - startSlice; + *startSlice = (uint64_t)(*(strip_range->range)) - 1; + *z = (uint64_t)(*(strip_range->range + 1)) - startSlice[0]; if (!TIFFSetDirectory(tif, startSlice[0] + z[0] - 1) || !TIFFSetDirectory(tif, startSlice[0])) { printf("tiff:rangeOutOfBound", "Range is out of bounds"); } @@ -882,7 +882,7 @@ parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tif // } // else { // startSlice = (uint64_t) * (mxGetPr(prhs[1])) - 1; - // z = (uint64_t) * ((mxGetPr(prhs[1]) + 1)) - startSlice; +// z = (uint64_t) * ((mxGetPr(prhs[1]) + 1)) - startSlice; // if (!TIFFSetDirectory(tif, startSlice + z - 1) || !TIFFSetDirectory(tif, startSlice)) { // printf("tiff:rangeOutOfBound", "Range is out of bounds"); // } From cfb4fde69a97186fc31db98a0b0d4ba07921a24f Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 14 Apr 2023 00:15:54 -0500 Subject: [PATCH 092/806] update code --- tools/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 163df9dde..84391ad4e 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -83,5 +83,6 @@ foreach(program ${PROGRAMS}) target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) target_link_libraries(${program} cjson) target_link_libraries(${program} llsm_tiff) + target_include_directories(${program} PUBLIC ${PDC_EXT_INCLUDE_DEPENDENCIES}) endforeach(program) From 1afb06ae6121818223984085b0f1d5ee6e592e60 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 14 Apr 2023 09:21:46 -0500 Subject: [PATCH 093/806] update code --- tools/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 84391ad4e..bce78df19 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -83,6 +83,6 @@ foreach(program ${PROGRAMS}) target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) target_link_libraries(${program} cjson) target_link_libraries(${program} llsm_tiff) - target_include_directories(${program} PUBLIC ${PDC_EXT_INCLUDE_DEPENDENCIES}) + target_include_directories(${program} PUBLIC ${PDC_INCLUDE_DIR}) endforeach(program) From 912b14c085b43a2f02fcadccee116ca631d9031c Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 14 Apr 2023 09:30:44 -0500 Subject: [PATCH 094/806] update code --- tools/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index bce78df19..fd4736045 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -81,6 +81,7 @@ set(PROGRAMS foreach(program ${PROGRAMS}) add_executable(${program} ${program}.c) target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) + target_link_libraries(${program} pdc) target_link_libraries(${program} cjson) target_link_libraries(${program} llsm_tiff) target_include_directories(${program} PUBLIC 
${PDC_INCLUDE_DIR}) From 3ae5c1412e70e86095e058b767cf862efabcc0bb Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 14 Apr 2023 09:31:50 -0500 Subject: [PATCH 095/806] update code --- tools/llsm_importer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 81bd85b80..71f61e4d9 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -11,8 +11,8 @@ #endif #include "pdc.h" -#include "pdc_client_server_common.h" -#include "pdc_client_connect.h" +// #include "pdc_client_server_common.h" +// #include "pdc_client_connect.h" #include "llsm/parallelReadTiff.h" From 16c5843278b132d596ffda47f9b78373db32d52e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 14 Apr 2023 09:35:08 -0500 Subject: [PATCH 096/806] update code --- tools/llsm_importer.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 71f61e4d9..e6c156b6b 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -16,8 +16,7 @@ #include "llsm/parallelReadTiff.h" - -void +int parse_console_args(int argc, char *argv[], char **file_name) { int c; @@ -25,7 +24,7 @@ parse_console_args(int argc, char *argv[], char **file_name) while ((c = getopt(argc, argv, "f:")) != -1) { switch (c) { case 'f': - *filename = optarg; + *file_name = optarg; break; case '?': if (optopt == 'f') { @@ -41,21 +40,22 @@ parse_console_args(int argc, char *argv[], char **file_name) } } - - int main(int argc, char *argv[]) { - char * file_name = NULL; - void *tiff = NULL; + char *file_name = NULL; + void *tiff = NULL; - parse_console_args(argc, argv, &file_name); + int parse_code = parse_console_args(argc, argv, &file_name); + + if (parse_code) { + return parse_code; + } printf("Filename: %s\n", file_name ? file_name : "(none)"); parallel_TIFF_load(file_name, &tiff, 1, NULL); - return 0; } \ No newline at end of file From b4ff1c63f81b06346ce1d29148ef376fef7f21fc Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 09:28:17 -0500 Subject: [PATCH 097/806] update cmake and llsm_importer --- tools/CMakeLists.txt | 29 +++++++++++++++++++++++------ tools/llsm/CMakeLists.txt | 0 tools/llsm_importer.c | 12 +++++++++--- tools/pdc_export.c | 2 +- tools/pdc_import.c | 2 +- 5 files changed, 34 insertions(+), 11 deletions(-) delete mode 100644 tools/llsm/CMakeLists.txt diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index fd4736045..77ad41813 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -64,18 +64,12 @@ endif() add_definitions(-DENABLE_MPI=1) add_library(cjson cjson/cJSON.c) -add_library(llsm_tiff llsm/parallelReadTiff.c) -target_compile_options(llsm_tiff PRIVATE ${OpenMP_C_FLAGS}) -target_include_directories(llsm_tiff PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/llsm) -target_link_libraries(llsm_tiff PUBLIC ${OpenMP_C_LIBRARIES}) -# add_subdirectory(llsm) set(PROGRAMS pdc_ls pdc_import pdc_export - llsm_importer ) foreach(program ${PROGRAMS}) @@ -87,3 +81,26 @@ foreach(program ${PROGRAMS}) target_include_directories(${program} PUBLIC ${PDC_INCLUDE_DIR}) endforeach(program) + +# Find LibTIFF +option(USE_LIB_TIFF "Enable LibTiff." 
ON) +if(USE_LIB_TIFF) + find_package(TIFF REQUIRED) + if(TIFF_FOUND) + # Add the LibTIFF include directory to the include path + include_directories(${TIFF_INCLUDE_DIRS}) + add_library(llsm_tiff llsm/parallelReadTiff.c) + target_compile_options(llsm_tiff PRIVATE ${OpenMP_C_FLAGS}) + target_include_directories(llsm_tiff PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/llsm) + target_link_libraries(llsm_tiff PUBLIC ${OpenMP_C_LIBRARIES}) + target_link_libraries(llsm_tiff ${TIFF_LIBRARIES}) + + add_executable(llsm_importer llsm_importer.c) + target_link_libraries(llsm_importer ${PDC_EXT_LIB_DEPENDENCIES}) + target_link_libraries(llsm_importer pdc) + target_link_libraries(llsm_importer cjson) + target_link_libraries(llsm_importer ${TIFF_LIBRARIES}) + target_link_libraries(llsm_importer llsm_tiff) + else() + message(WARNING "LibTiff not found, ignore building the executables which requires LibTiff support.") + endif() \ No newline at end of file diff --git a/tools/llsm/CMakeLists.txt b/tools/llsm/CMakeLists.txt deleted file mode 100644 index e69de29bb..000000000 diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index e6c156b6b..096cc38ff 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -4,10 +4,10 @@ #include #include -#define ENABLE_MPI 1 +// #define ENABLE_MPI 1 #ifdef ENABLE_MPI -#include "mpi.h" +// #include "mpi.h" #endif #include "pdc.h" @@ -46,7 +46,8 @@ main(int argc, char *argv[]) char *file_name = NULL; void *tiff = NULL; - + int i = 0; + char bytes[10]; int parse_code = parse_console_args(argc, argv, &file_name); if (parse_code) { @@ -57,5 +58,10 @@ main(int argc, char *argv[]) parallel_TIFF_load(file_name, &tiff, 1, NULL); + for (i = 0; i < 10; i++) { + bytes[i] = (char)tiff[i]; + } + printf("first few bytes : %s\n", bytes); + return 0; } \ No newline at end of file diff --git a/tools/pdc_export.c b/tools/pdc_export.c index e46c17f2a..af42e116a 100644 --- a/tools/pdc_export.c +++ b/tools/pdc_export.c @@ -8,7 +8,7 @@ #include #include "hdf5.h" -#define ENABLE_MPI 1 +// #define ENABLE_MPI 1 #ifdef ENABLE_MPI #include "mpi.h" diff --git a/tools/pdc_import.c b/tools/pdc_import.c index 9e12d3a84..9388ac495 100644 --- a/tools/pdc_import.c +++ b/tools/pdc_import.c @@ -2,7 +2,7 @@ #include #include -#define ENABLE_MPI 1 +// #define ENABLE_MPI 1 #ifdef ENABLE_MPI #include "mpi.h" From 84804c8ce98d37b2e6b880dd446a2b157f5c2dad Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 09:28:53 -0500 Subject: [PATCH 098/806] update cmake and llsm_importer --- tools/CMakeLists.txt | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 77ad41813..38858e3fa 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -66,20 +66,20 @@ add_definitions(-DENABLE_MPI=1) add_library(cjson cjson/cJSON.c) -set(PROGRAMS - pdc_ls - pdc_import - pdc_export - ) +# set(PROGRAMS +# pdc_ls +# pdc_import +# pdc_export +# ) -foreach(program ${PROGRAMS}) - add_executable(${program} ${program}.c) - target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) - target_link_libraries(${program} pdc) - target_link_libraries(${program} cjson) - target_link_libraries(${program} llsm_tiff) - target_include_directories(${program} PUBLIC ${PDC_INCLUDE_DIR}) -endforeach(program) +# foreach(program ${PROGRAMS}) +# add_executable(${program} ${program}.c) +# target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) +# target_link_libraries(${program} pdc) +# target_link_libraries(${program} cjson) +# 
target_link_libraries(${program} llsm_tiff) +# target_include_directories(${program} PUBLIC ${PDC_INCLUDE_DIR}) +# endforeach(program) # Find LibTIFF From 95194c97f94ae0ac11dbdcdc1604758c834d7e7b Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 09:32:03 -0500 Subject: [PATCH 099/806] close if in cmake --- tools/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 38858e3fa..ea6a8fdf3 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -103,4 +103,5 @@ if(USE_LIB_TIFF) target_link_libraries(llsm_importer llsm_tiff) else() message(WARNING "LibTiff not found, ignore building the executables which requires LibTiff support.") - endif() \ No newline at end of file + endif() +endif() \ No newline at end of file From 12ac52b6103c3a64a2422d14fa16a09a80ecf4e5 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 09:41:56 -0500 Subject: [PATCH 100/806] cmake fix tiff --- tools/CMakeLists.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index ea6a8fdf3..f4513a6a6 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -89,11 +89,12 @@ if(USE_LIB_TIFF) if(TIFF_FOUND) # Add the LibTIFF include directory to the include path include_directories(${TIFF_INCLUDE_DIRS}) - add_library(llsm_tiff llsm/parallelReadTiff.c) + add_library(llsm_tiff llsm/parallelReadTiff.c llsm/parallelReadTiff.h) target_compile_options(llsm_tiff PRIVATE ${OpenMP_C_FLAGS}) - target_include_directories(llsm_tiff PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/llsm) target_link_libraries(llsm_tiff PUBLIC ${OpenMP_C_LIBRARIES}) - target_link_libraries(llsm_tiff ${TIFF_LIBRARIES}) + target_link_libraries(llsm_tiff PUBLIC ${TIFF_LIBRARIES}) + target_include_directories(llsm_tiff PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/llsm) + add_executable(llsm_importer llsm_importer.c) target_link_libraries(llsm_importer ${PDC_EXT_LIB_DEPENDENCIES}) From a5066b8d7cde6967e39f930ef12ab93e00950209 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 09:49:33 -0500 Subject: [PATCH 101/806] cmake policy to suppress warning --- tools/CMakeLists.txt | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index f4513a6a6..79c327367 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -1,5 +1,24 @@ cmake_minimum_required (VERSION 2.8.12) +# Setup cmake policies. 
+foreach(p + CMP0012 + CMP0013 + CMP0014 + CMP0022 # CMake 2.8.12 + CMP0025 # CMake 3.0 + CMP0053 # CMake 3.1 + CMP0054 # CMake 3.1 + CMP0074 # CMake 3.12 + CMP0075 # CMake 3.12 + CMP0083 # CMake 3.14 + CMP0093 # CMake 3.15 + ) + if(POLICY ${p}) + cmake_policy(SET ${p} NEW) + endif() +endforeach() + project(PDC_VOL C) include_directories( From a66dd0d9f9a3c7e30296d5572926f94483e8a183 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 10:20:26 -0500 Subject: [PATCH 102/806] add pdc include dir --- tools/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 79c327367..6145514ef 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -121,6 +121,7 @@ if(USE_LIB_TIFF) target_link_libraries(llsm_importer cjson) target_link_libraries(llsm_importer ${TIFF_LIBRARIES}) target_link_libraries(llsm_importer llsm_tiff) + target_include_directories(llsm_tiff PUBLIC ${PDC_INCLUDE_DIR}) else() message(WARNING "LibTiff not found, ignore building the executables which requires LibTiff support.") endif() From 372cc4f404cc4b6e0096db5cf167c1f419748d85 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 10:23:58 -0500 Subject: [PATCH 103/806] update code --- tools/llsm_importer.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 096cc38ff..b9dcdfcbe 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -48,6 +48,7 @@ main(int argc, char *argv[]) void *tiff = NULL; int i = 0; char bytes[10]; + char *tiff_str_ptr; int parse_code = parse_console_args(argc, argv, &file_name); if (parse_code) { @@ -58,8 +59,9 @@ main(int argc, char *argv[]) parallel_TIFF_load(file_name, &tiff, 1, NULL); + tiff_str_ptr = (char *)tiff; for (i = 0; i < 10; i++) { - bytes[i] = (char)tiff[i]; + bytes[i] = tiff_str_ptr[i]; } printf("first few bytes : %s\n", bytes); From f36920d3037475fb0899fe96910392dbf9d7c79c Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 10:28:53 -0500 Subject: [PATCH 104/806] update code --- tools/llsm_importer.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index b9dcdfcbe..60e737d22 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -22,6 +22,7 @@ parse_console_args(int argc, char *argv[], char **file_name) int c; while ((c = getopt(argc, argv, "f:")) != -1) { + printf("c : %c \n", c); switch (c) { case 'f': *file_name = optarg; @@ -49,8 +50,9 @@ main(int argc, char *argv[]) int i = 0; char bytes[10]; char *tiff_str_ptr; + printf("Program started!\n"); int parse_code = parse_console_args(argc, argv, &file_name); - + printf("parse_code %d\n", parse_code); if (parse_code) { return parse_code; } From a80d9dfe132de012ff90dfa7f0d634600b16cd44 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 10:34:00 -0500 Subject: [PATCH 105/806] update code --- tools/llsm_importer.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 60e737d22..258a4af76 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -27,16 +27,9 @@ parse_console_args(int argc, char *argv[], char **file_name) case 'f': *file_name = optarg; break; - case '?': - if (optopt == 'f') { - fprintf(stderr, "Option -%c requires an argument.\n", optopt); - } - else { - fprintf(stderr, "Unknown option: -%c\n", optopt); - } - return 1; default: - abort(); + fprintf(stderr, "Usage: %s [-f 
filename]\n", argv[0]); + exit(EXIT_FAILURE); } } } From 0b188f72e6103e942513b7ae0b3f4f53dceffb01 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 10:35:47 -0500 Subject: [PATCH 106/806] update code --- tools/llsm_importer.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 258a4af76..0d79805b7 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -19,19 +19,22 @@ int parse_console_args(int argc, char *argv[], char **file_name) { - int c; - + int c, parse_code = -1; + while ((c = getopt(argc, argv, "f:")) != -1) { printf("c : %c \n", c); switch (c) { case 'f': *file_name = optarg; + parse_code = 1; break; default: fprintf(stderr, "Usage: %s [-f filename]\n", argv[0]); + parse_code = -1; exit(EXIT_FAILURE); } } + return parse_code; } int From b54fc25168b507728eef49fab61f88455dbd476e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 10:36:35 -0500 Subject: [PATCH 107/806] update code --- tools/llsm_importer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 0d79805b7..2a47c561a 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -26,7 +26,7 @@ parse_console_args(int argc, char *argv[], char **file_name) switch (c) { case 'f': *file_name = optarg; - parse_code = 1; + parse_code = 0; break; default: fprintf(stderr, "Usage: %s [-f filename]\n", argv[0]); From f665f7d98613988275badcbfd2201a36165142ad Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 10:56:11 -0500 Subject: [PATCH 108/806] update code --- tools/llsm/parallelReadTiff.c | 33 +++++++++++++++++++++++++++++++++ tools/llsm/parallelReadTiff.h | 3 --- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index a242eb3bc..ed1f7674f 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -1,4 +1,11 @@ #include "parallelReadTiff.h" +#include "tiffio.h" + +#define USE_OMP 0 + +#ifdef USE_OMP +#include "omp.h" +#endif void DummyHandler(const char *module, const char *fmt, va_list ap) @@ -16,7 +23,9 @@ readTiffParallelBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, vo uint64_t bytes = bits / 8; int32_t w; +#ifdef USE_OMP #pragma omp parallel for +#endif for (w = 0; w < numWorkers; w++) { TIFF *tif = TIFFOpen(fileName, "r"); @@ -97,7 +106,9 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void char errString[10000]; if (compressed > 1 || z < 32768) { TIFFClose(tif); +#ifdef USE_OMP #pragma omp parallel for +#endif for (w = 0; w < numWorkers; w++) { uint8_t outCounter = 0; @@ -105,7 +116,9 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void while (!tif) { tif = TIFFOpen(fileName, "r"); if (outCounter == 3) { +#ifdef USE_OMP #pragma omp critical +#endif { err = 1; sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); @@ -125,7 +138,9 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void while (!TIFFSetDirectory(tif, (uint64_t)dir) && counter < 3) { counter++; if (counter == 3) { +#ifdef USE_OMP #pragma omp critical +#endif { err = 1; sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); @@ -139,7 +154,9 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void // loading the data into a buffer int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize * x * bytes); 
if (cBytes < 0) { +#ifdef USE_OMP #pragma omp critical +#endif { errBak = 1; err = 1; @@ -249,7 +266,9 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void uint64_t size = x * y * z * (bits / 8); void * tiffC = malloc(size); memcpy(tiffC, tiff, size); +#ifdef USE_OMP #pragma omp parallel for +#endif for (uint64_t k = 0; k < z; k++) { for (uint64_t j = 0; j < x; j++) { for (uint64_t i = 0; i < y; i++) { @@ -294,7 +313,9 @@ readTiffParallel2DBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, uint64_t bytes = bits / 8; int32_t w; +#ifdef USE_OMP #pragma omp parallel for +#endif for (w = 0; w < numWorkers; w++) { TIFF *tif = TIFFOpen(fileName, "r"); @@ -373,7 +394,9 @@ readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, voi uint8_t errBak = 0; char errString[10000]; +#ifdef USE_OMP #pragma omp parallel for +#endif for (w = 0; w < numWorkers; w++) { uint8_t outCounter = 0; @@ -381,7 +404,9 @@ readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, voi while (!tif) { tif = TIFFOpen(fileName, "r"); if (outCounter == 3) { +#ifdef USE_OMP #pragma omp critical +#endif { err = 1; sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); @@ -399,7 +424,9 @@ readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, voi counter + 1); counter++; if (counter == 3) { +#ifdef USE_OMP #pragma omp critical +#endif { err = 1; sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); @@ -412,7 +439,9 @@ readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, voi // loading the data into a buffer int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize * x * bytes); if (cBytes < 0) { +#ifdef USE_OMP #pragma omp critical +#endif { errBak = 1; err = 1; @@ -532,7 +561,9 @@ readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char *fileName, // Swap endianess for types greater than 8 bits // TODO: May need to change later because we may not always need to swap if (bits > 8) { +#ifdef USE_OMP #pragma omp parallel for +#endif for (uint64_t i = 0; i < x * y * z; i++) { switch (bits) { case 16: @@ -570,7 +601,9 @@ readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char *fileName, uint64_t size = x * y * z * (bits / 8); void * tiffC = malloc(size); memcpy(tiffC, tiff, size); +#ifdef USE_OMP #pragma omp parallel for +#endif for (uint64_t k = 0; k < z; k++) { for (uint64_t j = 0; j < x; j++) { for (uint64_t i = 0; i < y; i++) { diff --git a/tools/llsm/parallelReadTiff.h b/tools/llsm/parallelReadTiff.h index 02e0379a6..e8fe59fbc 100644 --- a/tools/llsm/parallelReadTiff.h +++ b/tools/llsm/parallelReadTiff.h @@ -10,9 +10,6 @@ #include #include -#include "tiffio.h" -#include "omp.h" -// #include "commons/generic/pdc_generic.h" typedef struct { uint64_t *range; From ba2f67a27c7fde27f927364808ca321bf0c68581 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 23:51:27 -0500 Subject: [PATCH 109/806] update array generating method --- tools/llsm/parallelReadTiff.c | 95 ++++++++++++++++++++++------------- tools/llsm/parallelReadTiff.h | 8 +++ 2 files changed, 67 insertions(+), 36 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index ed1f7674f..712c0d7e7 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -730,60 +730,83 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u TIFFClose(tif); } -uint8_t *** +uint8_t * 
createU8Array(int ndim, size_t *dim) { - size_t i, j; - uint8_t ***array = (uint8_t ***)malloc(dim[0] * sizeof(uint8_t **)); - for (i = 0; i < dim[0]; i++) { - array[i] = (uint8_t **)malloc(dim[1] * sizeof(uint8_t *)); - for (j = 0; j < dim[1]; j++) { - array[i][j] = (uint8_t *)calloc(dim[2], sizeof(uint8_t)); - } + size_t i = 0, dim_prod = 1; + for (i = 0; i < ndim; i++) { + dim_prod *= dim[i]; } - return array; + return (uint8_t*)malloc(dim_prod * sizeof(uint8_t)); + // size_t i, j; + // uint8_t ***array = (uint8_t ***)malloc(dim[0] * sizeof(uint8_t **)); + // for (i = 0; i < dim[0]; i++) { + // array[i] = (uint8_t **)malloc(dim[1] * sizeof(uint8_t *)); + // for (j = 0; j < dim[1]; j++) { + // array[i][j] = (uint8_t *)calloc(dim[2], sizeof(uint8_t)); + // } + // } + // return array; } -uint16_t *** +uint16_t * createU16Array(int ndim, size_t *dim) { - size_t i, j; - uint16_t ***array = (uint16_t ***)malloc(dim[0] * sizeof(uint16_t **)); - for (i = 0; i < dim[0]; i++) { - array[i] = (uint16_t **)malloc(dim[1] * sizeof(uint16_t *)); - for (j = 0; j < dim[1]; j++) { - array[i][j] = (uint16_t *)calloc(dim[2], sizeof(uint16_t)); - } + size_t i = 0, dim_prod = 1; + for (i = 0; i < ndim; i++) { + dim_prod *= dim[i]; } - return array; + return (uint16_t*)malloc(dim_prod * sizeof(uint16_t)); + + // size_t i, j; + // uint16_t ***array = (uint16_t ***)malloc(dim[0] * sizeof(uint16_t **)); + // for (i = 0; i < dim[0]; i++) { + // array[i] = (uint16_t **)malloc(dim[1] * sizeof(uint16_t *)); + // for (j = 0; j < dim[1]; j++) { + // array[i][j] = (uint16_t *)calloc(dim[2], sizeof(uint16_t)); + // } + // } + // return array; } -float *** +float * createFloatArray(int ndim, size_t *dim) { - size_t i, j; - float ***array = (float ***)malloc(dim[0] * sizeof(float **)); - for (i = 0; i < dim[0]; i++) { - array[i] = (float **)malloc(dim[1] * sizeof(float *)); - for (j = 0; j < dim[1]; j++) { - array[i][j] = (float *)calloc(dim[2], sizeof(float)); - } + size_t i = 0, dim_prod = 1; + for (i = 0; i < ndim; i++) { + dim_prod *= dim[i]; } - return array; + return (float *)malloc(dim_prod * sizeof(float)); + + // size_t i, j; + // float ***array = (float ***)malloc(dim[0] * sizeof(float **)); + // for (i = 0; i < dim[0]; i++) { + // array[i] = (float **)malloc(dim[1] * sizeof(float *)); + // for (j = 0; j < dim[1]; j++) { + // array[i][j] = (float *)calloc(dim[2], sizeof(float)); + // } + // } + // return array; } -double *** +double * createDoubleArray(int ndim, size_t *dim) { - size_t i, j; - double ***array = (double ***)malloc(dim[0] * sizeof(double **)); - for (i = 0; i < dim[0]; i++) { - array[i] = (double **)malloc(dim[1] * sizeof(double *)); - for (j = 0; j < dim[1]; j++) { - array[i][j] = (double *)calloc(dim[2], sizeof(double)); - } + size_t i = 0, dim_prod = 1; + for (i = 0; i < ndim; i++) { + dim_prod *= dim[i]; } - return array; + return (double *)malloc(dim_prod * sizeof(double)); + + // size_t i, j; + // double ***array = (double ***)malloc(dim[0] * sizeof(double **)); + // for (i = 0; i < dim[0]; i++) { + // array[i] = (double **)malloc(dim[1] * sizeof(double *)); + // for (j = 0; j < dim[1]; j++) { + // array[i][j] = (double *)calloc(dim[2], sizeof(double)); + // } + // } + // return array; } void * diff --git a/tools/llsm/parallelReadTiff.h b/tools/llsm/parallelReadTiff.h index e8fe59fbc..f7b4ba2c5 100644 --- a/tools/llsm/parallelReadTiff.h +++ b/tools/llsm/parallelReadTiff.h @@ -11,6 +11,14 @@ #include +#define CREATE_ARRAY(result_var, type, ndim, dim) do { \ + size_t i = 0, dim_prod = 1; \ + 
for (i = 0; i < (ndim); i++) { \ + dim_prod *= (dim)[i]; \ + } \ + result_var = (void *)malloc(dim_prod * sizeof(type)); \ +} while (0) + typedef struct { uint64_t *range; size_t length; From 08ab57e98a297cd805951b965f59d0f82f10d35e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 23:55:57 -0500 Subject: [PATCH 110/806] update array generating method --- tools/llsm/parallelReadTiff.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 712c0d7e7..ac0040da8 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -814,16 +814,20 @@ _get_tiff_array(int bits, int ndim, size_t *dims) { void *tiff = NULL; if (bits == 8) { - tiff = (void *)createU8Array(ndim, dims); + CREATE_ARRAY(tiff, uint8_t, ndim, dims); + // tiff = (void *)createU8Array(ndim, dims); } else if (bits == 16) { - tiff = (void *)createU16Array(ndim, dims); + CREATE_ARRAY(tiff, uint16_t, ndim, dims); + // tiff = (void *)createU16Array(ndim, dims); } else if (bits == 32) { - tiff = (void *)createFloatArray(ndim, dims); + CREATE_ARRAY(tiff, float, ndim, dims); + // tiff = (void *)createFloatArray(ndim, dims); } else if (bits == 64) { - tiff = (void *)createDoubleArray(ndim, dims); + CREATE_ARRAY(tiff, double, ndim, dims); + // tiff = (void *)createDoubleArray(ndim, dims); } return tiff; } From 2377bb3e7674974add11e6450835e1eca28a02d9 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Wed, 19 Apr 2023 23:57:52 -0500 Subject: [PATCH 111/806] update array generating method --- tools/llsm/parallelReadTiff.c | 270 ++-------------------------------- tools/llsm/parallelReadTiff.h | 9 -- 2 files changed, 9 insertions(+), 270 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index ac0040da8..69178bdbd 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -7,6 +7,14 @@ #include "omp.h" #endif +#define CREATE_ARRAY(result_var, type, ndim, dim) do { \ + size_t i = 0, dim_prod = 1; \ + for (i = 0; i < (ndim); i++) { \ + dim_prod *= (dim)[i]; \ + } \ + result_var = (void *)malloc(dim_prod * sizeof(type)); \ +} while (0) + void DummyHandler(const char *module, const char *fmt, va_list ap) { @@ -730,104 +738,21 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u TIFFClose(tif); } -uint8_t * -createU8Array(int ndim, size_t *dim) -{ - size_t i = 0, dim_prod = 1; - for (i = 0; i < ndim; i++) { - dim_prod *= dim[i]; - } - return (uint8_t*)malloc(dim_prod * sizeof(uint8_t)); - // size_t i, j; - // uint8_t ***array = (uint8_t ***)malloc(dim[0] * sizeof(uint8_t **)); - // for (i = 0; i < dim[0]; i++) { - // array[i] = (uint8_t **)malloc(dim[1] * sizeof(uint8_t *)); - // for (j = 0; j < dim[1]; j++) { - // array[i][j] = (uint8_t *)calloc(dim[2], sizeof(uint8_t)); - // } - // } - // return array; -} - -uint16_t * -createU16Array(int ndim, size_t *dim) -{ - size_t i = 0, dim_prod = 1; - for (i = 0; i < ndim; i++) { - dim_prod *= dim[i]; - } - return (uint16_t*)malloc(dim_prod * sizeof(uint16_t)); - - // size_t i, j; - // uint16_t ***array = (uint16_t ***)malloc(dim[0] * sizeof(uint16_t **)); - // for (i = 0; i < dim[0]; i++) { - // array[i] = (uint16_t **)malloc(dim[1] * sizeof(uint16_t *)); - // for (j = 0; j < dim[1]; j++) { - // array[i][j] = (uint16_t *)calloc(dim[2], sizeof(uint16_t)); - // } - // } - // return array; -} - -float * -createFloatArray(int ndim, size_t *dim) -{ - size_t i = 0, dim_prod = 1; - for (i = 0; i < ndim; 
i++) { - dim_prod *= dim[i]; - } - return (float *)malloc(dim_prod * sizeof(float)); - - // size_t i, j; - // float ***array = (float ***)malloc(dim[0] * sizeof(float **)); - // for (i = 0; i < dim[0]; i++) { - // array[i] = (float **)malloc(dim[1] * sizeof(float *)); - // for (j = 0; j < dim[1]; j++) { - // array[i][j] = (float *)calloc(dim[2], sizeof(float)); - // } - // } - // return array; -} - -double * -createDoubleArray(int ndim, size_t *dim) -{ - size_t i = 0, dim_prod = 1; - for (i = 0; i < ndim; i++) { - dim_prod *= dim[i]; - } - return (double *)malloc(dim_prod * sizeof(double)); - - // size_t i, j; - // double ***array = (double ***)malloc(dim[0] * sizeof(double **)); - // for (i = 0; i < dim[0]; i++) { - // array[i] = (double **)malloc(dim[1] * sizeof(double *)); - // for (j = 0; j < dim[1]; j++) { - // array[i][j] = (double *)calloc(dim[2], sizeof(double)); - // } - // } - // return array; -} - void * _get_tiff_array(int bits, int ndim, size_t *dims) { void *tiff = NULL; if (bits == 8) { CREATE_ARRAY(tiff, uint8_t, ndim, dims); - // tiff = (void *)createU8Array(ndim, dims); } else if (bits == 16) { CREATE_ARRAY(tiff, uint16_t, ndim, dims); - // tiff = (void *)createU16Array(ndim, dims); } else if (bits == 32) { CREATE_ARRAY(tiff, float, ndim, dims); - // tiff = (void *)createFloatArray(ndim, dims); } else if (bits == 64) { CREATE_ARRAY(tiff, double, ndim, dims); - // tiff = (void *)createDoubleArray(ndim, dims); } return tiff; } @@ -868,181 +793,4 @@ parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tif dims[2] = z; _TIFF_load(fileName, is_imageJ, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, tiff_ptr); -} - -// void -// mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) -// { -// // Check if the fileName is a char array or matlab style -// char *fileName = NULL; -// if (!mxIsClass(prhs[0], "string")) { -// if (!mxIsChar(prhs[0])) -// printf("tiff:inputError", "The first argument must be a string"); -// fileName = mxArrayToString(prhs[0]); -// } -// else { -// mxArray *mString[1]; -// mxArray *mCharA[1]; - -// // Convert string to char array -// mString[0] = mxDuplicateArray(prhs[0]); -// mexCallMATLAB(1, mCharA, 1, mString, "char"); -// fileName = mxArrayToString(mCharA[0]); -// } - -// // Handle the tilde character in filenames on Linux/Mac -// // #ifndef _WIN32 -// // if(strchr(fileName,'~')) fileName = expandTilde(fileName); -// // #endif - -// uint8_t flipXY = 1; -// // uint8_t flipXY = 0; - -// // if(nrhs > 2){ -// // flipXY = (uint8_t)*(mxGetPr(prhs[2])); -// //} - -// TIFFSetWarningHandler(DummyHandler); -// TIFF *tif = TIFFOpen(fileName, "r"); -// if (!tif) -// printf("tiff:inputError", "File \"%s\" cannot be opened", fileName); - -// uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0; -// TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &x); -// TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &y); - -// if (nrhs == 1) { -// uint16_t s = 0, m = 0, t = 1; -// while (TIFFSetDirectory(tif, t)) { -// s = t; -// t *= 8; -// if (s > t) { -// t = 65535; -// printf("Number of slices > 32768\n"); -// break; -// } -// } -// while (s != t) { -// m = (s + t + 1) / 2; -// if (TIFFSetDirectory(tif, m)) { -// s = m; -// } -// else { -// if (m > 0) -// t = m - 1; -// else -// t = m; -// } -// } -// z = s + 1; -// } -// else { -// if (mxGetN(prhs[1]) != 2) { -// printf("tiff:inputError", "Input range is not 2"); -// } -// else { -// startSlice = (uint64_t) * (mxGetPr(prhs[1])) - 1; -// z = (uint64_t) * ((mxGetPr(prhs[1]) + 1)) - 
startSlice; -// if (!TIFFSetDirectory(tif, startSlice + z - 1) || !TIFFSetDirectory(tif, startSlice)) { -// printf("tiff:rangeOutOfBound", "Range is out of bounds"); -// } -// } -// } - -// TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits); -// uint64_t stripSize = 1; -// TIFFGetField(tif, TIFFTAG_ROWSPERSTRIP, &stripSize); -// TIFFClose(tif); - -// uint8_t imageJIm = 0; -// if (isImageJIm(fileName)) { -// imageJIm = 1; -// uint64_t tempZ = imageJImGetZ(fileName); -// if (tempZ) -// z = tempZ; -// } - -// uint64_t dim[3]; -// dim[0] = y; -// dim[1] = x; -// dim[2] = z; - -// // Case for ImageJ -// if (imageJIm) { -// if (bits == 8) { -// plhs[0] = mxCreateNumericArray(3, dim, mxUINT8_CLASS, mxREAL); -// uint8_t *tiff = (uint8_t *)mxGetPr(plhs[0]); -// readTiffParallelImageJ(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else if (bits == 16) { -// plhs[0] = mxCreateNumericArray(3, dim, mxUINT16_CLASS, mxREAL); -// uint16_t *tiff = (uint16_t *)mxGetPr(plhs[0]); -// readTiffParallelImageJ(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else if (bits == 32) { -// plhs[0] = mxCreateNumericArray(3, dim, mxSINGLE_CLASS, mxREAL); -// float *tiff = (float *)mxGetPr(plhs[0]); -// readTiffParallelImageJ(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else if (bits == 64) { -// plhs[0] = mxCreateNumericArray(3, dim, mxDOUBLE_CLASS, mxREAL); -// double *tiff = (double *)mxGetPr(plhs[0]); -// readTiffParallelImageJ(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else { -// printf("tiff:dataTypeError", "Data type not suppported"); -// } -// } -// // Case for 2D -// else if (z <= 1) { -// if (bits == 8) { -// plhs[0] = mxCreateNumericArray(3, dim, mxUINT8_CLASS, mxREAL); -// uint8_t *tiff = (uint8_t *)mxGetPr(plhs[0]); -// readTiffParallel2D(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else if (bits == 16) { -// plhs[0] = mxCreateNumericArray(3, dim, mxUINT16_CLASS, mxREAL); -// uint16_t *tiff = (uint16_t *)mxGetPr(plhs[0]); -// readTiffParallel2D(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else if (bits == 32) { -// plhs[0] = mxCreateNumericArray(3, dim, mxSINGLE_CLASS, mxREAL); -// float *tiff = (float *)mxGetPr(plhs[0]); -// readTiffParallel2D(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else if (bits == 64) { -// plhs[0] = mxCreateNumericArray(3, dim, mxDOUBLE_CLASS, mxREAL); -// double *tiff = (double *)mxGetPr(plhs[0]); -// readTiffParallel2D(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else { -// printf("tiff:dataTypeError", "Data type not suppported"); -// } -// } -// // Case for 3D -// else { -// if (bits == 8) { -// plhs[0] = mxCreateNumericArray(3, dim, mxUINT8_CLASS, mxREAL); -// uint8_t *tiff = (uint8_t *)mxGetPr(plhs[0]); -// readTiffParallel(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else if (bits == 16) { -// plhs[0] = mxCreateNumericArray(3, dim, mxUINT16_CLASS, mxREAL); -// uint16_t *tiff = (uint16_t *)mxGetPr(plhs[0]); -// readTiffParallel(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else if (bits == 32) { -// plhs[0] = mxCreateNumericArray(3, dim, mxSINGLE_CLASS, mxREAL); -// float *tiff = (float *)mxGetPr(plhs[0]); -// readTiffParallel(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else if (bits == 64) { -// 
plhs[0] = mxCreateNumericArray(3, dim, mxDOUBLE_CLASS, mxREAL); -// double *tiff = (double *)mxGetPr(plhs[0]); -// readTiffParallel(x, y, z, fileName, (void *)tiff, bits, startSlice, stripSize, flipXY); -// } -// else { -// printf("tiff:dataTypeError", "Data type not suppported"); -// } -// } -// } \ No newline at end of file +} \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.h b/tools/llsm/parallelReadTiff.h index f7b4ba2c5..4f1d7ad5e 100644 --- a/tools/llsm/parallelReadTiff.h +++ b/tools/llsm/parallelReadTiff.h @@ -10,15 +10,6 @@ #include #include - -#define CREATE_ARRAY(result_var, type, ndim, dim) do { \ - size_t i = 0, dim_prod = 1; \ - for (i = 0; i < (ndim); i++) { \ - dim_prod *= (dim)[i]; \ - } \ - result_var = (void *)malloc(dim_prod * sizeof(type)); \ -} while (0) - typedef struct { uint64_t *range; size_t length; From da917802c2dc8e2da195cc31d224142d9a0203f2 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 20 Apr 2023 00:13:06 -0500 Subject: [PATCH 112/806] update array generating method --- tools/CMakeLists.txt | 2 ++ tools/llsm/parallelReadTiff.c | 30 +++++++++++++++--------------- tools/llsm_importer.c | 6 +++--- 3 files changed, 20 insertions(+), 18 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 6145514ef..b6123afc3 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -74,6 +74,8 @@ option(USE_SYSTEM_OPENMP "Use system-installed OpenMP." ON) if(USE_SYSTEM_OPENMP) find_package(OpenMP REQUIRED) if(OPENMP_FOUND) + add_definitions(-DENABLE_OPENMP=1) + set(ENABLE_OPENMP 1) set(OPENMP_LIBRARIES "${OpenMP_C_LIBRARIES}") else() message(FATAL_ERROR "OpenMP not found") diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 69178bdbd..0a8f609fd 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -1,9 +1,9 @@ #include "parallelReadTiff.h" #include "tiffio.h" -#define USE_OMP 0 +#define ENABLE_OPENMP -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #include "omp.h" #endif @@ -31,7 +31,7 @@ readTiffParallelBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, vo uint64_t bytes = bits / 8; int32_t w; -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp parallel for #endif for (w = 0; w < numWorkers; w++) { @@ -114,7 +114,7 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void char errString[10000]; if (compressed > 1 || z < 32768) { TIFFClose(tif); -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp parallel for #endif for (w = 0; w < numWorkers; w++) { @@ -124,7 +124,7 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void while (!tif) { tif = TIFFOpen(fileName, "r"); if (outCounter == 3) { -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp critical #endif { @@ -146,7 +146,7 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void while (!TIFFSetDirectory(tif, (uint64_t)dir) && counter < 3) { counter++; if (counter == 3) { -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp critical #endif { @@ -162,7 +162,7 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void // loading the data into a buffer int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize * x * bytes); if (cBytes < 0) { -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp critical #endif { @@ -274,7 +274,7 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void uint64_t size = x * y * z * (bits / 8); void * tiffC = malloc(size); memcpy(tiffC, tiff, size); -#ifdef USE_OMP +#ifdef 
ENABLE_OPENMP #pragma omp parallel for #endif for (uint64_t k = 0; k < z; k++) { @@ -321,7 +321,7 @@ readTiffParallel2DBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, uint64_t bytes = bits / 8; int32_t w; -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp parallel for #endif for (w = 0; w < numWorkers; w++) { @@ -402,7 +402,7 @@ readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, voi uint8_t errBak = 0; char errString[10000]; -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp parallel for #endif for (w = 0; w < numWorkers; w++) { @@ -412,7 +412,7 @@ readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, voi while (!tif) { tif = TIFFOpen(fileName, "r"); if (outCounter == 3) { -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp critical #endif { @@ -432,7 +432,7 @@ readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, voi counter + 1); counter++; if (counter == 3) { -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp critical #endif { @@ -447,7 +447,7 @@ readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, voi // loading the data into a buffer int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize * x * bytes); if (cBytes < 0) { -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp critical #endif { @@ -569,7 +569,7 @@ readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char *fileName, // Swap endianess for types greater than 8 bits // TODO: May need to change later because we may not always need to swap if (bits > 8) { -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp parallel for #endif for (uint64_t i = 0; i < x * y * z; i++) { @@ -609,7 +609,7 @@ readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char *fileName, uint64_t size = x * y * z * (bits / 8); void * tiffC = malloc(size); memcpy(tiffC, tiff, size); -#ifdef USE_OMP +#ifdef ENABLE_OPENMP #pragma omp parallel for #endif for (uint64_t k = 0; k < z; k++) { diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 2a47c561a..565c20b0c 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -22,7 +22,6 @@ parse_console_args(int argc, char *argv[], char **file_name) int c, parse_code = -1; while ((c = getopt(argc, argv, "f:")) != -1) { - printf("c : %c \n", c); switch (c) { case 'f': *file_name = optarg; @@ -46,15 +45,16 @@ main(int argc, char *argv[]) int i = 0; char bytes[10]; char *tiff_str_ptr; - printf("Program started!\n"); + // parse console argument int parse_code = parse_console_args(argc, argv, &file_name); - printf("parse_code %d\n", parse_code); if (parse_code) { return parse_code; } + // print file name for validating purpose printf("Filename: %s\n", file_name ? file_name : "(none)"); + // calling tiff loading process. 
parallel_TIFF_load(file_name, &tiff, 1, NULL); tiff_str_ptr = (char *)tiff; From a3ab20fe168c84cd85ee72045b0a8ec6eefe1159 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 20 Apr 2023 00:15:21 -0500 Subject: [PATCH 113/806] update CMakeLists --- tools/llsm/parallelReadTiff.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 0a8f609fd..d9a2d54f0 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -1,7 +1,7 @@ #include "parallelReadTiff.h" #include "tiffio.h" -#define ENABLE_OPENMP +// #define ENABLE_OPENMP #ifdef ENABLE_OPENMP #include "omp.h" From ac06b1276d7dba18dee9f06bc5597e104b647365 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 20 Apr 2023 00:32:22 -0500 Subject: [PATCH 114/806] update CMakeLists --- tools/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index b6123afc3..394b8a085 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -123,7 +123,7 @@ if(USE_LIB_TIFF) target_link_libraries(llsm_importer cjson) target_link_libraries(llsm_importer ${TIFF_LIBRARIES}) target_link_libraries(llsm_importer llsm_tiff) - target_include_directories(llsm_tiff PUBLIC ${PDC_INCLUDE_DIR}) + target_include_directories(llsm_importer PUBLIC ${PDC_INCLUDE_DIR}) else() message(WARNING "LibTiff not found, ignore building the executables which requires LibTiff support.") endif() From 6e5a046801be09f6229cf72a0f67a2095d7f52aa Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 20 Apr 2023 14:35:25 -0500 Subject: [PATCH 115/806] update CMakeLists --- tools/CMakeLists.txt | 2 +- tools/llsm/imageListReader.c | 71 +++++++++++++++++++++++++++++++++++ tools/llsm/imageListReader.h | 28 ++++++++++++++ tools/llsm/parallelReadTiff.c | 15 ++++---- tools/llsm/parallelReadTiff.h | 4 +- tools/llsm_importer.c | 53 +++++++++++++++++++------- 6 files changed, 150 insertions(+), 23 deletions(-) create mode 100644 tools/llsm/imageListReader.c create mode 100644 tools/llsm/imageListReader.h diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 394b8a085..fed5d93d3 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -110,7 +110,7 @@ if(USE_LIB_TIFF) if(TIFF_FOUND) # Add the LibTIFF include directory to the include path include_directories(${TIFF_INCLUDE_DIRS}) - add_library(llsm_tiff llsm/parallelReadTiff.c llsm/parallelReadTiff.h) + add_library(llsm_tiff llsm/parallelReadTiff.c llsm/imageListReader.c) target_compile_options(llsm_tiff PRIVATE ${OpenMP_C_FLAGS}) target_link_libraries(llsm_tiff PUBLIC ${OpenMP_C_LIBRARIES}) target_link_libraries(llsm_tiff PUBLIC ${TIFF_LIBRARIES}) diff --git a/tools/llsm/imageListReader.c b/tools/llsm/imageListReader.c new file mode 100644 index 000000000..199ec788c --- /dev/null +++ b/tools/llsm/imageListReader.c @@ -0,0 +1,71 @@ +#include "imageListReader.h" +#include "parallelReadTiff.h" + +void +scan_image_list(char *imageListFileName, on_image_ptr_t image_callback) +{ + FILE* file = fopen(imageListFileName, "r"); + + if (file == NULL) { + printf("Error: could not open file %s\n", imageListFileName); + return; + } + + char buffer[1024]; + char* token; + int line_count = 0; + image_file_info_t image_info; + + // Read and discard the first line (the header) + fgets(buffer, sizeof(buffer), file); + line_count++; + + // Read the remaining lines of the file + while (fgets(buffer, sizeof(buffer), file)) { + line_count++; + + // Tokenize the line by comma + token = 
strtok(buffer, ","); + + // Extract the filepath + image_info.filepath = strdup(token); + + // Extract the filename + token = strtok(NULL, ","); + image_info.filename = strdup(token); + + // Extract the stageX_um_ + token = strtok(NULL, ","); + image_info.stageX_um_ = atof(token); + + // Extract the stageY_um_ + token = strtok(NULL, ","); + image_info.stageY_um_ = atof(token); + + // Extract the stageZ_um_ + token = strtok(NULL, ","); + image_info.stageZ_um_ = atof(token); + + // Extract the objectiveX_um_ + token = strtok(NULL, ","); + image_info.objectiveX_um_ = atof(token); + + // Extract the objectiveY_um_ + token = strtok(NULL, ","); + image_info.objectiveY_um_ = atof(token); + + // Extract the objectiveZ_um_ + token = strtok(NULL, ","); + image_info.objectiveZ_um_ = atof(token); + + // Do something with the extracted image info... + if (image_callback != NULL) { + image_callback(&image_info); + } + + } + + fclose(file); + + printf("Read %d lines from file %s\n", line_count, imageListFileName); +} \ No newline at end of file diff --git a/tools/llsm/imageListReader.h b/tools/llsm/imageListReader.h new file mode 100644 index 000000000..db70a14fb --- /dev/null +++ b/tools/llsm/imageListReader.h @@ -0,0 +1,28 @@ +#ifndef IMAGELISTREADER_H +#define IMAGELISTREADER_H + +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct { + char *filepath; + char *filename; + double stageX_um_; + double stageY_um_; + double stageZ_um_; + double objectiveX_um_; + double objectiveY_um_; + double objectiveZ_um_; +} image_file_info_t; + +typedef void (*on_image_ptr_t)(image_file_info_t *); + +void scan_image_list(char *imageListFileName, on_image_ptr_t image_callback); + +#endif // IMAGELISTREADER_H \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index d9a2d54f0..8ad47b52d 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -7,13 +7,14 @@ #include "omp.h" #endif -#define CREATE_ARRAY(result_var, type, ndim, dim) do { \ - size_t i = 0, dim_prod = 1; \ - for (i = 0; i < (ndim); i++) { \ - dim_prod *= (dim)[i]; \ - } \ - result_var = (void *)malloc(dim_prod * sizeof(type)); \ -} while (0) +#define CREATE_ARRAY(result_var, type, ndim, dim) \ + do { \ + size_t i = 0, dim_prod = 1; \ + for (i = 0; i < (ndim); i++) { \ + dim_prod *= (dim)[i]; \ + } \ + result_var = (void *)malloc(dim_prod * sizeof(type)); \ + } while (0) void DummyHandler(const char *module, const char *fmt, va_list ap) diff --git a/tools/llsm/parallelReadTiff.h b/tools/llsm/parallelReadTiff.h index 4f1d7ad5e..aec1dfe7b 100644 --- a/tools/llsm/parallelReadTiff.h +++ b/tools/llsm/parallelReadTiff.h @@ -11,8 +11,8 @@ #include typedef struct { - uint64_t *range; - size_t length; + uint64_t *range; + size_t length; } parallel_tiff_range_t; void parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tiff_range_t *strip_range); diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 565c20b0c..275f6fd9c 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -15,12 +15,13 @@ // #include "pdc_client_connect.h" #include "llsm/parallelReadTiff.h" +#include "llsm/imageListReader.h" int parse_console_args(int argc, char *argv[], char **file_name) { int c, parse_code = -1; - + while ((c = getopt(argc, argv, "f:")) != -1) { switch (c) { case 'f': @@ -36,15 +37,48 @@ parse_console_args(int argc, char *argv[], char **file_name) return parse_code; } +void +print_image_file_info(const 
image_file_info_t *image_info) +{ + printf("Filepath: %s\n", image_info->filepath); + printf("Filename: %s\n", image_info->filename); + printf("Stage X (um): %.2f\n", image_info->stageX_um_); + printf("Stage Y (um): %.2f\n", image_info->stageY_um_); + printf("Stage Z (um): %.2f\n", image_info->stageZ_um_); + printf("Objective X (um): %.2f\n", image_info->objectiveX_um_); + printf("Objective Y (um): %.2f\n", image_info->objectiveY_um_); + printf("Objective Z (um): %.2f\n", image_info->objectiveZ_um_); +} + +void +on_image(image_file_info_t *imageinfo) +{ + print_image_file_info(imageinfo); + // calling tiff loading process. + void *tiff = NULL; + int i = 0; + parallel_TIFF_load(imageinfo->filename, &tiff, 1, NULL); + + if (!tiff) + return 1; + + printf("first few bytes "); + for (i = 0; i < 10; i++) { + printf("%d ", ((uint8_t *)tiff)[i]); + } + printf("\n"); + free(tiff); +} + int main(int argc, char *argv[]) { char *file_name = NULL; - void *tiff = NULL; - int i = 0; - char bytes[10]; - char *tiff_str_ptr; + + + char bytes[10]; + // parse console argument int parse_code = parse_console_args(argc, argv, &file_name); if (parse_code) { @@ -54,14 +88,7 @@ main(int argc, char *argv[]) // print file name for validating purpose printf("Filename: %s\n", file_name ? file_name : "(none)"); - // calling tiff loading process. - parallel_TIFF_load(file_name, &tiff, 1, NULL); - - tiff_str_ptr = (char *)tiff; - for (i = 0; i < 10; i++) { - bytes[i] = tiff_str_ptr[i]; - } - printf("first few bytes : %s\n", bytes); + scan_image_list(file_name, &on_image); return 0; } \ No newline at end of file From 65c96d9063556002c635ce3dd362cc55fddb4b89 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 20 Apr 2023 15:31:14 -0500 Subject: [PATCH 116/806] update CMakeLists --- tools/llsm/imageListReader.c | 4 ++-- tools/llsm/imageListReader.h | 9 +++++++-- tools/llsm_importer.c | 30 ++++++++++++++++++++++-------- 3 files changed, 31 insertions(+), 12 deletions(-) diff --git a/tools/llsm/imageListReader.c b/tools/llsm/imageListReader.c index 199ec788c..c9b0384b9 100644 --- a/tools/llsm/imageListReader.c +++ b/tools/llsm/imageListReader.c @@ -2,7 +2,7 @@ #include "parallelReadTiff.h" void -scan_image_list(char *imageListFileName, on_image_ptr_t image_callback) +scan_image_list(char *imageListFileName, on_image_ptr_t image_callback, img_scan_callback_args_t *args) { FILE* file = fopen(imageListFileName, "r"); @@ -60,7 +60,7 @@ scan_image_list(char *imageListFileName, on_image_ptr_t image_callback) // Do something with the extracted image info... 
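/* Illustrative sketch, not from the patch itself. With this hunk, scan_image_list()
 * forwards a caller-supplied img_scan_callback_args_t to every callback invocation,
 * so per-scan state such as a directory prefix can travel with the callback instead
 * of through globals. A hypothetical caller (my_on_image, my_args and the CSV path
 * are invented for illustration) might look like:
 *
 *     void my_on_image(image_file_info_t *info, img_scan_callback_args_t *args) {
 *         printf("%s under %s\n", info->filename, (char *)args->input);
 *     }
 *
 *     img_scan_callback_args_t my_args = { .input = (void *)"/data/llsm", .output = NULL };
 *     scan_image_list("ImageList.csv", &my_on_image, &my_args);
 */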
if (image_callback != NULL) { - image_callback(&image_info); + image_callback(&image_info, args); } } diff --git a/tools/llsm/imageListReader.h b/tools/llsm/imageListReader.h index db70a14fb..fd3f646d9 100644 --- a/tools/llsm/imageListReader.h +++ b/tools/llsm/imageListReader.h @@ -21,8 +21,13 @@ typedef struct { double objectiveZ_um_; } image_file_info_t; -typedef void (*on_image_ptr_t)(image_file_info_t *); +typedef struct { + void *input; + void *output; +} img_scan_callback_args_t; + +typedef void (*on_image_ptr_t)(image_file_info_t *, img_scan_callback_args_t *args); -void scan_image_list(char *imageListFileName, on_image_ptr_t image_callback); +void scan_image_list(char *imageListFileName, on_image_ptr_t image_callback, img_scan_callback_args_t *args); #endif // IMAGELISTREADER_H \ No newline at end of file diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 275f6fd9c..4c893b011 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -16,6 +16,7 @@ #include "llsm/parallelReadTiff.h" #include "llsm/imageListReader.h" +#include int parse_console_args(int argc, char *argv[], char **file_name) @@ -51,13 +52,26 @@ print_image_file_info(const image_file_info_t *image_info) } void -on_image(image_file_info_t *imageinfo) +on_image(image_file_info_t *image_info, img_scan_callback_args_t *args) { - print_image_file_info(imageinfo); + print_image_file_info(image_info); + + char *dirname = (char *)args->input; + char filepath[256]; // calling tiff loading process. void *tiff = NULL; int i = 0; - parallel_TIFF_load(imageinfo->filename, &tiff, 1, NULL); + + // check if the path ends with a forward slash + if (dirname[strlen(dirname) - 1] != '/') { + strcat(dirname, "/"); // add a forward slash to the end of the path + } + + strcpy(filepath, dirname); // copy the directory path to the file path + strcat(filepath, image_info->filename); // concatenate the file name to the file path + + + parallel_TIFF_load(filepath, &tiff, 1, NULL); if (!tiff) return 1; @@ -75,20 +89,20 @@ main(int argc, char *argv[]) { char *file_name = NULL; - - - char bytes[10]; - + img_scan_callback_args_t callback_args; // parse console argument int parse_code = parse_console_args(argc, argv, &file_name); if (parse_code) { return parse_code; } + char* directory_path = dirname(strdup(directory_path)); // print file name for validating purpose printf("Filename: %s\n", file_name ? file_name : "(none)"); + printf("Directory: %s\n", directory_path ? directory_path : "(none)"); - scan_image_list(file_name, &on_image); + callback_args.input = (void *)directory_path; + scan_image_list(file_name, &on_image, &callback_args); return 0; } \ No newline at end of file From ec20bc7d9aaa1834af54dbd9b450544b983c8f52 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 20 Apr 2023 23:31:03 -0500 Subject: [PATCH 117/806] update CMakeLists --- tools/llsm_importer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 4c893b011..a37ad2853 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -95,7 +95,7 @@ main(int argc, char *argv[]) if (parse_code) { return parse_code; } - char* directory_path = dirname(strdup(directory_path)); + char* directory_path = dirname(strdup(file_name)); // print file name for validating purpose printf("Filename: %s\n", file_name ? 
file_name : "(none)"); From 46258f1648a4318119d26243c9678a3f1b60faeb Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 20 Apr 2023 23:32:10 -0500 Subject: [PATCH 118/806] fix return type --- tools/llsm_importer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index a37ad2853..7ab7d13ee 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -74,7 +74,7 @@ on_image(image_file_info_t *image_info, img_scan_callback_args_t *args) parallel_TIFF_load(filepath, &tiff, 1, NULL); if (!tiff) - return 1; + return; printf("first few bytes "); for (i = 0; i < 10; i++) { From 6a72a8d49ceec3eed41772834e5615b9449ca411 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 20 Apr 2023 23:35:48 -0500 Subject: [PATCH 119/806] fix return type --- tools/llsm/parallelReadTiff.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 8ad47b52d..4d6873ca8 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -39,7 +39,7 @@ readTiffParallelBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, vo TIFF *tif = TIFFOpen(fileName, "r"); if (!tif) - printf("tiff:threadError", "Thread %d: File \"%s\" cannot be opened\n", w, fileName); + printf("tiff:threadError | Thread %d: File \"%s\" cannot be opened\n", w, fileName); void *buffer = malloc(x * bytes); for (int64_t dir = startSlice + (w * batchSize); dir < startSlice + ((w + 1) * batchSize); dir++) { @@ -241,17 +241,17 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void int fd = open(fileName, O_RDONLY); #endif if (fd == -1) - printf("disk:threadError", "File \"%s\" cannot be opened from Disk\n", fileName); + printf("disk:threadError | File \"%s\" cannot be opened from Disk\n", fileName); if (!tif) - printf("tiff:threadError", "File \"%s\" cannot be opened\n", fileName); + printf("tiff:threadError | File \"%s\" cannot be opened\n", fileName); uint64_t offset = 0; uint64_t *offsets = NULL; TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); uint64_t *byteCounts = NULL; TIFFGetField(tif, TIFFTAG_STRIPBYTECOUNTS, &byteCounts); if (!offsets || !byteCounts) - printf("tiff:threadError", "Could not get offsets or byte counts from the tiff file\n"); + printf("tiff:threadError | Could not get offsets or byte counts from the tiff file\n"); offset = offsets[0]; uint64_t fOffset = offsets[stripsPerDir - 1] + byteCounts[stripsPerDir - 1]; uint64_t zSize = fOffset - offset; @@ -308,7 +308,7 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void if (errBak) readTiffParallelBak(x, y, z, fileName, tiff, bits, startSlice, flipXY); else - printf("tiff:threadError", errString); + printf("tiff:threadError %s\n", errString); } } @@ -329,7 +329,7 @@ readTiffParallel2DBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, TIFF *tif = TIFFOpen(fileName, "r"); if (!tif) - printf("tiff:threadError", "Thread %d: File \"%s\" cannot be opened\n", w, fileName); + printf("tiff:threadError | Thread %d: File \"%s\" cannot be opened\n", w, fileName); void *buffer = malloc(x * bytes); for (int64_t dir = startSlice + (w * batchSize); dir < startSlice + ((w + 1) * batchSize); dir++) { @@ -517,7 +517,7 @@ readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, voi if (errBak) readTiffParallel2DBak(x, y, z, fileName, tiff, bits, startSlice, flipXY); else - printf("tiff:threadError", errString); + 
printf("tiff:threadError %s\n", errString); } } @@ -533,7 +533,7 @@ readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char *fileName, #endif TIFF *tif = TIFFOpen(fileName, "r"); if (!tif) - printf("tiff:threadError", "File \"%s\" cannot be opened\n", fileName); + printf("tiff:threadError | File \"%s\" cannot be opened\n", fileName); uint64_t offset = 0; uint64_t *offsets = NULL; TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); @@ -682,7 +682,7 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u TIFFSetWarningHandler(DummyHandler); TIFF *tif = TIFFOpen(fileName, "r"); if (!tif) - printf("tiff:inputError", "File \"%s\" cannot be opened", fileName); + printf("tiff:inputError | File \"%s\" cannot be opened", fileName); TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, x); TIFFGetField(tif, TIFFTAG_IMAGELENGTH, y); @@ -714,13 +714,13 @@ get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, u } else { if (strip_range->length != 2) { - printf("tiff:inputError", "Input range is not 2"); + printf("tiff:inputError | Input range is not 2"); } else { *startSlice = (uint64_t)(*(strip_range->range)) - 1; *z = (uint64_t)(*(strip_range->range + 1)) - startSlice[0]; if (!TIFFSetDirectory(tif, startSlice[0] + z[0] - 1) || !TIFFSetDirectory(tif, startSlice[0])) { - printf("tiff:rangeOutOfBound", "Range is out of bounds"); + printf("tiff:rangeOutOfBound | Range is out of bounds"); } } } From 9e552574f82eee65b1b8b85864bedf9ee21f6a49 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Thu, 20 Apr 2023 23:58:08 -0500 Subject: [PATCH 120/806] add timing --- tools/llsm_importer.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 7ab7d13ee..bf86f6727 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -55,24 +55,34 @@ void on_image(image_file_info_t *image_info, img_scan_callback_args_t *args) { print_image_file_info(image_info); - + char *dirname = (char *)args->input; - char filepath[256]; + char filepath[256]; // calling tiff loading process. - void *tiff = NULL; - int i = 0; + void * tiff = NULL; + int i = 0; + struct timespec start, end; + double duration; // check if the path ends with a forward slash if (dirname[strlen(dirname) - 1] != '/') { strcat(dirname, "/"); // add a forward slash to the end of the path } - strcpy(filepath, dirname); // copy the directory path to the file path + strcpy(filepath, dirname); // copy the directory path to the file path strcat(filepath, image_info->filename); // concatenate the file name to the file path + clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation parallel_TIFF_load(filepath, &tiff, 1, NULL); + clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation + + duration = (end.tv_sec - start.tv_sec) * 1e9 + + (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds + + printf("Read %s Done! Time taken: %.4f seconds\n", filepath, duration / 1e9); + if (!tiff) return; @@ -88,14 +98,14 @@ int main(int argc, char *argv[]) { - char *file_name = NULL; + char * file_name = NULL; img_scan_callback_args_t callback_args; // parse console argument int parse_code = parse_console_args(argc, argv, &file_name); if (parse_code) { return parse_code; } - char* directory_path = dirname(strdup(file_name)); + char *directory_path = dirname(strdup(file_name)); // print file name for validating purpose printf("Filename: %s\n", file_name ? 
file_name : "(none)"); From 390be8a61d34e987843047e28cc546478d15f381 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 21 Apr 2023 12:52:40 -0500 Subject: [PATCH 121/806] add timing --- tools/llsm_importer.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index bf86f6727..322f1504e 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -18,6 +18,8 @@ #include "llsm/imageListReader.h" #include +int rank = 0, size = 1; + int parse_console_args(int argc, char *argv[], char **file_name) { @@ -51,6 +53,17 @@ print_image_file_info(const image_file_info_t *image_info) printf("Objective Z (um): %.2f\n", image_info->objectiveZ_um_); } +// void +// import_to_pdc(const image_file_info_t *image_info) { + + +// PDCprop_set_obj_dims(obj_prop_g, cur_ndims, (uint64_t *)cur_dims); +// PDCprop_set_obj_type(obj_prop_g, cur_type); +// PDCprop_set_obj_time_step(obj_prop_g, 0); +// PDCprop_set_obj_user_id(obj_prop_g, getuid()); +// PDCprop_set_obj_app_name(obj_prop_g, app_name); +// } + void on_image(image_file_info_t *image_info, img_scan_callback_args_t *args) { @@ -98,6 +111,12 @@ int main(int argc, char *argv[]) { +// #ifdef ENABLE_MPI +// MPI_Init(&argc, &argv); +// MPI_Comm_rank(MPI_COMM_WORLD, &rank); +// MPI_Comm_size(MPI_COMM_WORLD, &size); +// #endif + char * file_name = NULL; img_scan_callback_args_t callback_args; // parse console argument @@ -114,5 +133,10 @@ main(int argc, char *argv[]) callback_args.input = (void *)directory_path; scan_image_list(file_name, &on_image, &callback_args); + +// #ifdef ENABLE_MPI +// MPI_Finalize(); +// #endif + return 0; } \ No newline at end of file From a647af9c0b64e95a32c8a6318dd8503ea4d494be Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 21 Apr 2023 13:00:57 -0500 Subject: [PATCH 122/806] fix output --- tools/llsm/imageListReader.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/llsm/imageListReader.c b/tools/llsm/imageListReader.c index c9b0384b9..cf63891e7 100644 --- a/tools/llsm/imageListReader.c +++ b/tools/llsm/imageListReader.c @@ -67,5 +67,5 @@ scan_image_list(char *imageListFileName, on_image_ptr_t image_callback, img_scan fclose(file); - printf("Read %d lines from file %s\n", line_count, imageListFileName); + printf("Read %d lines from file %s after the header line.\n", line_count - 1, imageListFileName); } \ No newline at end of file From 5c0c37dce9131d05b05444b59cfe2421a347c63c Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 17:16:16 -0500 Subject: [PATCH 123/806] llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader --- tools/llsm/csvReader.c | 377 ++++++++++++++++++++++++++++++++++ tools/llsm/csvReader.h | 166 +++++++++++++++ tools/llsm/imageListReader.c | 71 ------- tools/llsm/imageListReader.h | 33 --- tools/llsm/parallelReadTiff.c | 15 +- tools/llsm/parallelReadTiff.h | 16 +- tools/llsm/pdc_list.c | 142 +++++++++++++ tools/llsm/pdc_list.h | 111 ++++++++++ tools/llsm_importer.c | 254 ++++++++++++++++++----- 9 files changed, 1031 insertions(+), 154 deletions(-) create mode 100644 tools/llsm/csvReader.c create mode 100644 tools/llsm/csvReader.h delete mode 100644 tools/llsm/imageListReader.c delete mode 100644 tools/llsm/imageListReader.h create mode 100644 tools/llsm/pdc_list.c create mode 100644 tools/llsm/pdc_list.h diff --git a/tools/llsm/csvReader.c b/tools/llsm/csvReader.c new file mode 100644 index 000000000..baeaf6684 --- /dev/null +++ 
b/tools/llsm/csvReader.c @@ -0,0 +1,377 @@ +#include "csvReader.h" + +csv_header_t * +csv_parse_header(char *line, char *field_types) +{ + csv_header_t *first_header = NULL; + csv_header_t *last_header = NULL; + char * token = NULL; + char * saveptr = NULL; + int field_index = 0; + int in_quotes = 0; + int value_start = 0; + int i = 0; + + for (int i = 0; line[i] != '\0'; ++i) { + if (line[i] == '\"') { + in_quotes = !in_quotes; + } + else if (!in_quotes && (line[i] == ',' || line[i + 1] == '\0')) { + // Allocate memory for the header struct + csv_header_t *header = (csv_header_t *)malloc(sizeof(csv_header_t)); + if (header == NULL) { + return NULL; + } + // Remove quotes and spaces from the field name + header->field_name = strndup(line + value_start, i - value_start + (line[i + 1] == '\0')); + + // Set the field index + header->field_index = field_index; + + // Set the field type + if (field_types != NULL) { + header->field_type = field_types[field_index]; + } + else { + header->field_type = 's'; + } + + // Set the next pointer to NULL + header->next = NULL; + + // Add the header to the linked list + if (first_header == NULL) { + first_header = header; + last_header = header; + } + else { + last_header->next = header; + last_header = header; + } + + value_start = i + 1; + field_index++; + } + } + + return first_header; +} + +csv_cell_t * +csv_parse_row(char *line, csv_header_t *header) +{ + csv_cell_t * first_cell = NULL; + csv_cell_t * last_cell = NULL; + csv_header_t *current_header = header; + char * token = NULL; + char * saveptr = NULL; + int field_index = 0; + int in_quotes = 0; + int value_start = 0; + int i = 0; + + for (int i = 0; line[i] != '\0'; ++i) { + if (line[i] == '\"') { + in_quotes = !in_quotes; + } + else if (!in_quotes && (line[i] == ',' || line[i + 1] == '\0')) { + // Allocate memory for the cell struct + csv_cell_t *cell = (csv_cell_t *)malloc(sizeof(csv_cell_t)); + if (cell == NULL) { + return NULL; + } + + // Set the field name + cell->header = current_header; + + // Set the field value + cell->field_value = strndup(line + value_start, i - value_start + (line[i + 1] == '\0')); + + // Set the next pointer to NULL + cell->next = NULL; + + // Add the cell to the linked list + if (first_cell == NULL) { + first_cell = cell; + last_cell = cell; + } + else { + last_cell->next = cell; + last_cell = cell; + } + + value_start = i + 1; + field_index++; + current_header = current_header->next; + } + } + + return first_cell; +} + +csv_cell_t * +csv_get_field_value_by_name(char *line, csv_header_t *header, char *field_name) +{ + csv_cell_t *cell = csv_parse_row(line, header); + while (cell != NULL) { + if (strcmp(cell->header->field_name, field_name) == 0) { + return cell; + } + cell = cell->next; + } + return NULL; +} + +csv_cell_t * +csv_get_field_value_by_index(char *line, csv_header_t *header, int field_index) +{ + csv_cell_t *cell = csv_parse_row(line, header); + while (cell != NULL) { + if (cell->header->field_index == field_index) { + return cell; + } + cell = cell->next; + } + return NULL; +} + +csv_table_t * +csv_parse_file(char *file_name, char *field_types) +{ + FILE *fp = fopen(file_name, "r"); + if (fp == NULL) { + return NULL; + } + + // Allocate memory for the table struct + csv_table_t *table = (csv_table_t *)malloc(sizeof(csv_table_t)); + if (table == NULL) { + return NULL; + } + + // Read the first line of the file + char * line = NULL; + size_t len = 0; + ssize_t read = getline(&line, &len, fp); + + // Parse the header + table->first_header = 
csv_parse_header(line, field_types); + + // Parse the rows + csv_row_t *first_row = NULL; + csv_row_t *last_row = NULL; + while ((read = getline(&line, &len, fp)) != -1) { + // Allocate memory for the row struct + csv_row_t *row = (csv_row_t *)malloc(sizeof(csv_row_t)); + if (row == NULL) { + return NULL; + } + + // Parse the row + row->first_cell = csv_parse_row(line, table->first_header); + + // Set the next pointer to NULL + row->next = NULL; + + // Add the row to the linked list + if (first_row == NULL) { + first_row = row; + last_row = row; + } + else { + last_row->next = row; + last_row = row; + } + } + + table->first_row = first_row; + + return table; +} + +csv_table_t *csv_parse_list(PDC_LIST *list, char *field_types){ + csv_table_t *table = (csv_table_t *)malloc(sizeof(csv_table_t)); + if (table == NULL) { + return NULL; + } + int num_file_read = 0; + csv_row_t *first_row = NULL; + csv_row_t *last_row = NULL; + + PDC_LIST_ITERATOR *iter = pdc_list_iterator_new(list); + while (pdc_list_iterator_has_next(iter)) { + char *line = (char *)pdc_list_iterator_next(iter); + if (num_file_read == 0) { + table->first_header = csv_parse_header(line, field_types); + } else { + // Allocate memory for the row struct + csv_row_t *row = (csv_row_t *)malloc(sizeof(csv_row_t)); + if (row == NULL) { + return NULL; + } + + // Parse the row + row->first_cell = csv_parse_row(line, table->first_header); + + // Set the next pointer to NULL + row->next = NULL; + + // Add the row to the linked list + if (first_row == NULL) { + first_row = row; + last_row = row; + } + else { + last_row->next = row; + last_row = row; + } + } + num_file_read++; + } + + table->first_row = first_row; + + return table; +} + +void +csv_free_header(csv_header_t *header) +{ + csv_header_t *current_header = header; + csv_header_t *next_header = NULL; + while (current_header != NULL) { + next_header = current_header->next; + free(current_header->field_name); + free(current_header); + current_header = next_header; + } +} + +void +csv_free_row(csv_row_t *row) +{ + csv_row_t *current_row = row; + csv_row_t *next_row = NULL; + while (current_row != NULL) { + next_row = current_row->next; + csv_free_cell(current_row->first_cell); + free(current_row); + current_row = next_row; + } +} + +void +csv_free_cell(csv_cell_t *cell) +{ + csv_cell_t *current_cell = cell; + csv_cell_t *next_cell = NULL; + while (current_cell != NULL) { + next_cell = current_cell->next; + free(current_cell->field_value); + free(current_cell); + current_cell = next_cell; + } +} + +void +csv_free_table(csv_table_t *table) +{ + csv_free_header(table->first_header); + csv_free_row(table->first_row); + free(table); +} + +void +csv_print_header(csv_header_t *header) +{ + csv_header_t *current_header = header; + while (current_header != NULL) { + printf("%s", current_header->field_name); + if (current_header->next != NULL) { + printf(", "); + } + current_header = current_header->next; + } + printf("\n"); +} + +void +csv_print_row(csv_row_t *row, int with_key) +{ + csv_cell_t *current_cell = row->first_cell; + while (current_cell != NULL) { + csv_print_cell(current_cell, with_key); + if (current_cell->next != NULL) { + printf(", "); + if (with_key) { + printf("\n"); + } + } + current_cell = current_cell->next; + } + printf("\n"); +} + +void +csv_print_cell(csv_cell_t *cell, int with_key) +{ + if (with_key) { + printf("%s: ", cell->header->field_name); + } + switch (cell->header->field_type) + { + case 'i': + printf("%ld", strtol(cell->field_value, NULL, 10)); + break; + + case 
'f': + printf("%f", strtod(cell->field_value, NULL)); + break; + + case 's': + printf("%s", cell->field_value); + break; + + default: + printf("%s", cell->field_value); + break; + } + +} + + + +void +csv_print_table(csv_table_t *table) +{ + csv_print_header(table->first_header); + csv_row_t *current_row = table->first_row; + while (current_row != NULL) { + csv_print_row(current_row, 0); + current_row = current_row->next; + } +} + +int +csv_get_num_rows(csv_table_t *table) +{ + int num_rows = 0; + csv_row_t *current_row = table->first_row; + while (current_row != NULL) { + num_rows++; + current_row = current_row->next; + } + return num_rows; +} + +int +csv_get_num_fields(csv_table_t *table) +{ + int num_fields = 0; + csv_header_t *current_header = table->first_header; + while (current_header != NULL) { + num_fields++; + current_header = current_header->next; + } + return num_fields; +} \ No newline at end of file diff --git a/tools/llsm/csvReader.h b/tools/llsm/csvReader.h new file mode 100644 index 000000000..a3743b052 --- /dev/null +++ b/tools/llsm/csvReader.h @@ -0,0 +1,166 @@ +#ifndef CSVREADER_H +#define CSVREADER_H + +#include +#include +#include + +#include "pdc_list.h" + +typedef struct csv_header_t { + char * field_name; + int field_index; + char field_type; + struct csv_header_t *next; +} csv_header_t; + +typedef struct csv_cell_t { + char * field_value; + csv_header_t * header; + struct csv_cell_t *next; +} csv_cell_t; + +typedef struct csv_row_t { + csv_cell_t * first_cell; + struct csv_row_t *next; +} csv_row_t; + +typedef struct csv_table_t { + csv_header_t *first_header; + csv_row_t * first_row; +} csv_table_t; + +/** + * @brief This function parses a CSV header line and returns a linked list of csv_header_t structs. The header + * string may contain quotes and spaces + * @param line The CSV header line to parse. + * @param field_types A string of field types. The field types are 's' for string, 'i' for long integer, 'f' + * for float, and 'd' for double. If this is NULL, all fields are assumed to be strings. + * + * @return A pointer to the first csv_header_t struct in the linked list. + */ +csv_header_t *csv_parse_header(char *line, char *field_types); + +/** + * @brief This function parse a CSV row line and returns a linked list of csv_cell_t structs. The row string + * may contain quotes and spaces + * @param line The CSV row line to parse. + * @param header A pointer to the first csv_header_t struct in the linked list. + * + * @return A pointer to the first csv_cell_t struct in the linked list. The value in the csv_cell should be + * free of quotes or spaces. + */ +csv_cell_t *csv_parse_row(char *line, csv_header_t *header); + +/** + * @brief This function returns the string value of a field for a given row string. The row string may contain + * quotes and spaces + * @param line The CSV row line to parse. + * @param header A pointer to the first csv_header_t struct in the linked list. + * @param field_name The name of the field to get the value for. + * + * @return A pointer to the csv_cell struct of the field. The value in the csv_cell should be free of quotes + * or spaces. + */ +csv_cell_t *csv_get_field_value_by_name(char *line, csv_header_t *header, char *field_name); + +/** + * @brief This function returns the string value of a field for a given row string. The row string may contain + * quotes and spaces + * @param line The CSV row line to parse. + * @param header A pointer to the first csv_header_t struct in the linked list. 
+ * @param field_index The index of the field to get the value for. + * + * @return A pointer to the csv_cell struct of the field. The value in the csv_cell should be free of quotes + * or spaces. + */ +csv_cell_t *csv_get_field_value_by_index(char *line, csv_header_t *header, int field_index); + +/** + * @brief This function parses a CSV file and returns a csv_table_t struct. + * @param file_name The name of the CSV file to parse. + * @param field_types A string of field types. The field types are 's' for string, 'i' for long integer, 'f' + * for float, and 'd' for double. If this is NULL, all fields are assumed to be strings. + * + * @return A pointer to the csv_table_t struct. + */ +csv_table_t *csv_parse_file(char *file_name, char *field_types); + +/** + * @brief This function parses a PDC_LIST of strings as a CSV file and returns a csv_table_t struct. + * @param list A PDC_LIST of strings to parse. + * @param field_types A string of field types. The field types are 's' for string, 'i' for long integer, 'f' + * for float, and 'd' for double. If this is NULL, all fields are assumed to be strings. + * + * @return A pointer to the csv_table_t struct. + */ +csv_table_t *csv_parse_list(PDC_LIST *list, char *field_types); + +/** + * @brief This function frees the memory allocated for a csv_table_t struct. + * @param table A pointer to the csv_table_t struct to free. + */ +void csv_free_table(csv_table_t *table); + +/** + * @brief This function frees the memory allocated for a csv_header_t struct. + * @param header A pointer to the csv_header_t struct to free. + */ +void csv_free_header(csv_header_t *header); + +/** + * @brief This function frees the memory allocated for a csv_row_t struct. + * @param row A pointer to the csv_row_t struct to free. + */ +void csv_free_row(csv_row_t *row); + +/** + * @brief This function frees the memory allocated for a csv_cell_t struct. + * @param cell A pointer to the csv_cell_t struct to free. + */ +void csv_free_cell(csv_cell_t *cell); + +/** + * @brief This function prints the contents of a csv_table_t struct. + * @param table A pointer to the csv_table_t struct to print. + */ +void csv_print_table(csv_table_t *table); + +/** + * @brief This function prints the contents of a csv_header_t struct. + * @param header A pointer to the csv_header_t struct to print. + */ +void csv_print_header(csv_header_t *header); + +/** + * @brief This function prints the contents of a csv_row_t struct. + * @param row A pointer to the csv_row_t struct to print. + * @param with_key A flag to indicate whether to print the key or not. + */ + +void csv_print_row(csv_row_t *row, int with_key); + +/** + * @brief This function prints the contents of a csv_cell_t struct. + * @param cell A pointer to the csv_cell_t struct to print. + * @param with_key A flag to indicate whether to print the key or not. + */ +void csv_print_cell(csv_cell_t *cell, int with_key); + +/** + * @brief This function returns the number of rows in a csv_table_t struct. + * @param table A pointer to the csv_table_t struct. + * + * @return The number of rows in the table. + */ +int csv_get_num_rows(csv_table_t *table); + +/** + * @brief This function returns the number of fields in a csv_table_t struct. + * @param table A pointer to the csv_table_t struct. + * + * @return The number of fields in the table. 
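 *
 * Example usage (illustrative sketch only; the file name and the "ssffffff" type
 * string are hypothetical, following the 's'/'i'/'f'/'d' codes described above):
 *
 *   csv_table_t *table = csv_parse_file("ImageList.csv", "ssffffff");
 *   if (table != NULL) {
 *       printf("%d rows, %d fields\n", csv_get_num_rows(table), csv_get_num_fields(table));
 *       csv_free_table(table);
 *   }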
+ */ +int csv_get_num_fields(csv_table_t *table); + +#endif // CSVREADER_H \ No newline at end of file diff --git a/tools/llsm/imageListReader.c b/tools/llsm/imageListReader.c deleted file mode 100644 index cf63891e7..000000000 --- a/tools/llsm/imageListReader.c +++ /dev/null @@ -1,71 +0,0 @@ -#include "imageListReader.h" -#include "parallelReadTiff.h" - -void -scan_image_list(char *imageListFileName, on_image_ptr_t image_callback, img_scan_callback_args_t *args) -{ - FILE* file = fopen(imageListFileName, "r"); - - if (file == NULL) { - printf("Error: could not open file %s\n", imageListFileName); - return; - } - - char buffer[1024]; - char* token; - int line_count = 0; - image_file_info_t image_info; - - // Read and discard the first line (the header) - fgets(buffer, sizeof(buffer), file); - line_count++; - - // Read the remaining lines of the file - while (fgets(buffer, sizeof(buffer), file)) { - line_count++; - - // Tokenize the line by comma - token = strtok(buffer, ","); - - // Extract the filepath - image_info.filepath = strdup(token); - - // Extract the filename - token = strtok(NULL, ","); - image_info.filename = strdup(token); - - // Extract the stageX_um_ - token = strtok(NULL, ","); - image_info.stageX_um_ = atof(token); - - // Extract the stageY_um_ - token = strtok(NULL, ","); - image_info.stageY_um_ = atof(token); - - // Extract the stageZ_um_ - token = strtok(NULL, ","); - image_info.stageZ_um_ = atof(token); - - // Extract the objectiveX_um_ - token = strtok(NULL, ","); - image_info.objectiveX_um_ = atof(token); - - // Extract the objectiveY_um_ - token = strtok(NULL, ","); - image_info.objectiveY_um_ = atof(token); - - // Extract the objectiveZ_um_ - token = strtok(NULL, ","); - image_info.objectiveZ_um_ = atof(token); - - // Do something with the extracted image info... 
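/* Illustrative comparison, not from the patch itself. The hand-rolled strtok()
 * parsing being deleted here is superseded by the csvReader API added in this
 * patch; the equivalent per-field lookup becomes roughly (column name taken from
 * the image-list header, the numeric conversion is a hypothetical example):
 *
 *     csv_cell_t *cell = csv_get_field_value_by_name(line, header, "StageX_um_");
 *     double stage_x = (cell != NULL) ? strtod(cell->field_value, NULL) : 0.0;
 */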
- if (image_callback != NULL) { - image_callback(&image_info, args); - } - - } - - fclose(file); - - printf("Read %d lines from file %s after the header line.\n", line_count - 1, imageListFileName); -} \ No newline at end of file diff --git a/tools/llsm/imageListReader.h b/tools/llsm/imageListReader.h deleted file mode 100644 index fd3f646d9..000000000 --- a/tools/llsm/imageListReader.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef IMAGELISTREADER_H -#define IMAGELISTREADER_H - -#include -#include -#include -#include -#include -#include -#include -#include - -typedef struct { - char *filepath; - char *filename; - double stageX_um_; - double stageY_um_; - double stageZ_um_; - double objectiveX_um_; - double objectiveY_um_; - double objectiveZ_um_; -} image_file_info_t; - -typedef struct { - void *input; - void *output; -} img_scan_callback_args_t; - -typedef void (*on_image_ptr_t)(image_file_info_t *, img_scan_callback_args_t *args); - -void scan_image_list(char *imageListFileName, on_image_ptr_t image_callback, img_scan_callback_args_t *args); - -#endif // IMAGELISTREADER_H \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 4d6873ca8..f36c13ea3 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -781,7 +781,7 @@ _TIFF_load(char *fileName, uint8_t isImageJIm, uint64_t x, uint64_t y, uint64_t } void -parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tiff_range_t *strip_range) +parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_range, image_info_t **image_info) { uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0, stripeSize = 0, is_imageJ = 0, imageJ_Z = 0; @@ -793,5 +793,16 @@ parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tif dims[1] = flipXY ? 
x : y; dims[2] = z; - _TIFF_load(fileName, is_imageJ, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, tiff_ptr); + *image_info = (image_info_t *)malloc(sizeof(image_info_t)); + (*image_info)->x = dims[0]; + (*image_info)->y = dims[1]; + (*image_info)->z = dims[2]; + (*image_info)->bits = bits; + (*image_info)->startSlice = startSlice; + (*image_info)->stripeSize = stripeSize; + (*image_info)->is_imageJ = is_imageJ; + (*image_info)->imageJ_Z = imageJ_Z; + (*image_info)->tiff_size = dims[0] * dims[1] * dims[2] * (bits / 8); + + _TIFF_load(fileName, is_imageJ, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, (void **)&((*image_info)->data)); } \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.h b/tools/llsm/parallelReadTiff.h index aec1dfe7b..60f1cb9f4 100644 --- a/tools/llsm/parallelReadTiff.h +++ b/tools/llsm/parallelReadTiff.h @@ -15,6 +15,20 @@ typedef struct { size_t length; } parallel_tiff_range_t; -void parallel_TIFF_load(char *fileName, void **tiff_ptr, uint8_t flipXY, parallel_tiff_range_t *strip_range); +typedef struct { + uint64_t x; + uint64_t y; + uint64_t z; + uint64_t bits; + uint64_t startSlice; + uint64_t stripeSize; + uint64_t is_imageJ; + uint64_t imageJ_Z; + void * tiff_ptr; + size_t tiff_size; +} image_info_t; + + +void parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_range, image_info_t **image_info_t); #endif // PARALLELREADTIFF_H \ No newline at end of file diff --git a/tools/llsm/pdc_list.c b/tools/llsm/pdc_list.c new file mode 100644 index 000000000..e6ea059be --- /dev/null +++ b/tools/llsm/pdc_list.c @@ -0,0 +1,142 @@ +#include "pdc_list.h" + + +PDC_LIST *pdc_list_new(){ + return pdc_list_create(100, 2.0); +} + +PDC_LIST* pdc_list_create(size_t initial_capacity, double expansion_factor) { + // Allocate memory for the list struct. + PDC_LIST* list = (PDC_LIST*) malloc(sizeof(PDC_LIST)); + if (list == NULL) { + return NULL; + } + + // Allocate memory for the array of items. + list->items = (void**) malloc(initial_capacity * sizeof(void*)); + if (list->items == NULL) { + free(list); + return NULL; + } + + // Initialize the list fields. + list->item_count = 0; + list->capacity = initial_capacity; + list->expansion_factor = expansion_factor; + + return list; +} + +void pdc_list_destroy(PDC_LIST* list) { + if (list == NULL) { + return; + } + + // Free all allocated memory for each item. + for (size_t i = 0; i < list->item_count; i++) { + free(list->items[i]); + } + + // Free the array of items and the list struct. + free(list->items); + free(list); +} + +void pdc_list_add(PDC_LIST* list, void* item) { + if (list == NULL || item == NULL) { + return; + } + + // Expand the array of items if necessary. + if (list->item_count >= list->capacity) { + list->capacity *= list->expansion_factor; + list->items = (void**) realloc(list->items, list->capacity * sizeof(void*)); + if (list->items == NULL) { + return; + } + } + + // Add the new item to the end of the array. + list->items[list->item_count++] = item; +} + +void* pdc_list_get(PDC_LIST* list, size_t index) { + if (list == NULL || index >= list->item_count) { + return NULL; + } + + // Return a pointer to the item at the given index. + return list->items[index]; +} + +size_t pdc_list_size(PDC_LIST* list) { + if (list == NULL) { + return 0; + } + + // Return the number of items in the list. 
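/* Illustrative sketch, not from the patch itself. PDC_LIST is used by
 * llsm_importer.c below as a growable array of strdup'd strings; a typical
 * round-trip looks like this (the literal line is invented for illustration):
 *
 *     PDC_LIST *lines = pdc_list_new();
 *     pdc_list_add(lines, strdup("one line of the image list"));
 *
 *     PDC_LIST_ITERATOR *it = pdc_list_iterator_new(lines);
 *     while (pdc_list_iterator_has_next(it)) {
 *         char *line = (char *)pdc_list_iterator_next(it);
 *         printf("%s\n", line);
 *     }
 *     pdc_list_iterator_destroy(it);
 *     pdc_list_destroy(lines);   // frees the list and every stored item
 */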
+ return list->item_count; +} + +void pdc_list_set_expansion_factor(PDC_LIST* list, double expansion_factor) { + if (list == NULL) { + return; + } + + // Set the new expansion factor for the list. + list->expansion_factor = expansion_factor; +} + +double pdc_list_get_expansion_factor(PDC_LIST* list) { + if (list == NULL) { + return 0; + } + + // Return the current expansion factor for the list. + return list->expansion_factor; +} + +PDC_LIST_ITERATOR* pdc_list_iterator_new(PDC_LIST* list) { + if (list == NULL) { + return NULL; + } + + // Allocate memory for the iterator struct. + PDC_LIST_ITERATOR* iterator = (PDC_LIST_ITERATOR*) malloc(sizeof(PDC_LIST_ITERATOR)); + if (iterator == NULL) { + return NULL; + } + + // Initialize the iterator fields. + iterator->list = list; + iterator->index = 0; + + return iterator; +} + +void pdc_list_iterator_destroy(PDC_LIST_ITERATOR* iterator) { + if (iterator == NULL) { + return; + } + + // Free the iterator struct. + free(iterator); +} + +void* pdc_list_iterator_next(PDC_LIST_ITERATOR* iterator) { + if (iterator == NULL) { + return NULL; + } + + // Return the next item in the list. + return pdc_list_get(iterator->list, iterator->index++); +} + +int pdc_list_iterator_has_next(PDC_LIST_ITERATOR* iterator) { + if (iterator == NULL) { + return 0; + } + + // Return true if there are more items in the list. + return iterator->index < pdc_list_size(iterator->list); +} \ No newline at end of file diff --git a/tools/llsm/pdc_list.h b/tools/llsm/pdc_list.h new file mode 100644 index 000000000..9847469e5 --- /dev/null +++ b/tools/llsm/pdc_list.h @@ -0,0 +1,111 @@ +#ifndef PDC_LIST_H +#define PDC_LIST_H + +#include + +/** + * A generic list data structure that stores a variable number of items of any type. + */ +typedef struct { + void **items; // Pointer to the array of items. + size_t item_count; // Number of items in the list. + size_t capacity; // Capacity of the array of items. + double expansion_factor; // Factor by which the capacity is expanded. +} PDC_LIST; + +/** + * A generic iterator for iterating over the items in a PDC_LIST. + */ +typedef struct { + PDC_LIST *list; // The list being iterated over. + size_t index; // The index of the next item to be returned. +} PDC_LIST_ITERATOR; + + +/** + * Creates a new PDC_LIST with default initial capacity 100 and default expansion factor 2.0. + * @return A pointer to the new PDC_LIST. + */ +PDC_LIST *pdc_list_new(); + +/** + * Creates a new PDC_LIST with the given initial capacity and expansion factor. + * @param initial_capacity The initial capacity of the list. + * @param expansion_factor The factor by which the capacity is expanded when the list is full. + * + * @return A pointer to the new PDC_LIST. + */ +PDC_LIST *pdc_list_create(size_t initial_capacity, double expansion_factor); + +/** + * Destroys the given PDC_LIST and frees all allocated memory. + * @param list The PDC_LIST to destroy. + */ +void pdc_list_destroy(PDC_LIST *list); + +/** + * Adds the given item to the end of the given PDC_LIST. + * @param list The PDC_LIST to add the item to. + * @param item The item to add to the PDC_LIST. + * + */ +void pdc_list_add(PDC_LIST *list, void *item); + +/** + * Gets the item at the given index in the given PDC_LIST. + * @param list The PDC_LIST to get the item from. + * @param index The index of the item to get. + * + * @return A pointer to the item at the given index. + */ +void *pdc_list_get(PDC_LIST *list, size_t index); + +/** + * Sets the item at the given index in the given PDC_LIST. 
+ * @param list The PDC_LIST to set the item in. + * + * @return The number of items in the list. + */ +size_t pdc_list_size(PDC_LIST *list); + +/** + * Sets the expansion factor for the given PDC_LIST. + * @param list The PDC_LIST to set the expansion factor for. + * @param expansion_factor The factor by which the capacity is expanded when the list is full. + */ +void pdc_list_set_expansion_factor(PDC_LIST *list, double expansion_factor); + +/** + * Gets the expansion factor for the given PDC_LIST. + * @param list The PDC_LIST to get the expansion factor for. + */ +double pdc_list_get_expansion_factor(PDC_LIST *list); + +/** + * Creates a new PDC_LIST_ITERATOR for the given PDC_LIST. + * @param list The PDC_LIST to create the iterator for. + * @return A pointer to the new PDC_LIST_ITERATOR. + */ +PDC_LIST_ITERATOR *pdc_list_iterator_new(PDC_LIST *list); + +/** + * Destroys the given PDC_LIST_ITERATOR and frees all allocated memory. + * @param iterator The PDC_LIST_ITERATOR to destroy. + */ +void pdc_list_iterator_destroy(PDC_LIST_ITERATOR *iterator); + +/** + * Returns the next item in the PDC_LIST_ITERATOR. + * @param iterator The PDC_LIST_ITERATOR to get the next item from. + * @return A pointer to the next item in the PDC_LIST_ITERATOR. + */ +void *pdc_list_iterator_next(PDC_LIST_ITERATOR *iterator); + +/** + * Returns true if the PDC_LIST_ITERATOR has more items. + * @param iterator The PDC_LIST_ITERATOR to check. + * @return True if the PDC_LIST_ITERATOR has more items. + */ +int pdc_list_iterator_has_next(PDC_LIST_ITERATOR *iterator); + +#endif // PDC_LIST_H \ No newline at end of file diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 322f1504e..ffc87282c 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -4,10 +4,10 @@ #include #include -// #define ENABLE_MPI 1 +#define ENABLE_MPI 1 #ifdef ENABLE_MPI -// #include "mpi.h" +#include "mpi.h" #endif #include "pdc.h" @@ -15,10 +15,19 @@ // #include "pdc_client_connect.h" #include "llsm/parallelReadTiff.h" -#include "llsm/imageListReader.h" +#include "llsm/pdc_list.h" +#include "llsm/csvReader.h" #include -int rank = 0, size = 1; +typedef struct llsm_importer_args_t { + char * directory_path; + csv_header_t *csv_header; +} llsm_importer_args_t; + +int rank = 0, size = 1; + +pdcid_t pdc, cont_prop, cont, obj_prop; +pdcid_t pdc_id_g = 0, cont_prop_g = 0, cont_id_g = 0, obj_prop_g = 0; int parse_console_args(int argc, char *argv[], char **file_name) @@ -41,84 +50,175 @@ parse_console_args(int argc, char *argv[], char **file_name) } void -print_image_file_info(const image_file_info_t *image_info) +import_to_pdc(const image_info_t *image_info, const csv_cell_t *fileName_cell) { - printf("Filepath: %s\n", image_info->filepath); - printf("Filename: %s\n", image_info->filename); - printf("Stage X (um): %.2f\n", image_info->stageX_um_); - printf("Stage Y (um): %.2f\n", image_info->stageY_um_); - printf("Stage Z (um): %.2f\n", image_info->stageZ_um_); - printf("Objective X (um): %.2f\n", image_info->objectiveX_um_); - printf("Objective Y (um): %.2f\n", image_info->objectiveY_um_); - printf("Objective Z (um): %.2f\n", image_info->objectiveZ_um_); -} + struct timespec start, end; + double duration; + + clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation + + pdcid_t cur_obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); + + psize_t ndims = 3; + // FIXME: we should support uint64_t. 
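/* Descriptive summary of the block below, not from the patch itself: it follows the
 * usual PDC write path, i.e. fix the object dimensions and element type on a creation
 * property, create the object inside the container, push the TIFF buffer with a region
 * transfer (PDCregion_transfer_create -> PDCregion_transfer_start ->
 * PDCregion_transfer_wait), and finally attach the CSV columns and the TIFF geometry
 * as key-value tags with PDCobj_put_tag. */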
+ uint64_t dims[3] = {image_info->x, image_info->y, image_info->z}; + // FIXME: we should change the ndims parameter to psize_t type. + PDCprop_set_obj_dims(obj_prop_g, (PDC_int_t)ndims, dims); + PDCprop_set_obj_type(obj_prop_g, PDC_FLOAT); + PDCprop_set_obj_time_step(obj_prop_g, 0); + PDCprop_set_obj_user_id(obj_prop_g, getuid()); + PDCprop_set_obj_app_name(obj_prop_g, "LLSM"); + + // create object + // FIXME: There are many attributes currently in one file name, + // and we should do some research to see what would be a good object name for each image. + pdcid_t cur_obj_g = PDCobj_create(cont_id_g, fileName_cell->field_value, cur_obj_prop_g); + + // write data to object + pdcid_t local_region = PDCregion_create(ndims, 0, image_info->tiff_size); + pdcid_t remote_region = PDCregion_create(ndims, 0, image_info->tiff_size); + pdcid_t transfer_request = PDCregion_transfer_create(image_info->tiff_ptr, PDC_WRITE, cur_obj_g, local_region, remote_region); + PDCregion_transfer_start(transfer_request); + PDCregion_transfer_wait(transfer_request); + + // add metadata tags based on the csv row + csv_cell_t *cell = fileName_cell; + while (cell != NULL) { + char *field_name = cell->header->field_name; + char data_type = cell->header->field_type; + char *field_value = cell->field_value; + switch(data_type) { + case 'i': + int value = atoi(field_value); + PDCobj_put_tag(cur_obj_g, field_name, &value, sizeof(int)); + break; + case 'f': + double value = atof(field_value); + PDCobj_put_tag(cur_obj_g, field_name, &value, sizeof(double)); + break; + case 's': + PDCobj_put_tag(cur_obj_g, field_name, field_value, sizeof(char) * strlen(field_value)); + break; + default: + break; + } + cell = cell->next; + } -// void -// import_to_pdc(const image_file_info_t *image_info) { + // add extra metadata tags based on the image_info struct + PDCobj_put_tag(cur_obj_g, "x", &image_info->x, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "y", &image_info->y, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "z", &image_info->z, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "bits", &image_info->bits, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "startSlice", &image_info->startSlice, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "stripeSize", &image_info->stripeSize, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "is_imageJ", &image_info->is_imageJ, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "imageJ_Z", &image_info->imageJ_Z, sizeof(uint64_t)); + // close object + PDCobj_close(cur_obj_g); -// PDCprop_set_obj_dims(obj_prop_g, cur_ndims, (uint64_t *)cur_dims); -// PDCprop_set_obj_type(obj_prop_g, cur_type); -// PDCprop_set_obj_time_step(obj_prop_g, 0); -// PDCprop_set_obj_user_id(obj_prop_g, getuid()); -// PDCprop_set_obj_app_name(obj_prop_g, app_name); -// } + // get timing + clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation + duration = (end.tv_sec - start.tv_sec) * 1e9 + + (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds + + printf("[Rank %4d]create object %s Done! Time taken: %.4f seconds\n", rank, fileName_cell->field_value, duration / 1e9); + +} void -on_image(image_file_info_t *image_info, img_scan_callback_args_t *args) +on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) { - print_image_file_info(image_info); + csv_print_row(row, 1); - char *dirname = (char *)args->input; + char *dirname = strdup(llsm_args->directory_path); char filepath[256]; // calling tiff loading process. 
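/* Illustrative sketch, not from the patch itself. With the hunk below,
 * parallel_TIFF_load() reports its result through an image_info_t out-parameter
 * instead of a bare pixel buffer, so callers get dimensions and bit depth alongside
 * the data (the file path here is hypothetical):
 *
 *     image_info_t *info = NULL;
 *     parallel_TIFF_load("/data/llsm/scan_0001.tif", 1, NULL, &info);
 *     if (info != NULL && info->tiff_ptr != NULL) {
 *         printf("%llu x %llu x %llu, %llu bits, %zu bytes\n",
 *                (unsigned long long)info->x, (unsigned long long)info->y,
 *                (unsigned long long)info->z, (unsigned long long)info->bits,
 *                info->tiff_size);
 *         free(info->tiff_ptr);
 *         free(info);
 *     }
 */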
- void * tiff = NULL; - int i = 0; + image_info_t * image_info = NULL; + int i = 0; struct timespec start, end; double duration; + // Filepath,Filename,StageX_um_,StageY_um_,StageZ_um_,ObjectiveX_um_,ObjectiveY_um_,ObjectiveZ_um_ + + // get the file name from the csv row + csv_cell_t *fileName_cell = csv_get_field_value_by_name(row, llsm_args->csv_header, "Filename"); // check if the path ends with a forward slash if (dirname[strlen(dirname) - 1] != '/') { strcat(dirname, "/"); // add a forward slash to the end of the path } - strcpy(filepath, dirname); // copy the directory path to the file path - strcat(filepath, image_info->filename); // concatenate the file name to the file path + strcpy(filepath, dirname); // copy the directory path to the file path + strcat(filepath, fileName_cell->field_value); // concatenate the file name to the file path clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation - parallel_TIFF_load(filepath, &tiff, 1, NULL); + parallel_TIFF_load(filepath, 1, NULL, &image_info); clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation duration = (end.tv_sec - start.tv_sec) * 1e9 + (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds - printf("Read %s Done! Time taken: %.4f seconds\n", filepath, duration / 1e9); + printf("[Rand %4d]Read %s Done! Time taken: %.4f seconds\n", rank, filepath, duration / 1e9); - if (!tiff) + if (image_info == NULL || image_info->tiff_ptr == NULL) { return; + } printf("first few bytes "); for (i = 0; i < 10; i++) { - printf("%d ", ((uint8_t *)tiff)[i]); + printf("%d ", ((uint8_t *)image_info->tiff_ptr)[i]); } printf("\n"); - free(tiff); + + // import the image to PDC + import_to_pdc(image_info, fileName_cell); + + // free the image info + free(image_info->tiff_ptr); + free(image_info); + free(dirname); + + // free the csv row + csv_free_row(row); +} + +void +read_txt(char *txtFileName, PDC_LIST *list) +{ + FILE *file = fopen(txtFileName, "r"); + + if (file == NULL) { + printf("Error: could not open file %s\n", txtFileName); + return; + } + char buffer[1024]; + // Read the lines of the file + while (fgets(buffer, sizeof(buffer), file)) { + pdc_list_add(list, strdup(buffer)); + } } int main(int argc, char *argv[]) { -// #ifdef ENABLE_MPI -// MPI_Init(&argc, &argv); -// MPI_Comm_rank(MPI_COMM_WORLD, &rank); -// MPI_Comm_size(MPI_COMM_WORLD, &size); -// #endif +#ifdef ENABLE_MPI + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); +#endif - char * file_name = NULL; - img_scan_callback_args_t callback_args; + char * file_name = NULL; + PDC_LIST * list = pdc_list_new(); + char * csv_line = NULL; + int num_row_read = 0; + csv_header_t * csv_header = NULL; + csv_row_t * csv_row = NULL; + llsm_importer_args_t *llsm_args = NULL; + char * csv_field_types = {'s', 's', 'f', 'f', 'f', 'f', 'f', 'f'}; // parse console argument int parse_code = parse_console_args(argc, argv, &file_name); if (parse_code) { @@ -130,13 +230,73 @@ main(int argc, char *argv[]) printf("Filename: %s\n", file_name ? file_name : "(none)"); printf("Directory: %s\n", directory_path ? 
directory_path : "(none)"); - callback_args.input = (void *)directory_path; - scan_image_list(file_name, &on_image, &callback_args); + // create a pdc + pdc_id_g = PDCinit("pdc"); + // create a container property + cont_prop_g = PDCprop_create(PDC_CONT_CREATE, pdc); + if (cont_prop <= 0) + printf("Fail to create container property @ line %d!\n", __LINE__); -// #ifdef ENABLE_MPI -// MPI_Finalize(); -// #endif + // create a container + cont = PDCcont_create("c1", cont_prop); + if (cont <= 0) + printf("Fail to create container @ line %d!\n", __LINE__); + + // Rank 0 reads the filename list and distribute data to other ranks + if (rank == 0) { + read_txt(file_name, list); +#ifdef ENABLE_MPI + // broadcast the number of lines + int num_lines = pdc_list_size(list); + MPI_Bcast(&num_lines, 1, MPI_INT, 0, MPI_COMM_WORLD); + // broadcast the file names + PDC_LIST_ITERATOR *iter = pdc_list_iterator_new(list); + while (pdc_list_iterator_has_next(iter)) { + char *csv_line = (char *)pdc_list_iterator_next(iter); + MPI_Bcast(csv_line, 256, MPI_CHAR, 0, MPI_COMM_WORLD); + } +#endif + } + else { +#ifdef ENABLE_MPI + // other ranks receive the number of files + int num_lines; + MPI_Bcast(&num_lines, 1, MPI_INT, 0, MPI_COMM_WORLD); + // receive the file names + int i; + for (i = 0; i < num_lines; i++) { + csv_line = (char *)malloc(256 * sizeof(char)); + MPI_Bcast(csv_line, 256, MPI_CHAR, 0, MPI_COMM_WORLD); + pdc_list_add(list, csv_line); + } +#endif + } + // parse the csv + csv_table_t *csv_table = csv_parse_list(list, csv_field_types); + if (csv_table == NULL) { + printf("Fail to parse csv file @ line %d!\n", __LINE__); + return -1; + } + llsm_args = (llsm_importer_args_t *)malloc(sizeof(llsm_importer_args_t)); + llsm_args->directory_path = directory_path; + llsm_args->csv_header = csv_table->first_header; + + // go through the csv table + csv_row_t *current_row = csv_table->first_row; + while (current_row != NULL) { + if (num_row_read % size == rank) { + on_csv_row(current_row, llsm_args); + } + num_row_read++; + current_row = current_row->next; + } + + csv_free_table(csv_table); + +#ifdef ENABLE_MPI + MPI_Finalize(); +#endif return 0; -} \ No newline at end of file +} From 5615b8f25ab7a60493e98a048144603eae4a0ee8 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 17:18:30 -0500 Subject: [PATCH 124/806] fix vairable name --- tools/llsm/parallelReadTiff.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index f36c13ea3..727ba02c1 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -804,5 +804,5 @@ parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_ (*image_info)->imageJ_Z = imageJ_Z; (*image_info)->tiff_size = dims[0] * dims[1] * dims[2] * (bits / 8); - _TIFF_load(fileName, is_imageJ, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, (void **)&((*image_info)->data)); + _TIFF_load(fileName, is_imageJ, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, (void **)&((*image_info)->tiff_ptr)); } \ No newline at end of file From 015906ee2e5e8ccdfb71c77b33d3f671441db403 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 17:24:06 -0500 Subject: [PATCH 125/806] fix cmake --- tools/CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index fed5d93d3..b0ae54dd2 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -108,9 +108,12 @@ option(USE_LIB_TIFF 
"Enable LibTiff." ON) if(USE_LIB_TIFF) find_package(TIFF REQUIRED) if(TIFF_FOUND) + set(LLSM_LIB_SOURCE + llsm/*.c + ) # Add the LibTIFF include directory to the include path include_directories(${TIFF_INCLUDE_DIRS}) - add_library(llsm_tiff llsm/parallelReadTiff.c llsm/imageListReader.c) + add_library(llsm_tiff ${LLSM_LIB_SOURCE}) target_compile_options(llsm_tiff PRIVATE ${OpenMP_C_FLAGS}) target_link_libraries(llsm_tiff PUBLIC ${OpenMP_C_LIBRARIES}) target_link_libraries(llsm_tiff PUBLIC ${TIFF_LIBRARIES}) From 090cff8469901fbdab47c68296ee2e13b1ed4c26 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 17:25:27 -0500 Subject: [PATCH 126/806] fix cmake --- tools/CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index b0ae54dd2..1c6940ca9 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -109,7 +109,9 @@ if(USE_LIB_TIFF) find_package(TIFF REQUIRED) if(TIFF_FOUND) set(LLSM_LIB_SOURCE - llsm/*.c + llsm/parallelReadTiff.c + llsm/csvReader.c + llsm/pdc_list.c ) # Add the LibTIFF include directory to the include path include_directories(${TIFF_INCLUDE_DIRS}) From 4e8c2113cbc5a9211bef57a18114ab06fa20ddd1 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 17:39:03 -0500 Subject: [PATCH 127/806] fix cmake --- tools/llsm_importer.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index ffc87282c..0d09e959d 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -69,14 +69,21 @@ import_to_pdc(const image_info_t *image_info, const csv_cell_t *fileName_cell) PDCprop_set_obj_user_id(obj_prop_g, getuid()); PDCprop_set_obj_app_name(obj_prop_g, "LLSM"); + uint64_t *offsets = (uint64_t *)malloc(sizeof(uint64_t) * ndims); + uint64_t *num_bytes = (uint64_t *)malloc(sizeof(uint64_t) * ndims); + for (int i = 0; i < ndims; i++) { + offsets[i] = 0; + num_bytes[i] = dims[i] * image_info->bits/8; + } + // create object // FIXME: There are many attributes currently in one file name, // and we should do some research to see what would be a good object name for each image. pdcid_t cur_obj_g = PDCobj_create(cont_id_g, fileName_cell->field_value, cur_obj_prop_g); // write data to object - pdcid_t local_region = PDCregion_create(ndims, 0, image_info->tiff_size); - pdcid_t remote_region = PDCregion_create(ndims, 0, image_info->tiff_size); + pdcid_t local_region = PDCregion_create(ndims, offsets, num_bytes); + pdcid_t remote_region = PDCregion_create(ndims, offsets, num_bytes); pdcid_t transfer_request = PDCregion_transfer_create(image_info->tiff_ptr, PDC_WRITE, cur_obj_g, local_region, remote_region); PDCregion_transfer_start(transfer_request); PDCregion_transfer_wait(transfer_request); @@ -125,6 +132,14 @@ import_to_pdc(const image_info_t *image_info, const csv_cell_t *fileName_cell) printf("[Rank %4d]create object %s Done! 
Time taken: %.4f seconds\n", rank, fileName_cell->field_value, duration / 1e9); + // free memory + free(offsets); + free(num_bytes); + PDCregion_close(local_region); + PDCregion_close(remote_region); + PDCregion_transfer_close(transfer_request); + PDCprop_close(cur_obj_prop_g); + } void @@ -180,9 +195,6 @@ on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) free(image_info->tiff_ptr); free(image_info); free(dirname); - - // free the csv row - csv_free_row(row); } void From 247e8b215ce83652066f62c42e7344079964840a Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 17:45:30 -0500 Subject: [PATCH 128/806] fix cmake --- tools/llsm_importer.c | 75 ++++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 37 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 0d09e959d..f7edabb0c 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -50,7 +50,7 @@ parse_console_args(int argc, char *argv[], char **file_name) } void -import_to_pdc(const image_info_t *image_info, const csv_cell_t *fileName_cell) +import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) { struct timespec start, end; double duration; @@ -59,8 +59,8 @@ import_to_pdc(const image_info_t *image_info, const csv_cell_t *fileName_cell) pdcid_t cur_obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); - psize_t ndims = 3; - // FIXME: we should support uint64_t. + psize_t ndims = 3; + // FIXME: we should support uint64_t. uint64_t dims[3] = {image_info->x, image_info->y, image_info->z}; // FIXME: we should change the ndims parameter to psize_t type. PDCprop_set_obj_dims(obj_prop_g, (PDC_int_t)ndims, dims); @@ -69,39 +69,40 @@ import_to_pdc(const image_info_t *image_info, const csv_cell_t *fileName_cell) PDCprop_set_obj_user_id(obj_prop_g, getuid()); PDCprop_set_obj_app_name(obj_prop_g, "LLSM"); - uint64_t *offsets = (uint64_t *)malloc(sizeof(uint64_t) * ndims); - uint64_t *num_bytes = (uint64_t *)malloc(sizeof(uint64_t) * ndims); + uint64_t *offsets = (uint64_t *)malloc(sizeof(uint64_t) * ndims); + uint64_t *num_bytes = (uint64_t *)malloc(sizeof(uint64_t) * ndims); for (int i = 0; i < ndims; i++) { - offsets[i] = 0; - num_bytes[i] = dims[i] * image_info->bits/8; + offsets[i] = 0; + num_bytes[i] = dims[i] * image_info->bits / 8; } // create object - // FIXME: There are many attributes currently in one file name, + // FIXME: There are many attributes currently in one file name, // and we should do some research to see what would be a good object name for each image. 
pdcid_t cur_obj_g = PDCobj_create(cont_id_g, fileName_cell->field_value, cur_obj_prop_g); // write data to object - pdcid_t local_region = PDCregion_create(ndims, offsets, num_bytes); - pdcid_t remote_region = PDCregion_create(ndims, offsets, num_bytes); - pdcid_t transfer_request = PDCregion_transfer_create(image_info->tiff_ptr, PDC_WRITE, cur_obj_g, local_region, remote_region); + pdcid_t local_region = PDCregion_create(ndims, offsets, num_bytes); + pdcid_t remote_region = PDCregion_create(ndims, offsets, num_bytes); + pdcid_t transfer_request = + PDCregion_transfer_create(image_info->tiff_ptr, PDC_WRITE, cur_obj_g, local_region, remote_region); PDCregion_transfer_start(transfer_request); PDCregion_transfer_wait(transfer_request); // add metadata tags based on the csv row csv_cell_t *cell = fileName_cell; while (cell != NULL) { - char *field_name = cell->header->field_name; - char data_type = cell->header->field_type; + char *field_name = cell->header->field_name; + char data_type = cell->header->field_type; char *field_value = cell->field_value; - switch(data_type) { + switch (data_type) { case 'i': - int value = atoi(field_value); - PDCobj_put_tag(cur_obj_g, field_name, &value, sizeof(int)); + int ivalue = atoi(field_value); + PDCobj_put_tag(cur_obj_g, field_name, &ivalue, sizeof(int)); break; case 'f': - double value = atof(field_value); - PDCobj_put_tag(cur_obj_g, field_name, &value, sizeof(double)); + double fvalue = atof(field_value); + PDCobj_put_tag(cur_obj_g, field_name, &fvalue, sizeof(double)); break; case 's': PDCobj_put_tag(cur_obj_g, field_name, field_value, sizeof(char) * strlen(field_value)); @@ -113,14 +114,14 @@ import_to_pdc(const image_info_t *image_info, const csv_cell_t *fileName_cell) } // add extra metadata tags based on the image_info struct - PDCobj_put_tag(cur_obj_g, "x", &image_info->x, sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "y", &image_info->y, sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "z", &image_info->z, sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "bits", &image_info->bits, sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "startSlice", &image_info->startSlice, sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "stripeSize", &image_info->stripeSize, sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "is_imageJ", &image_info->is_imageJ, sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "imageJ_Z", &image_info->imageJ_Z, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "x", &(image_info->x), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "y", &(image_info->y), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "z", &(image_info->z), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "bits", &(image_info->bits), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "startSlice", &(image_info->startSlice), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "stripeSize", &(image_info->stripeSize), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "is_imageJ", &(image_info->is_imageJ), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "imageJ_Z", &(image_info->imageJ_Z), sizeof(uint64_t)); // close object PDCobj_close(cur_obj_g); @@ -130,7 +131,8 @@ import_to_pdc(const image_info_t *image_info, const csv_cell_t *fileName_cell) duration = (end.tv_sec - start.tv_sec) * 1e9 + (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds - printf("[Rank %4d]create object %s Done! Time taken: %.4f seconds\n", rank, fileName_cell->field_value, duration / 1e9); + printf("[Rank %4d]create object %s Done! 
Time taken: %.4f seconds\n", rank, fileName_cell->field_value, + duration / 1e9); // free memory free(offsets); @@ -139,7 +141,6 @@ import_to_pdc(const image_info_t *image_info, const csv_cell_t *fileName_cell) PDCregion_close(remote_region); PDCregion_transfer_close(transfer_request); PDCprop_close(cur_obj_prop_g); - } void @@ -164,7 +165,7 @@ on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) strcat(dirname, "/"); // add a forward slash to the end of the path } - strcpy(filepath, dirname); // copy the directory path to the file path + strcpy(filepath, dirname); // copy the directory path to the file path strcat(filepath, fileName_cell->field_value); // concatenate the file name to the file path clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation @@ -223,14 +224,14 @@ main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &size); #endif - char * file_name = NULL; - PDC_LIST * list = pdc_list_new(); - char * csv_line = NULL; - int num_row_read = 0; - csv_header_t * csv_header = NULL; - csv_row_t * csv_row = NULL; - llsm_importer_args_t *llsm_args = NULL; - char * csv_field_types = {'s', 's', 'f', 'f', 'f', 'f', 'f', 'f'}; + char * file_name = NULL; + PDC_LIST * list = pdc_list_new(); + char * csv_line = NULL; + int num_row_read = 0; + csv_header_t * csv_header = NULL; + csv_row_t * csv_row = NULL; + llsm_importer_args_t *llsm_args = NULL; + char csv_field_types[] = {'s', 's', 'f', 'f', 'f', 'f', 'f', 'f'}; // parse console argument int parse_code = parse_console_args(argc, argv, &file_name); if (parse_code) { From faed7ca2d6c3d346fe5fcc4673da78002c1a4855 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 17:58:14 -0500 Subject: [PATCH 129/806] fix cmake --- tools/llsm/csvReader.c | 32 +++++++++++--------------------- tools/llsm/csvReader.h | 12 ++++++------ 2 files changed, 17 insertions(+), 27 deletions(-) diff --git a/tools/llsm/csvReader.c b/tools/llsm/csvReader.c index baeaf6684..19b3dee8f 100644 --- a/tools/llsm/csvReader.c +++ b/tools/llsm/csvReader.c @@ -57,7 +57,7 @@ csv_parse_header(char *line, char *field_types) return first_header; } -csv_cell_t * +csv_row_t * csv_parse_row(char *line, csv_header_t *header) { csv_cell_t * first_cell = NULL; @@ -105,14 +105,16 @@ csv_parse_row(char *line, csv_header_t *header) current_header = current_header->next; } } - - return first_cell; + csv_row_t *row = (csv_row_t *)malloc(sizeof(csv_row_t)); + row->first_cell = first_cell; + row->next = NULL; + return row; } csv_cell_t * -csv_get_field_value_by_name(char *line, csv_header_t *header, char *field_name) +csv_get_field_value_by_name(csv_row_t *row, csv_header_t *header, char *field_name) { - csv_cell_t *cell = csv_parse_row(line, header); + csv_cell_t *cell = row->first_cell; while (cell != NULL) { if (strcmp(cell->header->field_name, field_name) == 0) { return cell; @@ -123,9 +125,9 @@ csv_get_field_value_by_name(char *line, csv_header_t *header, char *field_name) } csv_cell_t * -csv_get_field_value_by_index(char *line, csv_header_t *header, int field_index) +csv_get_field_value_by_index(csv_row_t *row, csv_header_t *header, int field_index) { - csv_cell_t *cell = csv_parse_row(line, header); + csv_cell_t *cell = row->first_cell; while (cell != NULL) { if (cell->header->field_index == field_index) { return cell; @@ -162,17 +164,11 @@ csv_parse_file(char *file_name, char *field_types) csv_row_t *last_row = NULL; while ((read = getline(&line, &len, fp)) != -1) { // Allocate memory for the row struct - csv_row_t *row = (csv_row_t 
*)malloc(sizeof(csv_row_t)); + csv_row_t *row = csv_parse_row(line, table->first_header); if (row == NULL) { return NULL; } - // Parse the row - row->first_cell = csv_parse_row(line, table->first_header); - - // Set the next pointer to NULL - row->next = NULL; - // Add the row to the linked list if (first_row == NULL) { first_row = row; @@ -205,17 +201,11 @@ csv_table_t *csv_parse_list(PDC_LIST *list, char *field_types){ table->first_header = csv_parse_header(line, field_types); } else { // Allocate memory for the row struct - csv_row_t *row = (csv_row_t *)malloc(sizeof(csv_row_t)); + csv_row_t *row = csv_parse_row(line, table->first_header); if (row == NULL) { return NULL; } - // Parse the row - row->first_cell = csv_parse_row(line, table->first_header); - - // Set the next pointer to NULL - row->next = NULL; - // Add the row to the linked list if (first_row == NULL) { first_row = row; diff --git a/tools/llsm/csvReader.h b/tools/llsm/csvReader.h index a3743b052..e7599ecb2 100644 --- a/tools/llsm/csvReader.h +++ b/tools/llsm/csvReader.h @@ -47,34 +47,34 @@ csv_header_t *csv_parse_header(char *line, char *field_types); * @param line The CSV row line to parse. * @param header A pointer to the first csv_header_t struct in the linked list. * - * @return A pointer to the first csv_cell_t struct in the linked list. The value in the csv_cell should be + * @return A pointer to the csv_row_t struct. The value in the csv_cell should be * free of quotes or spaces. */ -csv_cell_t *csv_parse_row(char *line, csv_header_t *header); +csv_row_t *csv_parse_row(char *line, csv_header_t *header); /** * @brief This function returns the string value of a field for a given row string. The row string may contain * quotes and spaces - * @param line The CSV row line to parse. + * @param row The CSV row to look for. * @param header A pointer to the first csv_header_t struct in the linked list. * @param field_name The name of the field to get the value for. * * @return A pointer to the csv_cell struct of the field. The value in the csv_cell should be free of quotes * or spaces. */ -csv_cell_t *csv_get_field_value_by_name(char *line, csv_header_t *header, char *field_name); +csv_cell_t *csv_get_field_value_by_name(csv_row_t *row, csv_header_t *header, char *field_name); /** * @brief This function returns the string value of a field for a given row string. The row string may contain * quotes and spaces - * @param line The CSV row line to parse. + * @param row The CSV row to look for. * @param header A pointer to the first csv_header_t struct in the linked list. * @param field_index The index of the field to get the value for. * * @return A pointer to the csv_cell struct of the field. The value in the csv_cell should be free of quotes * or spaces. */ -csv_cell_t *csv_get_field_value_by_index(char *line, csv_header_t *header, int field_index); +csv_cell_t *csv_get_field_value_by_index(csv_row_t *row, csv_header_t *header, int field_index); /** * @brief This function parses a CSV file and returns a csv_table_t struct. 
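The csvReader refactor above has csv_parse_row() return a csv_row_t, so csv_get_field_value_by_name() and csv_get_field_value_by_index() now take the already-parsed row instead of re-tokenizing the raw line. A minimal usage sketch of that API follows, assuming the declarations in tools/llsm/csvReader.h as patched above; the header line, data line, field-type string "ssf", and the "Filename" column are invented for illustration and are not the importer's real CSV layout.

#include <stdio.h>
#include "csvReader.h"

int
main(void)
{
    /* Newline-terminated lines, as getline()/fgets() would hand them to the parser. */
    char header_line[] = "Filepath,Filename,StageX\n";
    char data_line[]   = "\"run1\",\"scan_0001.tif\",12.5\n";

    /* Field types follow csvReader.h: 's' = string, 'f' = floating point. */
    csv_header_t *header = csv_parse_header(header_line, "ssf");
    csv_row_t *   row    = csv_parse_row(data_line, header);

    /* Field lookup works on the parsed row, not on the raw line. */
    csv_cell_t *cell = csv_get_field_value_by_name(row, header, "Filename");
    if (cell != NULL)
        printf("Filename = %s\n", cell->field_value);

    /* Print every cell with its column name; cleanup is omitted for brevity. */
    csv_print_row(row, 1);

    return 0;
}

Keeping the parsed csv_row_t around is what lets the importer walk csv_table->first_row once and look up individual cells per row without re-parsing each line.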
From 208baed0ded22d0ef5224ba6cf61a768a30c9a4d Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 17:59:19 -0500 Subject: [PATCH 130/806] fix cmake --- tools/llsm_importer.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index f7edabb0c..2c841a362 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -4,7 +4,9 @@ #include #include -#define ENABLE_MPI 1 +#ifndef ENABLE_MPI +#define ENABLE_MPI +#endif #ifdef ENABLE_MPI #include "mpi.h" From 4bbf585ba1936066049f6a023a6d1707b2b6b980 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:10:38 -0500 Subject: [PATCH 131/806] add scripts --- scripts/llsm_importer/clean.sh | 8 +++ scripts/llsm_importer/gen_script.sh | 21 ++++++++ scripts/llsm_importer/submit.sh | 76 +++++++++++++++++++++++++++++ scripts/llsm_importer/template.sh | 65 ++++++++++++++++++++++++ 4 files changed, 170 insertions(+) create mode 100644 scripts/llsm_importer/clean.sh create mode 100644 scripts/llsm_importer/gen_script.sh create mode 100644 scripts/llsm_importer/submit.sh create mode 100644 scripts/llsm_importer/template.sh diff --git a/scripts/llsm_importer/clean.sh b/scripts/llsm_importer/clean.sh new file mode 100644 index 000000000..d6476962e --- /dev/null +++ b/scripts/llsm_importer/clean.sh @@ -0,0 +1,8 @@ +#!/bin/bash +MAX_NODE=512 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + + rm -rf $i/* + +done \ No newline at end of file diff --git a/scripts/llsm_importer/gen_script.sh b/scripts/llsm_importer/gen_script.sh new file mode 100644 index 000000000..9d310f2bb --- /dev/null +++ b/scripts/llsm_importer/gen_script.sh @@ -0,0 +1,21 @@ +#!/bin/bash +N_THREAD=NO +MAX_NODE=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +PROG_BASENAME=llsm_importer + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + mkdir -p $i + JOBNAME=${PROG_BASENAME}_${i} + TARGET=./$i/$JOBNAME.sbatch + cp template.sh $TARGET + sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET + sed -i "s/NODENUM/${i}/g" $TARGET + if [[ "$i" -gt "16" ]]; then + sed -i "s/REG//g" $TARGET + else + sed -i "s/DBG//g" $TARGET + fi +done diff --git a/scripts/llsm_importer/submit.sh b/scripts/llsm_importer/submit.sh new file mode 100644 index 000000000..b9019d149 --- /dev/null +++ b/scripts/llsm_importer/submit.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +# MIN_PROC=4 +# MAX_PROC=128 +MIN_PROC=1 +MAX_PROC=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +PROG_BASENAME=llsm_importer + +curdir=$(pwd) + +first_submit=1 + +for (( i = 1; i <= $MAX_PROC; i*=2 )); do + mkdir -p $i + JOBNAME=${PROG_BASENAME}_${i} + TARGET=./$i/JOBNAME.sh + + njob=`squeue -u $USER | grep ${PROG_BASENAME} | wc -l` + echo $njob + while [ $njob -ge 4 ] + do + sleeptime=$[ ( $RANDOM % 1000 ) ] + sleep $sleeptime + njob=`squeue -u $USER | grep ${PROG_BASENAME} | wc -l` + echo $njob + done + + if [[ $first_submit == 1 ]]; then + # Submit first job w/o dependency + echo "Submitting $TARGET" + job=`sbatch $TARGET` + first_submit=0 + else + echo "Submitting $TARGET after ${job: -8}" + job=`sbatch -d afterany:${job: -8} $TARGET` + fi + + sleeptime=$[ ( $RANDOM % 5 ) ] + sleep $sleeptime +done + + +# for (( j = $MIN_PROC; j <= $MAX_PROC ; j*=2 )); do + +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# while [ $njob -ge 4 ] +# do +# sleeptime=$[ ( $RANDOM % 1000 ) ] +# sleep $sleeptime +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# done + + +# cd $curdir/$j +# for filename in ./*.sh ; do + +# if [[ $first_submit == 1 ]]; then +# # Submit first job w/o dependency +# echo 
"Submitting $filename" +# job=`sbatch $filename` +# first_submit=0 +# else +# echo "Submitting $filename after ${job: -8}" +# job=`sbatch -d afterany:${job: -8} $filename` +# fi + +# sleeptime=$[ ( $RANDOM % 5 ) ] +# sleep $sleeptime + +# done +# done diff --git a/scripts/llsm_importer/template.sh b/scripts/llsm_importer/template.sh new file mode 100644 index 000000000..c7074ec13 --- /dev/null +++ b/scripts/llsm_importer/template.sh @@ -0,0 +1,65 @@ +#!/bin/bash -l + +#REGSBATCH -q regular +#DBGSBATCH -q debug +#SBATCH -N NODENUM +#REGSBATCH -t 1:00:00 +#DBGSBATCH -t 0:30:00 +#SBATCH -C cpu +#SBATCH -J JOBNAME +#SBATCH -A m2621 +#SBATCH -o o%j.JOBNAME.out +#SBATCH -e o%j.JOBNAME.out + +# export PDC_DEBUG=0 + + +export PDC_TMPDIR=$SCRATCH/data/pdc/conf + +rm -rf $PDC_TMPDIR/* + +REPEAT=1 + +N_NODE=NODENUM +NCLIENT=31 +# NCLIENT=126 + +export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE +mkdir -p $PDC_TMPDIR + +let TOTALPROC=$NCLIENT*$N_NODE + +EXECPATH=/global/cfs/cdirs/m2621/wzhang5/perlmutter/install/pdc/share/test/bin +TOOLPATH=/global/cfs/cdirs/m2621/wzhang5/perlmutter/source/pdc_llsm/tools/build +SERVER=$EXECPATH/pdc_server.exe +CLIENT=$TOOLPATH/llsm_importer +CLOSE=$EXECPATH/close_server + +chmod +x $EXECPATH/* +chmod +x $TOOLPATH/llsm_importer + +IMGLIST_PATH=/global/cfs/cdirs/m2621/wzhang5/data/20220115_Korra_LLCPK_LFOV_0p1PSAmpKan/run1/ImageList_from_encoder.csv + +date + + +echo "" +echo "=============" +echo "$i Init server" +echo "=============" +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*1)) -c 2 --cpu_bind=cores $SERVER & +sleep 5 + + +echo "============================================" +echo "KVTAGS with $N_NODE nodes" +echo "============================================" +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $TOTALPROC -c 2 --cpu_bind=cores $CLIENT -f $IMGLIST_PATH + +echo "" +echo "=================" +echo "$i Closing server" +echo "=================" +stdbuf -i0 -o0 -e0 srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores $CLOSE + +date From 227e85a00fdb07e21eb819f88074dfa581253624 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:11:44 -0500 Subject: [PATCH 132/806] add scripts --- scripts/llsm_importer/clean.sh | 0 scripts/llsm_importer/gen_script.sh | 0 scripts/llsm_importer/submit.sh | 0 scripts/llsm_importer/template.sh | 0 4 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 scripts/llsm_importer/clean.sh mode change 100644 => 100755 scripts/llsm_importer/gen_script.sh mode change 100644 => 100755 scripts/llsm_importer/submit.sh mode change 100644 => 100755 scripts/llsm_importer/template.sh diff --git a/scripts/llsm_importer/clean.sh b/scripts/llsm_importer/clean.sh old mode 100644 new mode 100755 diff --git a/scripts/llsm_importer/gen_script.sh b/scripts/llsm_importer/gen_script.sh old mode 100644 new mode 100755 diff --git a/scripts/llsm_importer/submit.sh b/scripts/llsm_importer/submit.sh old mode 100644 new mode 100755 diff --git a/scripts/llsm_importer/template.sh b/scripts/llsm_importer/template.sh old mode 100644 new mode 100755 From 421b00e69f2872a67c6d453d0f27857762661d63 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:13:35 -0500 Subject: [PATCH 133/806] add scripts --- scripts/llsm_importer/template.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/llsm_importer/template.sh b/scripts/llsm_importer/template.sh index c7074ec13..d9305c83e 100755 --- a/scripts/llsm_importer/template.sh +++ b/scripts/llsm_importer/template.sh @@ -21,7 +21,7 @@ rm -rf $PDC_TMPDIR/* REPEAT=1 
N_NODE=NODENUM -NCLIENT=31 +NCLIENT=1 # NCLIENT=126 export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE From 8d0f24d29fdb2f46cfeeabe8d3b0431b7d272646 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:18:07 -0500 Subject: [PATCH 134/806] debugging for nonMPI program --- tools/llsm_importer.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 2c841a362..d052688af 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -9,7 +9,8 @@ #endif #ifdef ENABLE_MPI -#include "mpi.h" +// #include "mpi.h" +#undef ENABLE_MPI #endif #include "pdc.h" @@ -192,7 +193,7 @@ on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) printf("\n"); // import the image to PDC - import_to_pdc(image_info, fileName_cell); + // import_to_pdc(image_info, fileName_cell); // free the image info free(image_info->tiff_ptr); From 58de0a040ff756f02fc4cc7d6c600949fa094b81 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:20:40 -0500 Subject: [PATCH 135/806] debugging for nonMPI program --- tools/llsm_importer.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index d052688af..17a40a2dd 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -246,6 +246,7 @@ main(int argc, char *argv[]) printf("Filename: %s\n", file_name ? file_name : "(none)"); printf("Directory: %s\n", directory_path ? directory_path : "(none)"); +#ifdef ENABLE_MPI // create a pdc pdc_id_g = PDCinit("pdc"); @@ -258,6 +259,7 @@ main(int argc, char *argv[]) cont = PDCcont_create("c1", cont_prop); if (cont <= 0) printf("Fail to create container @ line %d!\n", __LINE__); +#endif // Rank 0 reads the filename list and distribute data to other ranks if (rank == 0) { From 25c151397c1cebe560711c580956c97bf204a0a1 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:24:26 -0500 Subject: [PATCH 136/806] debugging for nonMPI program --- tools/llsm_importer.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 17a40a2dd..6ac2cfa2c 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -246,20 +246,20 @@ main(int argc, char *argv[]) printf("Filename: %s\n", file_name ? file_name : "(none)"); printf("Directory: %s\n", directory_path ? directory_path : "(none)"); -#ifdef ENABLE_MPI - // create a pdc - pdc_id_g = PDCinit("pdc"); - - // create a container property - cont_prop_g = PDCprop_create(PDC_CONT_CREATE, pdc); - if (cont_prop <= 0) - printf("Fail to create container property @ line %d!\n", __LINE__); - - // create a container - cont = PDCcont_create("c1", cont_prop); - if (cont <= 0) - printf("Fail to create container @ line %d!\n", __LINE__); -#endif + + // // create a pdc + // pdc_id_g = PDCinit("pdc"); + + // // create a container property + // cont_prop_g = PDCprop_create(PDC_CONT_CREATE, pdc); + // if (cont_prop <= 0) + // printf("Fail to create container property @ line %d!\n", __LINE__); + + // // create a container + // cont = PDCcont_create("c1", cont_prop); + // if (cont <= 0) + // printf("Fail to create container @ line %d!\n", __LINE__); + // Rank 0 reads the filename list and distribute data to other ranks if (rank == 0) { From d75bae273da785b8d3c2615a907653b2e4f6058b Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:30:28 -0500 Subject: [PATCH 137/806] clang format, without PDC, everything works perfectly. 
program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created --- tools/llsm/csvReader.c | 51 ++++++++++++++-------------- tools/llsm/csvReader.h | 2 +- tools/llsm/parallelReadTiff.c | 22 ++++++------ tools/llsm/parallelReadTiff.h | 4 +-- tools/llsm/pdc_list.c | 63 ++++++++++++++++++++++++----------- tools/llsm/pdc_list.h | 13 ++++---- tools/llsm_importer.c | 2 -- 7 files changed, 89 insertions(+), 68 deletions(-) diff --git a/tools/llsm/csvReader.c b/tools/llsm/csvReader.c index 19b3dee8f..39c4571aa 100644 --- a/tools/llsm/csvReader.c +++ b/tools/llsm/csvReader.c @@ -105,7 +105,7 @@ csv_parse_row(char *line, csv_header_t *header) current_header = current_header->next; } } - csv_row_t *row = (csv_row_t *)malloc(sizeof(csv_row_t)); + csv_row_t *row = (csv_row_t *)malloc(sizeof(csv_row_t)); row->first_cell = first_cell; row->next = NULL; return row; @@ -185,21 +185,24 @@ csv_parse_file(char *file_name, char *field_types) return table; } -csv_table_t *csv_parse_list(PDC_LIST *list, char *field_types){ +csv_table_t * +csv_parse_list(PDC_LIST *list, char *field_types) +{ csv_table_t *table = (csv_table_t *)malloc(sizeof(csv_table_t)); if (table == NULL) { return NULL; } - int num_file_read = 0; - csv_row_t *first_row = NULL; - csv_row_t *last_row = NULL; + int num_file_read = 0; + csv_row_t *first_row = NULL; + csv_row_t *last_row = NULL; PDC_LIST_ITERATOR *iter = pdc_list_iterator_new(list); while (pdc_list_iterator_has_next(iter)) { char *line = (char *)pdc_list_iterator_next(iter); if (num_file_read == 0) { table->first_header = csv_parse_header(line, field_types); - } else { + } + else { // Allocate memory for the row struct csv_row_t *row = csv_parse_row(line, table->first_header); if (row == NULL) { @@ -308,29 +311,25 @@ csv_print_cell(csv_cell_t *cell, int with_key) if (with_key) { printf("%s: ", cell->header->field_name); } - switch (cell->header->field_type) - { - case 'i': - printf("%ld", strtol(cell->field_value, NULL, 10)); - break; - - case 'f': - printf("%f", strtod(cell->field_value, NULL)); - break; - - case 's': - printf("%s", cell->field_value); - break; - - default: - printf("%s", cell->field_value); - break; + switch (cell->header->field_type) { + case 'i': + printf("%ld", strtol(cell->field_value, NULL, 10)); + break; + + case 'f': + printf("%f", strtod(cell->field_value, NULL)); + break; + + case 's': + printf("%s", cell->field_value); + break; + + default: + printf("%s", cell->field_value); + break; } - } - - void csv_print_table(csv_table_t *table) { diff --git a/tools/llsm/csvReader.h b/tools/llsm/csvReader.h index e7599ecb2..08a56e4a0 100644 --- a/tools/llsm/csvReader.h +++ b/tools/llsm/csvReader.h @@ -91,7 +91,7 @@ csv_table_t *csv_parse_file(char *file_name, char *field_types); * @param list A PDC_LIST of strings to parse. * @param field_types A string of field types. The field types are 's' for string, 'i' for long integer, 'f' * for float, and 'd' for double. If this is NULL, all fields are assumed to be strings. - * + * * @return A pointer to the csv_table_t struct. 
*/ csv_table_t *csv_parse_list(PDC_LIST *list, char *field_types); diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 727ba02c1..4da885e61 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -781,7 +781,8 @@ _TIFF_load(char *fileName, uint8_t isImageJIm, uint64_t x, uint64_t y, uint64_t } void -parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_range, image_info_t **image_info) +parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_range, + image_info_t **image_info) { uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0, stripeSize = 0, is_imageJ = 0, imageJ_Z = 0; @@ -793,16 +794,17 @@ parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_ dims[1] = flipXY ? x : y; dims[2] = z; - *image_info = (image_info_t *)malloc(sizeof(image_info_t)); - (*image_info)->x = dims[0]; - (*image_info)->y = dims[1]; - (*image_info)->z = dims[2]; - (*image_info)->bits = bits; + *image_info = (image_info_t *)malloc(sizeof(image_info_t)); + (*image_info)->x = dims[0]; + (*image_info)->y = dims[1]; + (*image_info)->z = dims[2]; + (*image_info)->bits = bits; (*image_info)->startSlice = startSlice; (*image_info)->stripeSize = stripeSize; - (*image_info)->is_imageJ = is_imageJ; - (*image_info)->imageJ_Z = imageJ_Z; - (*image_info)->tiff_size = dims[0] * dims[1] * dims[2] * (bits / 8); + (*image_info)->is_imageJ = is_imageJ; + (*image_info)->imageJ_Z = imageJ_Z; + (*image_info)->tiff_size = dims[0] * dims[1] * dims[2] * (bits / 8); - _TIFF_load(fileName, is_imageJ, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, (void **)&((*image_info)->tiff_ptr)); + _TIFF_load(fileName, is_imageJ, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, + (void **)&((*image_info)->tiff_ptr)); } \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.h b/tools/llsm/parallelReadTiff.h index 60f1cb9f4..081640110 100644 --- a/tools/llsm/parallelReadTiff.h +++ b/tools/llsm/parallelReadTiff.h @@ -28,7 +28,7 @@ typedef struct { size_t tiff_size; } image_info_t; - -void parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_range, image_info_t **image_info_t); +void parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_range, + image_info_t **image_info_t); #endif // PARALLELREADTIFF_H \ No newline at end of file diff --git a/tools/llsm/pdc_list.c b/tools/llsm/pdc_list.c index e6ea059be..95c6e59e7 100644 --- a/tools/llsm/pdc_list.c +++ b/tools/llsm/pdc_list.c @@ -1,33 +1,38 @@ #include "pdc_list.h" - -PDC_LIST *pdc_list_new(){ +PDC_LIST * +pdc_list_new() +{ return pdc_list_create(100, 2.0); } -PDC_LIST* pdc_list_create(size_t initial_capacity, double expansion_factor) { +PDC_LIST * +pdc_list_create(size_t initial_capacity, double expansion_factor) +{ // Allocate memory for the list struct. - PDC_LIST* list = (PDC_LIST*) malloc(sizeof(PDC_LIST)); + PDC_LIST *list = (PDC_LIST *)malloc(sizeof(PDC_LIST)); if (list == NULL) { return NULL; } // Allocate memory for the array of items. - list->items = (void**) malloc(initial_capacity * sizeof(void*)); + list->items = (void **)malloc(initial_capacity * sizeof(void *)); if (list->items == NULL) { free(list); return NULL; } // Initialize the list fields. 
- list->item_count = 0; - list->capacity = initial_capacity; + list->item_count = 0; + list->capacity = initial_capacity; list->expansion_factor = expansion_factor; return list; } -void pdc_list_destroy(PDC_LIST* list) { +void +pdc_list_destroy(PDC_LIST *list) +{ if (list == NULL) { return; } @@ -42,7 +47,9 @@ void pdc_list_destroy(PDC_LIST* list) { free(list); } -void pdc_list_add(PDC_LIST* list, void* item) { +void +pdc_list_add(PDC_LIST *list, void *item) +{ if (list == NULL || item == NULL) { return; } @@ -50,7 +57,7 @@ void pdc_list_add(PDC_LIST* list, void* item) { // Expand the array of items if necessary. if (list->item_count >= list->capacity) { list->capacity *= list->expansion_factor; - list->items = (void**) realloc(list->items, list->capacity * sizeof(void*)); + list->items = (void **)realloc(list->items, list->capacity * sizeof(void *)); if (list->items == NULL) { return; } @@ -60,7 +67,9 @@ void pdc_list_add(PDC_LIST* list, void* item) { list->items[list->item_count++] = item; } -void* pdc_list_get(PDC_LIST* list, size_t index) { +void * +pdc_list_get(PDC_LIST *list, size_t index) +{ if (list == NULL || index >= list->item_count) { return NULL; } @@ -69,7 +78,9 @@ void* pdc_list_get(PDC_LIST* list, size_t index) { return list->items[index]; } -size_t pdc_list_size(PDC_LIST* list) { +size_t +pdc_list_size(PDC_LIST *list) +{ if (list == NULL) { return 0; } @@ -78,7 +89,9 @@ size_t pdc_list_size(PDC_LIST* list) { return list->item_count; } -void pdc_list_set_expansion_factor(PDC_LIST* list, double expansion_factor) { +void +pdc_list_set_expansion_factor(PDC_LIST *list, double expansion_factor) +{ if (list == NULL) { return; } @@ -87,7 +100,9 @@ void pdc_list_set_expansion_factor(PDC_LIST* list, double expansion_factor) { list->expansion_factor = expansion_factor; } -double pdc_list_get_expansion_factor(PDC_LIST* list) { +double +pdc_list_get_expansion_factor(PDC_LIST *list) +{ if (list == NULL) { return 0; } @@ -96,25 +111,29 @@ double pdc_list_get_expansion_factor(PDC_LIST* list) { return list->expansion_factor; } -PDC_LIST_ITERATOR* pdc_list_iterator_new(PDC_LIST* list) { +PDC_LIST_ITERATOR * +pdc_list_iterator_new(PDC_LIST *list) +{ if (list == NULL) { return NULL; } // Allocate memory for the iterator struct. - PDC_LIST_ITERATOR* iterator = (PDC_LIST_ITERATOR*) malloc(sizeof(PDC_LIST_ITERATOR)); + PDC_LIST_ITERATOR *iterator = (PDC_LIST_ITERATOR *)malloc(sizeof(PDC_LIST_ITERATOR)); if (iterator == NULL) { return NULL; } // Initialize the iterator fields. 
- iterator->list = list; + iterator->list = list; iterator->index = 0; return iterator; } -void pdc_list_iterator_destroy(PDC_LIST_ITERATOR* iterator) { +void +pdc_list_iterator_destroy(PDC_LIST_ITERATOR *iterator) +{ if (iterator == NULL) { return; } @@ -123,7 +142,9 @@ void pdc_list_iterator_destroy(PDC_LIST_ITERATOR* iterator) { free(iterator); } -void* pdc_list_iterator_next(PDC_LIST_ITERATOR* iterator) { +void * +pdc_list_iterator_next(PDC_LIST_ITERATOR *iterator) +{ if (iterator == NULL) { return NULL; } @@ -132,7 +153,9 @@ void* pdc_list_iterator_next(PDC_LIST_ITERATOR* iterator) { return pdc_list_get(iterator->list, iterator->index++); } -int pdc_list_iterator_has_next(PDC_LIST_ITERATOR* iterator) { +int +pdc_list_iterator_has_next(PDC_LIST_ITERATOR *iterator) +{ if (iterator == NULL) { return 0; } diff --git a/tools/llsm/pdc_list.h b/tools/llsm/pdc_list.h index 9847469e5..aa71e6124 100644 --- a/tools/llsm/pdc_list.h +++ b/tools/llsm/pdc_list.h @@ -17,11 +17,10 @@ typedef struct { * A generic iterator for iterating over the items in a PDC_LIST. */ typedef struct { - PDC_LIST *list; // The list being iterated over. - size_t index; // The index of the next item to be returned. + PDC_LIST *list; // The list being iterated over. + size_t index; // The index of the next item to be returned. } PDC_LIST_ITERATOR; - /** * Creates a new PDC_LIST with default initial capacity 100 and default expansion factor 2.0. * @return A pointer to the new PDC_LIST. @@ -32,7 +31,7 @@ PDC_LIST *pdc_list_new(); * Creates a new PDC_LIST with the given initial capacity and expansion factor. * @param initial_capacity The initial capacity of the list. * @param expansion_factor The factor by which the capacity is expanded when the list is full. - * + * * @return A pointer to the new PDC_LIST. */ PDC_LIST *pdc_list_create(size_t initial_capacity, double expansion_factor); @@ -47,7 +46,7 @@ void pdc_list_destroy(PDC_LIST *list); * Adds the given item to the end of the given PDC_LIST. * @param list The PDC_LIST to add the item to. * @param item The item to add to the PDC_LIST. - * + * */ void pdc_list_add(PDC_LIST *list, void *item); @@ -55,7 +54,7 @@ void pdc_list_add(PDC_LIST *list, void *item); * Gets the item at the given index in the given PDC_LIST. * @param list The PDC_LIST to get the item from. * @param index The index of the item to get. - * + * * @return A pointer to the item at the given index. */ void *pdc_list_get(PDC_LIST *list, size_t index); @@ -63,7 +62,7 @@ void *pdc_list_get(PDC_LIST *list, size_t index); /** * Sets the item at the given index in the given PDC_LIST. * @param list The PDC_LIST to set the item in. - * + * * @return The number of items in the list. */ size_t pdc_list_size(PDC_LIST *list); diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 6ac2cfa2c..8ac8fc9fc 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -246,7 +246,6 @@ main(int argc, char *argv[]) printf("Filename: %s\n", file_name ? file_name : "(none)"); printf("Directory: %s\n", directory_path ? 
directory_path : "(none)"); - // // create a pdc // pdc_id_g = PDCinit("pdc"); @@ -260,7 +259,6 @@ main(int argc, char *argv[]) // if (cont <= 0) // printf("Fail to create container @ line %d!\n", __LINE__); - // Rank 0 reads the filename list and distribute data to other ranks if (rank == 0) { read_txt(file_name, list); From 0e13170103966decdca5cf0a29196446157ad5d6 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:32:14 -0500 Subject: [PATCH 138/806] enable MPI --- tools/llsm_importer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 8ac8fc9fc..cdd5ddd56 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -9,8 +9,8 @@ #endif #ifdef ENABLE_MPI -// #include "mpi.h" -#undef ENABLE_MPI +#include "mpi.h" +// #undef ENABLE_MPI #endif #include "pdc.h" From bdfd297c23014747a57abc2e962425d853ffafcb Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:37:04 -0500 Subject: [PATCH 139/806] enable MPI --- tools/llsm/csvReader.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/llsm/csvReader.c b/tools/llsm/csvReader.c index 39c4571aa..bb52748e3 100644 --- a/tools/llsm/csvReader.c +++ b/tools/llsm/csvReader.c @@ -296,9 +296,9 @@ csv_print_row(csv_row_t *row, int with_key) csv_print_cell(current_cell, with_key); if (current_cell->next != NULL) { printf(", "); - if (with_key) { - printf("\n"); - } + } + if (with_key) { + printf("\n"); } current_cell = current_cell->next; } From 62b0efbf9674b6415e7065522442161e3afa934d Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:42:05 -0500 Subject: [PATCH 140/806] enlarge BCase size --- tools/llsm_importer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index cdd5ddd56..b75ec349a 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -270,7 +270,7 @@ main(int argc, char *argv[]) PDC_LIST_ITERATOR *iter = pdc_list_iterator_new(list); while (pdc_list_iterator_has_next(iter)) { char *csv_line = (char *)pdc_list_iterator_next(iter); - MPI_Bcast(csv_line, 256, MPI_CHAR, 0, MPI_COMM_WORLD); + MPI_Bcast(csv_line, 1024, MPI_CHAR, 0, MPI_COMM_WORLD); } #endif } @@ -282,8 +282,8 @@ main(int argc, char *argv[]) // receive the file names int i; for (i = 0; i < num_lines; i++) { - csv_line = (char *)malloc(256 * sizeof(char)); - MPI_Bcast(csv_line, 256, MPI_CHAR, 0, MPI_COMM_WORLD); + csv_line = (char *)malloc(1024 * sizeof(char)); + MPI_Bcast(csv_line, 1024, MPI_CHAR, 0, MPI_COMM_WORLD); pdc_list_add(list, csv_line); } #endif From 399289cad8236741c78f3ceddc1725729c7705f3 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:47:28 -0500 Subject: [PATCH 141/806] enlarge BCase size --- tools/llsm_importer.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index b75ec349a..00e29e413 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -202,10 +202,12 @@ on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) } void -read_txt(char *txtFileName, PDC_LIST *list) +read_txt(char *txtFileName, PDC_LIST *list, int *max_row_length) { FILE *file = fopen(txtFileName, "r"); + int row_length = 0; + if (file == NULL) { printf("Error: could not open file %s\n", txtFileName); return; @@ -214,7 +216,15 @@ read_txt(char *txtFileName, PDC_LIST *list) // Read the lines of the file while (fgets(buffer, sizeof(buffer), file)) { 
pdc_list_add(list, strdup(buffer)); + if (row_length < strlen(buffer)) { + row_length = strlen(buffer); + } } + + fclose(file); + + // Find the maximum row length + *max_row_length = row_length + 5; } int @@ -234,6 +244,7 @@ main(int argc, char *argv[]) csv_header_t * csv_header = NULL; csv_row_t * csv_row = NULL; llsm_importer_args_t *llsm_args = NULL; + int bcast_count = 512; char csv_field_types[] = {'s', 's', 'f', 'f', 'f', 'f', 'f', 'f'}; // parse console argument int parse_code = parse_console_args(argc, argv, &file_name); @@ -261,7 +272,7 @@ main(int argc, char *argv[]) // Rank 0 reads the filename list and distribute data to other ranks if (rank == 0) { - read_txt(file_name, list); + read_txt(file_name, list, &bcast_count); #ifdef ENABLE_MPI // broadcast the number of lines int num_lines = pdc_list_size(list); @@ -270,7 +281,7 @@ main(int argc, char *argv[]) PDC_LIST_ITERATOR *iter = pdc_list_iterator_new(list); while (pdc_list_iterator_has_next(iter)) { char *csv_line = (char *)pdc_list_iterator_next(iter); - MPI_Bcast(csv_line, 1024, MPI_CHAR, 0, MPI_COMM_WORLD); + MPI_Bcast(csv_line, bcast_count, MPI_CHAR, 0, MPI_COMM_WORLD); } #endif } @@ -282,8 +293,8 @@ main(int argc, char *argv[]) // receive the file names int i; for (i = 0; i < num_lines; i++) { - csv_line = (char *)malloc(1024 * sizeof(char)); - MPI_Bcast(csv_line, 1024, MPI_CHAR, 0, MPI_COMM_WORLD); + csv_line = (char *)malloc(bcast_count * sizeof(char)); + MPI_Bcast(csv_line, bcast_count, MPI_CHAR, 0, MPI_COMM_WORLD); pdc_list_add(list, csv_line); } #endif From ab24b6bb92f1b346380d29be63ed3aa883e91f12 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:51:02 -0500 Subject: [PATCH 142/806] enlarge BCase size --- tools/llsm_importer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 00e29e413..795e31490 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -281,7 +281,7 @@ main(int argc, char *argv[]) PDC_LIST_ITERATOR *iter = pdc_list_iterator_new(list); while (pdc_list_iterator_has_next(iter)) { char *csv_line = (char *)pdc_list_iterator_next(iter); - MPI_Bcast(csv_line, bcast_count, MPI_CHAR, 0, MPI_COMM_WORLD); + MPI_Bcast(csv_line, 512, MPI_CHAR, 0, MPI_COMM_WORLD); } #endif } @@ -293,8 +293,8 @@ main(int argc, char *argv[]) // receive the file names int i; for (i = 0; i < num_lines; i++) { - csv_line = (char *)malloc(bcast_count * sizeof(char)); - MPI_Bcast(csv_line, bcast_count, MPI_CHAR, 0, MPI_COMM_WORLD); + csv_line = (char *)malloc(512 * sizeof(char)); + MPI_Bcast(csv_line, 512, MPI_CHAR, 0, MPI_COMM_WORLD); pdc_list_add(list, csv_line); } #endif From bd97115d52c299c9821e01ccb1c8bbf42f282ea0 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 18:55:27 -0500 Subject: [PATCH 143/806] resolve bcast count --- tools/llsm_importer.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 795e31490..a0671b51d 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -273,15 +273,20 @@ main(int argc, char *argv[]) // Rank 0 reads the filename list and distribute data to other ranks if (rank == 0) { read_txt(file_name, list, &bcast_count); + // print bcast_count + printf("bcast_count: %d", bcast_count); + #ifdef ENABLE_MPI // broadcast the number of lines int num_lines = pdc_list_size(list); MPI_Bcast(&num_lines, 1, MPI_INT, 0, MPI_COMM_WORLD); + // broadcast the bcast_count + MPI_Bcast(&bcast_count, 1, MPI_INT, 0, 
MPI_COMM_WORLD); // broadcast the file names PDC_LIST_ITERATOR *iter = pdc_list_iterator_new(list); while (pdc_list_iterator_has_next(iter)) { char *csv_line = (char *)pdc_list_iterator_next(iter); - MPI_Bcast(csv_line, 512, MPI_CHAR, 0, MPI_COMM_WORLD); + MPI_Bcast(csv_line, bcast_count, MPI_CHAR, 0, MPI_COMM_WORLD); } #endif } @@ -290,11 +295,13 @@ main(int argc, char *argv[]) // other ranks receive the number of files int num_lines; MPI_Bcast(&num_lines, 1, MPI_INT, 0, MPI_COMM_WORLD); + // receive the bcast_count + MPI_Bcast(&bcast_count, 1, MPI_INT, 0, MPI_COMM_WORLD); // receive the file names int i; for (i = 0; i < num_lines; i++) { - csv_line = (char *)malloc(512 * sizeof(char)); - MPI_Bcast(csv_line, 512, MPI_CHAR, 0, MPI_COMM_WORLD); + csv_line = (char *)malloc(bcast_count * sizeof(char)); + MPI_Bcast(csv_line, bcast_count, MPI_CHAR, 0, MPI_COMM_WORLD); pdc_list_add(list, csv_line); } #endif From ed20ee04e35f90ec23fdbb0f707497c8c9bb01ec Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 19:05:02 -0500 Subject: [PATCH 144/806] llsm data path in script --- scripts/llsm_importer/template.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/llsm_importer/template.sh b/scripts/llsm_importer/template.sh index d9305c83e..d736b4076 100755 --- a/scripts/llsm_importer/template.sh +++ b/scripts/llsm_importer/template.sh @@ -38,7 +38,9 @@ CLOSE=$EXECPATH/close_server chmod +x $EXECPATH/* chmod +x $TOOLPATH/llsm_importer -IMGLIST_PATH=/global/cfs/cdirs/m2621/wzhang5/data/20220115_Korra_LLCPK_LFOV_0p1PSAmpKan/run1/ImageList_from_encoder.csv +LLSM_DATA_PATH=/pscratch/sd/w/wzhang5/data/llsm/20220115_Korra_LLCPK_LFOV_0p1PSAmpKan/run1 +# LLSM_DATA_PATH=/global/cfs/cdirs/m2621/wzhang5/data/20220115_Korra_LLCPK_LFOV_0p1PSAmpKan/run1 +IMGLIST_PATH=${LLSM_DATA_PATH}/ImageList_from_encoder.csv date From 29bb5259e311727f4fec03938f1dac619aee60f0 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 19:51:33 -0500 Subject: [PATCH 145/806] llsm data path in script --- tools/llsm/csvReader.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/llsm/csvReader.c b/tools/llsm/csvReader.c index bb52748e3..5823d349a 100644 --- a/tools/llsm/csvReader.c +++ b/tools/llsm/csvReader.c @@ -297,9 +297,9 @@ csv_print_row(csv_row_t *row, int with_key) if (current_cell->next != NULL) { printf(", "); } - if (with_key) { - printf("\n"); - } + // if (with_key) { + // printf("\n"); + // } current_cell = current_cell->next; } printf("\n"); From 8876a4353dcf4f16ee9d17d3b7eaa5eb300a32db Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 20:07:36 -0500 Subject: [PATCH 146/806] update csv reader --- tools/llsm/csvReader.c | 45 ++++++++++++++++++++++++++++++++++-------- tools/llsm/csvReader.h | 25 +++++++++++++++++++++++ tools/llsm_importer.c | 2 +- 3 files changed, 63 insertions(+), 9 deletions(-) diff --git a/tools/llsm/csvReader.c b/tools/llsm/csvReader.c index 5823d349a..c4f4fbf81 100644 --- a/tools/llsm/csvReader.c +++ b/tools/llsm/csvReader.c @@ -1,5 +1,34 @@ #include "csvReader.h" +char csv_delimiter = ','; +char csv_quote = '\"'; +char csv_escape = '\\'; +char csv_newline = '\n'; + +void +csv_set_delimiter(char delimiter) +{ + csv_delimiter = delimiter; +} + +void +csv_set_quote(char quote) +{ + csv_quote = quote; +} + +void +csv_set_escape(char escape) +{ + csv_escape = escape; +} + +void +csv_set_newline(char newline) +{ + csv_newline = newline; +} + csv_header_t * csv_parse_header(char *line, char *field_types) { @@ -12,18 +41,18 @@ 
csv_parse_header(char *line, char *field_types) int value_start = 0; int i = 0; - for (int i = 0; line[i] != '\0'; ++i) { - if (line[i] == '\"') { + for (int i = 0; line[i] != csv_newline; ++i) { + if (line[i] == csv_quote) { in_quotes = !in_quotes; } - else if (!in_quotes && (line[i] == ',' || line[i + 1] == '\0')) { + else if (!in_quotes && (line[i] == csv_delimiter || line[i + 1] == csv_newline)) { // Allocate memory for the header struct csv_header_t *header = (csv_header_t *)malloc(sizeof(csv_header_t)); if (header == NULL) { return NULL; } // Remove quotes and spaces from the field name - header->field_name = strndup(line + value_start, i - value_start + (line[i + 1] == '\0')); + header->field_name = strndup(line + value_start, i - value_start + (line[i + 1] == csv_newline)); // Set the field index header->field_index = field_index; @@ -70,11 +99,11 @@ csv_parse_row(char *line, csv_header_t *header) int value_start = 0; int i = 0; - for (int i = 0; line[i] != '\0'; ++i) { - if (line[i] == '\"') { + for (int i = 0; line[i] != csv_newline; ++i) { + if (line[i] == csv_quote) { in_quotes = !in_quotes; } - else if (!in_quotes && (line[i] == ',' || line[i + 1] == '\0')) { + else if (!in_quotes && (line[i] == csv_delimiter || line[i + 1] == csv_newline)) { // Allocate memory for the cell struct csv_cell_t *cell = (csv_cell_t *)malloc(sizeof(csv_cell_t)); if (cell == NULL) { @@ -85,7 +114,7 @@ csv_parse_row(char *line, csv_header_t *header) cell->header = current_header; // Set the field value - cell->field_value = strndup(line + value_start, i - value_start + (line[i + 1] == '\0')); + cell->field_value = strndup(line + value_start, i - value_start + (line[i + 1] == csv_newline)); // Set the next pointer to NULL cell->next = NULL; diff --git a/tools/llsm/csvReader.h b/tools/llsm/csvReader.h index 08a56e4a0..898f60fdb 100644 --- a/tools/llsm/csvReader.h +++ b/tools/llsm/csvReader.h @@ -30,6 +30,31 @@ typedef struct csv_table_t { csv_row_t * first_row; } csv_table_t; +/** + * @brief This function sets the delimiter for the CSV file. The default is a comma. + * @param delimiter The delimiter to use. + */ +void csv_set_delimiter(char delimiter); + +/** + * @brief This function sets the quote character for the CSV file. The default is a double quote. + * @param quote The quote character to use. + */ +void csv_set_quote(char quote); + +/** + * @brief This function sets the escape character for the CSV file. The default is a backslash. + * @param escape The escape character to use. + */ +void csv_set_escape(char escape); + +/** + * @brief This function sets the newline character for the CSV file. The default is a newline. + * @param newline The newline character to use. + */ +void csv_set_newline(char newline); + + /** * @brief This function parses a CSV header line and returns a linked list of csv_header_t structs. 
The header * string may contain quotes and spaces diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index a0671b51d..f9a78a7da 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -274,7 +274,7 @@ main(int argc, char *argv[]) if (rank == 0) { read_txt(file_name, list, &bcast_count); // print bcast_count - printf("bcast_count: %d", bcast_count); + printf("bcast_count: %d \n", bcast_count); #ifdef ENABLE_MPI // broadcast the number of lines From 39f396b596d2f5416df144244b8999dbb4fe95aa Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 20:08:37 -0500 Subject: [PATCH 147/806] update csv reader --- tools/llsm/csvReader.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/llsm/csvReader.c b/tools/llsm/csvReader.c index c4f4fbf81..0bc480edf 100644 --- a/tools/llsm/csvReader.c +++ b/tools/llsm/csvReader.c @@ -325,10 +325,10 @@ csv_print_row(csv_row_t *row, int with_key) csv_print_cell(current_cell, with_key); if (current_cell->next != NULL) { printf(", "); + if (with_key) { + printf("\n"); + } } - // if (with_key) { - // printf("\n"); - // } current_cell = current_cell->next; } printf("\n"); From e587abf11e2bc12267ff1ad0d0540093fde24fcc Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 20:11:10 -0500 Subject: [PATCH 148/806] update csv reader --- tools/llsm/csvReader.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/llsm/csvReader.c b/tools/llsm/csvReader.c index 0bc480edf..c6623a287 100644 --- a/tools/llsm/csvReader.c +++ b/tools/llsm/csvReader.c @@ -325,9 +325,9 @@ csv_print_row(csv_row_t *row, int with_key) csv_print_cell(current_cell, with_key); if (current_cell->next != NULL) { printf(", "); - if (with_key) { - printf("\n"); - } + } + if (with_key) { + printf("\n"); } current_cell = current_cell->next; } From 3aa45056a4a8ec3122ffdf969956a2a133ccf36a Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 23:51:28 -0500 Subject: [PATCH 149/806] update pdc --- tools/llsm_importer.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index f9a78a7da..ef13c5dde 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -29,7 +29,6 @@ typedef struct llsm_importer_args_t { int rank = 0, size = 1; -pdcid_t pdc, cont_prop, cont, obj_prop; pdcid_t pdc_id_g = 0, cont_prop_g = 0, cont_id_g = 0, obj_prop_g = 0; int @@ -60,7 +59,7 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation - pdcid_t cur_obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); + obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); psize_t ndims = 3; // FIXME: we should support uint64_t. @@ -82,7 +81,7 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) // create object // FIXME: There are many attributes currently in one file name, // and we should do some research to see what would be a good object name for each image. 
- pdcid_t cur_obj_g = PDCobj_create(cont_id_g, fileName_cell->field_value, cur_obj_prop_g); + pdcid_t cur_obj_g = PDCobj_create(cont_id_g, fileName_cell->field_value, obj_prop_g); // write data to object pdcid_t local_region = PDCregion_create(ndims, offsets, num_bytes); @@ -143,7 +142,7 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) PDCregion_close(local_region); PDCregion_close(remote_region); PDCregion_transfer_close(transfer_request); - PDCprop_close(cur_obj_prop_g); + PDCprop_close(obj_prop_g); } void @@ -257,18 +256,18 @@ main(int argc, char *argv[]) printf("Filename: %s\n", file_name ? file_name : "(none)"); printf("Directory: %s\n", directory_path ? directory_path : "(none)"); - // // create a pdc - // pdc_id_g = PDCinit("pdc"); + // create a pdc + pdc_id_g = PDCinit("pdc"); - // // create a container property - // cont_prop_g = PDCprop_create(PDC_CONT_CREATE, pdc); - // if (cont_prop <= 0) - // printf("Fail to create container property @ line %d!\n", __LINE__); + // create a container property + cont_prop_g = PDCprop_create(PDC_CONT_CREATE, pdc_id_g); + if (cont_prop_g <= 0) + printf("Fail to create container property @ line %d!\n", __LINE__); - // // create a container - // cont = PDCcont_create("c1", cont_prop); - // if (cont <= 0) - // printf("Fail to create container @ line %d!\n", __LINE__); + // create a container + cont_id_g = PDCcont_create("c1", cont_prop_g); + if (cont_id_g <= 0) + printf("Fail to create container @ line %d!\n", __LINE__); // Rank 0 reads the filename list and distribute data to other ranks if (rank == 0) { @@ -328,6 +327,13 @@ main(int argc, char *argv[]) csv_free_table(csv_table); + // close the container + PDCcont_close(cont_id_g); + // close the container property + PDCprop_close(cont_prop_g); + // close the pdc + PDCclose(pdc_id_g); + #ifdef ENABLE_MPI MPI_Finalize(); #endif From 2154141c0e0dc977aeff78b68856a60a751e968f Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 23:51:44 -0500 Subject: [PATCH 150/806] update pdc --- tools/llsm_importer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index ef13c5dde..894220403 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -192,7 +192,7 @@ on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) printf("\n"); // import the image to PDC - // import_to_pdc(image_info, fileName_cell); + import_to_pdc(image_info, fileName_cell); // free the image info free(image_info->tiff_ptr); From 8f559748a7e9cfd2585c369e8c3893a2db77d207 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Sun, 23 Apr 2023 23:54:54 -0500 Subject: [PATCH 151/806] update pdc --- tools/llsm_importer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 894220403..fc6a27148 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -9,8 +9,8 @@ #endif #ifdef ENABLE_MPI -#include "mpi.h" -// #undef ENABLE_MPI +// #include "mpi.h" +#undef ENABLE_MPI #endif #include "pdc.h" From d8b2e572d0bf6703f8826776a8ad30dd19e2c38e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 00:29:12 -0500 Subject: [PATCH 152/806] update pdc --- tools/llsm_importer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index fc6a27148..b1b240a14 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -61,9 +61,9 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) obj_prop_g = 
PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); - psize_t ndims = 3; + psize_t ndims = 1; // FIXME: we should support uint64_t. - uint64_t dims[3] = {image_info->x, image_info->y, image_info->z}; + uint64_t dims[1] = {image_info->x * image_info->y * image_info->z}; // FIXME: we should change the ndims parameter to psize_t type. PDCprop_set_obj_dims(obj_prop_g, (PDC_int_t)ndims, dims); PDCprop_set_obj_type(obj_prop_g, PDC_FLOAT); From 3009e0bcd5a4c3ed28699949ec50fdf1904a6557 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 00:32:34 -0500 Subject: [PATCH 153/806] update pdc --- tools/llsm_importer.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index b1b240a14..8e30e4718 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -84,12 +84,12 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) pdcid_t cur_obj_g = PDCobj_create(cont_id_g, fileName_cell->field_value, obj_prop_g); // write data to object - pdcid_t local_region = PDCregion_create(ndims, offsets, num_bytes); - pdcid_t remote_region = PDCregion_create(ndims, offsets, num_bytes); - pdcid_t transfer_request = - PDCregion_transfer_create(image_info->tiff_ptr, PDC_WRITE, cur_obj_g, local_region, remote_region); - PDCregion_transfer_start(transfer_request); - PDCregion_transfer_wait(transfer_request); + // pdcid_t local_region = PDCregion_create(ndims, offsets, num_bytes); + // pdcid_t remote_region = PDCregion_create(ndims, offsets, num_bytes); + // pdcid_t transfer_request = + // PDCregion_transfer_create(image_info->tiff_ptr, PDC_WRITE, cur_obj_g, local_region, remote_region); + // PDCregion_transfer_start(transfer_request); + // PDCregion_transfer_wait(transfer_request); // add metadata tags based on the csv row csv_cell_t *cell = fileName_cell; @@ -139,9 +139,9 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) // free memory free(offsets); free(num_bytes); - PDCregion_close(local_region); - PDCregion_close(remote_region); - PDCregion_transfer_close(transfer_request); + // PDCregion_close(local_region); + // PDCregion_close(remote_region); + // PDCregion_transfer_close(transfer_request); PDCprop_close(obj_prop_g); } From b90191f38431b5f3af02a4850e170035f64c6fa9 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 15:24:25 -0500 Subject: [PATCH 154/806] update pdc --- tools/llsm_importer.c | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 8e30e4718..85bfa3fef 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -61,9 +61,16 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); - psize_t ndims = 1; + psize_t ndims = 3; + uint64_t offsets[3] = {0, 0, 0}; // FIXME: we should support uint64_t. - uint64_t dims[1] = {image_info->x * image_info->y * image_info->z}; + uint64_t dims[3] = {image_info->x , image_info->y , image_info->z}; + + // psize_t ndims = 1; + // uint64_t offsets[1] = {0}; + // // FIXME: we should support uint64_t. + // uint64_t dims[1] = {image_info->x * image_info->y * image_info->z}; + // FIXME: we should change the ndims parameter to psize_t type. 
PDCprop_set_obj_dims(obj_prop_g, (PDC_int_t)ndims, dims); PDCprop_set_obj_type(obj_prop_g, PDC_FLOAT); @@ -71,12 +78,12 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) PDCprop_set_obj_user_id(obj_prop_g, getuid()); PDCprop_set_obj_app_name(obj_prop_g, "LLSM"); - uint64_t *offsets = (uint64_t *)malloc(sizeof(uint64_t) * ndims); - uint64_t *num_bytes = (uint64_t *)malloc(sizeof(uint64_t) * ndims); - for (int i = 0; i < ndims; i++) { - offsets[i] = 0; - num_bytes[i] = dims[i] * image_info->bits / 8; - } + // uint64_t *offsets = (uint64_t *)malloc(sizeof(uint64_t) * ndims); + // uint64_t *num_bytes = (uint64_t *)malloc(sizeof(uint64_t) * ndims); + // for (int i = 0; i < ndims; i++) { + // offsets[i] = 0; + // num_bytes[i] = dims[i] * image_info->bits / 8; + // } // create object // FIXME: There are many attributes currently in one file name, @@ -84,12 +91,12 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) pdcid_t cur_obj_g = PDCobj_create(cont_id_g, fileName_cell->field_value, obj_prop_g); // write data to object - // pdcid_t local_region = PDCregion_create(ndims, offsets, num_bytes); - // pdcid_t remote_region = PDCregion_create(ndims, offsets, num_bytes); - // pdcid_t transfer_request = - // PDCregion_transfer_create(image_info->tiff_ptr, PDC_WRITE, cur_obj_g, local_region, remote_region); - // PDCregion_transfer_start(transfer_request); - // PDCregion_transfer_wait(transfer_request); + pdcid_t local_region = PDCregion_create(ndims, offsets, dims); + pdcid_t remote_region = PDCregion_create(ndims, offsets, dims); + pdcid_t transfer_request = + PDCregion_transfer_create(image_info->tiff_ptr, PDC_WRITE, cur_obj_g, local_region, remote_region); + PDCregion_transfer_start(transfer_request); + PDCregion_transfer_wait(transfer_request); // add metadata tags based on the csv row csv_cell_t *cell = fileName_cell; From 08f9fc226fb0c7236a74e34dcfc2281fa0fe2fa7 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 15:26:33 -0500 Subject: [PATCH 155/806] update pdc --- tools/llsm_importer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 85bfa3fef..28f57777d 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -144,8 +144,8 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) duration / 1e9); // free memory - free(offsets); - free(num_bytes); + // free(offsets); + // free(num_bytes); // PDCregion_close(local_region); // PDCregion_close(remote_region); // PDCregion_transfer_close(transfer_request); From a3dc4972cecdc4259def3b6e0d60d83756364ea3 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 15:31:58 -0500 Subject: [PATCH 156/806] update pdc --- tools/llsm_importer.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 28f57777d..af08d50d2 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -61,15 +61,15 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); - psize_t ndims = 3; - uint64_t offsets[3] = {0, 0, 0}; - // FIXME: we should support uint64_t. - uint64_t dims[3] = {image_info->x , image_info->y , image_info->z}; - - // psize_t ndims = 1; - // uint64_t offsets[1] = {0}; + // psize_t ndims = 3; + // uint64_t offsets[3] = {0, 0, 0}; // // FIXME: we should support uint64_t. 
- // uint64_t dims[1] = {image_info->x * image_info->y * image_info->z}; + // uint64_t dims[3] = {image_info->x , image_info->y , image_info->z}; + + psize_t ndims = 1; + uint64_t offsets[1] = {0}; + // FIXME: we should support uint64_t. + uint64_t dims[1] = {image_info->x * image_info->y * image_info->z}; // FIXME: we should change the ndims parameter to psize_t type. PDCprop_set_obj_dims(obj_prop_g, (PDC_int_t)ndims, dims); From 2e1c8bc77ff02cf14a359b26d6ba0ff2133a310f Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 15:43:31 -0500 Subject: [PATCH 157/806] update pdc --- tools/llsm_importer.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index af08d50d2..dcb0cba21 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -73,7 +73,25 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) // FIXME: we should change the ndims parameter to psize_t type. PDCprop_set_obj_dims(obj_prop_g, (PDC_int_t)ndims, dims); - PDCprop_set_obj_type(obj_prop_g, PDC_FLOAT); + pdc_var_type_t pdc_type = PDC_UNKNOWN; + switch (image_info->bits) { + case 8: + pdc_type = PDC_INT8; + break; + case 16: + pdc_type = PDC_INT16; + break; + case 32: + pdc_type = PDC_FLOAT; + break; + case 64: + pdc_type = PDC_DOUBLE; + break; + default: + printf("Error: unsupported data type.\n"); + exit(-1); + } + PDCprop_set_obj_type(obj_prop_g, pdc_type); PDCprop_set_obj_time_step(obj_prop_g, 0); PDCprop_set_obj_user_id(obj_prop_g, getuid()); PDCprop_set_obj_app_name(obj_prop_g, "LLSM"); From 3584a20bfe35ddf882adb2b74d325ffe78425821 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 15:51:51 -0500 Subject: [PATCH 158/806] update pdc --- src/api/include/pdc_public.h | 4 +++- src/server/pdc_client_server_common.c | 9 ++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/api/include/pdc_public.h b/src/api/include/pdc_public.h index dc286e5e9..8c47976f0 100644 --- a/src/api/include/pdc_public.h +++ b/src/api/include/pdc_public.h @@ -55,7 +55,9 @@ typedef enum { PDC_UINT64 = 9, /* 64-bit unsigned integer types */ PDC_INT16 = 10, PDC_INT8 = 11, - NCLASSES = 12 /* this must be last */ + PDC_UINT8 = 12, + PDC_UINT16 = 13, + NCLASSES = 14 /* this must be last */ } pdc_var_type_t; typedef enum { PDC_PERSIST, PDC_TRANSIENT } pdc_lifetime_t; diff --git a/src/server/pdc_client_server_common.c b/src/server/pdc_client_server_common.c index 289c20527..14417485b 100644 --- a/src/server/pdc_client_server_common.c +++ b/src/server/pdc_client_server_common.c @@ -265,7 +265,14 @@ PDC_get_var_type_size(pdc_var_type_t dtype) break; case PDC_INT8: ret_value = sizeof(int8_t); - + goto done; + break; + case PDC_UINT8: + ret_value = sizeof(uint8_t); + goto done; + break; + case PDC_UINT16: + ret_value = sizeof(uint16_t); goto done; break; case PDC_INT64: From 5b5e90e54d26f8a438f96e4e62d41ba21e78a44b Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 15:54:55 -0500 Subject: [PATCH 159/806] update pdc --- tools/llsm_importer.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index dcb0cba21..a412eda52 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -61,25 +61,25 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); - // psize_t ndims = 3; - // uint64_t offsets[3] = {0, 0, 0}; - // // FIXME: we should support uint64_t. 
- // uint64_t dims[3] = {image_info->x , image_info->y , image_info->z}; - - psize_t ndims = 1; - uint64_t offsets[1] = {0}; + psize_t ndims = 3; + uint64_t offsets[3] = {0, 0, 0}; // FIXME: we should support uint64_t. - uint64_t dims[1] = {image_info->x * image_info->y * image_info->z}; + uint64_t dims[3] = {image_info->x , image_info->y , image_info->z}; + + // psize_t ndims = 1; + // uint64_t offsets[1] = {0}; + // // FIXME: we should support uint64_t. + // uint64_t dims[1] = {image_info->x * image_info->y * image_info->z}; // FIXME: we should change the ndims parameter to psize_t type. PDCprop_set_obj_dims(obj_prop_g, (PDC_int_t)ndims, dims); pdc_var_type_t pdc_type = PDC_UNKNOWN; switch (image_info->bits) { case 8: - pdc_type = PDC_INT8; + pdc_type = PDC_UINT8; break; case 16: - pdc_type = PDC_INT16; + pdc_type = PDC_UINT16; break; case 32: pdc_type = PDC_FLOAT; From 53c07d81de7275d1bb4facf5a25e594bd1048ee0 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 15:59:37 -0500 Subject: [PATCH 160/806] enlarge max write --- src/server/pdc_server_region/pdc_server_data.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/pdc_server_region/pdc_server_data.c b/src/server/pdc_server_region/pdc_server_data.c index c8f80278a..11d91dc22 100644 --- a/src/server/pdc_server_region/pdc_server_data.c +++ b/src/server/pdc_server_region/pdc_server_data.c @@ -4686,7 +4686,7 @@ static perr_t PDC_Server_posix_write(int fd, void *buf, uint64_t write_size) { // Write 1GB at a time - uint64_t write_bytes = 0, max_write_size = 1073741824; + uint64_t write_bytes = 0, max_write_size = 1073741824 * 2; perr_t ret_value = SUCCEED; ssize_t ret; From c7551eaa5e076ce5f2c488a75ec0e5fe3da5674e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 16:17:58 -0500 Subject: [PATCH 161/806] update pdc --- tools/llsm_importer.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index a412eda52..1b86081d2 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -61,16 +61,16 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); - psize_t ndims = 3; + psize_t ndims = 3; uint64_t offsets[3] = {0, 0, 0}; // FIXME: we should support uint64_t. - uint64_t dims[3] = {image_info->x , image_info->y , image_info->z}; + uint64_t dims[3] = {image_info->x, image_info->y, image_info->z}; // psize_t ndims = 1; // uint64_t offsets[1] = {0}; // // FIXME: we should support uint64_t. // uint64_t dims[1] = {image_info->x * image_info->y * image_info->z}; - + // FIXME: we should change the ndims parameter to psize_t type. PDCprop_set_obj_dims(obj_prop_g, (PDC_int_t)ndims, dims); pdc_var_type_t pdc_type = PDC_UNKNOWN; @@ -116,6 +116,13 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) PDCregion_transfer_start(transfer_request); PDCregion_transfer_wait(transfer_request); + clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation + duration = (end.tv_sec - start.tv_sec) * 1e9 + + (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds + + printf("[Rank %4d] Region Transfer for object %s [%d Bytes] Done! 
Time taken: %.4f seconds\n", rank, + fileName_cell->field_value, image_info->tiff_size , duration / 1e9); + // add metadata tags based on the csv row csv_cell_t *cell = fileName_cell; while (cell != NULL) { @@ -158,7 +165,7 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) duration = (end.tv_sec - start.tv_sec) * 1e9 + (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds - printf("[Rank %4d]create object %s Done! Time taken: %.4f seconds\n", rank, fileName_cell->field_value, + printf("[Rank %4d] Create object %s Done! Time taken: %.4f seconds\n", rank, fileName_cell->field_value, duration / 1e9); // free memory @@ -204,7 +211,7 @@ on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) duration = (end.tv_sec - start.tv_sec) * 1e9 + (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds - printf("[Rand %4d]Read %s Done! Time taken: %.4f seconds\n", rank, filepath, duration / 1e9); + printf("[Rand %4d] Read %s Done! Time taken: %.4f seconds\n", rank, filepath, duration / 1e9); if (image_info == NULL || image_info->tiff_ptr == NULL) { return; From 02d401fc72874882500e9a312a251b576a586ea6 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 16:23:59 -0500 Subject: [PATCH 162/806] update pdc --- tools/llsm_importer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 1b86081d2..07196d018 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -9,8 +9,8 @@ #endif #ifdef ENABLE_MPI -// #include "mpi.h" -#undef ENABLE_MPI +#include "mpi.h" +// #undef ENABLE_MPI #endif #include "pdc.h" From cdc30203bfa1c75950944045ce273b2e454ea83f Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 16:34:25 -0500 Subject: [PATCH 163/806] update pdc --- src/server/pdc_server_region/pdc_server_data.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/server/pdc_server_region/pdc_server_data.c b/src/server/pdc_server_region/pdc_server_data.c index 11d91dc22..7d4057b1e 100644 --- a/src/server/pdc_server_region/pdc_server_data.c +++ b/src/server/pdc_server_region/pdc_server_data.c @@ -4695,7 +4695,8 @@ PDC_Server_posix_write(int fd, void *buf, uint64_t write_size) while (write_size > max_write_size) { ret = write(fd, buf, max_write_size); if (ret < 0 || ret != (ssize_t)max_write_size) { - printf("==PDC_SERVER[%d]: write %d failed\n", pdc_server_rank_g, fd); + printf("==PDC_SERVER[%d]: in-loop: write %d failed, ret = %d, max_write_size = %llu\n", + pdc_server_rank_g, fd, ret, max_write_size); ret_value = FAIL; goto done; } @@ -4705,7 +4706,8 @@ PDC_Server_posix_write(int fd, void *buf, uint64_t write_size) } ret = write(fd, buf, write_size); if (ret < 0 || ret != (ssize_t)write_size) { - printf("==PDC_SERVER[%d]: write %d failed\n", pdc_server_rank_g, fd); + printf("==PDC_SERVER[%d]: out-loop: write %d failed, ret = %d, max_write_size = %llu\n", + pdc_server_rank_g, fd, ret, max_write_size); ret_value = FAIL; goto done; } From 6d7d7a3549cd2ec3df3145a661664b3d326e110b Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 24 Apr 2023 22:39:47 -0500 Subject: [PATCH 164/806] update pdc --- src/server/pdc_server_region/pdc_server_data.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/pdc_server_region/pdc_server_data.c b/src/server/pdc_server_region/pdc_server_data.c index 7d4057b1e..372d08ac7 100644 --- a/src/server/pdc_server_region/pdc_server_data.c +++ b/src/server/pdc_server_region/pdc_server_data.c @@ -4686,7 
+4686,7 @@ static perr_t PDC_Server_posix_write(int fd, void *buf, uint64_t write_size) { // Write 1GB at a time - uint64_t write_bytes = 0, max_write_size = 1073741824 * 2; + uint64_t write_bytes = 0, max_write_size = 1073741824; perr_t ret_value = SUCCEED; ssize_t ret; From 011b9f080baba9930dfdf4909f38b66b179079f3 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 25 Apr 2023 22:18:32 -0500 Subject: [PATCH 165/806] update pdc_import.c --- tools/CMakeLists.txt | 25 ++++++++++++------------- tools/pdc_import.c | 29 ++++++++++++++++------------- 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 1c6940ca9..815a081ec 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -87,20 +87,19 @@ add_definitions(-DENABLE_MPI=1) add_library(cjson cjson/cJSON.c) -# set(PROGRAMS -# pdc_ls -# pdc_import -# pdc_export -# ) +set(PROGRAMS + pdc_ls + pdc_import + pdc_export + ) -# foreach(program ${PROGRAMS}) -# add_executable(${program} ${program}.c) -# target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) -# target_link_libraries(${program} pdc) -# target_link_libraries(${program} cjson) -# target_link_libraries(${program} llsm_tiff) -# target_include_directories(${program} PUBLIC ${PDC_INCLUDE_DIR}) -# endforeach(program) +foreach(program ${PROGRAMS}) + add_executable(${program} ${program}.c) + target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) + target_link_libraries(${program} pdc) + target_link_libraries(${program} cjson) + target_include_directories(${program} PUBLIC ${PDC_INCLUDE_DIR}) +endforeach(program) # Find LibTIFF diff --git a/tools/pdc_import.c b/tools/pdc_import.c index 9388ac495..4a0af982c 100644 --- a/tools/pdc_import.c +++ b/tools/pdc_import.c @@ -10,8 +10,8 @@ #include "hdf5.h" #include "pdc.h" -#include "pdc_client_server_common.h" -#include "pdc_client_connect.h" +// #include "pdc_client_server_common.h" +// #include "pdc_client_connect.h" #define MAX_NAME 1024 #define MAX_FILES 2500 @@ -698,12 +698,15 @@ scan_attrs(hid_t oid, pdcid_t obj_id) void do_attr(hid_t aid, pdcid_t obj_id) { - ssize_t len; - hid_t atype; - hid_t aspace; - char buf[MAX_NAME] = {0}; - char read_buf[TAG_LEN_MAX] = {0}; - pdc_kvtag_t kvtag1; + ssize_t len; + hid_t atype; + hid_t aspace; + char buf[MAX_NAME] = {0}; + char read_buf[TAG_LEN_MAX] = {0}; + // pdc_kvtag_t kvtag1; + char * tag_name; + void * tag_value; + size_t tag_size; /* * Get the name of the attribute. 
@@ -717,15 +720,15 @@ do_attr(hid_t aid, pdcid_t obj_id) atype = H5Aget_type(aid); H5Aread(aid, atype, read_buf); - kvtag1.name = buf; - kvtag1.value = (void *)read_buf; + tag_name = buf; + tag_value = (void *)read_buf; if (atype == H5T_STRING) { - kvtag1.size = strlen(read_buf) + 1; + tag_size = strlen(read_buf) + 1; } else { - kvtag1.size = H5Tget_size(atype); + tag_size = H5Tget_size(atype); } - PDCobj_put_tag(obj_id, kvtag1.name, kvtag1.value, kvtag1.size); + PDCobj_put_tag(obj_id, tag_name, tag_value, tag_size); /* * Get attribute information: dataspace, data type From 4dbc4a51bed60cfb474c89078dad897a69aaf1d0 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 25 Apr 2023 22:21:29 -0500 Subject: [PATCH 166/806] update pdc_import.c --- tools/pdc_export.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/pdc_export.c b/tools/pdc_export.c index af42e116a..a67f444e3 100644 --- a/tools/pdc_export.c +++ b/tools/pdc_export.c @@ -15,9 +15,9 @@ #endif #include "pdc.h" -#include "pdc_client_server_common.h" -#include "pdc_client_connect.h" -#include "../src/server/include/pdc_server_metadata.h" +// #include "pdc_client_server_common.h" +// #include "pdc_client_connect.h" +// #include "../src/server/include/pdc_server_metadata.h" #include "cjson/cJSON.h" const char *avail_args[] = {"-f"}; From fa745f5aa1c8f36404f3cde97f74f27238f56aac Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 25 Apr 2023 22:22:43 -0500 Subject: [PATCH 167/806] update pdc_export.c --- tools/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 815a081ec..c303a0062 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -90,7 +90,7 @@ add_library(cjson cjson/cJSON.c) set(PROGRAMS pdc_ls pdc_import - pdc_export + # pdc_export ) foreach(program ${PROGRAMS}) From b28957c072e30d3bde6c44becce889bb408ca7a2 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 25 Apr 2023 22:24:17 -0500 Subject: [PATCH 168/806] update pdc_import.c --- tools/pdc_import.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/pdc_import.c b/tools/pdc_import.c index 4a0af982c..1079dd5be 100644 --- a/tools/pdc_import.c +++ b/tools/pdc_import.c @@ -17,6 +17,7 @@ #define MAX_FILES 2500 #define MAX_FILENAME_LEN 64 #define MAX_TAG_SIZE 8192 +#define TAG_LEN_MAX 2048 typedef struct ArrayList { int length; From e2196a98fe032fe81094eb6de9f9bf76f970f4e9 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 25 Apr 2023 22:26:00 -0500 Subject: [PATCH 169/806] update pdc_import.c --- tools/pdc_import.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/pdc_import.c b/tools/pdc_import.c index 1079dd5be..f0598b792 100644 --- a/tools/pdc_import.c +++ b/tools/pdc_import.c @@ -1,6 +1,8 @@ #include #include #include +#include +#include // #define ENABLE_MPI 1 From 9f986d745ddb1f22cf4f37db329142b5340482da Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 25 Apr 2023 22:39:54 -0500 Subject: [PATCH 170/806] update pdc_import.c --- tools/pdc_import.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/tools/pdc_import.c b/tools/pdc_import.c index f0598b792..d1a7e785a 100644 --- a/tools/pdc_import.c +++ b/tools/pdc_import.c @@ -83,8 +83,7 @@ int ndset_g = 0; /* FILE *summary_fp_g; */ int max_tag_size_g = 0; pdcid_t pdc_id_g = 0, cont_prop_g = 0, cont_id_g = 0, obj_prop_g = 0; -struct timeval write_timer_start_g; -struct timeval write_timer_end_g; +struct timespec write_timer_start_g, write_timer_end_g; struct 
ArrayList *container_names; int overwrite = 0; @@ -249,9 +248,8 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); #endif - struct timeval pdc_timer_start; - struct timeval pdc_timer_end; - gettimeofday(&pdc_timer_start, 0); + struct timespec pdc_timer_start, pdc_timer_end; + clock_gettime(CLOCK_MONOTONIC, &pdc_timer_start); for (i = 0; i < my_count; i++) { filename = my_filenames[i]; @@ -275,7 +273,8 @@ main(int argc, char **argv) #endif // Checkpoint all metadata after import each hdf5 file if (rank == 0) { - PDC_Client_all_server_checkpoint(); + // FIXME: this should be replaced by a function in public headers. + // PDC_Client_all_server_checkpoint(); } /* printf("%s, %d\n", filename, max_tag_size_g); */ /* printf("\n\n======================\nNumber of datasets: %d\n", ndset_g); */ @@ -288,8 +287,9 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); #endif - gettimeofday(&pdc_timer_end, 0); - double write_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); + clock_gettime(CLOCK_MONOTONIC, &pdc_timer_end); + double write_time = (pdc_timer_end.tv_sec - pdc_timer_start.tv_sec) * 1e9 + + (pdc_timer_end.tv_nsec - pdc_timer_start.tv_nsec); // calculate duration in nanoseconds #ifdef ENABLE_MPI MPI_Reduce(&ndset_g, &total_dset, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); @@ -297,7 +297,7 @@ main(int argc, char **argv) total_dset = ndset_g; #endif if (rank == 0) { - printf("Import %d datasets with %d ranks took %.2f seconds.\n", total_dset, size, write_time); + printf("Import %d datasets with %d ranks took %.2f seconds.\n", total_dset, size, write_time/1e9); } } @@ -553,7 +553,7 @@ do_dset(hid_t did, char *name, char *app_name) obj_region.size = size; if (ndset_g == 1) - gettimeofday(&write_timer_start_g, 0); + clock_gettime(CLOCK_MONOTONIC, &write_timer_start_g); /* PDC_Client_query_metadata_name_timestep(dset_name_g, 0, &meta); */ /* if (meta == NULL) */ @@ -569,12 +569,13 @@ do_dset(hid_t did, char *name, char *app_name) // PDC_Client_write_id(obj_id, &obj_region, buf); if (ndset_g % 100 == 0) { - gettimeofday(&write_timer_end_g, 0); - double elapsed_time = PDC_get_elapsed_time_double(&write_timer_start_g, &write_timer_end_g); - printf("Importer%2d: Finished written 100 objects, took %.2f, my total %d\n", rank, elapsed_time, + clock_gettime(CLOCK_MONOTONIC, &write_timer_end_g); + double elapsed_time = (write_timer_end_g.tv_sec - write_timer_start_g.tv_sec) * 1e9 + + (write_timer_end_g.tv_nsec - write_timer_start_g.tv_nsec); // calculate duration in nanoseconds; + printf("Importer%2d: Finished written 100 objects, took %.2f, my total %d\n", rank, elapsed_time/1e9, ndset_g); fflush(stdout); - gettimeofday(&write_timer_start_g, 0); + clock_gettime(CLOCK_MONOTONIC, &write_timer_start_g); } free(buf); From edd1a1c8d2e88ca755c91bbd2729dcd1876184d1 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 25 Apr 2023 22:41:00 -0500 Subject: [PATCH 171/806] update pdc_import.c --- tools/pdc_import.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/pdc_import.c b/tools/pdc_import.c index d1a7e785a..88958bb53 100644 --- a/tools/pdc_import.c +++ b/tools/pdc_import.c @@ -542,7 +542,7 @@ do_dset(hid_t did, char *name, char *app_name) scan_attrs(did, obj_id); - pdc_metadata_t *meta = NULL; + // pdc_metadata_t *meta = NULL; obj_region.ndim = ndim; for (i = 0; i < ndim; i++) { offset[i] = 0; From 59a332f49860af3ce98fd7bd81d77977690afa1e Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 25 Apr 2023 22:42:24 -0500 Subject: [PATCH 172/806] update tools/cmake 
--- tools/CMakeLists.txt | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index c303a0062..e15e21992 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -87,19 +87,19 @@ add_definitions(-DENABLE_MPI=1) add_library(cjson cjson/cJSON.c) -set(PROGRAMS - pdc_ls - pdc_import - # pdc_export - ) +# set(PROGRAMS +# pdc_ls +# pdc_import +# pdc_export +# ) -foreach(program ${PROGRAMS}) - add_executable(${program} ${program}.c) - target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) - target_link_libraries(${program} pdc) - target_link_libraries(${program} cjson) - target_include_directories(${program} PUBLIC ${PDC_INCLUDE_DIR}) -endforeach(program) +# foreach(program ${PROGRAMS}) +# add_executable(${program} ${program}.c) +# target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) +# target_link_libraries(${program} pdc) +# target_link_libraries(${program} cjson) +# target_include_directories(${program} PUBLIC ${PDC_INCLUDE_DIR}) +# endforeach(program) # Find LibTIFF From d5315f9c1dc3faf44df4b37a02abd2299691cb62 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 25 Apr 2023 23:21:21 -0500 Subject: [PATCH 173/806] clang format --- tools/llsm/csvReader.h | 1 - tools/llsm_importer.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/llsm/csvReader.h b/tools/llsm/csvReader.h index 898f60fdb..d5aa87aaf 100644 --- a/tools/llsm/csvReader.h +++ b/tools/llsm/csvReader.h @@ -54,7 +54,6 @@ void csv_set_escape(char escape); */ void csv_set_newline(char newline); - /** * @brief This function parses a CSV header line and returns a linked list of csv_header_t structs. The header * string may contain quotes and spaces diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 07196d018..d777812c9 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -121,7 +121,7 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds printf("[Rank %4d] Region Transfer for object %s [%d Bytes] Done! Time taken: %.4f seconds\n", rank, - fileName_cell->field_value, image_info->tiff_size , duration / 1e9); + fileName_cell->field_value, image_info->tiff_size, duration / 1e9); // add metadata tags based on the csv row csv_cell_t *cell = fileName_cell; From 17319974b09528837fa530f112bbc58d38a65fa3 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 25 Apr 2023 23:27:38 -0500 Subject: [PATCH 174/806] clang format --- src/tests/kvtag_add_get_benchmark.c | 5 +++-- tools/pdc_import.c | 19 +++++++++++-------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 6c4457eb4..5cda25433 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -293,7 +293,7 @@ check_and_release_query_result(uint64_t n_query, uint64_t my_obj, uint64_t my_ob } } free(values); - // FIXME: close objects. This is currently commented off to save node hours for benchmarks. + // FIXME: close objects. This is currently commented off to save node hours for benchmarks. // for (i = 0; i < my_obj; i++) { // v = i + my_obj_s; // if (PDCobj_close(obj_ids[i]) < 0) @@ -486,7 +486,8 @@ main(int argc, char *argv[]) free(tag_values); free(obj_ids); - //FIXME: the following is currently commented off to reduce node hours taken by time-consuming resource releasing procedure. 
+ // FIXME: the following is currently commented off to reduce node hours taken by time-consuming resource + // releasing procedure. // closePDC(pdc, cont_prop, cont, obj_prop); done: diff --git a/tools/pdc_import.c b/tools/pdc_import.c index 88958bb53..f51f587c8 100644 --- a/tools/pdc_import.c +++ b/tools/pdc_import.c @@ -288,8 +288,9 @@ main(int argc, char **argv) #endif clock_gettime(CLOCK_MONOTONIC, &pdc_timer_end); - double write_time = (pdc_timer_end.tv_sec - pdc_timer_start.tv_sec) * 1e9 + - (pdc_timer_end.tv_nsec - pdc_timer_start.tv_nsec); // calculate duration in nanoseconds + double write_time = + (pdc_timer_end.tv_sec - pdc_timer_start.tv_sec) * 1e9 + + (pdc_timer_end.tv_nsec - pdc_timer_start.tv_nsec); // calculate duration in nanoseconds #ifdef ENABLE_MPI MPI_Reduce(&ndset_g, &total_dset, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); @@ -297,7 +298,8 @@ main(int argc, char **argv) total_dset = ndset_g; #endif if (rank == 0) { - printf("Import %d datasets with %d ranks took %.2f seconds.\n", total_dset, size, write_time/1e9); + printf("Import %d datasets with %d ranks took %.2f seconds.\n", total_dset, size, + write_time / 1e9); } } @@ -543,7 +545,7 @@ do_dset(hid_t did, char *name, char *app_name) scan_attrs(did, obj_id); // pdc_metadata_t *meta = NULL; - obj_region.ndim = ndim; + obj_region.ndim = ndim; for (i = 0; i < ndim; i++) { offset[i] = 0; size[i] = dims[i]; @@ -570,10 +572,11 @@ do_dset(hid_t did, char *name, char *app_name) // PDC_Client_write_id(obj_id, &obj_region, buf); if (ndset_g % 100 == 0) { clock_gettime(CLOCK_MONOTONIC, &write_timer_end_g); - double elapsed_time = (write_timer_end_g.tv_sec - write_timer_start_g.tv_sec) * 1e9 + - (write_timer_end_g.tv_nsec - write_timer_start_g.tv_nsec); // calculate duration in nanoseconds; - printf("Importer%2d: Finished written 100 objects, took %.2f, my total %d\n", rank, elapsed_time/1e9, - ndset_g); + double elapsed_time = + (write_timer_end_g.tv_sec - write_timer_start_g.tv_sec) * 1e9 + + (write_timer_end_g.tv_nsec - write_timer_start_g.tv_nsec); // calculate duration in nanoseconds; + printf("Importer%2d: Finished written 100 objects, took %.2f, my total %d\n", rank, + elapsed_time / 1e9, ndset_g); fflush(stdout); clock_gettime(CLOCK_MONOTONIC, &write_timer_start_g); } From dc842c56d10ab14c5045af0711399c2ce8f338e7 Mon Sep 17 00:00:00 2001 From: Zhang Wei Date: Mon, 8 May 2023 11:38:12 -0500 Subject: [PATCH 175/806] Merge tools/llsm_importer (#81) * remove unnecessary install block from CMakeLists.txt * update output * Revert "update output" This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd. * build kvtag_add_get_scale * comment off free * update code * 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. moving work assigning block downwards right before creating objects \n 4. 
everything is tested working * do while loop added, tested with 1m object and works * 1m objects test works, 10m object test fail as the original also fails * add new executable to test set * enlarge PDC_SERVER_ID_INTERVAL * update code * update console args * add p search test * add console arg for changing number of attributes per object * free allocated memory * fix query count issue * fix attr length definition * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * fix data type * fix data type * fix data type * add client side statistics * add client side statistics * fix format * clang formatter * update CMake * update CMake * update CMake * free allocated memory properly * clang format * clang format * clang-format-10 * change file name * address review comments * update llsm importer * update llsm importer * update server checkpoint intervals * update gitignore * adding job scripts * adding one debugging msg * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update output for uint64_t * add scripts * update output for uint64_t * update output for uint64_t * update output for uint64_t * update scripts * update scripts * delete debugging message * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * update tag names * update tag names * update query startingpos * update query startingpos * update job scripts * add progressive timing for kvtag_add_get_scale * fix iteration count in final report * update job scripts and benckmark program * update message format * update message format * update message format * update message format * clang format * update job scripts * comment off object/container close procedure in benchmark to save node hours * change the max number of object to 1M * change the max length of attribute value * change the max length of attribute value * llsm tiff import test * llsm tiff import test * llsm tiff import test * llsm tiff import test * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update cmake and llsm_importer * update cmake and llsm_importer * close if in cmake * cmake fix tiff * cmake policy to suppress warning * add pdc include dir * update code * update code * update code * update code * update code * update code * update array generating method * update array generating method * update array generating method * update array generating method * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * fix return type * fix return type * add timing * add timing * fix output * llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader * fix vairable name * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * add scripts * add scripts * add scripts * debugging for nonMPI program * debugging for nonMPI program * debugging for nonMPI program * clang format, without PDC, everything works perfectly. 
program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created * enable MPI * enable MPI * enlarge BCase size * enlarge BCase size * enlarge BCase size * resolve bcast count * llsm data path in script * llsm data path in script * update csv reader * update csv reader * update csv reader * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * enlarge max write * update pdc * update pdc * update pdc * update pdc * update pdc_import.c * update pdc_import.c * update pdc_export.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update tools/cmake * clang format * clang format --------- Co-authored-by: Houjun Tang --- .../perlmutter/gen_scripts.sh | 2 +- .../perlmutter/template.sh | 11 +- .../perlmutter/template.sh | 9 +- scripts/llsm_importer/clean.sh | 8 + scripts/llsm_importer/gen_script.sh | 21 + scripts/llsm_importer/submit.sh | 76 ++ scripts/llsm_importer/template.sh | 67 ++ src/api/include/pdc_public.h | 4 +- src/server/pdc_client_server_common.c | 9 +- .../pdc_server_region/pdc_server_data.c | 3 +- src/tests/kvtag_add_get_benchmark.c | 16 +- tools/CMakeLists.txt | 86 +- tools/llsm/csvReader.c | 395 +++++++++ tools/llsm/csvReader.h | 190 ++++ tools/llsm/parallelReadTiff.c | 810 ++++++++++++++++++ tools/llsm/parallelReadTiff.h | 34 + tools/llsm/pdc_list.c | 165 ++++ tools/llsm/pdc_list.h | 110 +++ tools/llsm_importer.c | 374 ++++++++ tools/pdc_export.c | 8 +- tools/pdc_import.c | 72 +- 21 files changed, 2405 insertions(+), 65 deletions(-) create mode 100755 scripts/llsm_importer/clean.sh create mode 100755 scripts/llsm_importer/gen_script.sh create mode 100755 scripts/llsm_importer/submit.sh create mode 100755 scripts/llsm_importer/template.sh create mode 100644 tools/llsm/csvReader.c create mode 100644 tools/llsm/csvReader.h create mode 100644 tools/llsm/parallelReadTiff.c create mode 100644 tools/llsm/parallelReadTiff.h create mode 100644 tools/llsm/pdc_list.c create mode 100644 tools/llsm/pdc_list.h create mode 100644 tools/llsm_importer.c diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh b/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh index 62eb1a2b2..1bc285fe7 100755 --- a/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh +++ b/scripts/kvtag_add_get_benchmark/perlmutter/gen_scripts.sh @@ -2,7 +2,7 @@ N_THREAD=NO MAX_NODE=512 MAX_ATTR=1024 -MAX_ATTRLEN=1000 +MAX_ATTRLEN=1000000 for (( i = 1; i <= $MAX_NODE; i*=2 )); do mkdir -p $i diff --git a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh index 9c69b9872..db06915c0 100755 --- a/scripts/kvtag_add_get_benchmark/perlmutter/template.sh +++ b/scripts/kvtag_add_get_benchmark/perlmutter/template.sh @@ -3,7 +3,7 @@ #REGSBATCH -q regular #DBGSBATCH -q debug #SBATCH -N NODENUM -#REGSBATCH -t 4:00:00 +#REGSBATCH -t 1:00:00 #DBGSBATCH -t 0:30:00 #SBATCH -C cpu #SBATCH -J JOBNAME @@ -21,6 +21,7 @@ rm -rf $PDC_TMPDIR/* REPEAT=1 N_NODE=NODENUM +# NCLIENT=127 NCLIENT=31 export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE @@ -35,7 +36,7 @@ CLOSE=$EXECPATH/close_server chmod +x $EXECPATH/* -MAX_OBJ_COUNT=$((1024*1024*1024)) +MAX_OBJ_COUNT=$((1024*1024)) OBJ_INCR=$((MAX_OBJ_COUNT/1024)) ATTR_COUNT=ATTRNUM ATTR_LENGTH=ATTRLEN @@ -48,19 +49,19 @@ echo "" echo "=============" echo "$i Init server" echo "=============" -srun -N $N_NODE -n $N_NODE -c 2 --mem=100000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $SERVER & 
+stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*1)) -c 2 --cpu_bind=cores $SERVER & sleep 5 echo "============================================" echo "KVTAGS with $N_NODE nodes" echo "============================================" -srun -N $N_NODE -n $TOTALPROC -c 2 --mem=100000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $TOTALPROC -c 2 --cpu_bind=cores $CLIENT $MAX_OBJ_COUNT $OBJ_INCR $ATTR_COUNT $ATTR_LENGTH $QUERY_COUNT $N_NODE echo "" echo "=================" echo "$i Closing server" echo "=================" -srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLOSE +stdbuf -i0 -o0 -e0 srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores $CLOSE date diff --git a/scripts/kvtag_add_get_scale/perlmutter/template.sh b/scripts/kvtag_add_get_scale/perlmutter/template.sh index d48cb29f6..392d498b8 100755 --- a/scripts/kvtag_add_get_scale/perlmutter/template.sh +++ b/scripts/kvtag_add_get_scale/perlmutter/template.sh @@ -3,7 +3,7 @@ #REGSBATCH -q regular #DBGSBATCH -q debug #SBATCH -N NODENUM -#REGSBATCH -t 4:00:00 +#REGSBATCH -t 1:00:00 #DBGSBATCH -t 0:30:00 #SBATCH -C cpu #SBATCH -J JOBNAME @@ -21,6 +21,7 @@ REPEAT=1 N_NODE=NODENUM NCLIENT=31 +# NCLIENT=126 export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE mkdir -p $PDC_TMPDIR @@ -45,19 +46,19 @@ echo "" echo "=============" echo "$i Init server" echo "=============" -srun -N $N_NODE -n $N_NODE -c 2 --mem=128000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $SERVER & +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*1)) -c 2 --cpu_bind=cores $SERVER & sleep 5 echo "============================================" echo "KVTAGS with $N_NODE nodes" echo "============================================" -srun -N $N_NODE -n $TOTALPROC -c 2 --mem=256000 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLIENT $NUM_OBJ $NUM_TAGS $NUM_QUERY +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $TOTALPROC -c 2 --cpu_bind=cores $CLIENT $NUM_OBJ $NUM_TAGS $NUM_QUERY echo "" echo "=================" echo "$i Closing server" echo "=================" -srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores stdbuf -i0 -o0 -e0 $CLOSE +stdbuf -i0 -o0 -e0 srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores $CLOSE date diff --git a/scripts/llsm_importer/clean.sh b/scripts/llsm_importer/clean.sh new file mode 100755 index 000000000..d6476962e --- /dev/null +++ b/scripts/llsm_importer/clean.sh @@ -0,0 +1,8 @@ +#!/bin/bash +MAX_NODE=512 + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + + rm -rf $i/* + +done \ No newline at end of file diff --git a/scripts/llsm_importer/gen_script.sh b/scripts/llsm_importer/gen_script.sh new file mode 100755 index 000000000..9d310f2bb --- /dev/null +++ b/scripts/llsm_importer/gen_script.sh @@ -0,0 +1,21 @@ +#!/bin/bash +N_THREAD=NO +MAX_NODE=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +PROG_BASENAME=llsm_importer + +for (( i = 1; i <= $MAX_NODE; i*=2 )); do + mkdir -p $i + JOBNAME=${PROG_BASENAME}_${i} + TARGET=./$i/$JOBNAME.sbatch + cp template.sh $TARGET + sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET + sed -i "s/NODENUM/${i}/g" $TARGET + if [[ "$i" -gt "16" ]]; then + sed -i "s/REG//g" $TARGET + else + sed -i "s/DBG//g" $TARGET + fi +done diff --git a/scripts/llsm_importer/submit.sh b/scripts/llsm_importer/submit.sh new file mode 100755 index 000000000..b9019d149 --- /dev/null +++ b/scripts/llsm_importer/submit.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +# MIN_PROC=4 +# MAX_PROC=128 +MIN_PROC=1 +MAX_PROC=512 +MAX_ATTR=1024 +MAX_ATTRLEN=1000 + +PROG_BASENAME=llsm_importer + +curdir=$(pwd) + 
+first_submit=1 + +for (( i = 1; i <= $MAX_PROC; i*=2 )); do + mkdir -p $i + JOBNAME=${PROG_BASENAME}_${i} + TARGET=./$i/JOBNAME.sh + + njob=`squeue -u $USER | grep ${PROG_BASENAME} | wc -l` + echo $njob + while [ $njob -ge 4 ] + do + sleeptime=$[ ( $RANDOM % 1000 ) ] + sleep $sleeptime + njob=`squeue -u $USER | grep ${PROG_BASENAME} | wc -l` + echo $njob + done + + if [[ $first_submit == 1 ]]; then + # Submit first job w/o dependency + echo "Submitting $TARGET" + job=`sbatch $TARGET` + first_submit=0 + else + echo "Submitting $TARGET after ${job: -8}" + job=`sbatch -d afterany:${job: -8} $TARGET` + fi + + sleeptime=$[ ( $RANDOM % 5 ) ] + sleep $sleeptime +done + + +# for (( j = $MIN_PROC; j <= $MAX_PROC ; j*=2 )); do + +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# while [ $njob -ge 4 ] +# do +# sleeptime=$[ ( $RANDOM % 1000 ) ] +# sleep $sleeptime +# njob=`squeue -u $USER | grep vpic | wc -l` +# echo $njob +# done + + +# cd $curdir/$j +# for filename in ./*.sh ; do + +# if [[ $first_submit == 1 ]]; then +# # Submit first job w/o dependency +# echo "Submitting $filename" +# job=`sbatch $filename` +# first_submit=0 +# else +# echo "Submitting $filename after ${job: -8}" +# job=`sbatch -d afterany:${job: -8} $filename` +# fi + +# sleeptime=$[ ( $RANDOM % 5 ) ] +# sleep $sleeptime + +# done +# done diff --git a/scripts/llsm_importer/template.sh b/scripts/llsm_importer/template.sh new file mode 100755 index 000000000..d736b4076 --- /dev/null +++ b/scripts/llsm_importer/template.sh @@ -0,0 +1,67 @@ +#!/bin/bash -l + +#REGSBATCH -q regular +#DBGSBATCH -q debug +#SBATCH -N NODENUM +#REGSBATCH -t 1:00:00 +#DBGSBATCH -t 0:30:00 +#SBATCH -C cpu +#SBATCH -J JOBNAME +#SBATCH -A m2621 +#SBATCH -o o%j.JOBNAME.out +#SBATCH -e o%j.JOBNAME.out + +# export PDC_DEBUG=0 + + +export PDC_TMPDIR=$SCRATCH/data/pdc/conf + +rm -rf $PDC_TMPDIR/* + +REPEAT=1 + +N_NODE=NODENUM +NCLIENT=1 +# NCLIENT=126 + +export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE +mkdir -p $PDC_TMPDIR + +let TOTALPROC=$NCLIENT*$N_NODE + +EXECPATH=/global/cfs/cdirs/m2621/wzhang5/perlmutter/install/pdc/share/test/bin +TOOLPATH=/global/cfs/cdirs/m2621/wzhang5/perlmutter/source/pdc_llsm/tools/build +SERVER=$EXECPATH/pdc_server.exe +CLIENT=$TOOLPATH/llsm_importer +CLOSE=$EXECPATH/close_server + +chmod +x $EXECPATH/* +chmod +x $TOOLPATH/llsm_importer + +LLSM_DATA_PATH=/pscratch/sd/w/wzhang5/data/llsm/20220115_Korra_LLCPK_LFOV_0p1PSAmpKan/run1 +# LLSM_DATA_PATH=/global/cfs/cdirs/m2621/wzhang5/data/20220115_Korra_LLCPK_LFOV_0p1PSAmpKan/run1 +IMGLIST_PATH=${LLSM_DATA_PATH}/ImageList_from_encoder.csv + +date + + +echo "" +echo "=============" +echo "$i Init server" +echo "=============" +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*1)) -c 2 --cpu_bind=cores $SERVER & +sleep 5 + + +echo "============================================" +echo "KVTAGS with $N_NODE nodes" +echo "============================================" +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $TOTALPROC -c 2 --cpu_bind=cores $CLIENT -f $IMGLIST_PATH + +echo "" +echo "=================" +echo "$i Closing server" +echo "=================" +stdbuf -i0 -o0 -e0 srun -N 1 -n 1 -c 2 --mem=25600 --cpu_bind=cores $CLOSE + +date diff --git a/src/api/include/pdc_public.h b/src/api/include/pdc_public.h index dc286e5e9..8c47976f0 100644 --- a/src/api/include/pdc_public.h +++ b/src/api/include/pdc_public.h @@ -55,7 +55,9 @@ typedef enum { PDC_UINT64 = 9, /* 64-bit unsigned integer types */ PDC_INT16 = 10, PDC_INT8 = 11, - NCLASSES = 12 /* this must be last */ + PDC_UINT8 = 12, + PDC_UINT16 = 
13, + NCLASSES = 14 /* this must be last */ } pdc_var_type_t; typedef enum { PDC_PERSIST, PDC_TRANSIENT } pdc_lifetime_t; diff --git a/src/server/pdc_client_server_common.c b/src/server/pdc_client_server_common.c index 289c20527..14417485b 100644 --- a/src/server/pdc_client_server_common.c +++ b/src/server/pdc_client_server_common.c @@ -265,7 +265,14 @@ PDC_get_var_type_size(pdc_var_type_t dtype) break; case PDC_INT8: ret_value = sizeof(int8_t); - + goto done; + break; + case PDC_UINT8: + ret_value = sizeof(uint8_t); + goto done; + break; + case PDC_UINT16: + ret_value = sizeof(uint16_t); goto done; break; case PDC_INT64: diff --git a/src/server/pdc_server_region/pdc_server_data.c b/src/server/pdc_server_region/pdc_server_data.c index 8388957b0..6f7974b5d 100644 --- a/src/server/pdc_server_region/pdc_server_data.c +++ b/src/server/pdc_server_region/pdc_server_data.c @@ -4695,7 +4695,8 @@ PDC_Server_posix_write(int fd, void *buf, uint64_t write_size) while (write_size > max_write_size) { ret = write(fd, buf, max_write_size); if (ret < 0 || ret != (ssize_t)max_write_size) { - printf("==PDC_SERVER[%d]: write %d failed\n", pdc_server_rank_g, fd); + printf("==PDC_SERVER[%d]: in-loop: write %d failed, ret = %d, max_write_size = %llu\n", + pdc_server_rank_g, fd, ret, max_write_size); ret_value = FAIL; goto done; } diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 7b719ec96..5cda25433 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -293,12 +293,12 @@ check_and_release_query_result(uint64_t n_query, uint64_t my_obj, uint64_t my_ob } } free(values); - // close objects - for (i = 0; i < my_obj; i++) { - v = i + my_obj_s; - if (PDCobj_close(obj_ids[i]) < 0) - printf("fail to close object o%" PRIu64 "\n", v); - } + // FIXME: close objects. This is currently commented off to save node hours for benchmarks. + // for (i = 0; i < my_obj; i++) { + // v = i + my_obj_s; + // if (PDCobj_close(obj_ids[i]) < 0) + // printf("fail to close object o%" PRIu64 "\n", v); + // } } void @@ -486,7 +486,9 @@ main(int argc, char *argv[]) free(tag_values); free(obj_ids); - closePDC(pdc, cont_prop, cont, obj_prop); + // FIXME: the following is currently commented off to reduce node hours taken by time-consuming resource + // releasing procedure. + // closePDC(pdc, cont_prop, cont, obj_prop); done: #ifdef ENABLE_MPI diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index f9915b221..e15e21992 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -1,5 +1,24 @@ cmake_minimum_required (VERSION 2.8.12) +# Setup cmake policies. +foreach(p + CMP0012 + CMP0013 + CMP0014 + CMP0022 # CMake 2.8.12 + CMP0025 # CMake 3.0 + CMP0053 # CMake 3.1 + CMP0054 # CMake 3.1 + CMP0074 # CMake 3.12 + CMP0075 # CMake 3.12 + CMP0083 # CMake 3.14 + CMP0093 # CMake 3.15 + ) + if(POLICY ${p}) + cmake_policy(SET ${p} NEW) + endif() +endforeach() + project(PDC_VOL C) include_directories( @@ -51,18 +70,65 @@ option(USE_SYSTEM_HDF5 "Use system-installed HDF5." ON) endif() endif() +option(USE_SYSTEM_OPENMP "Use system-installed OpenMP." 
ON) +if(USE_SYSTEM_OPENMP) + find_package(OpenMP REQUIRED) + if(OPENMP_FOUND) + add_definitions(-DENABLE_OPENMP=1) + set(ENABLE_OPENMP 1) + set(OPENMP_LIBRARIES "${OpenMP_C_LIBRARIES}") + else() + message(FATAL_ERROR "OpenMP not found") + endif() +endif() + + add_definitions(-DENABLE_MPI=1) add_library(cjson cjson/cJSON.c) -set(PROGRAMS - pdc_ls - pdc_import - pdc_export - ) -foreach(program ${PROGRAMS}) - add_executable(${program} ${program}.c) - target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) - target_link_libraries(${program} cjson) -endforeach(program) +# set(PROGRAMS +# pdc_ls +# pdc_import +# pdc_export +# ) + +# foreach(program ${PROGRAMS}) +# add_executable(${program} ${program}.c) +# target_link_libraries(${program} ${PDC_EXT_LIB_DEPENDENCIES}) +# target_link_libraries(${program} pdc) +# target_link_libraries(${program} cjson) +# target_include_directories(${program} PUBLIC ${PDC_INCLUDE_DIR}) +# endforeach(program) + +# Find LibTIFF +option(USE_LIB_TIFF "Enable LibTiff." ON) +if(USE_LIB_TIFF) + find_package(TIFF REQUIRED) + if(TIFF_FOUND) + set(LLSM_LIB_SOURCE + llsm/parallelReadTiff.c + llsm/csvReader.c + llsm/pdc_list.c + ) + # Add the LibTIFF include directory to the include path + include_directories(${TIFF_INCLUDE_DIRS}) + add_library(llsm_tiff ${LLSM_LIB_SOURCE}) + target_compile_options(llsm_tiff PRIVATE ${OpenMP_C_FLAGS}) + target_link_libraries(llsm_tiff PUBLIC ${OpenMP_C_LIBRARIES}) + target_link_libraries(llsm_tiff PUBLIC ${TIFF_LIBRARIES}) + target_include_directories(llsm_tiff PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/llsm) + + + add_executable(llsm_importer llsm_importer.c) + target_link_libraries(llsm_importer ${PDC_EXT_LIB_DEPENDENCIES}) + target_link_libraries(llsm_importer pdc) + target_link_libraries(llsm_importer cjson) + target_link_libraries(llsm_importer ${TIFF_LIBRARIES}) + target_link_libraries(llsm_importer llsm_tiff) + target_include_directories(llsm_importer PUBLIC ${PDC_INCLUDE_DIR}) + else() + message(WARNING "LibTiff not found, ignore building the executables which requires LibTiff support.") + endif() +endif() \ No newline at end of file diff --git a/tools/llsm/csvReader.c b/tools/llsm/csvReader.c new file mode 100644 index 000000000..c6623a287 --- /dev/null +++ b/tools/llsm/csvReader.c @@ -0,0 +1,395 @@ +#include "csvReader.h" + +char csv_delimiter = ','; +char csv_quote = '\"'; +char csv_escape = '\\'; +char csv_newline = '\n'; + +void +csv_set_delimiter(char delimiter) +{ + csv_delimiter = delimiter; +} + +void +csv_set_quote(char quote) +{ + csv_quote = quote; +} + +void +csv_set_escape(char escape) +{ + csv_escape = escape; +} + +void +csv_set_newline(char newline) +{ + csv_newline = newline; +} + +csv_header_t * +csv_parse_header(char *line, char *field_types) +{ + csv_header_t *first_header = NULL; + csv_header_t *last_header = NULL; + char * token = NULL; + char * saveptr = NULL; + int field_index = 0; + int in_quotes = 0; + int value_start = 0; + int i = 0; + + for (int i = 0; line[i] != csv_newline; ++i) { + if (line[i] == csv_quote) { + in_quotes = !in_quotes; + } + else if (!in_quotes && (line[i] == csv_delimiter || line[i + 1] == csv_newline)) { + // Allocate memory for the header struct + csv_header_t *header = (csv_header_t *)malloc(sizeof(csv_header_t)); + if (header == NULL) { + return NULL; + } + // Remove quotes and spaces from the field name + header->field_name = strndup(line + value_start, i - value_start + (line[i + 1] == csv_newline)); + + // Set the field index + header->field_index = field_index; + + // Set the 
field type + if (field_types != NULL) { + header->field_type = field_types[field_index]; + } + else { + header->field_type = 's'; + } + + // Set the next pointer to NULL + header->next = NULL; + + // Add the header to the linked list + if (first_header == NULL) { + first_header = header; + last_header = header; + } + else { + last_header->next = header; + last_header = header; + } + + value_start = i + 1; + field_index++; + } + } + + return first_header; +} + +csv_row_t * +csv_parse_row(char *line, csv_header_t *header) +{ + csv_cell_t * first_cell = NULL; + csv_cell_t * last_cell = NULL; + csv_header_t *current_header = header; + char * token = NULL; + char * saveptr = NULL; + int field_index = 0; + int in_quotes = 0; + int value_start = 0; + int i = 0; + + for (int i = 0; line[i] != csv_newline; ++i) { + if (line[i] == csv_quote) { + in_quotes = !in_quotes; + } + else if (!in_quotes && (line[i] == csv_delimiter || line[i + 1] == csv_newline)) { + // Allocate memory for the cell struct + csv_cell_t *cell = (csv_cell_t *)malloc(sizeof(csv_cell_t)); + if (cell == NULL) { + return NULL; + } + + // Set the field name + cell->header = current_header; + + // Set the field value + cell->field_value = strndup(line + value_start, i - value_start + (line[i + 1] == csv_newline)); + + // Set the next pointer to NULL + cell->next = NULL; + + // Add the cell to the linked list + if (first_cell == NULL) { + first_cell = cell; + last_cell = cell; + } + else { + last_cell->next = cell; + last_cell = cell; + } + + value_start = i + 1; + field_index++; + current_header = current_header->next; + } + } + csv_row_t *row = (csv_row_t *)malloc(sizeof(csv_row_t)); + row->first_cell = first_cell; + row->next = NULL; + return row; +} + +csv_cell_t * +csv_get_field_value_by_name(csv_row_t *row, csv_header_t *header, char *field_name) +{ + csv_cell_t *cell = row->first_cell; + while (cell != NULL) { + if (strcmp(cell->header->field_name, field_name) == 0) { + return cell; + } + cell = cell->next; + } + return NULL; +} + +csv_cell_t * +csv_get_field_value_by_index(csv_row_t *row, csv_header_t *header, int field_index) +{ + csv_cell_t *cell = row->first_cell; + while (cell != NULL) { + if (cell->header->field_index == field_index) { + return cell; + } + cell = cell->next; + } + return NULL; +} + +csv_table_t * +csv_parse_file(char *file_name, char *field_types) +{ + FILE *fp = fopen(file_name, "r"); + if (fp == NULL) { + return NULL; + } + + // Allocate memory for the table struct + csv_table_t *table = (csv_table_t *)malloc(sizeof(csv_table_t)); + if (table == NULL) { + return NULL; + } + + // Read the first line of the file + char * line = NULL; + size_t len = 0; + ssize_t read = getline(&line, &len, fp); + + // Parse the header + table->first_header = csv_parse_header(line, field_types); + + // Parse the rows + csv_row_t *first_row = NULL; + csv_row_t *last_row = NULL; + while ((read = getline(&line, &len, fp)) != -1) { + // Allocate memory for the row struct + csv_row_t *row = csv_parse_row(line, table->first_header); + if (row == NULL) { + return NULL; + } + + // Add the row to the linked list + if (first_row == NULL) { + first_row = row; + last_row = row; + } + else { + last_row->next = row; + last_row = row; + } + } + + table->first_row = first_row; + + return table; +} + +csv_table_t * +csv_parse_list(PDC_LIST *list, char *field_types) +{ + csv_table_t *table = (csv_table_t *)malloc(sizeof(csv_table_t)); + if (table == NULL) { + return NULL; + } + int num_file_read = 0; + csv_row_t *first_row = NULL; + 
csv_row_t *last_row = NULL; + + PDC_LIST_ITERATOR *iter = pdc_list_iterator_new(list); + while (pdc_list_iterator_has_next(iter)) { + char *line = (char *)pdc_list_iterator_next(iter); + if (num_file_read == 0) { + table->first_header = csv_parse_header(line, field_types); + } + else { + // Allocate memory for the row struct + csv_row_t *row = csv_parse_row(line, table->first_header); + if (row == NULL) { + return NULL; + } + + // Add the row to the linked list + if (first_row == NULL) { + first_row = row; + last_row = row; + } + else { + last_row->next = row; + last_row = row; + } + } + num_file_read++; + } + + table->first_row = first_row; + + return table; +} + +void +csv_free_header(csv_header_t *header) +{ + csv_header_t *current_header = header; + csv_header_t *next_header = NULL; + while (current_header != NULL) { + next_header = current_header->next; + free(current_header->field_name); + free(current_header); + current_header = next_header; + } +} + +void +csv_free_row(csv_row_t *row) +{ + csv_row_t *current_row = row; + csv_row_t *next_row = NULL; + while (current_row != NULL) { + next_row = current_row->next; + csv_free_cell(current_row->first_cell); + free(current_row); + current_row = next_row; + } +} + +void +csv_free_cell(csv_cell_t *cell) +{ + csv_cell_t *current_cell = cell; + csv_cell_t *next_cell = NULL; + while (current_cell != NULL) { + next_cell = current_cell->next; + free(current_cell->field_value); + free(current_cell); + current_cell = next_cell; + } +} + +void +csv_free_table(csv_table_t *table) +{ + csv_free_header(table->first_header); + csv_free_row(table->first_row); + free(table); +} + +void +csv_print_header(csv_header_t *header) +{ + csv_header_t *current_header = header; + while (current_header != NULL) { + printf("%s", current_header->field_name); + if (current_header->next != NULL) { + printf(", "); + } + current_header = current_header->next; + } + printf("\n"); +} + +void +csv_print_row(csv_row_t *row, int with_key) +{ + csv_cell_t *current_cell = row->first_cell; + while (current_cell != NULL) { + csv_print_cell(current_cell, with_key); + if (current_cell->next != NULL) { + printf(", "); + } + if (with_key) { + printf("\n"); + } + current_cell = current_cell->next; + } + printf("\n"); +} + +void +csv_print_cell(csv_cell_t *cell, int with_key) +{ + if (with_key) { + printf("%s: ", cell->header->field_name); + } + switch (cell->header->field_type) { + case 'i': + printf("%ld", strtol(cell->field_value, NULL, 10)); + break; + + case 'f': + printf("%f", strtod(cell->field_value, NULL)); + break; + + case 's': + printf("%s", cell->field_value); + break; + + default: + printf("%s", cell->field_value); + break; + } +} + +void +csv_print_table(csv_table_t *table) +{ + csv_print_header(table->first_header); + csv_row_t *current_row = table->first_row; + while (current_row != NULL) { + csv_print_row(current_row, 0); + current_row = current_row->next; + } +} + +int +csv_get_num_rows(csv_table_t *table) +{ + int num_rows = 0; + csv_row_t *current_row = table->first_row; + while (current_row != NULL) { + num_rows++; + current_row = current_row->next; + } + return num_rows; +} + +int +csv_get_num_fields(csv_table_t *table) +{ + int num_fields = 0; + csv_header_t *current_header = table->first_header; + while (current_header != NULL) { + num_fields++; + current_header = current_header->next; + } + return num_fields; +} \ No newline at end of file diff --git a/tools/llsm/csvReader.h b/tools/llsm/csvReader.h new file mode 100644 index 000000000..d5aa87aaf --- 
/dev/null +++ b/tools/llsm/csvReader.h @@ -0,0 +1,190 @@ +#ifndef CSVREADER_H +#define CSVREADER_H + +#include +#include +#include + +#include "pdc_list.h" + +typedef struct csv_header_t { + char * field_name; + int field_index; + char field_type; + struct csv_header_t *next; +} csv_header_t; + +typedef struct csv_cell_t { + char * field_value; + csv_header_t * header; + struct csv_cell_t *next; +} csv_cell_t; + +typedef struct csv_row_t { + csv_cell_t * first_cell; + struct csv_row_t *next; +} csv_row_t; + +typedef struct csv_table_t { + csv_header_t *first_header; + csv_row_t * first_row; +} csv_table_t; + +/** + * @brief This function sets the delimiter for the CSV file. The default is a comma. + * @param delimiter The delimiter to use. + */ +void csv_set_delimiter(char delimiter); + +/** + * @brief This function sets the quote character for the CSV file. The default is a double quote. + * @param quote The quote character to use. + */ +void csv_set_quote(char quote); + +/** + * @brief This function sets the escape character for the CSV file. The default is a backslash. + * @param escape The escape character to use. + */ +void csv_set_escape(char escape); + +/** + * @brief This function sets the newline character for the CSV file. The default is a newline. + * @param newline The newline character to use. + */ +void csv_set_newline(char newline); + +/** + * @brief This function parses a CSV header line and returns a linked list of csv_header_t structs. The header + * string may contain quotes and spaces + * @param line The CSV header line to parse. + * @param field_types A string of field types. The field types are 's' for string, 'i' for long integer, 'f' + * for float, and 'd' for double. If this is NULL, all fields are assumed to be strings. + * + * @return A pointer to the first csv_header_t struct in the linked list. + */ +csv_header_t *csv_parse_header(char *line, char *field_types); + +/** + * @brief This function parse a CSV row line and returns a linked list of csv_cell_t structs. The row string + * may contain quotes and spaces + * @param line The CSV row line to parse. + * @param header A pointer to the first csv_header_t struct in the linked list. + * + * @return A pointer to the csv_row_t struct. The value in the csv_cell should be + * free of quotes or spaces. + */ +csv_row_t *csv_parse_row(char *line, csv_header_t *header); + +/** + * @brief This function returns the string value of a field for a given row string. The row string may contain + * quotes and spaces + * @param row The CSV row to look for. + * @param header A pointer to the first csv_header_t struct in the linked list. + * @param field_name The name of the field to get the value for. + * + * @return A pointer to the csv_cell struct of the field. The value in the csv_cell should be free of quotes + * or spaces. + */ +csv_cell_t *csv_get_field_value_by_name(csv_row_t *row, csv_header_t *header, char *field_name); + +/** + * @brief This function returns the string value of a field for a given row string. The row string may contain + * quotes and spaces + * @param row The CSV row to look for. + * @param header A pointer to the first csv_header_t struct in the linked list. + * @param field_index The index of the field to get the value for. + * + * @return A pointer to the csv_cell struct of the field. The value in the csv_cell should be free of quotes + * or spaces. 
+ */ +csv_cell_t *csv_get_field_value_by_index(csv_row_t *row, csv_header_t *header, int field_index); + +/** + * @brief This function parses a CSV file and returns a csv_table_t struct. + * @param file_name The name of the CSV file to parse. + * @param field_types A string of field types. The field types are 's' for string, 'i' for long integer, 'f' + * for float, and 'd' for double. If this is NULL, all fields are assumed to be strings. + * + * @return A pointer to the csv_table_t struct. + */ +csv_table_t *csv_parse_file(char *file_name, char *field_types); + +/** + * @brief This function parses a PDC_LIST of strings as a CSV file and returns a csv_table_t struct. + * @param list A PDC_LIST of strings to parse. + * @param field_types A string of field types. The field types are 's' for string, 'i' for long integer, 'f' + * for float, and 'd' for double. If this is NULL, all fields are assumed to be strings. + * + * @return A pointer to the csv_table_t struct. + */ +csv_table_t *csv_parse_list(PDC_LIST *list, char *field_types); + +/** + * @brief This function frees the memory allocated for a csv_table_t struct. + * @param table A pointer to the csv_table_t struct to free. + */ +void csv_free_table(csv_table_t *table); + +/** + * @brief This function frees the memory allocated for a csv_header_t struct. + * @param header A pointer to the csv_header_t struct to free. + */ +void csv_free_header(csv_header_t *header); + +/** + * @brief This function frees the memory allocated for a csv_row_t struct. + * @param row A pointer to the csv_row_t struct to free. + */ +void csv_free_row(csv_row_t *row); + +/** + * @brief This function frees the memory allocated for a csv_cell_t struct. + * @param cell A pointer to the csv_cell_t struct to free. + */ +void csv_free_cell(csv_cell_t *cell); + +/** + * @brief This function prints the contents of a csv_table_t struct. + * @param table A pointer to the csv_table_t struct to print. + */ +void csv_print_table(csv_table_t *table); + +/** + * @brief This function prints the contents of a csv_header_t struct. + * @param header A pointer to the csv_header_t struct to print. + */ +void csv_print_header(csv_header_t *header); + +/** + * @brief This function prints the contents of a csv_row_t struct. + * @param row A pointer to the csv_row_t struct to print. + * @param with_key A flag to indicate whether to print the key or not. + */ + +void csv_print_row(csv_row_t *row, int with_key); + +/** + * @brief This function prints the contents of a csv_cell_t struct. + * @param cell A pointer to the csv_cell_t struct to print. + * @param with_key A flag to indicate whether to print the key or not. + */ +void csv_print_cell(csv_cell_t *cell, int with_key); + +/** + * @brief This function returns the number of rows in a csv_table_t struct. + * @param table A pointer to the csv_table_t struct. + * + * @return The number of rows in the table. + */ +int csv_get_num_rows(csv_table_t *table); + +/** + * @brief This function returns the number of fields in a csv_table_t struct. + * @param table A pointer to the csv_table_t struct. + * + * @return The number of fields in the table. 
+ */ +int csv_get_num_fields(csv_table_t *table); + +#endif // CSVREADER_H \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c new file mode 100644 index 000000000..4da885e61 --- /dev/null +++ b/tools/llsm/parallelReadTiff.c @@ -0,0 +1,810 @@ +#include "parallelReadTiff.h" +#include "tiffio.h" + +// #define ENABLE_OPENMP + +#ifdef ENABLE_OPENMP +#include "omp.h" +#endif + +#define CREATE_ARRAY(result_var, type, ndim, dim) \ + do { \ + size_t i = 0, dim_prod = 1; \ + for (i = 0; i < (ndim); i++) { \ + dim_prod *= (dim)[i]; \ + } \ + result_var = (void *)malloc(dim_prod * sizeof(type)); \ + } while (0) + +void +DummyHandler(const char *module, const char *fmt, va_list ap) +{ + // ignore errors and warnings +} + +// Backup method in case there are errors reading strips +void +readTiffParallelBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void *tiff, uint64_t bits, + uint64_t startSlice, uint8_t flipXY) +{ + int32_t numWorkers = omp_get_max_threads(); + int32_t batchSize = (z - 1) / numWorkers + 1; + uint64_t bytes = bits / 8; + + int32_t w; +#ifdef ENABLE_OPENMP +#pragma omp parallel for +#endif + for (w = 0; w < numWorkers; w++) { + + TIFF *tif = TIFFOpen(fileName, "r"); + if (!tif) + printf("tiff:threadError | Thread %d: File \"%s\" cannot be opened\n", w, fileName); + + void *buffer = malloc(x * bytes); + for (int64_t dir = startSlice + (w * batchSize); dir < startSlice + ((w + 1) * batchSize); dir++) { + if (dir >= z + startSlice) + break; + + int counter = 0; + while (!TIFFSetDirectory(tif, (uint64_t)dir) && counter < 3) { + printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. Try %d\n", w, fileName, dir, + counter + 1); + counter++; + } + + for (int64_t i = 0; i < y; i++) { + TIFFReadScanline(tif, buffer, i, 0); + if (!flipXY) { + memcpy(tiff + ((i * x) * bytes), buffer, x * bytes); + continue; + } + // loading the data into a buffer + switch (bits) { + case 8: + // Map Values to flip x and y for MATLAB + for (int64_t j = 0; j < x; j++) { + ((uint8_t *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((uint8_t *)buffer)[j]; + } + break; + case 16: + // Map Values to flip x and y for MATLAB + for (int64_t j = 0; j < x; j++) { + ((uint16_t *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((uint16_t *)buffer)[j]; + } + break; + case 32: + // Map Values to flip x and y for MATLAB + for (int64_t j = 0; j < x; j++) { + ((float *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((float *)buffer)[j]; + } + break; + case 64: + // Map Values to flip x and y for MATLAB + for (int64_t j = 0; j < x; j++) { + ((double *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((double *)buffer)[j]; + } + break; + } + } + } + free(buffer); + TIFFClose(tif); + } +} + +void +readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void *tiff, uint64_t bits, + uint64_t startSlice, uint64_t stripSize, uint8_t flipXY) +{ + int32_t numWorkers = omp_get_max_threads(); + int32_t batchSize = (z - 1) / numWorkers + 1; + uint64_t bytes = bits / 8; + + uint16_t compressed = 1; + TIFF * tif = TIFFOpen(fileName, "r"); + TIFFGetField(tif, TIFFTAG_COMPRESSION, &compressed); + + int32_t w; + uint8_t errBak = 0; + uint8_t err = 0; + char errString[10000]; + if (compressed > 1 || z < 32768) { + TIFFClose(tif); +#ifdef ENABLE_OPENMP +#pragma omp parallel for +#endif + for (w = 0; w < numWorkers; w++) { + + uint8_t outCounter = 0; + TIFF * tif = TIFFOpen(fileName, "r"); + while (!tif) { + tif = 
TIFFOpen(fileName, "r"); + if (outCounter == 3) { +#ifdef ENABLE_OPENMP +#pragma omp critical +#endif + { + err = 1; + sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); + } + continue; + } + outCounter++; + } + + void *buffer = malloc(x * stripSize * bytes); + for (int64_t dir = startSlice + (w * batchSize); dir < startSlice + ((w + 1) * batchSize); + dir++) { + if (dir >= z + startSlice || err) + break; + + uint8_t counter = 0; + while (!TIFFSetDirectory(tif, (uint64_t)dir) && counter < 3) { + counter++; + if (counter == 3) { +#ifdef ENABLE_OPENMP +#pragma omp critical +#endif + { + err = 1; + sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); + } + } + } + if (err) + break; + for (int64_t i = 0; i * stripSize < y; i++) { + + // loading the data into a buffer + int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize * x * bytes); + if (cBytes < 0) { +#ifdef ENABLE_OPENMP +#pragma omp critical +#endif + { + errBak = 1; + err = 1; + sprintf(errString, "Thread %d: Strip %ld cannot be read\n", w, i); + } + break; + } + if (!flipXY) { + memcpy(tiff + ((i * stripSize * x) * bytes), buffer, cBytes); + continue; + } + switch (bits) { + case 8: + // Map Values to flip x and y for MATLAB + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((uint8_t *)tiff)[((j * y) + (k + (i * stripSize))) + + ((dir - startSlice) * (x * y))] = + ((uint8_t *)buffer)[j + (k * x)]; + } + } + break; + case 16: + // Map Values to flip x and y for MATLAB + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((uint16_t *)tiff)[((j * y) + (k + (i * stripSize))) + + ((dir - startSlice) * (x * y))] = + ((uint16_t *)buffer)[j + (k * x)]; + } + } + break; + case 32: + // Map Values to flip x and y for MATLAB + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((float *)tiff)[((j * y) + (k + (i * stripSize))) + + ((dir - startSlice) * (x * y))] = + ((float *)buffer)[j + (k * x)]; + } + } + break; + case 64: + // Map Values to flip x and y for MATLAB + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((double *)tiff)[((j * y) + (k + (i * stripSize))) + + ((dir - startSlice) * (x * y))] = + ((double *)buffer)[j + (k * x)]; + } + } + break; + } + } + } + free(buffer); + TIFFClose(tif); + } + } + else { + uint64_t stripsPerDir = (uint64_t)ceil((double)y / (double)stripSize); +#ifdef _WIN32 + int fd = open(fileName, O_RDONLY | O_BINARY); +#else + int fd = open(fileName, O_RDONLY); +#endif + if (fd == -1) + printf("disk:threadError | File \"%s\" cannot be opened from Disk\n", fileName); + + if (!tif) + printf("tiff:threadError | File \"%s\" cannot be opened\n", fileName); + uint64_t offset = 0; + uint64_t *offsets = NULL; + TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); + uint64_t *byteCounts = NULL; + TIFFGetField(tif, TIFFTAG_STRIPBYTECOUNTS, &byteCounts); + if (!offsets || !byteCounts) + printf("tiff:threadError | Could not get offsets or byte counts from the tiff file\n"); + offset = offsets[0]; + uint64_t fOffset = offsets[stripsPerDir - 1] + byteCounts[stripsPerDir - 1]; + uint64_t zSize = fOffset - offset; + TIFFSetDirectory(tif, 1); + TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); + uint64_t gap = offsets[0] - fOffset; + + lseek(fd, offset, SEEK_SET); + + TIFFClose(tif); 
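+        // Uncompressed fast path: every directory is assumed to hold its pixel data as one
+        // contiguous block of zSize bytes, separated from the next directory by a fixed gap,
+        // so the loop below reads each slice straight from the file descriptor and then seeks
+        // past the inter-directory gap instead of going through libtiff.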
+ uint64_t curr = 0; + uint64_t bytesRead = 0; + // TESTING + // Not sure if we will need to read in chunks like for ImageJ + for (uint64_t i = 0; i < z; i++) { + bytesRead = read(fd, tiff + curr, zSize); + curr += bytesRead; + lseek(fd, gap, SEEK_CUR); + } + close(fd); + uint64_t size = x * y * z * (bits / 8); + void * tiffC = malloc(size); + memcpy(tiffC, tiff, size); +#ifdef ENABLE_OPENMP +#pragma omp parallel for +#endif + for (uint64_t k = 0; k < z; k++) { + for (uint64_t j = 0; j < x; j++) { + for (uint64_t i = 0; i < y; i++) { + switch (bits) { + case 8: + ((uint8_t *)tiff)[i + (j * y) + (k * x * y)] = + ((uint8_t *)tiffC)[j + (i * x) + (k * x * y)]; + break; + case 16: + ((uint16_t *)tiff)[i + (j * y) + (k * x * y)] = + ((uint16_t *)tiffC)[j + (i * x) + (k * x * y)]; + break; + case 32: + ((float *)tiff)[i + (j * y) + (k * x * y)] = + ((float *)tiffC)[j + (i * x) + (k * x * y)]; + break; + case 64: + ((double *)tiff)[i + (j * y) + (k * x * y)] = + ((double *)tiffC)[j + (i * x) + (k * x * y)]; + break; + } + } + } + } + free(tiffC); + } + if (err) { + if (errBak) + readTiffParallelBak(x, y, z, fileName, tiff, bits, startSlice, flipXY); + else + printf("tiff:threadError %s\n", errString); + } +} + +// Backup method in case there are errors reading strips +void +readTiffParallel2DBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void *tiff, uint64_t bits, + uint64_t startSlice, uint8_t flipXY) +{ + int32_t numWorkers = omp_get_max_threads(); + int32_t batchSize = (y - 1) / numWorkers + 1; + uint64_t bytes = bits / 8; + + int32_t w; +#ifdef ENABLE_OPENMP +#pragma omp parallel for +#endif + for (w = 0; w < numWorkers; w++) { + + TIFF *tif = TIFFOpen(fileName, "r"); + if (!tif) + printf("tiff:threadError | Thread %d: File \"%s\" cannot be opened\n", w, fileName); + + void *buffer = malloc(x * bytes); + for (int64_t dir = startSlice + (w * batchSize); dir < startSlice + ((w + 1) * batchSize); dir++) { + if (dir >= z + startSlice) + break; + + int counter = 0; + while (!TIFFSetDirectory(tif, (uint64_t)0) && counter < 3) { + printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. 
Try %d\n", w, fileName, dir, + counter + 1); + counter++; + } + + for (int64_t i = (w * batchSize); i < ((w + 1) * batchSize); i++) { + if (i >= y) + break; + TIFFReadScanline(tif, buffer, i, 0); + if (!flipXY) { + memcpy(tiff + ((i * x) * bytes), buffer, x * bytes); + continue; + } + // loading the data into a buffer + switch (bits) { + case 8: + // Map Values to flip x and y for MATLAB + for (int64_t j = 0; j < x; j++) { + ((uint8_t *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((uint8_t *)buffer)[j]; + } + break; + case 16: + // Map Values to flip x and y for MATLAB + for (int64_t j = 0; j < x; j++) { + ((uint16_t *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((uint16_t *)buffer)[j]; + } + break; + case 32: + // Map Values to flip x and y for MATLAB + for (int64_t j = 0; j < x; j++) { + ((float *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((float *)buffer)[j]; + } + break; + case 64: + // Map Values to flip x and y for MATLAB + for (int64_t j = 0; j < x; j++) { + ((double *)tiff)[((j * y) + i) + ((dir - startSlice) * (x * y))] = + ((double *)buffer)[j]; + } + break; + } + } + } + free(buffer); + TIFFClose(tif); + } +} + +void +readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void *tiff, uint64_t bits, + uint64_t startSlice, uint64_t stripSize, uint8_t flipXY) +{ + int32_t numWorkers = omp_get_max_threads(); + uint64_t stripsPerDir = (uint64_t)ceil((double)y / (double)stripSize); + int32_t batchSize = (stripsPerDir - 1) / numWorkers + 1; + uint64_t bytes = bits / 8; + + int32_t w; + uint8_t err = 0; + uint8_t errBak = 0; + char errString[10000]; + +#ifdef ENABLE_OPENMP +#pragma omp parallel for +#endif + for (w = 0; w < numWorkers; w++) { + + uint8_t outCounter = 0; + TIFF * tif = TIFFOpen(fileName, "r"); + while (!tif) { + tif = TIFFOpen(fileName, "r"); + if (outCounter == 3) { +#ifdef ENABLE_OPENMP +#pragma omp critical +#endif + { + err = 1; + sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); + } + continue; + } + outCounter++; + } + + void *buffer = malloc(x * stripSize * bytes); + + uint8_t counter = 0; + while (!TIFFSetDirectory(tif, 0) && counter < 3) { + printf("Thread %d: File \"%s\" Directory \"%d\" failed to open. 
Try %d\n", w, fileName, 0, + counter + 1); + counter++; + if (counter == 3) { +#ifdef ENABLE_OPENMP +#pragma omp critical +#endif + { + err = 1; + sprintf(errString, "Thread %d: File \"%s\" cannot be opened\n", w, fileName); + } + } + } + for (int64_t i = (w * batchSize); i < (w + 1) * batchSize; i++) { + if (i * stripSize >= y || err) + break; + // loading the data into a buffer + int64_t cBytes = TIFFReadEncodedStrip(tif, i, buffer, stripSize * x * bytes); + if (cBytes < 0) { +#ifdef ENABLE_OPENMP +#pragma omp critical +#endif + { + errBak = 1; + err = 1; + sprintf(errString, "Thread %d: Strip %ld cannot be read\n", w, i); + } + break; + } + if (!flipXY) { + memcpy(tiff + ((i * stripSize * x) * bytes), buffer, cBytes); + continue; + } + switch (bits) { + case 8: + // Map Values to flip x and y for MATLAB + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((uint8_t *)tiff)[((j * y) + (k + (i * stripSize)))] = + ((uint8_t *)buffer)[j + (k * x)]; + } + } + break; + case 16: + // Map Values to flip x and y for MATLAB + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((uint16_t *)tiff)[((j * y) + (k + (i * stripSize)))] = + ((uint16_t *)buffer)[j + (k * x)]; + } + } + break; + case 32: + // Map Values to flip x and y for MATLAB + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((float *)tiff)[((j * y) + (k + (i * stripSize)))] = + ((float *)buffer)[j + (k * x)]; + } + } + break; + case 64: + // Map Values to flip x and y for MATLAB + for (int64_t k = 0; k < stripSize; k++) { + if ((k + (i * stripSize)) >= y) + break; + for (int64_t j = 0; j < x; j++) { + ((double *)tiff)[((j * y) + (k + (i * stripSize)))] = + ((double *)buffer)[j + (k * x)]; + } + } + break; + } + } + free(buffer); + TIFFClose(tif); + } + + if (err) { + if (errBak) + readTiffParallel2DBak(x, y, z, fileName, tiff, bits, startSlice, flipXY); + else + printf("tiff:threadError %s\n", errString); + } +} + +// Reading images saved by ImageJ +void +readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void *tiff, uint64_t bits, + uint64_t startSlice, uint64_t stripSize, uint8_t flipXY) +{ +#ifdef _WIN32 + int fd = open(fileName, O_RDONLY | O_BINARY); +#else + int fd = open(fileName, O_RDONLY); +#endif + TIFF *tif = TIFFOpen(fileName, "r"); + if (!tif) + printf("tiff:threadError | File \"%s\" cannot be opened\n", fileName); + uint64_t offset = 0; + uint64_t *offsets = NULL; + TIFFGetField(tif, TIFFTAG_STRIPOFFSETS, &offsets); + if (offsets) + offset = offsets[0]; + + TIFFClose(tif); + lseek(fd, offset, SEEK_SET); + uint64_t bytes = bits / 8; + //#pragma omp parallel for + /* + for(uint64_t i = 0; i < z; i++){ + uint64_t cOffset = x*y*bytes*i; + //pread(fd,tiff+cOffset,x*y*bytes,offset+cOffset); + read(fd,tiff+cOffset,x*y*bytes); + }*/ + uint64_t chunk = 0; + uint64_t tBytes = x * y * z * bytes; + uint64_t bytesRead; + uint64_t rBytes = tBytes; + if (tBytes < INT_MAX) + bytesRead = read(fd, tiff, tBytes); + else { + while (chunk < tBytes) { + rBytes = tBytes - chunk; + if (rBytes > INT_MAX) + bytesRead = read(fd, tiff + chunk, INT_MAX); + else + bytesRead = read(fd, tiff + chunk, rBytes); + chunk += bytesRead; + } + } + close(fd); + // Swap endianess for types greater than 8 bits + // TODO: May need to change later because we may not always need to swap + if (bits > 8) { +#ifdef 
ENABLE_OPENMP +#pragma omp parallel for +#endif + for (uint64_t i = 0; i < x * y * z; i++) { + switch (bits) { + case 16: + //((uint16_t*)tiff)[i] = ((((uint16_t*)tiff)[i] & 0xff) >> 8) | (((uint16_t*)tiff)[i] << + // 8); + //((uint16_t*)tiff)[i] = bswap_16(((uint16_t*)tiff)[i]); + ((uint16_t *)tiff)[i] = + ((((uint16_t *)tiff)[i] << 8) & 0xff00) | ((((uint16_t *)tiff)[i] >> 8) & 0x00ff); + break; + case 32: + //((num & 0xff000000) >> 24) | ((num & 0x00ff0000) >> 8) | ((num & 0x0000ff00) << 8) | + //(num << 24) + //((float*)tiff)[i] = bswap_32(((float*)tiff)[i]); + ((uint32_t *)tiff)[i] = ((((uint32_t *)tiff)[i] << 24) & 0xff000000) | + ((((uint32_t *)tiff)[i] << 8) & 0x00ff0000) | + ((((uint32_t *)tiff)[i] >> 8) & 0x0000ff00) | + ((((uint32_t *)tiff)[i] >> 24) & 0x000000ff); + break; + case 64: + //((double*)tiff)[i] = bswap_64(((double*)tiff)[i]); + ((uint64_t *)tiff)[i] = ((((uint64_t *)tiff)[i] << 56) & 0xff00000000000000UL) | + ((((uint64_t *)tiff)[i] << 40) & 0x00ff000000000000UL) | + ((((uint64_t *)tiff)[i] << 24) & 0x0000ff0000000000UL) | + ((((uint64_t *)tiff)[i] << 8) & 0x000000ff00000000UL) | + ((((uint64_t *)tiff)[i] >> 8) & 0x00000000ff000000UL) | + ((((uint64_t *)tiff)[i] >> 24) & 0x0000000000ff0000UL) | + ((((uint64_t *)tiff)[i] >> 40) & 0x000000000000ff00UL) | + ((((uint64_t *)tiff)[i] >> 56) & 0x00000000000000ffUL); + break; + } + } + } + // Find a way to do this in-place without making a copy + if (flipXY) { + uint64_t size = x * y * z * (bits / 8); + void * tiffC = malloc(size); + memcpy(tiffC, tiff, size); +#ifdef ENABLE_OPENMP +#pragma omp parallel for +#endif + for (uint64_t k = 0; k < z; k++) { + for (uint64_t j = 0; j < x; j++) { + for (uint64_t i = 0; i < y; i++) { + switch (bits) { + case 8: + ((uint8_t *)tiff)[i + (j * y) + (k * x * y)] = + ((uint8_t *)tiffC)[j + (i * x) + (k * x * y)]; + break; + case 16: + ((uint16_t *)tiff)[i + (j * y) + (k * x * y)] = + ((uint16_t *)tiffC)[j + (i * x) + (k * x * y)]; + break; + case 32: + ((float *)tiff)[i + (j * y) + (k * x * y)] = + ((float *)tiffC)[j + (i * x) + (k * x * y)]; + break; + case 64: + ((double *)tiff)[i + (j * y) + (k * x * y)] = + ((double *)tiffC)[j + (i * x) + (k * x * y)]; + break; + } + } + } + } + free(tiffC); + } +} + +uint8_t +isImageJIm(TIFF *tif) +{ + if (!tif) + return 0; + char *tiffDesc = NULL; + if (TIFFGetField(tif, TIFFTAG_IMAGEDESCRIPTION, &tiffDesc)) { + if (strstr(tiffDesc, "ImageJ")) { + return 1; + } + } + return 0; +} + +uint64_t +imageJImGetZ(TIFF *tif) +{ + if (!tif) + return 0; + char *tiffDesc = NULL; + if (TIFFGetField(tif, TIFFTAG_IMAGEDESCRIPTION, &tiffDesc)) { + if (strstr(tiffDesc, "ImageJ")) { + char *nZ = strstr(tiffDesc, "images="); + if (nZ) { + nZ += 7; + char *temp; + return strtol(nZ, &temp, 10); + } + } + } + return 0; +} + +void +get_tiff_info(char *fileName, parallel_tiff_range_t *strip_range, uint64_t *x, uint64_t *y, uint64_t *z, + uint64_t *bits, uint64_t *startSlice, uint64_t *stripSize, uint64_t *is_imageJ, + uint64_t *imageJ_Z) +{ + TIFFSetWarningHandler(DummyHandler); + TIFF *tif = TIFFOpen(fileName, "r"); + if (!tif) + printf("tiff:inputError | File \"%s\" cannot be opened", fileName); + + TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, x); + TIFFGetField(tif, TIFFTAG_IMAGELENGTH, y); + + if (strip_range == NULL) { + uint16_t s = 0, m = 0, t = 1; + while (TIFFSetDirectory(tif, t)) { + s = t; + t *= 8; + if (s > t) { + t = 65535; + printf("Number of slices > 32768\n"); + break; + } + } + while (s != t) { + m = (s + t + 1) / 2; + if (TIFFSetDirectory(tif, m)) { + s = m; + } 
+ else { + if (m > 0) + t = m - 1; + else + t = m; + } + } + *z = s + 1; + } + else { + if (strip_range->length != 2) { + printf("tiff:inputError | Input range is not 2"); + } + else { + *startSlice = (uint64_t)(*(strip_range->range)) - 1; + *z = (uint64_t)(*(strip_range->range + 1)) - startSlice[0]; + if (!TIFFSetDirectory(tif, startSlice[0] + z[0] - 1) || !TIFFSetDirectory(tif, startSlice[0])) { + printf("tiff:rangeOutOfBound | Range is out of bounds"); + } + } + } + + *is_imageJ = isImageJIm(tif); + *imageJ_Z = imageJImGetZ(tif); + if (*is_imageJ) { + *is_imageJ = 1; + *imageJ_Z = imageJImGetZ(tif); + if (*imageJ_Z) + *z = *imageJ_Z; + } + + TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, bits); + TIFFGetField(tif, TIFFTAG_ROWSPERSTRIP, stripSize); + TIFFClose(tif); +} + +void * +_get_tiff_array(int bits, int ndim, size_t *dims) +{ + void *tiff = NULL; + if (bits == 8) { + CREATE_ARRAY(tiff, uint8_t, ndim, dims); + } + else if (bits == 16) { + CREATE_ARRAY(tiff, uint16_t, ndim, dims); + } + else if (bits == 32) { + CREATE_ARRAY(tiff, float, ndim, dims); + } + else if (bits == 64) { + CREATE_ARRAY(tiff, double, ndim, dims); + } + return tiff; +} + +void +_TIFF_load(char *fileName, uint8_t isImageJIm, uint64_t x, uint64_t y, uint64_t z, uint64_t bits, + uint64_t startSlice, uint64_t stripSize, uint8_t flipXY, int ndim, size_t *dims, void **tiff_ptr) +{ + if (tiff_ptr == NULL) { + printf("tiff:dataTypeError, Data type not suppported\n"); + } + *tiff_ptr = _get_tiff_array(bits, ndim, dims); + // Case for ImageJ + if (isImageJIm) { + readTiffParallelImageJ(x, y, z, fileName, *tiff_ptr, bits, startSlice, stripSize, flipXY); + } + // Case for 2D + else if (z <= 1) { + readTiffParallel2D(x, y, z, fileName, *tiff_ptr, bits, startSlice, stripSize, flipXY); + } + // Case for 3D + else { + readTiffParallel(x, y, z, fileName, *tiff_ptr, bits, startSlice, stripSize, flipXY); + } +} + +void +parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_range, + image_info_t **image_info) +{ + uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0, stripeSize = 0, is_imageJ = 0, imageJ_Z = 0; + + get_tiff_info(fileName, strip_range, &x, &y, &z, &bits, &startSlice, &stripeSize, &is_imageJ, &imageJ_Z); + + int ndim = 3; + uint64_t dims[ndim]; + dims[0] = flipXY ? y : x; + dims[1] = flipXY ? 
x : y; + dims[2] = z; + + *image_info = (image_info_t *)malloc(sizeof(image_info_t)); + (*image_info)->x = dims[0]; + (*image_info)->y = dims[1]; + (*image_info)->z = dims[2]; + (*image_info)->bits = bits; + (*image_info)->startSlice = startSlice; + (*image_info)->stripeSize = stripeSize; + (*image_info)->is_imageJ = is_imageJ; + (*image_info)->imageJ_Z = imageJ_Z; + (*image_info)->tiff_size = dims[0] * dims[1] * dims[2] * (bits / 8); + + _TIFF_load(fileName, is_imageJ, x, y, z, bits, startSlice, stripeSize, flipXY, ndim, dims, + (void **)&((*image_info)->tiff_ptr)); +} \ No newline at end of file diff --git a/tools/llsm/parallelReadTiff.h b/tools/llsm/parallelReadTiff.h new file mode 100644 index 000000000..081640110 --- /dev/null +++ b/tools/llsm/parallelReadTiff.h @@ -0,0 +1,34 @@ +#ifndef PARALLELREADTIFF_H +#define PARALLELREADTIFF_H + +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct { + uint64_t *range; + size_t length; +} parallel_tiff_range_t; + +typedef struct { + uint64_t x; + uint64_t y; + uint64_t z; + uint64_t bits; + uint64_t startSlice; + uint64_t stripeSize; + uint64_t is_imageJ; + uint64_t imageJ_Z; + void * tiff_ptr; + size_t tiff_size; +} image_info_t; + +void parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_range, + image_info_t **image_info_t); + +#endif // PARALLELREADTIFF_H \ No newline at end of file diff --git a/tools/llsm/pdc_list.c b/tools/llsm/pdc_list.c new file mode 100644 index 000000000..95c6e59e7 --- /dev/null +++ b/tools/llsm/pdc_list.c @@ -0,0 +1,165 @@ +#include "pdc_list.h" + +PDC_LIST * +pdc_list_new() +{ + return pdc_list_create(100, 2.0); +} + +PDC_LIST * +pdc_list_create(size_t initial_capacity, double expansion_factor) +{ + // Allocate memory for the list struct. + PDC_LIST *list = (PDC_LIST *)malloc(sizeof(PDC_LIST)); + if (list == NULL) { + return NULL; + } + + // Allocate memory for the array of items. + list->items = (void **)malloc(initial_capacity * sizeof(void *)); + if (list->items == NULL) { + free(list); + return NULL; + } + + // Initialize the list fields. + list->item_count = 0; + list->capacity = initial_capacity; + list->expansion_factor = expansion_factor; + + return list; +} + +void +pdc_list_destroy(PDC_LIST *list) +{ + if (list == NULL) { + return; + } + + // Free all allocated memory for each item. + for (size_t i = 0; i < list->item_count; i++) { + free(list->items[i]); + } + + // Free the array of items and the list struct. + free(list->items); + free(list); +} + +void +pdc_list_add(PDC_LIST *list, void *item) +{ + if (list == NULL || item == NULL) { + return; + } + + // Expand the array of items if necessary. + if (list->item_count >= list->capacity) { + list->capacity *= list->expansion_factor; + list->items = (void **)realloc(list->items, list->capacity * sizeof(void *)); + if (list->items == NULL) { + return; + } + } + + // Add the new item to the end of the array. + list->items[list->item_count++] = item; +} + +void * +pdc_list_get(PDC_LIST *list, size_t index) +{ + if (list == NULL || index >= list->item_count) { + return NULL; + } + + // Return a pointer to the item at the given index. + return list->items[index]; +} + +size_t +pdc_list_size(PDC_LIST *list) +{ + if (list == NULL) { + return 0; + } + + // Return the number of items in the list. + return list->item_count; +} + +void +pdc_list_set_expansion_factor(PDC_LIST *list, double expansion_factor) +{ + if (list == NULL) { + return; + } + + // Set the new expansion factor for the list. 
+ list->expansion_factor = expansion_factor; +} + +double +pdc_list_get_expansion_factor(PDC_LIST *list) +{ + if (list == NULL) { + return 0; + } + + // Return the current expansion factor for the list. + return list->expansion_factor; +} + +PDC_LIST_ITERATOR * +pdc_list_iterator_new(PDC_LIST *list) +{ + if (list == NULL) { + return NULL; + } + + // Allocate memory for the iterator struct. + PDC_LIST_ITERATOR *iterator = (PDC_LIST_ITERATOR *)malloc(sizeof(PDC_LIST_ITERATOR)); + if (iterator == NULL) { + return NULL; + } + + // Initialize the iterator fields. + iterator->list = list; + iterator->index = 0; + + return iterator; +} + +void +pdc_list_iterator_destroy(PDC_LIST_ITERATOR *iterator) +{ + if (iterator == NULL) { + return; + } + + // Free the iterator struct. + free(iterator); +} + +void * +pdc_list_iterator_next(PDC_LIST_ITERATOR *iterator) +{ + if (iterator == NULL) { + return NULL; + } + + // Return the next item in the list. + return pdc_list_get(iterator->list, iterator->index++); +} + +int +pdc_list_iterator_has_next(PDC_LIST_ITERATOR *iterator) +{ + if (iterator == NULL) { + return 0; + } + + // Return true if there are more items in the list. + return iterator->index < pdc_list_size(iterator->list); +} \ No newline at end of file diff --git a/tools/llsm/pdc_list.h b/tools/llsm/pdc_list.h new file mode 100644 index 000000000..aa71e6124 --- /dev/null +++ b/tools/llsm/pdc_list.h @@ -0,0 +1,110 @@ +#ifndef PDC_LIST_H +#define PDC_LIST_H + +#include + +/** + * A generic list data structure that stores a variable number of items of any type. + */ +typedef struct { + void **items; // Pointer to the array of items. + size_t item_count; // Number of items in the list. + size_t capacity; // Capacity of the array of items. + double expansion_factor; // Factor by which the capacity is expanded. +} PDC_LIST; + +/** + * A generic iterator for iterating over the items in a PDC_LIST. + */ +typedef struct { + PDC_LIST *list; // The list being iterated over. + size_t index; // The index of the next item to be returned. +} PDC_LIST_ITERATOR; + +/** + * Creates a new PDC_LIST with default initial capacity 100 and default expansion factor 2.0. + * @return A pointer to the new PDC_LIST. + */ +PDC_LIST *pdc_list_new(); + +/** + * Creates a new PDC_LIST with the given initial capacity and expansion factor. + * @param initial_capacity The initial capacity of the list. + * @param expansion_factor The factor by which the capacity is expanded when the list is full. + * + * @return A pointer to the new PDC_LIST. + */ +PDC_LIST *pdc_list_create(size_t initial_capacity, double expansion_factor); + +/** + * Destroys the given PDC_LIST and frees all allocated memory. + * @param list The PDC_LIST to destroy. + */ +void pdc_list_destroy(PDC_LIST *list); + +/** + * Adds the given item to the end of the given PDC_LIST. + * @param list The PDC_LIST to add the item to. + * @param item The item to add to the PDC_LIST. + * + */ +void pdc_list_add(PDC_LIST *list, void *item); + +/** + * Gets the item at the given index in the given PDC_LIST. + * @param list The PDC_LIST to get the item from. + * @param index The index of the item to get. + * + * @return A pointer to the item at the given index. + */ +void *pdc_list_get(PDC_LIST *list, size_t index); + +/** + * Sets the item at the given index in the given PDC_LIST. + * @param list The PDC_LIST to set the item in. + * + * @return The number of items in the list. + */ +size_t pdc_list_size(PDC_LIST *list); + +/** + * Sets the expansion factor for the given PDC_LIST. 
+ * @param list The PDC_LIST to set the expansion factor for. + * @param expansion_factor The factor by which the capacity is expanded when the list is full. + */ +void pdc_list_set_expansion_factor(PDC_LIST *list, double expansion_factor); + +/** + * Gets the expansion factor for the given PDC_LIST. + * @param list The PDC_LIST to get the expansion factor for. + */ +double pdc_list_get_expansion_factor(PDC_LIST *list); + +/** + * Creates a new PDC_LIST_ITERATOR for the given PDC_LIST. + * @param list The PDC_LIST to create the iterator for. + * @return A pointer to the new PDC_LIST_ITERATOR. + */ +PDC_LIST_ITERATOR *pdc_list_iterator_new(PDC_LIST *list); + +/** + * Destroys the given PDC_LIST_ITERATOR and frees all allocated memory. + * @param iterator The PDC_LIST_ITERATOR to destroy. + */ +void pdc_list_iterator_destroy(PDC_LIST_ITERATOR *iterator); + +/** + * Returns the next item in the PDC_LIST_ITERATOR. + * @param iterator The PDC_LIST_ITERATOR to get the next item from. + * @return A pointer to the next item in the PDC_LIST_ITERATOR. + */ +void *pdc_list_iterator_next(PDC_LIST_ITERATOR *iterator); + +/** + * Returns true if the PDC_LIST_ITERATOR has more items. + * @param iterator The PDC_LIST_ITERATOR to check. + * @return True if the PDC_LIST_ITERATOR has more items. + */ +int pdc_list_iterator_has_next(PDC_LIST_ITERATOR *iterator); + +#endif // PDC_LIST_H \ No newline at end of file diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c new file mode 100644 index 000000000..d777812c9 --- /dev/null +++ b/tools/llsm_importer.c @@ -0,0 +1,374 @@ +#include +#include +#include +#include +#include + +#ifndef ENABLE_MPI +#define ENABLE_MPI +#endif + +#ifdef ENABLE_MPI +#include "mpi.h" +// #undef ENABLE_MPI +#endif + +#include "pdc.h" +// #include "pdc_client_server_common.h" +// #include "pdc_client_connect.h" + +#include "llsm/parallelReadTiff.h" +#include "llsm/pdc_list.h" +#include "llsm/csvReader.h" +#include + +typedef struct llsm_importer_args_t { + char * directory_path; + csv_header_t *csv_header; +} llsm_importer_args_t; + +int rank = 0, size = 1; + +pdcid_t pdc_id_g = 0, cont_prop_g = 0, cont_id_g = 0, obj_prop_g = 0; + +int +parse_console_args(int argc, char *argv[], char **file_name) +{ + int c, parse_code = -1; + + while ((c = getopt(argc, argv, "f:")) != -1) { + switch (c) { + case 'f': + *file_name = optarg; + parse_code = 0; + break; + default: + fprintf(stderr, "Usage: %s [-f filename]\n", argv[0]); + parse_code = -1; + exit(EXIT_FAILURE); + } + } + return parse_code; +} + +void +import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) +{ + struct timespec start, end; + double duration; + + clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation + + obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); + + psize_t ndims = 3; + uint64_t offsets[3] = {0, 0, 0}; + // FIXME: we should support uint64_t. + uint64_t dims[3] = {image_info->x, image_info->y, image_info->z}; + + // psize_t ndims = 1; + // uint64_t offsets[1] = {0}; + // // FIXME: we should support uint64_t. + // uint64_t dims[1] = {image_info->x * image_info->y * image_info->z}; + + // FIXME: we should change the ndims parameter to psize_t type. 
+ PDCprop_set_obj_dims(obj_prop_g, (PDC_int_t)ndims, dims); + pdc_var_type_t pdc_type = PDC_UNKNOWN; + switch (image_info->bits) { + case 8: + pdc_type = PDC_UINT8; + break; + case 16: + pdc_type = PDC_UINT16; + break; + case 32: + pdc_type = PDC_FLOAT; + break; + case 64: + pdc_type = PDC_DOUBLE; + break; + default: + printf("Error: unsupported data type.\n"); + exit(-1); + } + PDCprop_set_obj_type(obj_prop_g, pdc_type); + PDCprop_set_obj_time_step(obj_prop_g, 0); + PDCprop_set_obj_user_id(obj_prop_g, getuid()); + PDCprop_set_obj_app_name(obj_prop_g, "LLSM"); + + // uint64_t *offsets = (uint64_t *)malloc(sizeof(uint64_t) * ndims); + // uint64_t *num_bytes = (uint64_t *)malloc(sizeof(uint64_t) * ndims); + // for (int i = 0; i < ndims; i++) { + // offsets[i] = 0; + // num_bytes[i] = dims[i] * image_info->bits / 8; + // } + + // create object + // FIXME: There are many attributes currently in one file name, + // and we should do some research to see what would be a good object name for each image. + pdcid_t cur_obj_g = PDCobj_create(cont_id_g, fileName_cell->field_value, obj_prop_g); + + // write data to object + pdcid_t local_region = PDCregion_create(ndims, offsets, dims); + pdcid_t remote_region = PDCregion_create(ndims, offsets, dims); + pdcid_t transfer_request = + PDCregion_transfer_create(image_info->tiff_ptr, PDC_WRITE, cur_obj_g, local_region, remote_region); + PDCregion_transfer_start(transfer_request); + PDCregion_transfer_wait(transfer_request); + + clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation + duration = (end.tv_sec - start.tv_sec) * 1e9 + + (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds + + printf("[Rank %4d] Region Transfer for object %s [%d Bytes] Done! Time taken: %.4f seconds\n", rank, + fileName_cell->field_value, image_info->tiff_size, duration / 1e9); + + // add metadata tags based on the csv row + csv_cell_t *cell = fileName_cell; + while (cell != NULL) { + char *field_name = cell->header->field_name; + char data_type = cell->header->field_type; + char *field_value = cell->field_value; + switch (data_type) { + case 'i': + int ivalue = atoi(field_value); + PDCobj_put_tag(cur_obj_g, field_name, &ivalue, sizeof(int)); + break; + case 'f': + double fvalue = atof(field_value); + PDCobj_put_tag(cur_obj_g, field_name, &fvalue, sizeof(double)); + break; + case 's': + PDCobj_put_tag(cur_obj_g, field_name, field_value, sizeof(char) * strlen(field_value)); + break; + default: + break; + } + cell = cell->next; + } + + // add extra metadata tags based on the image_info struct + PDCobj_put_tag(cur_obj_g, "x", &(image_info->x), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "y", &(image_info->y), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "z", &(image_info->z), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "bits", &(image_info->bits), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "startSlice", &(image_info->startSlice), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "stripeSize", &(image_info->stripeSize), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "is_imageJ", &(image_info->is_imageJ), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "imageJ_Z", &(image_info->imageJ_Z), sizeof(uint64_t)); + + // close object + PDCobj_close(cur_obj_g); + + // get timing + clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation + duration = (end.tv_sec - start.tv_sec) * 1e9 + + (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds + + printf("[Rank %4d] Create object %s Done! 
Time taken: %.4f seconds\n", rank, fileName_cell->field_value, + duration / 1e9); + + // free memory + // free(offsets); + // free(num_bytes); + // PDCregion_close(local_region); + // PDCregion_close(remote_region); + // PDCregion_transfer_close(transfer_request); + PDCprop_close(obj_prop_g); +} + +void +on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) +{ + csv_print_row(row, 1); + + char *dirname = strdup(llsm_args->directory_path); + char filepath[256]; + // calling tiff loading process. + image_info_t * image_info = NULL; + int i = 0; + struct timespec start, end; + double duration; + // Filepath,Filename,StageX_um_,StageY_um_,StageZ_um_,ObjectiveX_um_,ObjectiveY_um_,ObjectiveZ_um_ + + // get the file name from the csv row + csv_cell_t *fileName_cell = csv_get_field_value_by_name(row, llsm_args->csv_header, "Filename"); + + // check if the path ends with a forward slash + if (dirname[strlen(dirname) - 1] != '/') { + strcat(dirname, "/"); // add a forward slash to the end of the path + } + + strcpy(filepath, dirname); // copy the directory path to the file path + strcat(filepath, fileName_cell->field_value); // concatenate the file name to the file path + + clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation + + parallel_TIFF_load(filepath, 1, NULL, &image_info); + + clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation + + duration = (end.tv_sec - start.tv_sec) * 1e9 + + (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds + + printf("[Rand %4d] Read %s Done! Time taken: %.4f seconds\n", rank, filepath, duration / 1e9); + + if (image_info == NULL || image_info->tiff_ptr == NULL) { + return; + } + + printf("first few bytes "); + for (i = 0; i < 10; i++) { + printf("%d ", ((uint8_t *)image_info->tiff_ptr)[i]); + } + printf("\n"); + + // import the image to PDC + import_to_pdc(image_info, fileName_cell); + + // free the image info + free(image_info->tiff_ptr); + free(image_info); + free(dirname); +} + +void +read_txt(char *txtFileName, PDC_LIST *list, int *max_row_length) +{ + FILE *file = fopen(txtFileName, "r"); + + int row_length = 0; + + if (file == NULL) { + printf("Error: could not open file %s\n", txtFileName); + return; + } + char buffer[1024]; + // Read the lines of the file + while (fgets(buffer, sizeof(buffer), file)) { + pdc_list_add(list, strdup(buffer)); + if (row_length < strlen(buffer)) { + row_length = strlen(buffer); + } + } + + fclose(file); + + // Find the maximum row length + *max_row_length = row_length + 5; +} + +int +main(int argc, char *argv[]) +{ + +#ifdef ENABLE_MPI + MPI_Init(&argc, &argv); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); +#endif + + char * file_name = NULL; + PDC_LIST * list = pdc_list_new(); + char * csv_line = NULL; + int num_row_read = 0; + csv_header_t * csv_header = NULL; + csv_row_t * csv_row = NULL; + llsm_importer_args_t *llsm_args = NULL; + int bcast_count = 512; + char csv_field_types[] = {'s', 's', 'f', 'f', 'f', 'f', 'f', 'f'}; + // parse console argument + int parse_code = parse_console_args(argc, argv, &file_name); + if (parse_code) { + return parse_code; + } + char *directory_path = dirname(strdup(file_name)); + + // print file name for validating purpose + printf("Filename: %s\n", file_name ? file_name : "(none)"); + printf("Directory: %s\n", directory_path ? 
directory_path : "(none)"); + + // create a pdc + pdc_id_g = PDCinit("pdc"); + + // create a container property + cont_prop_g = PDCprop_create(PDC_CONT_CREATE, pdc_id_g); + if (cont_prop_g <= 0) + printf("Fail to create container property @ line %d!\n", __LINE__); + + // create a container + cont_id_g = PDCcont_create("c1", cont_prop_g); + if (cont_id_g <= 0) + printf("Fail to create container @ line %d!\n", __LINE__); + + // Rank 0 reads the filename list and distribute data to other ranks + if (rank == 0) { + read_txt(file_name, list, &bcast_count); + // print bcast_count + printf("bcast_count: %d \n", bcast_count); + +#ifdef ENABLE_MPI + // broadcast the number of lines + int num_lines = pdc_list_size(list); + MPI_Bcast(&num_lines, 1, MPI_INT, 0, MPI_COMM_WORLD); + // broadcast the bcast_count + MPI_Bcast(&bcast_count, 1, MPI_INT, 0, MPI_COMM_WORLD); + // broadcast the file names + PDC_LIST_ITERATOR *iter = pdc_list_iterator_new(list); + while (pdc_list_iterator_has_next(iter)) { + char *csv_line = (char *)pdc_list_iterator_next(iter); + MPI_Bcast(csv_line, bcast_count, MPI_CHAR, 0, MPI_COMM_WORLD); + } +#endif + } + else { +#ifdef ENABLE_MPI + // other ranks receive the number of files + int num_lines; + MPI_Bcast(&num_lines, 1, MPI_INT, 0, MPI_COMM_WORLD); + // receive the bcast_count + MPI_Bcast(&bcast_count, 1, MPI_INT, 0, MPI_COMM_WORLD); + // receive the file names + int i; + for (i = 0; i < num_lines; i++) { + csv_line = (char *)malloc(bcast_count * sizeof(char)); + MPI_Bcast(csv_line, bcast_count, MPI_CHAR, 0, MPI_COMM_WORLD); + pdc_list_add(list, csv_line); + } +#endif + } + // parse the csv + csv_table_t *csv_table = csv_parse_list(list, csv_field_types); + if (csv_table == NULL) { + printf("Fail to parse csv file @ line %d!\n", __LINE__); + return -1; + } + llsm_args = (llsm_importer_args_t *)malloc(sizeof(llsm_importer_args_t)); + llsm_args->directory_path = directory_path; + llsm_args->csv_header = csv_table->first_header; + + // go through the csv table + csv_row_t *current_row = csv_table->first_row; + while (current_row != NULL) { + if (num_row_read % size == rank) { + on_csv_row(current_row, llsm_args); + } + num_row_read++; + current_row = current_row->next; + } + + csv_free_table(csv_table); + + // close the container + PDCcont_close(cont_id_g); + // close the container property + PDCprop_close(cont_prop_g); + // close the pdc + PDCclose(pdc_id_g); + +#ifdef ENABLE_MPI + MPI_Finalize(); +#endif + + return 0; +} diff --git a/tools/pdc_export.c b/tools/pdc_export.c index e46c17f2a..a67f444e3 100644 --- a/tools/pdc_export.c +++ b/tools/pdc_export.c @@ -8,16 +8,16 @@ #include #include "hdf5.h" -#define ENABLE_MPI 1 +// #define ENABLE_MPI 1 #ifdef ENABLE_MPI #include "mpi.h" #endif #include "pdc.h" -#include "pdc_client_server_common.h" -#include "pdc_client_connect.h" -#include "../src/server/include/pdc_server_metadata.h" +// #include "pdc_client_server_common.h" +// #include "pdc_client_connect.h" +// #include "../src/server/include/pdc_server_metadata.h" #include "cjson/cJSON.h" const char *avail_args[] = {"-f"}; diff --git a/tools/pdc_import.c b/tools/pdc_import.c index 9e12d3a84..f51f587c8 100644 --- a/tools/pdc_import.c +++ b/tools/pdc_import.c @@ -1,8 +1,10 @@ #include #include #include +#include +#include -#define ENABLE_MPI 1 +// #define ENABLE_MPI 1 #ifdef ENABLE_MPI #include "mpi.h" @@ -10,13 +12,14 @@ #include "hdf5.h" #include "pdc.h" -#include "pdc_client_server_common.h" -#include "pdc_client_connect.h" +// #include "pdc_client_server_common.h" +// #include 
"pdc_client_connect.h" #define MAX_NAME 1024 #define MAX_FILES 2500 #define MAX_FILENAME_LEN 64 #define MAX_TAG_SIZE 8192 +#define TAG_LEN_MAX 2048 typedef struct ArrayList { int length; @@ -80,8 +83,7 @@ int ndset_g = 0; /* FILE *summary_fp_g; */ int max_tag_size_g = 0; pdcid_t pdc_id_g = 0, cont_prop_g = 0, cont_id_g = 0, obj_prop_g = 0; -struct timeval write_timer_start_g; -struct timeval write_timer_end_g; +struct timespec write_timer_start_g, write_timer_end_g; struct ArrayList *container_names; int overwrite = 0; @@ -246,9 +248,8 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); #endif - struct timeval pdc_timer_start; - struct timeval pdc_timer_end; - gettimeofday(&pdc_timer_start, 0); + struct timespec pdc_timer_start, pdc_timer_end; + clock_gettime(CLOCK_MONOTONIC, &pdc_timer_start); for (i = 0; i < my_count; i++) { filename = my_filenames[i]; @@ -272,7 +273,8 @@ main(int argc, char **argv) #endif // Checkpoint all metadata after import each hdf5 file if (rank == 0) { - PDC_Client_all_server_checkpoint(); + // FIXME: this should be replaced by a function in public headers. + // PDC_Client_all_server_checkpoint(); } /* printf("%s, %d\n", filename, max_tag_size_g); */ /* printf("\n\n======================\nNumber of datasets: %d\n", ndset_g); */ @@ -285,8 +287,10 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); #endif - gettimeofday(&pdc_timer_end, 0); - double write_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); + clock_gettime(CLOCK_MONOTONIC, &pdc_timer_end); + double write_time = + (pdc_timer_end.tv_sec - pdc_timer_start.tv_sec) * 1e9 + + (pdc_timer_end.tv_nsec - pdc_timer_start.tv_nsec); // calculate duration in nanoseconds #ifdef ENABLE_MPI MPI_Reduce(&ndset_g, &total_dset, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); @@ -294,7 +298,8 @@ main(int argc, char **argv) total_dset = ndset_g; #endif if (rank == 0) { - printf("Import %d datasets with %d ranks took %.2f seconds.\n", total_dset, size, write_time); + printf("Import %d datasets with %d ranks took %.2f seconds.\n", total_dset, size, + write_time / 1e9); } } @@ -539,8 +544,8 @@ do_dset(hid_t did, char *name, char *app_name) scan_attrs(did, obj_id); - pdc_metadata_t *meta = NULL; - obj_region.ndim = ndim; + // pdc_metadata_t *meta = NULL; + obj_region.ndim = ndim; for (i = 0; i < ndim; i++) { offset[i] = 0; size[i] = dims[i]; @@ -550,7 +555,7 @@ do_dset(hid_t did, char *name, char *app_name) obj_region.size = size; if (ndset_g == 1) - gettimeofday(&write_timer_start_g, 0); + clock_gettime(CLOCK_MONOTONIC, &write_timer_start_g); /* PDC_Client_query_metadata_name_timestep(dset_name_g, 0, &meta); */ /* if (meta == NULL) */ @@ -566,12 +571,14 @@ do_dset(hid_t did, char *name, char *app_name) // PDC_Client_write_id(obj_id, &obj_region, buf); if (ndset_g % 100 == 0) { - gettimeofday(&write_timer_end_g, 0); - double elapsed_time = PDC_get_elapsed_time_double(&write_timer_start_g, &write_timer_end_g); - printf("Importer%2d: Finished written 100 objects, took %.2f, my total %d\n", rank, elapsed_time, - ndset_g); + clock_gettime(CLOCK_MONOTONIC, &write_timer_end_g); + double elapsed_time = + (write_timer_end_g.tv_sec - write_timer_start_g.tv_sec) * 1e9 + + (write_timer_end_g.tv_nsec - write_timer_start_g.tv_nsec); // calculate duration in nanoseconds; + printf("Importer%2d: Finished written 100 objects, took %.2f, my total %d\n", rank, + elapsed_time / 1e9, ndset_g); fflush(stdout); - gettimeofday(&write_timer_start_g, 0); + clock_gettime(CLOCK_MONOTONIC, &write_timer_start_g); } free(buf); @@ 
-698,12 +705,15 @@ scan_attrs(hid_t oid, pdcid_t obj_id) void do_attr(hid_t aid, pdcid_t obj_id) { - ssize_t len; - hid_t atype; - hid_t aspace; - char buf[MAX_NAME] = {0}; - char read_buf[TAG_LEN_MAX] = {0}; - pdc_kvtag_t kvtag1; + ssize_t len; + hid_t atype; + hid_t aspace; + char buf[MAX_NAME] = {0}; + char read_buf[TAG_LEN_MAX] = {0}; + // pdc_kvtag_t kvtag1; + char * tag_name; + void * tag_value; + size_t tag_size; /* * Get the name of the attribute. @@ -717,15 +727,15 @@ do_attr(hid_t aid, pdcid_t obj_id) atype = H5Aget_type(aid); H5Aread(aid, atype, read_buf); - kvtag1.name = buf; - kvtag1.value = (void *)read_buf; + tag_name = buf; + tag_value = (void *)read_buf; if (atype == H5T_STRING) { - kvtag1.size = strlen(read_buf) + 1; + tag_size = strlen(read_buf) + 1; } else { - kvtag1.size = H5Tget_size(atype); + tag_size = H5Tget_size(atype); } - PDCobj_put_tag(obj_id, kvtag1.name, kvtag1.value, kvtag1.size); + PDCobj_put_tag(obj_id, tag_name, tag_value, tag_size); /* * Get attribute information: dataspace, data type From 1029c138ef5cfd2e96c23d58949c4b121b5e5102 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 9 May 2023 23:09:18 -0400 Subject: [PATCH 176/806] added a tutorial for llsm_importer --- tools/LLSM_IMPORTER.md | 74 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 tools/LLSM_IMPORTER.md diff --git a/tools/LLSM_IMPORTER.md b/tools/LLSM_IMPORTER.md new file mode 100644 index 000000000..d2f6221ec --- /dev/null +++ b/tools/LLSM_IMPORTER.md @@ -0,0 +1,74 @@ +# LLSM_Importer Tutorial + +This is a tutorial for you to run llsm_importer on Perlmutter supercomputer at NERSC. + +## Prerequisite + +Before building and installing LLSM_importer tool, you need to make sure you install PDC correctly. Check out the latest update on the `develop` branch of `PDC`. Please refer to [Proactive Data Containers (PDC) Installation Guide](../README.md) + +Once you finish all the steps in the installation guide above, you should have environment variable `$WORK_SPACE` defined. + +## Installation + +To build and install LLSM_importer, you need to download libtiff 4.4.0 first. + +```bash +cd $WORK_SPACE/source +wget https://download.osgeo.org/libtiff/tiff-4.4.0.tar.gz +tar zxvf tiff-4.4.0.tar.gz +cd tiff-4.4.0 +./configure --prefix=$WORK_SPACE/install/tiff-4.4.0 +make -j 32 install +``` + +Now you should have libtiff 4.4.0 installed and you need to include the path to the library to your environment variables: + +```bash +echo "export TIFF_DIR=$WORK_SPACE/install/tiff-4.4.0" +echo 'export LD_LIBRARY_PATH=$TIFF_DIR/lib:$LD_LIBRARY_PATH' +echo 'export PATH=$TIFF_DIR/include:$TIFF_DIR/lib:$PATH' + +echo "export TIFF_DIR=$WORK_SPACE/install/tiff-4.4.0" >> $WORK_SPACE/pdc_env.sh +echo 'export LD_LIBRARY_PATH=$TIFF_DIR/lib:$LD_LIBRARY_PATH' >> $WORK_SPACE/pdc_env.sh +echo 'export PATH=$TIFF_DIR/include:$TIFF_DIR/lib:$PATH' >> $WORK_SPACE/pdc_env.sh +``` + +Copy the 3 export commands on your screen and run them, and next time if you need to rebuild the llsm_importer program, you can run `$WORK_SPACE/pdc_env.sh` again in advance! + +Now, time to build llsm_importer program. 
+
+```bash
+mkdir -p $WORK_SPACE/source/pdc/tools/build
+cd $WORK_SPACE/source/pdc/tools/build
+
+cmake ../ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DPDC_DIR=$PDC_DIR -DUSE_LIB_TIFF=ON -DUSE_SYSTEM_HDF5=ON -DUSE_SYSTEM_OPENMP=ON -DCMAKE_INSTALL_PREFIX=$PDC_DIR/tools/ -DCMAKE_C_COMPILER=cc
+
+make -j 32
+```
+
+After this, you should be able to see `llsm_importer` artifact under your `$WORK_SPACE/source/pdc/tools/build` directory.
+
+## Running LLSM_importer
+
+First, locate the llsm_importer script
+
+```bash
+cd $WORK_SPACE/source/pdc/scripts/llsm_importer
+```
+
+Modify the template script `template.sh`.
+
+Change `EXECPATH` to where your `pdc_server.exe` is installed
+Change `TOOLPATH` to where your `llsm_importer` artifact is.
+
+Change `LLSM_DATA_PATH` to where your sample dataset is. For example,
+
+```bash
+LLSM_DATA_PATH=/pscratch/sd/w/wzhang5/data/llsm/20220115_Korra_LLCPK_LFOV_0p1PSAmpKan/run1
+```
+
+Note: you may download the sample dataset from this [link](https://drive.google.com/file/d/19hH7v58iF_QBJ985ajwLD86MMseBH-YR/view?usp=sharing). It is provided courtesy of [Advanced BioImaging Center at UC Berkeley](https://mcb.berkeley.edu/faculty/cdb/upadhyayulas).
+
+Now, run `gen_script.sh` to generate scripts for different settings with various numbers of servers.
+
+After this, enter any directory named with a number, and submit the job with the `sbatch` command.

From a82d52a175503a4bc137be070c29775e77b96de2 Mon Sep 17 00:00:00 2001
From: Wei Zhang
Date: Tue, 9 May 2023 23:11:46 -0400
Subject: [PATCH 177/806] added a tutorial for llsm_importer

---
 tools/LLSM_IMPORTER.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tools/LLSM_IMPORTER.md b/tools/LLSM_IMPORTER.md
index d2f6221ec..42ded7682 100644
--- a/tools/LLSM_IMPORTER.md
+++ b/tools/LLSM_IMPORTER.md
@@ -72,3 +72,5 @@ Note: you may download the sample dataset from the this [link](https://drive.goo
 Now, run `gen_script.sh` to generate scripts for different settings with various number of servers.
 
 After this, enter into any directory named with a number, and submit the job with `sbatch` command.
+
+Note: This program is still under development and changes will be made available from time to time. Please always use the develop branch for a stable version of this llsm_importer tool.
\ No newline at end of file

From 97eeafeb8a7aad719998fe60eb54219e49b9bc53 Mon Sep 17 00:00:00 2001
From: Zhang Wei
Date: Wed, 10 May 2023 11:38:37 -0400
Subject: [PATCH 178/806] Adding tutorial for llsm_importer tool. (#84)

* remove unnecessary install block from CMakeLists.txt
* update output
* Revert "update output"

This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd.

* build kvtag_add_get_scale
* comment off free
* update code
* 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. moving work assigning block downwards right before creating objects \n 4.
everything is tested working * do while loop added, tested with 1m object and works * 1m objects test works, 10m object test fail as the original also fails * add new executable to test set * enlarge PDC_SERVER_ID_INTERVAL * update code * update console args * add p search test * add console arg for changing number of attributes per object * free allocated memory * fix query count issue * fix attr length definition * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * fix data type * fix data type * fix data type * add client side statistics * add client side statistics * fix format * clang formatter * update CMake * update CMake * update CMake * free allocated memory properly * clang format * clang format * clang-format-10 * change file name * address review comments * update llsm importer * update llsm importer * update server checkpoint intervals * update gitignore * adding job scripts * adding one debugging msg * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update output for uint64_t * add scripts * update output for uint64_t * update output for uint64_t * update output for uint64_t * update scripts * update scripts * delete debugging message * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * update tag names * update tag names * update query startingpos * update query startingpos * update job scripts * add progressive timing for kvtag_add_get_scale * fix iteration count in final report * update job scripts and benckmark program * update message format * update message format * update message format * update message format * clang format * update job scripts * comment off object/container close procedure in benchmark to save node hours * change the max number of object to 1M * change the max length of attribute value * change the max length of attribute value * llsm tiff import test * llsm tiff import test * llsm tiff import test * llsm tiff import test * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update cmake and llsm_importer * update cmake and llsm_importer * close if in cmake * cmake fix tiff * cmake policy to suppress warning * add pdc include dir * update code * update code * update code * update code * update code * update code * update array generating method * update array generating method * update array generating method * update array generating method * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * fix return type * fix return type * add timing * add timing * fix output * llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader * fix vairable name * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * add scripts * add scripts * add scripts * debugging for nonMPI program * debugging for nonMPI program * debugging for nonMPI program * clang format, without PDC, everything works perfectly. 
program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created * enable MPI * enable MPI * enlarge BCase size * enlarge BCase size * enlarge BCase size * resolve bcast count * llsm data path in script * llsm data path in script * update csv reader * update csv reader * update csv reader * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * enlarge max write * update pdc * update pdc * update pdc * update pdc * update pdc_import.c * update pdc_import.c * update pdc_export.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update tools/cmake * clang format * clang format * added a tutorial for llsm_importer * added a tutorial for llsm_importer --------- Co-authored-by: Houjun Tang --- tools/LLSM_IMPORTER.md | 76 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 tools/LLSM_IMPORTER.md diff --git a/tools/LLSM_IMPORTER.md b/tools/LLSM_IMPORTER.md new file mode 100644 index 000000000..42ded7682 --- /dev/null +++ b/tools/LLSM_IMPORTER.md @@ -0,0 +1,76 @@ +# LLSM_Importer Tutorial + +This is a tutorial for you to run llsm_importer on Perlmutter supercomputer at NERSC. + +## Prerequisite + +Before building and installing LLSM_importer tool, you need to make sure you install PDC correctly. Check out the latest update on the `develop` branch of `PDC`. Please refer to [Proactive Data Containers (PDC) Installation Guide](../README.md) + +Once you finish all the steps in the installation guide above, you should have environment variable `$WORK_SPACE` defined. + +## Installation + +To build and install LLSM_importer, you need to download libtiff 4.4.0 first. + +```bash +cd $WORK_SPACE/source +wget https://download.osgeo.org/libtiff/tiff-4.4.0.tar.gz +tar zxvf tiff-4.4.0.tar.gz +cd tiff-4.4.0 +./configure --prefix=$WORK_SPACE/install/tiff-4.4.0 +make -j 32 install +``` + +Now you should have libtiff 4.4.0 installed and you need to include the path to the library to your environment variables: + +```bash +echo "export TIFF_DIR=$WORK_SPACE/install/tiff-4.4.0" +echo 'export LD_LIBRARY_PATH=$TIFF_DIR/lib:$LD_LIBRARY_PATH' +echo 'export PATH=$TIFF_DIR/include:$TIFF_DIR/lib:$PATH' + +echo "export TIFF_DIR=$WORK_SPACE/install/tiff-4.4.0" >> $WORK_SPACE/pdc_env.sh +echo 'export LD_LIBRARY_PATH=$TIFF_DIR/lib:$LD_LIBRARY_PATH' >> $WORK_SPACE/pdc_env.sh +echo 'export PATH=$TIFF_DIR/include:$TIFF_DIR/lib:$PATH' >> $WORK_SPACE/pdc_env.sh +``` + +Copy the 3 export commands on your screen and run them, and next time if you need to rebuild the llsm_importer program, you can run `$WORK_SPACE/pdc_env.sh` again in advance! + +Now, time to build llsm_importer program. + +```bash +mkdir -p $WORK_SPACE/source/pdc/tools/build +cd $WORK_SPACE/source/pdc/tools/build + +cmake ../ -DCMAKE_BUILD_TYPE=RelWithDebInfo -DPDC_DIR=$PDC_DIR -DUSE_LIB_TIFF=ON -DUSE_SYSTEM_HDF5=ON -DUSE_SYSTEM_OPENMP=ON -DCMAKE_INSTALL_PREFIX=$PDC_DIR/tools/ -DCMAKE_C_COMPILER=cc + +make -j 32 +``` + +After this, you should be able to see `llsm_importer` artifact under your `$WORK_SPACE/source/pdc/tools/build` directory. + +## Running LLSM_importer + +First, locate the llsm_importer script + +```bash +cd $WORK_SPACE/source/pdc/scripts/llsm_importer +``` + +Modify the template script `template.sh`. + +Change `EXECPATH` to where your `pdc_server.exe` is installed +Change `TOOLPATH` to where your `llsm_importer` artifact is. 
+ +Change `LLSM_DATA_PATH` to where your sample dataset is. For exmaple, + +```bash +LLSM_DATA_PATH=/pscratch/sd/w/wzhang5/data/llsm/20220115_Korra_LLCPK_LFOV_0p1PSAmpKan/run1 +``` + +Note: you may download the sample dataset from the this [link](https://drive.google.com/file/d/19hH7v58iF_QBJ985ajwLD86MMseBH-YR/view?usp=sharing). It is provided with the courtesy of [Advanced BioImaging Center at UC Berkeley](https://mcb.berkeley.edu/faculty/cdb/upadhyayulas). + +Now, run `gen_script.sh` to generate scripts for different settings with various number of servers. + +After this, enter into any directory named with a number, and submit the job with `sbatch` command. + +Note: This program is still under development and changes will be made available from time to time. Please always use the develop branch for a stable version of this llsm_importer tool. \ No newline at end of file From 152519b284e16425f04d39ea4ba332882ef10205 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Fri, 12 May 2023 14:22:03 -0400 Subject: [PATCH 179/806] make sure the line feed is included for string attribute --- tools/llsm_importer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index d777812c9..1fd61dc9b 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -139,7 +139,7 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) PDCobj_put_tag(cur_obj_g, field_name, &fvalue, sizeof(double)); break; case 's': - PDCobj_put_tag(cur_obj_g, field_name, field_value, sizeof(char) * strlen(field_value)); + PDCobj_put_tag(cur_obj_g, field_name, field_value, sizeof(char) * (strlen(field_value) + 1)); break; default: break; From 27621e23877e3603201dce7527950d0ac2daa18b Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 16 May 2023 11:17:15 -0400 Subject: [PATCH 180/806] update timing for overall completion time --- tools/llsm_importer.c | 82 ++++++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 32 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index 1fd61dc9b..a2a00dddd 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -29,7 +29,16 @@ typedef struct llsm_importer_args_t { int rank = 0, size = 1; -pdcid_t pdc_id_g = 0, cont_prop_g = 0, cont_id_g = 0, obj_prop_g = 0; +pdcid_t pdc_id_g = 0, cont_prop_g = 0, cont_id_g = 0, obj_prop_g = 0; +struct timespec ts; + +double +getDoubleTimestamp() +{ + clock_gettime(CLOCK_MONOTONIC, &ts); + double timestamp = (double)ts.tv_sec + (double)ts.tv_nsec / 1e9; + return timestamp; +} int parse_console_args(int argc, char *argv[], char **file_name) @@ -54,10 +63,9 @@ parse_console_args(int argc, char *argv[], char **file_name) void import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) { - struct timespec start, end; - double duration; + double duration, start; - clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation + start = getDoubleTimestamp(); // start timing the operation obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); @@ -116,12 +124,10 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) PDCregion_transfer_start(transfer_request); PDCregion_transfer_wait(transfer_request); - clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation - duration = (end.tv_sec - start.tv_sec) * 1e9 + - (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds + duration = getDoubleTimestamp() - start; // end timing the operation and calculate duration in nanoseconds - printf("[Rank %4d] 
Region Transfer for object %s [%d Bytes] Done! Time taken: %.4f seconds\n", rank, - fileName_cell->field_value, image_info->tiff_size, duration / 1e9); + printf("[Rank %4d] Region_Transfer %s_[%d_Bytes] Done! Time taken: %.4f seconds\n", rank, + fileName_cell->field_value, image_info->tiff_size, duration); // add metadata tags based on the csv row csv_cell_t *cell = fileName_cell; @@ -161,12 +167,10 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) PDCobj_close(cur_obj_g); // get timing - clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation - duration = (end.tv_sec - start.tv_sec) * 1e9 + - (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds + duration = getDoubleTimestamp() - start; // end timing the operation calculate duration in nanoseconds - printf("[Rank %4d] Create object %s Done! Time taken: %.4f seconds\n", rank, fileName_cell->field_value, - duration / 1e9); + printf("[Rank %4d] Create_object %s Done! Time taken: %.4f seconds\n", rank, fileName_cell->field_value, + duration); // free memory // free(offsets); @@ -185,10 +189,9 @@ on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) char *dirname = strdup(llsm_args->directory_path); char filepath[256]; // calling tiff loading process. - image_info_t * image_info = NULL; - int i = 0; - struct timespec start, end; - double duration; + image_info_t *image_info = NULL; + int i = 0; + double duration, start; // Filepath,Filename,StageX_um_,StageY_um_,StageZ_um_,ObjectiveX_um_,ObjectiveY_um_,ObjectiveZ_um_ // get the file name from the csv row @@ -202,16 +205,13 @@ on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) strcpy(filepath, dirname); // copy the directory path to the file path strcat(filepath, fileName_cell->field_value); // concatenate the file name to the file path - clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation + start = getDoubleTimestamp(); // start timing the operation parallel_TIFF_load(filepath, 1, NULL, &image_info); - clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation + duration = getDoubleTimestamp() - start; // end timing the operation and calculate duration in nanoseconds - duration = (end.tv_sec - start.tv_sec) * 1e9 + - (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds - - printf("[Rand %4d] Read %s Done! Time taken: %.4f seconds\n", rank, filepath, duration / 1e9); + printf("[Rank %4d] Read %s Done! 
Time taken: %.4f seconds\n", rank, filepath, duration); if (image_info == NULL || image_info->tiff_ptr == NULL) { return; @@ -268,14 +268,15 @@ main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &size); #endif - char * file_name = NULL; - PDC_LIST * list = pdc_list_new(); - char * csv_line = NULL; - int num_row_read = 0; - csv_header_t * csv_header = NULL; - csv_row_t * csv_row = NULL; - llsm_importer_args_t *llsm_args = NULL; - int bcast_count = 512; + char * file_name = NULL; + PDC_LIST * list = pdc_list_new(); + char * csv_line = NULL; + int num_row_read = 0; + csv_header_t * csv_header = NULL; + csv_row_t * csv_row = NULL; + llsm_importer_args_t *llsm_args = NULL; + int bcast_count = 512; + double duration = 0, start = 0; char csv_field_types[] = {'s', 's', 'f', 'f', 'f', 'f', 'f', 'f'}; // parse console argument int parse_code = parse_console_args(argc, argv, &file_name); @@ -347,6 +348,12 @@ main(int argc, char *argv[]) llsm_args->directory_path = directory_path; llsm_args->csv_header = csv_table->first_header; +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + start = MPI_Wtime(); +#else + start = getDoubleTimestamp(); +#endif // go through the csv table csv_row_t *current_row = csv_table->first_row; while (current_row != NULL) { @@ -357,6 +364,17 @@ main(int argc, char *argv[]) current_row = current_row->next; } +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + duration = MPI_Wtime() - start; +#else + duration = getDoubleTimestamp() - start; +#endif + + if (rank == 0) { + printf("[Completion Time] LLSM IMPORTER FINISHES! Time taken: %.4f seconds\n", rank, duration); + } + csv_free_table(csv_table); // close the container From 22b98ac1ea34510d9a3d4ec5109e860fded1e607 Mon Sep 17 00:00:00 2001 From: Jean Luca Bez Date: Fri, 19 May 2023 12:31:26 -0700 Subject: [PATCH 181/806] Update .gitlab-ci.yml removing Cori Remove Cori CI in advance of Cori's decommission by the end of the month. --- .gitlab-ci.yml | 173 ------------------------------------------------- 1 file changed, 173 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5fde85406..03c29ca4a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -7,26 +7,6 @@ stages: - test - metrics -cori-build: - stage: build - tags: - - cori - variables: - SCHEDULER_PARAMETERS: "-C haswell --qos=debug -N 1 -t 00:30:00 --gres=craynetwork:3" - LIBFABRIC_DIR: "/global/cfs/cdirs/m1248/pdc/libfabric-1.12.1/install" - MERCURY_DIR: "/global/cfs/cdirs/m1248/pdc/mercury-2.0.0/install" - script: - - module list - - mkdir -p ${PDC_BUILD_PATH}/cori - - cd ${PDC_BUILD_PATH}/cori - - cmake ../.. 
-DBUILD_MPI_TESTING=ON -DBUILD_SHARED_LIBS=ON -DBUILD_TESTING=ON -DPDC_ENABLE_MPI=ON -DPDC_ENABLE_TIMING=ON -DMERCURY_DIR=$MERCURY_DIR -DCMAKE_C_COMPILER=cc -DCMAKE_C_FLAGS=-dynamic -DMPI_RUN_CMD=srun -DPDC_ENABLE_LUSTRE=ON -DPDC_DISABLE_CHECKPOINT=ON -DCMAKE_INSTALL_PREFIX=${PDC_INSTALL_PATH}/cori - - make -j - - make install - artifacts: - paths: - - ${PDC_BUILD_PATH}/cori - - ${PDC_INSTALL_PATH}/cori - perlmutter-build: stage: build tags: @@ -48,119 +28,6 @@ perlmutter-build: - ${PDC_BUILD_PATH}/perlmutter - ${PDC_INSTALL_PATH}/perlmutter -cori-parallel-pdc: - stage: test - needs: - - cori-build - tags: - - cori - variables: - SCHEDULER_PARAMETERS: "-C haswell --qos=debug -N 1 -t 00:30:00 --gres=craynetwork:3" - LIBFABRIC_DIR: "/global/cfs/cdirs/m1248/pdc/libfabric-1.12.1/install" - MERCURY_DIR: "/global/cfs/cdirs/m1248/pdc/mercury-2.0.0/install" - PDC_TMPDIR: "${PDC_BUILD_PATH}/pdc-tmp-paralell-pdc" - PDC_DATA_LOC: "${PDC_BUILD_PATH}/pdc-data-paralell-pdc" - script: - - export LD_LIBRARY_PATH="$LIBFABRIC_DIR/lib:$MERCURY_DIR/lib:$LD_LIBRARY_PATH" - - cd ${PDC_BUILD_PATH}/cori - - ctest -L parallel_pdc - - rm -rf ${PDC_TMPDIR} ${PDC_DATA_LOC} - -cori-parallel-obj: - stage: test - needs: - - cori-build - - cori-parallel-pdc - tags: - - cori - variables: - SCHEDULER_PARAMETERS: "-C haswell --qos=debug -N 1 -t 00:30:00 --gres=craynetwork:3" - LIBFABRIC_DIR: "/global/cfs/cdirs/m1248/pdc/libfabric-1.12.1/install" - MERCURY_DIR: "/global/cfs/cdirs/m1248/pdc/mercury-2.0.0/install" - PDC_TMPDIR: "${PDC_BUILD_PATH}/pdc-tmp-paralell-obj" - PDC_DATA_LOC: "${PDC_BUILD_PATH}/pdc-data-paralell-obj" - script: - - export LD_LIBRARY_PATH="$LIBFABRIC_DIR/lib:$MERCURY_DIR/lib:$LD_LIBRARY_PATH" - - cd ${PDC_BUILD_PATH}/cori - - ctest -L parallel_obj - - rm -rf ${PDC_TMPDIR} ${PDC_DATA_LOC} - -cori-parallel-cont: - stage: test - needs: - - cori-build - - cori-parallel-pdc - tags: - - cori - variables: - SCHEDULER_PARAMETERS: "-C haswell --qos=debug -N 1 -t 00:30:00 --gres=craynetwork:3" - LIBFABRIC_DIR: "/global/cfs/cdirs/m1248/pdc/libfabric-1.12.1/install" - MERCURY_DIR: "/global/cfs/cdirs/m1248/pdc/mercury-2.0.0/install" - PDC_TMPDIR: "${PDC_BUILD_PATH}/pdc-tmp-paralell-cont" - PDC_DATA_LOC: "${PDC_BUILD_PATH}/pdc-data-paralell-cont" - script: - - export LD_LIBRARY_PATH="$LIBFABRIC_DIR/lib:$MERCURY_DIR/lib:$LD_LIBRARY_PATH" - - cd ${PDC_BUILD_PATH}/cori - - ctest -L parallel_cont - - rm -rf ${PDC_TMPDIR} ${PDC_DATA_LOC} - -cori-parallel-prop: - stage: test - needs: - - cori-build - - cori-parallel-pdc - tags: - - cori - variables: - SCHEDULER_PARAMETERS: "-C haswell --qos=debug -N 1 -t 00:30:00 --gres=craynetwork:3" - LIBFABRIC_DIR: "/global/cfs/cdirs/m1248/pdc/libfabric-1.12.1/install" - MERCURY_DIR: "/global/cfs/cdirs/m1248/pdc/mercury-2.0.0/install" - PDC_TMPDIR: "${PDC_BUILD_PATH}/pdc-tmp-paralell-prop" - PDC_DATA_LOC: "${PDC_BUILD_PATH}/pdc-data-paralell-prop" - script: - - export LD_LIBRARY_PATH="$LIBFABRIC_DIR/lib:$MERCURY_DIR/lib:$LD_LIBRARY_PATH" - - cd ${PDC_BUILD_PATH}/cori - - ctest -L parallel_prop - - rm -rf ${PDC_TMPDIR} ${PDC_DATA_LOC} - -cori-parallel-region: - stage: test - needs: - - cori-build - - cori-parallel-pdc - tags: - - cori - variables: - SCHEDULER_PARAMETERS: "-C haswell --qos=debug -N 1 -t 00:30:00 --gres=craynetwork:3" - LIBFABRIC_DIR: "/global/cfs/cdirs/m1248/pdc/libfabric-1.12.1/install" - MERCURY_DIR: "/global/cfs/cdirs/m1248/pdc/mercury-2.0.0/install" - PDC_TMPDIR: "${PDC_BUILD_PATH}/pdc-tmp-paralell-region" - PDC_DATA_LOC: "${PDC_BUILD_PATH}/pdc-data-paralell-region" - 
script: - - export LD_LIBRARY_PATH="$LIBFABRIC_DIR/lib:$MERCURY_DIR/lib:$LD_LIBRARY_PATH" - - cd ${PDC_BUILD_PATH}/cori - - ctest -L parallel_region_transfer - - rm -rf ${PDC_TMPDIR} ${PDC_DATA_LOC} - -cori-parallel-region-all: - stage: test - needs: - - cori-build - - cori-parallel-pdc - tags: - - cori - variables: - SCHEDULER_PARAMETERS: "-C haswell --qos=debug -N 1 -t 00:30:00 --gres=craynetwork:3" - LIBFABRIC_DIR: "/global/cfs/cdirs/m1248/pdc/libfabric-1.12.1/install" - MERCURY_DIR: "/global/cfs/cdirs/m1248/pdc/mercury-2.0.0/install" - PDC_TMPDIR: "${PDC_BUILD_PATH}/pdc-tmp-paralell-region-all" - PDC_DATA_LOC: "${PDC_BUILD_PATH}/pdc-data-paralell-region-all" - script: - - export LD_LIBRARY_PATH="$LIBFABRIC_DIR/lib:$MERCURY_DIR/lib:$LD_LIBRARY_PATH" - - cd ${PDC_BUILD_PATH}/cori - - ctest -L parallel_region_transfer_all - - rm -rf ${PDC_TMPDIR} ${PDC_DATA_LOC} - perlmutter-parallel-pdc: stage: test needs: @@ -274,46 +141,6 @@ perlmutter-parallel-region-all: - ctest -L parallel_region_transfer_all - rm -rf ${PDC_TMPDIR} ${PDC_DATA_LOC} -cori-metrics: - stage: metrics - needs: - - cori-build - tags: - - cori - variables: - PDC_N_NODES: 64 - PDC_N_CLIENTS: 31 - SCHEDULER_PARAMETERS: "-C haswell --qos=regular -N ${PDC_N_NODES} -t 00:30:00 --gres=craynetwork:3" - LIBFABRIC_DIR: "/global/cfs/cdirs/m1248/pdc/libfabric-1.12.1/install" - MERCURY_DIR: "/global/cfs/cdirs/m1248/pdc/mercury-2.0.0/install" - PDC_TMPDIR: "${PDC_BUILD_PATH}/pdc-tmp-metrics" - PDC_DATA_LOC: "${PDC_BUILD_PATH}/pdc-data-metrics" - PDC_CLIENT_LOOKUP: "NONE" - PDC_SERVER: "${PDC_BUILD_PATH}/cori/bin/pdc_server.exe" - PDC_SERVER_CLOSE: "${PDC_BUILD_PATH}/cori/bin/close_server" - PDC_CLIENT: "${PDC_BUILD_PATH}/cori/bin/vpicio_mts" - PDC_JOB_OUTPUT: "pdc-metrics.log" - script: - - module load python - - export LD_LIBRARY_PATH="$LIBFABRIC_DIR/lib:$MERCURY_DIR/lib:$LD_LIBRARY_PATH" - - cd ${PDC_BUILD_PATH}/cori - - let TOTAL_PROCESSES=$PDC_N_CLIENTS*$PDC_N_NODES - - echo "Starting PDC servers..." - - export FI_CXI_DEFAULT_VNI=0 - - srun --mem=25600 --cpu_bind=cores --gres=craynetwork:1 --overlap -u -o ${PDC_JOB_OUTPUT} --open-mode=append -N ${PDC_N_NODES} -n ${PDC_N_NODES} -c 1 ${PDC_SERVER} & - - echo "Starting application..." - - export FI_CXI_DEFAULT_VNI=1 - - srun --mem=25600 --cpu_bind=cores --gres=craynetwork:1 --overlap -u -o ${PDC_JOB_OUTPUT} --open-mode=append -N ${PDC_N_NODES} -n ${TOTAL_PROCESSES} -c 1 ${PDC_CLIENT} 8388608 5 20 - - echo "Closing PDC servers..." - - export FI_CXI_DEFAULT_VNI=2 - - srun --mem=25600 --cpu_bind=cores --gres=craynetwork:1 --overlap -u -o ${PDC_JOB_OUTPUT} --open-mode=append -N ${PDC_N_NODES} -n ${PDC_N_NODES} -c 1 ${PDC_SERVER_CLOSE} - - echo "Installing dependencies..." - - pip install pydrive gspread gspread-dataframe google - - echo "Storing PDC metrics..." - - python3 ../../.github/workflows/store-metrics.py Cori ${PDC_JOB_OUTPUT} - - echo "Removing files..." 
- - rm -rf ${PDC_TMPDIR} ${PDC_DATA_LOC} - perlmutter-metrics: stage: metrics needs: From 37dd453dec41614407102e3c1a2d75c8feda83c7 Mon Sep 17 00:00:00 2001 From: Chen Wang Date: Fri, 19 May 2023 14:30:14 -0700 Subject: [PATCH 182/806] Remove unnecessary fflush call Signed-off-by: Chen Wang --- src/api/pdc_client_connect.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index 9079e04f9..bafaef938 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -244,7 +244,6 @@ PDC_Client_check_response(hg_context_t **hg_context) ret_value = SUCCEED; - fflush(stdout); FUNC_LEAVE(ret_value); } From 108bcc5ce05348368809c5f0e07036827c244ba1 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Tue, 23 May 2023 10:34:23 -0400 Subject: [PATCH 183/806] update formatting --- tools/llsm_importer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index a2a00dddd..ff358fcd3 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -352,7 +352,7 @@ main(int argc, char *argv[]) MPI_Barrier(MPI_COMM_WORLD); start = MPI_Wtime(); #else - start = getDoubleTimestamp(); + start = getDoubleTimestamp(); #endif // go through the csv table csv_row_t *current_row = csv_table->first_row; @@ -374,7 +374,7 @@ main(int argc, char *argv[]) if (rank == 0) { printf("[Completion Time] LLSM IMPORTER FINISHES! Time taken: %.4f seconds\n", rank, duration); } - + // free memory for csv table csv_free_table(csv_table); // close the container From d53a119bc0a6e9419894fa9d1203150fcee62266 Mon Sep 17 00:00:00 2001 From: Zhang Wei Date: Tue, 23 May 2023 14:34:44 -0700 Subject: [PATCH 184/806] LLSM_importer Tutorial and Timing for job completion time. (#86) * remove unnecessary install block from CMakeLists.txt * update output * Revert "update output" This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd. * build kvtag_add_get_scale * comment off free * update code * 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. moving work assigning block downwards right before creating objects \n 4. 
everything is tested working * do while loop added, tested with 1m object and works * 1m objects test works, 10m object test fail as the original also fails * add new executable to test set * enlarge PDC_SERVER_ID_INTERVAL * update code * update console args * add p search test * add console arg for changing number of attributes per object * free allocated memory * fix query count issue * fix attr length definition * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * fix data type * fix data type * fix data type * add client side statistics * add client side statistics * fix format * clang formatter * update CMake * update CMake * update CMake * free allocated memory properly * clang format * clang format * clang-format-10 * change file name * address review comments * update llsm importer * update llsm importer * update server checkpoint intervals * update gitignore * adding job scripts * adding one debugging msg * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update output for uint64_t * add scripts * update output for uint64_t * update output for uint64_t * update output for uint64_t * update scripts * update scripts * delete debugging message * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * update tag names * update tag names * update query startingpos * update query startingpos * update job scripts * add progressive timing for kvtag_add_get_scale * fix iteration count in final report * update job scripts and benckmark program * update message format * update message format * update message format * update message format * clang format * update job scripts * comment off object/container close procedure in benchmark to save node hours * change the max number of object to 1M * change the max length of attribute value * change the max length of attribute value * llsm tiff import test * llsm tiff import test * llsm tiff import test * llsm tiff import test * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update cmake and llsm_importer * update cmake and llsm_importer * close if in cmake * cmake fix tiff * cmake policy to suppress warning * add pdc include dir * update code * update code * update code * update code * update code * update code * update array generating method * update array generating method * update array generating method * update array generating method * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * fix return type * fix return type * add timing * add timing * fix output * llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader * fix vairable name * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * add scripts * add scripts * add scripts * debugging for nonMPI program * debugging for nonMPI program * debugging for nonMPI program * clang format, without PDC, everything works perfectly. 
program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created * enable MPI * enable MPI * enlarge BCase size * enlarge BCase size * enlarge BCase size * resolve bcast count * llsm data path in script * llsm data path in script * update csv reader * update csv reader * update csv reader * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * enlarge max write * update pdc * update pdc * update pdc * update pdc * update pdc_import.c * update pdc_import.c * update pdc_export.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update tools/cmake * clang format * clang format * added a tutorial for llsm_importer * added a tutorial for llsm_importer * make sure the line feed is included for string attribute * update timing for overall completion time * update formatting --------- Co-authored-by: Houjun Tang --- tools/llsm_importer.c | 84 ++++++++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 33 deletions(-) diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index d777812c9..ff358fcd3 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -29,7 +29,16 @@ typedef struct llsm_importer_args_t { int rank = 0, size = 1; -pdcid_t pdc_id_g = 0, cont_prop_g = 0, cont_id_g = 0, obj_prop_g = 0; +pdcid_t pdc_id_g = 0, cont_prop_g = 0, cont_id_g = 0, obj_prop_g = 0; +struct timespec ts; + +double +getDoubleTimestamp() +{ + clock_gettime(CLOCK_MONOTONIC, &ts); + double timestamp = (double)ts.tv_sec + (double)ts.tv_nsec / 1e9; + return timestamp; +} int parse_console_args(int argc, char *argv[], char **file_name) @@ -54,10 +63,9 @@ parse_console_args(int argc, char *argv[], char **file_name) void import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) { - struct timespec start, end; - double duration; + double duration, start; - clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation + start = getDoubleTimestamp(); // start timing the operation obj_prop_g = PDCprop_create(PDC_OBJ_CREATE, pdc_id_g); @@ -116,12 +124,10 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) PDCregion_transfer_start(transfer_request); PDCregion_transfer_wait(transfer_request); - clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation - duration = (end.tv_sec - start.tv_sec) * 1e9 + - (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds + duration = getDoubleTimestamp() - start; // end timing the operation and calculate duration in nanoseconds - printf("[Rank %4d] Region Transfer for object %s [%d Bytes] Done! Time taken: %.4f seconds\n", rank, - fileName_cell->field_value, image_info->tiff_size, duration / 1e9); + printf("[Rank %4d] Region_Transfer %s_[%d_Bytes] Done! 
Time taken: %.4f seconds\n", rank, + fileName_cell->field_value, image_info->tiff_size, duration); // add metadata tags based on the csv row csv_cell_t *cell = fileName_cell; @@ -139,7 +145,7 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) PDCobj_put_tag(cur_obj_g, field_name, &fvalue, sizeof(double)); break; case 's': - PDCobj_put_tag(cur_obj_g, field_name, field_value, sizeof(char) * strlen(field_value)); + PDCobj_put_tag(cur_obj_g, field_name, field_value, sizeof(char) * (strlen(field_value) + 1)); break; default: break; @@ -161,12 +167,10 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) PDCobj_close(cur_obj_g); // get timing - clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation - duration = (end.tv_sec - start.tv_sec) * 1e9 + - (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds + duration = getDoubleTimestamp() - start; // end timing the operation calculate duration in nanoseconds - printf("[Rank %4d] Create object %s Done! Time taken: %.4f seconds\n", rank, fileName_cell->field_value, - duration / 1e9); + printf("[Rank %4d] Create_object %s Done! Time taken: %.4f seconds\n", rank, fileName_cell->field_value, + duration); // free memory // free(offsets); @@ -185,10 +189,9 @@ on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) char *dirname = strdup(llsm_args->directory_path); char filepath[256]; // calling tiff loading process. - image_info_t * image_info = NULL; - int i = 0; - struct timespec start, end; - double duration; + image_info_t *image_info = NULL; + int i = 0; + double duration, start; // Filepath,Filename,StageX_um_,StageY_um_,StageZ_um_,ObjectiveX_um_,ObjectiveY_um_,ObjectiveZ_um_ // get the file name from the csv row @@ -202,16 +205,13 @@ on_csv_row(csv_row_t *row, llsm_importer_args_t *llsm_args) strcpy(filepath, dirname); // copy the directory path to the file path strcat(filepath, fileName_cell->field_value); // concatenate the file name to the file path - clock_gettime(CLOCK_MONOTONIC, &start); // start timing the operation + start = getDoubleTimestamp(); // start timing the operation parallel_TIFF_load(filepath, 1, NULL, &image_info); - clock_gettime(CLOCK_MONOTONIC, &end); // end timing the operation + duration = getDoubleTimestamp() - start; // end timing the operation and calculate duration in nanoseconds - duration = (end.tv_sec - start.tv_sec) * 1e9 + - (end.tv_nsec - start.tv_nsec); // calculate duration in nanoseconds - - printf("[Rand %4d] Read %s Done! Time taken: %.4f seconds\n", rank, filepath, duration / 1e9); + printf("[Rank %4d] Read %s Done! 
Time taken: %.4f seconds\n", rank, filepath, duration); if (image_info == NULL || image_info->tiff_ptr == NULL) { return; @@ -268,14 +268,15 @@ main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &size); #endif - char * file_name = NULL; - PDC_LIST * list = pdc_list_new(); - char * csv_line = NULL; - int num_row_read = 0; - csv_header_t * csv_header = NULL; - csv_row_t * csv_row = NULL; - llsm_importer_args_t *llsm_args = NULL; - int bcast_count = 512; + char * file_name = NULL; + PDC_LIST * list = pdc_list_new(); + char * csv_line = NULL; + int num_row_read = 0; + csv_header_t * csv_header = NULL; + csv_row_t * csv_row = NULL; + llsm_importer_args_t *llsm_args = NULL; + int bcast_count = 512; + double duration = 0, start = 0; char csv_field_types[] = {'s', 's', 'f', 'f', 'f', 'f', 'f', 'f'}; // parse console argument int parse_code = parse_console_args(argc, argv, &file_name); @@ -347,6 +348,12 @@ main(int argc, char *argv[]) llsm_args->directory_path = directory_path; llsm_args->csv_header = csv_table->first_header; +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + start = MPI_Wtime(); +#else + start = getDoubleTimestamp(); +#endif // go through the csv table csv_row_t *current_row = csv_table->first_row; while (current_row != NULL) { @@ -357,6 +364,17 @@ main(int argc, char *argv[]) current_row = current_row->next; } +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + duration = MPI_Wtime() - start; +#else + duration = getDoubleTimestamp() - start; +#endif + + if (rank == 0) { + printf("[Completion Time] LLSM IMPORTER FINISHES! Time taken: %.4f seconds\n", rank, duration); + } + // free memory for csv table csv_free_table(csv_table); // close the container From f255128bca6f794a882203d0423861632e34063f Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Tue, 23 May 2023 15:22:28 -0700 Subject: [PATCH 185/806] Fix Issue #85, server segfault when another client application with different number of ranks connects to it --- src/api/include/pdc_client_connect.h | 3 +- .../pdc_analysis_and_transforms_connect.c | 4 +- src/api/pdc_client_connect.c | 106 +++++++++--------- src/server/include/pdc_client_server_common.h | 7 ++ src/server/pdc_client_server_common.c | 1 + src/server/pdc_server.c | 10 +- 6 files changed, 71 insertions(+), 60 deletions(-) diff --git a/src/api/include/pdc_client_connect.h b/src/api/include/pdc_client_connect.h index 4b0816996..933f2a2e8 100644 --- a/src/api/include/pdc_client_connect.h +++ b/src/api/include/pdc_client_connect.h @@ -54,6 +54,7 @@ struct _pdc_client_lookup_args { uint32_t server_id; uint32_t client_id; int ret; + int is_init; char * ret_string; char * client_addr; @@ -687,7 +688,7 @@ hg_return_t PDC_Client_get_data_from_server_shm_cb(const struct hg_cb_info *call * * \return Non-negative on success/Negative on failure */ -perr_t PDC_Client_lookup_server(int server_id); +perr_t PDC_Client_lookup_server(int server_id, int is_init); /** * ******** diff --git a/src/api/pdc_analysis/pdc_analysis_and_transforms_connect.c b/src/api/pdc_analysis/pdc_analysis_and_transforms_connect.c index ac957b394..17bc11d5d 100644 --- a/src/api/pdc_analysis/pdc_analysis_and_transforms_connect.c +++ b/src/api/pdc_analysis/pdc_analysis_and_transforms_connect.c @@ -101,7 +101,7 @@ PDC_Client_send_iter_recv_id(pdcid_t iter_id, pdcid_t *meta_id) while (pdc_server_info_g[server_id].addr_valid != 1) { if (n_retry > 0) break; - if (PDC_Client_lookup_server(server_id) != SUCCEED) + if (PDC_Client_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with 
PDC_Client_lookup_server", pdc_client_mpi_rank_g); n_retry++; @@ -212,7 +212,7 @@ PDC_Client_register_obj_analysis(struct _pdc_region_analysis_ftn_info *thisFtn, while (pdc_server_info_g[server_id].addr_valid != 1) { if (n_retry > 0) break; - if (PDC_Client_lookup_server(server_id) != SUCCEED) + if (PDC_Client_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_lookup_server", pdc_client_mpi_rank_g); n_retry++; diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index bafaef938..bdf967885 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -785,6 +785,7 @@ client_test_connect_lookup_cb(const struct hg_cb_info *callback_info) in.client_id = pdc_client_mpi_rank_g; in.nclient = pdc_client_mpi_size_g; in.client_addr = client_lookup_args->client_addr; + in.is_init = client_lookup_args->is_init; ret_value = HG_Forward(client_test_handle, client_test_connect_rpc_cb, client_lookup_args, &in); if (ret_value != HG_SUCCESS) @@ -796,7 +797,7 @@ client_test_connect_lookup_cb(const struct hg_cb_info *callback_info) } perr_t -PDC_Client_lookup_server(int server_id) +PDC_Client_lookup_server(int server_id, int is_init) { perr_t ret_value = SUCCEED; hg_return_t hg_ret; @@ -817,6 +818,7 @@ PDC_Client_lookup_server(int server_id) lookup_args.client_id = pdc_client_mpi_rank_g; lookup_args.server_id = server_id; lookup_args.client_addr = self_addr; + lookup_args.is_init = is_init; target_addr_string = pdc_server_info_g[lookup_args.server_id].addr_string; if (is_client_debug_g == 1) { @@ -846,7 +848,7 @@ PDC_Client_lookup_server(int server_id) } perr_t -PDC_Client_try_lookup_server(int server_id) +PDC_Client_try_lookup_server(int server_id, int is_init) { perr_t ret_value = SUCCEED; int n_retry = 1; @@ -859,7 +861,7 @@ PDC_Client_try_lookup_server(int server_id) while (pdc_server_info_g[server_id].addr_valid != 1) { if (n_retry > PDC_MAX_TRIAL_NUM) break; - ret_value = PDC_Client_lookup_server(server_id); + ret_value = PDC_Client_lookup_server(server_id, is_init); if (ret_value != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: ERROR with PDC_Client_lookup_server", pdc_client_mpi_rank_g); n_retry++; @@ -1292,7 +1294,7 @@ PDC_Client_mercury_init(hg_class_t **hg_class, hg_context_t **hg_context, int po // Each client connect to its node local server only at start time local_server_id = PDC_get_local_server_id(pdc_client_mpi_rank_g, pdc_nclient_per_server_g, pdc_server_num_g); - if (PDC_Client_try_lookup_server(local_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(local_server_id, 1) != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: ERROR lookup server %d\n", pdc_client_mpi_rank_g, local_server_id); } @@ -1302,7 +1304,7 @@ PDC_Client_mercury_init(hg_class_t **hg_class, hg_context_t **hg_context, int po for (local_server_id = 0; local_server_id < pdc_server_num_g; local_server_id++) { if (pdc_client_mpi_size_g > 1000) PDC_msleep(pdc_client_mpi_rank_g % 300); - if (PDC_Client_try_lookup_server(local_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(local_server_id, 1) != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: ERROR lookup server %d", pdc_client_mpi_rank_g, local_server_id); } @@ -1643,7 +1645,7 @@ PDC_partial_query(int is_list_all, int user_id, const char *app_name, const char } for (server_id = my_server_start; server_id < my_server_end; server_id++) { - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: 
ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); hg_ret = HG_Create(send_context_g, pdc_server_info_g[server_id].addr, query_partial_register_id_g, @@ -1727,7 +1729,7 @@ PDC_Client_query_tag(const char *tags, int *n_res, pdc_metadata_t ***out) *n_res = 0; for (server_id = 0; server_id < pdc_server_num_g; server_id++) { - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); hg_ret = HG_Create(send_context_g, pdc_server_info_g[server_id].addr, query_partial_register_id_g, @@ -1938,7 +1940,7 @@ PDC_Client_add_tag(pdcid_t obj_id, const char *tag) // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_add_tag_register_id_g, @@ -2023,7 +2025,7 @@ PDC_Client_update_metadata(pdc_metadata_t *old, pdc_metadata_t *new) // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_update_register_id_g, @@ -2112,7 +2114,7 @@ PDC_Client_delete_metadata_by_id(uint64_t obj_id) else debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); hg_ret = HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_delete_by_id_register_id_g, @@ -2169,7 +2171,7 @@ PDC_Client_delete_metadata(char *delete_name, pdcid_t obj_delete_prop) else debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); hg_ret = HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_delete_register_id_g, @@ -2224,7 +2226,7 @@ PDC_Client_query_metadata_name_only(const char *obj_name, pdc_metadata_t **out) // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_query_register_id_g, @@ -2281,7 +2283,7 @@ PDC_Client_query_metadata_name_timestep(const char *obj_name, int time_step, pdc // Debug statistics for counting number of messages sent to each server. 
debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_query_register_id_g, @@ -2416,7 +2418,7 @@ PDC_Client_create_cont_id(const char *cont_name, pdcid_t cont_create_prop ATTRIB // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); hg_ret = @@ -2491,7 +2493,7 @@ PDC_Client_obj_reset_dims(const char *obj_name, int time_step, int ndim, uint64_ // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, obj_reset_dims_register_id_g, @@ -2621,7 +2623,7 @@ PDC_Client_send_name_recv_id(const char *obj_name, uint64_t cont_id, pdcid_t obj server_id); fflush(stdout); } - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); // We have already filled in the pdc_server_info_g[server_id].addr in previous @@ -2682,7 +2684,7 @@ PDC_Client_close_all_server() if (pdc_client_mpi_size_g >= pdc_server_num_g) { if (pdc_client_mpi_rank_g < pdc_server_num_g) { server_id = pdc_client_mpi_rank_g; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); @@ -2708,7 +2710,7 @@ PDC_Client_close_all_server() if (pdc_client_mpi_rank_g == 0) { for (i = 0; i < (uint32_t)pdc_server_num_g; i++) { server_id = pdc_server_num_g - 1 - i; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); @@ -2776,7 +2778,7 @@ PDC_Client_buf_unmap(pdcid_t remote_obj_id, pdcid_t remote_reg_id, struct pdc_re // Debug statistics for counting number of messages sent to each server. 
debug_server_id_count[data_server_id]++; - if (PDC_Client_try_lookup_server(data_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(data_server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[data_server_id].addr, buf_unmap_register_id_g, @@ -2857,7 +2859,7 @@ PDC_Client_flush_obj(uint64_t obj_id) FUNC_ENTER(NULL); for (i = 0; i < (uint32_t)pdc_server_num_g; i++) { server_id = pdc_server_num_g - 1 - i; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, flush_obj_register_id_g, @@ -2897,7 +2899,7 @@ PDC_Client_flush_obj_all() FUNC_ENTER(NULL); for (i = 0; i < (uint32_t)pdc_server_num_g; i++) { server_id = pdc_server_num_g - 1 - i; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, flush_obj_all_register_id_g, @@ -2956,7 +2958,7 @@ PDC_Client_transfer_request_all(int n_objs, pdc_access_t access_type, uint32_t d hg_class = HG_Context_get_class(send_context_g); - if (PDC_Client_try_lookup_server(data_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(data_server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server @ line %d", pdc_client_mpi_rank_g, __LINE__); @@ -3042,7 +3044,7 @@ PDC_Client_transfer_request_metadata_query2(char *buf, uint64_t total_buf_size, hg_class = HG_Context_get_class(send_context_g); - if (PDC_Client_try_lookup_server(metadata_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(metadata_server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server @ line %d", pdc_client_mpi_rank_g, __LINE__); hg_ret = HG_Create(send_context_g, pdc_server_info_g[metadata_server_id].addr, @@ -3113,7 +3115,7 @@ PDC_Client_transfer_request_metadata_query(char *buf, uint64_t total_buf_size, i hg_class = HG_Context_get_class(send_context_g); - if (PDC_Client_try_lookup_server(metadata_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(metadata_server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server @ line %d", pdc_client_mpi_rank_g, __LINE__); hg_ret = HG_Create(send_context_g, pdc_server_info_g[metadata_server_id].addr, @@ -3184,7 +3186,7 @@ PDC_Client_transfer_request_wait_all(int n_objs, pdcid_t *transfer_request_id, u hg_class = HG_Context_get_class(send_context_g); - if (PDC_Client_try_lookup_server(data_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(data_server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server @ line %d", pdc_client_mpi_rank_g, __LINE__); hg_ret = @@ -3296,7 +3298,7 @@ PDC_Client_transfer_request(void *buf, pdcid_t obj_id, uint32_t data_server_id, pack_region_metadata(remote_ndim, remote_offset, remote_size, &(in.remote_region)); - if (PDC_Client_try_lookup_server(data_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(data_server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server @ line %d", pdc_client_mpi_rank_g, __LINE__); @@ -3366,7 +3368,7 @@ 
PDC_Client_transfer_request_status(pdcid_t transfer_request_id, uint32_t data_se in.transfer_request_id = transfer_request_id; - if (PDC_Client_try_lookup_server(data_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(data_server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server @ line %d", pdc_client_mpi_rank_g, __LINE__); @@ -3418,7 +3420,7 @@ PDC_Client_transfer_request_wait(pdcid_t transfer_request_id, uint32_t data_serv in.transfer_request_id = transfer_request_id; in.access_type = access_type; - if (PDC_Client_try_lookup_server(data_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(data_server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server @ line %d", pdc_client_mpi_rank_g, __LINE__); @@ -3599,7 +3601,7 @@ PDC_Client_buf_map(pdcid_t local_region_id, pdcid_t remote_obj_id, size_t ndim, else PGOTO_ERROR(FAIL, "mapping for array of dimension greater than 4 is not supproted"); - if (PDC_Client_try_lookup_server(data_server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(data_server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[data_server_id].addr, buf_map_register_id_g, @@ -3682,7 +3684,7 @@ PDC_Client_region_lock(pdcid_t remote_obj_id, struct _pdc_obj_info *object_info, in.data_unit = PDC_get_var_type_size(data_type); PDC_region_info_t_to_transfer_unit(region_info, &(in.region), in.data_unit); - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, region_lock_register_id_g, @@ -3793,7 +3795,7 @@ pdc_region_release_with_server_transform(struct _pdc_obj_info * object_info, unit = type_extent; PDC_region_info_t_to_transfer_unit(region_info, &(in.region), unit); - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); // Create a bulk handle for the temp buffer used by the transform @@ -3902,7 +3904,7 @@ pdc_region_release_with_server_analysis(struct _pdc_obj_info * object_info, in.output_obj_id = obj_prop->obj_prop_pub->obj_prop_id; in.output_iter = outputIter->meta_id; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, region_analysis_release_register_id_g, @@ -4002,7 +4004,7 @@ pdc_region_release_with_client_transform(struct _pdc_obj_info * object_info, unit = type_extent; PDC_region_info_t_to_transfer_unit(region_info, &(in.region), unit); - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); transform_args.data = data_ptrs[0]; @@ -4334,7 +4336,7 @@ PDC_Client_region_release(pdcid_t remote_obj_id, struct _pdc_obj_info *object_in in.data_unit = PDC_get_var_type_size(data_type); PDC_region_info_t_to_transfer_unit(region_info, &(in.region), in.data_unit); - if (PDC_Client_try_lookup_server(server_id) != 
SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, region_release_register_id_g, @@ -4557,7 +4559,7 @@ PDC_Client_data_server_read_check(int server_id, uint32_t client_id, pdc_metadat read_size *= region->size[i]; } - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, data_server_read_check_register_id_g, @@ -4704,7 +4706,7 @@ PDC_Client_data_server_read(struct pdc_request *request) PDC_metadata_t_to_transfer_t(meta, &in.meta); PDC_region_info_t_to_transfer(region, &in.region); - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, data_server_read_register_id_g, @@ -4824,7 +4826,7 @@ PDC_Client_data_server_write_check(struct pdc_request *request, int *status) write_size *= region->size[i]; } - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, data_server_write_check_register_id_g, @@ -4989,7 +4991,7 @@ PDC_Client_data_server_write(struct pdc_request *request) PDC_metadata_t_to_transfer_t(meta, &in.meta); PDC_region_info_t_to_transfer(region, &in.region); - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); hg_ret = HG_Create(send_context_g, pdc_server_info_g[server_id].addr, data_server_write_register_id_g, @@ -5360,7 +5362,7 @@ PDC_Client_add_del_objects_to_container(int nobj, uint64_t *obj_ids, uint64_t co // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); // Send the bulk handle to the target with RPC @@ -5478,7 +5480,7 @@ PDC_Client_add_tags_to_container(pdcid_t cont_id, char *tags) // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); // Send the bulk handle to the target with RPC @@ -5555,7 +5557,7 @@ PDC_Client_query_container_name(const char *cont_name, uint64_t *cont_meta_id) // Debug statistics for counting number of messages sent to each server. 
debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, container_query_register_id_g, @@ -5732,7 +5734,7 @@ PDC_Client_query_name_read_entire_obj(int nobj, char **obj_names, void ***out_bu // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); // Send the bulk handle to the target with RPC @@ -5913,7 +5915,7 @@ PDC_Client_server_checkpoint(uint32_t server_id) // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); hg_ret = HG_Create(send_context_g, pdc_server_info_g[server_id].addr, server_checkpoint_rpc_register_id_g, @@ -6005,7 +6007,7 @@ PDC_Client_send_client_shm_info(uint32_t server_id, char *shm_addr, uint64_t siz // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); hg_ret = @@ -6120,7 +6122,7 @@ PDC_send_region_storage_meta_shm(uint32_t server_id, int n, region_storage_meta_ // Debug statistics for counting number of messages sent to each server. debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); hg_ret = HG_Create(send_context_g, pdc_server_info_g[server_id].addr, @@ -6428,7 +6430,7 @@ PDC_Client_query_multi_storage_info(int nobj, char **obj_names, region_storage_m send_n_request++; debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); @@ -7005,7 +7007,7 @@ PDC_add_kvtag(pdcid_t obj_id, pdc_kvtag_t *kvtag, int is_cont) // Debug statistics for counting number of messages sent to each server. 
debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_add_kvtag_register_id_g, @@ -7102,7 +7104,7 @@ PDC_get_kvtag(pdcid_t obj_id, char *tag_name, pdc_kvtag_t **kvtag, int is_cont) server_id = PDC_get_server_by_obj_id(meta_id, pdc_server_num_g); debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_get_kvtag_register_id_g, @@ -7154,7 +7156,7 @@ PDCtag_delete(pdcid_t obj_id, char *tag_name) debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_del_kvtag_register_id_g, @@ -7323,7 +7325,7 @@ PDC_Client_query_kvtag_server(uint32_t server_id, const pdc_kvtag_t *kvtag, int *out = NULL; *n_res = 0; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); hg_ret = HG_Create(send_context_g, pdc_server_info_g[server_id].addr, query_kvtag_register_id_g, @@ -7948,7 +7950,7 @@ PDC_send_data_query(pdc_query_t *query, pdc_query_get_op_t get_op, uint64_t *nhi query_xfer->next_server_id = next_server; query_xfer->prev_server_id = prev_server; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, send_data_query_register_id_g, &handle); @@ -8102,7 +8104,7 @@ PDC_Client_get_sel_data(pdcid_t obj_id, pdc_selection_t *sel, void *data) server_id = PDC_get_server_by_obj_id(meta_id, pdc_server_num_g); debug_server_id_count[server_id]++; - if (PDC_Client_try_lookup_server(server_id) != SUCCEED) + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); HG_Create(send_context_g, pdc_server_info_g[server_id].addr, get_sel_data_register_id_g, &handle); diff --git a/src/server/include/pdc_client_server_common.h b/src/server/include/pdc_client_server_common.h index 7be76a1b3..a57e2169f 100644 --- a/src/server/include/pdc_client_server_common.h +++ b/src/server/include/pdc_client_server_common.h @@ -282,6 +282,7 @@ typedef struct { typedef struct { int32_t client_id; int32_t nclient; + int32_t is_init; char client_addr[ADDR_MAX]; } client_test_connect_args; @@ -688,6 +689,7 @@ typedef struct { typedef struct { uint32_t client_id; int32_t nclient; + int is_init; hg_string_t client_addr; } client_test_connect_in_t; @@ -2140,6 +2142,11 @@ hg_proc_client_test_connect_in_t(hg_proc_t proc, void *data) // HG_LOG_ERROR("Proc error"); return ret; } + ret = hg_proc_int32_t(proc, &struct_data->is_init); + if (ret != HG_SUCCESS) { + // HG_LOG_ERROR("Proc 
error"); + return ret; + } ret = hg_proc_hg_string_t(proc, &struct_data->client_addr); if (ret != HG_SUCCESS) { // HG_LOG_ERROR("Proc error"); diff --git a/src/server/pdc_client_server_common.c b/src/server/pdc_client_server_common.c index 14417485b..a0a7845a4 100644 --- a/src/server/pdc_client_server_common.c +++ b/src/server/pdc_client_server_common.c @@ -1656,6 +1656,7 @@ HG_TEST_RPC_CB(client_test_connect, handle) #endif args->client_id = in.client_id; args->nclient = in.nclient; + args->is_init = in.is_init; sprintf(args->client_addr, "%s", in.client_addr); #ifdef ENABLE_MULTITHREAD hg_thread_mutex_unlock(&pdc_client_info_mutex_g); diff --git a/src/server/pdc_server.c b/src/server/pdc_server.c index 1ffddb283..ccd6ca95c 100644 --- a/src/server/pdc_server.c +++ b/src/server/pdc_server.c @@ -82,7 +82,6 @@ pdc_client_info_t * pdc_client_info_g = NULL; pdc_remote_server_info_t *pdc_remote_server_info_g = NULL; char * all_addr_strings_1d_g = NULL; char ** all_addr_strings_g = NULL; -int is_all_client_connected_g = 0; int is_hash_table_init_g = 0; int lustre_stripe_size_mb_g = 16; int lustre_total_ost_g = 0; @@ -265,13 +264,14 @@ PDC_Server_get_client_addr(const struct hg_cb_info *callback_info) hg_thread_mutex_lock(&pdc_client_addr_mutex_g); #endif - if (is_all_client_connected_g == 1) { - printf("==PDC_SERVER[%d]: new application run detected, create new client info\n", pdc_server_rank_g); - fflush(stdout); + if (pdc_client_info_g && in->is_init == 1) { + if (is_debug_g && pdc_server_rank_g == 0) { + printf("==PDC_SERVER[%d]: new application run detected, create new client info\n", pdc_server_rank_g); + fflush(stdout); + } PDC_Server_destroy_client_info(pdc_client_info_g); pdc_client_info_g = NULL; - is_all_client_connected_g = 0; } #ifdef ENABLE_MULTITHREAD From 62798a4fa6fd886487434e7c8072fd489cbe70cb Mon Sep 17 00:00:00 2001 From: github-actions Date: Tue, 23 May 2023 22:22:57 +0000 Subject: [PATCH 186/806] Committing clang-format changes --- src/server/pdc_server.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/server/pdc_server.c b/src/server/pdc_server.c index ccd6ca95c..a3020628a 100644 --- a/src/server/pdc_server.c +++ b/src/server/pdc_server.c @@ -78,13 +78,13 @@ pdc_task_list_t *pdc_server_agg_task_head_g = NULL; pdc_task_list_t *pdc_server_s2s_task_head_g = NULL; int pdc_server_task_id_g = PDC_SERVER_TASK_INIT_VALUE; -pdc_client_info_t * pdc_client_info_g = NULL; -pdc_remote_server_info_t *pdc_remote_server_info_g = NULL; -char * all_addr_strings_1d_g = NULL; -char ** all_addr_strings_g = NULL; -int is_hash_table_init_g = 0; -int lustre_stripe_size_mb_g = 16; -int lustre_total_ost_g = 0; +pdc_client_info_t * pdc_client_info_g = NULL; +pdc_remote_server_info_t *pdc_remote_server_info_g = NULL; +char * all_addr_strings_1d_g = NULL; +char ** all_addr_strings_g = NULL; +int is_hash_table_init_g = 0; +int lustre_stripe_size_mb_g = 16; +int lustre_total_ost_g = 0; hg_id_t get_remote_metadata_register_id_g; hg_id_t buf_map_server_register_id_g; @@ -266,12 +266,13 @@ PDC_Server_get_client_addr(const struct hg_cb_info *callback_info) if (pdc_client_info_g && in->is_init == 1) { if (is_debug_g && pdc_server_rank_g == 0) { - printf("==PDC_SERVER[%d]: new application run detected, create new client info\n", pdc_server_rank_g); + printf("==PDC_SERVER[%d]: new application run detected, create new client info\n", + pdc_server_rank_g); fflush(stdout); } PDC_Server_destroy_client_info(pdc_client_info_g); - pdc_client_info_g = NULL; + pdc_client_info_g 
= NULL; } #ifdef ENABLE_MULTITHREAD From cec387e2d2735d348291f45a7fbf694d444f51b8 Mon Sep 17 00:00:00 2001 From: Jean Luca Bez Date: Mon, 5 Jun 2023 05:51:40 -0700 Subject: [PATCH 187/806] update metrics --- .github/workflows/store-metrics.py | 59 ++++++++++++++++++++++++++++++ .gitlab-ci.yml | 3 +- 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/.github/workflows/store-metrics.py b/.github/workflows/store-metrics.py index fed492af0..9d12b6276 100644 --- a/.github/workflows/store-metrics.py +++ b/.github/workflows/store-metrics.py @@ -154,6 +154,9 @@ if len(obj_create_time_node) > 0: observations = { + 'branch': sys.argv[3], + 'JOBID': sys.argv[4], + 'pdc_metadata_servers': pdc_metadata_servers, 'pdc_metadata_clients': pdc_metadata_clients, @@ -245,3 +248,59 @@ 'values': df_values } ) + + lines = [] + + # Record all the steps + for step in range(0, len(obj_create_time_node)): + line = { + 'branch': sys.argv[3], + 'JOBID': sys.argv[4], + + 'pdc_metadata_servers': pdc_metadata_servers, + 'pdc_metadata_clients': pdc_metadata_clients, + + 'date': str(today), + + 'step': step + 1, + + 'obj_create_time_step': obj_create_time_node[step], + 'xfer_create_time_step': xfer_create_time_node[step], + 'xfer_start_time_step': xfer_start_time_node[step], + 'xfer_wait_time_step': xfer_wait_time_node[step], + 'xfer_close_time_step': xfer_close_time_node[step], + 'obj_close_time_step': obj_close_time_node[step], + 'sleep_time_step': sleep_time_node[step] if len(sleep_time_node) < step else 0 + } + + lines.append(line) + + dataset = pd.DataFrame.from_dict(lines) + + print(dataset) + + dataset.applymap(lambda x: x.strip() if isinstance(x, str) else x) + + df_values = dataset.values.tolist() + + if False: + # Submit the header to the spreadsheet + gs.values_append( + '{} - Steps'.format(sys.argv[1]), + { + 'valueInputOption': 'USER_ENTERED' + }, + { + 'values': [dataset.columns.tolist()] + } + ) + + gs.values_append( + '{} - Steps'.format(sys.argv[1]), + { + 'valueInputOption': 'USER_ENTERED' + }, + { + 'values': df_values + } + ) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 03c29ca4a..c588c2429 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -162,6 +162,7 @@ perlmutter-metrics: PDC_JOB_OUTPUT: "pdc-metrics.log" script: - hostname + - echo "JOBID ${SLURM_JOB_ID}" - export NERSC_HOST=`cat /etc/clustername` - module load python - export LD_LIBRARY_PATH="$MERCURY_DIR/lib:$LD_LIBRARY_PATH" @@ -176,6 +177,6 @@ perlmutter-metrics: - echo "Installing dependencies..." - pip install pydrive gspread gspread-dataframe google - echo "Storing PDC metrics..." - - python3 ../../.github/workflows/store-metrics.py Perlmutter ${PDC_JOB_OUTPUT} + - python3 ../../.github/workflows/store-metrics.py Perlmutter ${PDC_JOB_OUTPUT} ${CI_COMMIT_BRANCH} ${SLURM_JOB_ID} - echo "Removing files..." 
- rm -rf ${PDC_TMPDIR} ${PDC_DATA_LOC} From d7c4f11315eb4eed550a0cd834497acaf46e9703 Mon Sep 17 00:00:00 2001 From: Jean Luca Bez Date: Tue, 13 Jun 2023 09:56:11 -0700 Subject: [PATCH 188/806] Update .gitlab-ci.yml --- .gitlab-ci.yml | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c588c2429..27e4d751c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,6 +30,8 @@ perlmutter-build: perlmutter-parallel-pdc: stage: test + rules: + - if: '$METRICS == null' needs: - perlmutter-build tags: @@ -48,6 +50,8 @@ perlmutter-parallel-pdc: perlmutter-parallel-obj: stage: test + rules: + - if: '$METRICS == null' needs: - perlmutter-build - perlmutter-parallel-pdc @@ -67,6 +71,8 @@ perlmutter-parallel-obj: perlmutter-parallel-cont: stage: test + rules: + - if: '$METRICS == null' needs: - perlmutter-build - perlmutter-parallel-pdc @@ -86,6 +92,8 @@ perlmutter-parallel-cont: perlmutter-parallel-prop: stage: test + rules: + - if: '$METRICS == null' needs: - perlmutter-build - perlmutter-parallel-pdc @@ -105,6 +113,8 @@ perlmutter-parallel-prop: perlmutter-parallel-region: stage: test + rules: + - if: '$METRICS == null' needs: - perlmutter-build - perlmutter-parallel-pdc @@ -124,6 +134,8 @@ perlmutter-parallel-region: perlmutter-parallel-region-all: stage: test + rules: + - if: '$METRICS == null' needs: - perlmutter-build - perlmutter-parallel-pdc @@ -143,14 +155,16 @@ perlmutter-parallel-region-all: perlmutter-metrics: stage: metrics + rules: + - if: '$METRICS == "true"' needs: - perlmutter-build tags: - perlmutter variables: - PDC_N_NODES: 64 + PDC_N_NODES: 4 PDC_N_CLIENTS: 127 - SCHEDULER_PARAMETERS: "-A m1248 --qos=regular --constraint=cpu --tasks-per-node=${PDC_N_CLIENTS} -N ${PDC_N_NODES} -t 00:30:00" + SCHEDULER_PARAMETERS: "-A m1248 --qos=debug --constraint=cpu --tasks-per-node=${PDC_N_CLIENTS} -N ${PDC_N_NODES} -t 00:30:00" SUPERCOMPUTER: "perlmutter" MERCURY_DIR: "/global/cfs/cdirs/m1248/pdc-perlmutter/mercury/install" PDC_TMPDIR: "${PDC_BUILD_PATH}/pdc-tmp-metrics" From a981716c286dcff60ed93163eb89c3ffbca1d0c3 Mon Sep 17 00:00:00 2001 From: Jean Luca Bez Date: Tue, 13 Jun 2023 10:06:47 -0700 Subject: [PATCH 189/806] update VPIC output timing precision (#88) * update VPIC output timing precision * update timing to make consistent --- src/tests/bdcats_v2.c | 8 +++---- src/tests/client_server.c | 2 +- src/tests/cont_add_del.c | 4 ++-- src/tests/create_obj_scale.c | 4 ++-- src/tests/data_server_read.c | 2 +- src/tests/data_server_read_multi.c | 2 +- src/tests/data_server_read_vpic_multits.c | 2 +- .../data_server_read_vpic_spatial_multits.c | 2 +- src/tests/data_server_write.c | 2 +- src/tests/data_server_write_multi.c | 2 +- src/tests/data_server_write_vpic_multits.c | 2 +- src/tests/delete_obj_scale.c | 2 +- src/tests/kvtag_add_get_benchmark.c | 24 +++++++++---------- src/tests/kvtag_query_scale.c | 2 +- src/tests/list_all.c | 2 +- src/tests/obj_lock.c | 4 ++-- src/tests/obj_map.c | 4 ++-- src/tests/obj_transformation.c | 6 ++--- src/tests/query_data.c | 2 +- src/tests/query_vpic.c | 6 ++--- src/tests/query_vpic_bin_sds1_nopreload.c | 4 ++-- src/tests/query_vpic_bin_sds1_preload.c | 4 ++-- src/tests/query_vpic_bin_sds_nopreload.c | 4 ++-- src/tests/query_vpic_bin_sds_preload.c | 4 ++-- src/tests/query_vpic_exyz_nopreload.c | 4 ++-- src/tests/query_vpic_exyz_preload.c | 4 ++-- src/tests/query_vpic_multi.c | 6 ++--- src/tests/query_vpic_multi_nopreload.c | 6 ++--- src/tests/query_vpic_multi_nopreload1.c | 6 ++--- 
src/tests/query_vpic_multi_preload.c | 6 ++--- src/tests/read_obj.c | 4 ++-- src/tests/search_obj.c | 6 ++--- src/tests/search_obj_scale.c | 4 ++-- src/tests/stat_obj.c | 4 ++-- src/tests/update_obj.c | 4 ++-- src/tests/vpicio.c | 10 ++++---- src/tests/vpicio_mts.c | 12 +++++----- src/tests/vpicio_v2.c | 6 ++--- src/tests/write_obj_shared.c | 2 +- 39 files changed, 92 insertions(+), 92 deletions(-) diff --git a/src/tests/bdcats_v2.c b/src/tests/bdcats_v2.c index aeea45134..88a78b094 100644 --- a/src/tests/bdcats_v2.c +++ b/src/tests/bdcats_v2.c @@ -226,7 +226,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to map with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to map with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } @@ -275,7 +275,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to lock with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to lock with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } @@ -324,7 +324,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to relese lock with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to relese lock with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } @@ -372,7 +372,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to read data with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to read data with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/client_server.c b/src/tests/client_server.c index cd1d8810f..3e2411ce5 100644 --- a/src/tests/client_server.c +++ b/src/tests/client_server.c @@ -237,7 +237,7 @@ main(int argc, char **argv) hg_time_get_current(&end_time); elapsed_time = hg_time_subtract(end_time, start_time); elapsed_time_double = hg_time_to_double(elapsed_time); - printf("Total elapsed time for PDC server connection: %.6fs\n", elapsed_time_double); + printf("Total elapsed time for PDC server connection: %.5e s\n", elapsed_time_double); for (i = 0; i < n_server; i++) { printf("\"%s\" obj_id = %d\n", client_lookup_args[i].obj_name, client_lookup_args[i].obj_id); diff --git a/src/tests/cont_add_del.c b/src/tests/cont_add_del.c index 993a41f16..de55ff6e7 100644 --- a/src/tests/cont_add_del.c +++ b/src/tests/cont_add_del.c @@ -198,7 +198,7 @@ main(int argc, char **argv) ht_total_end.tv_usec - ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("%10d created ... %.4f s\n", i * size, ht_total_sec); + printf("%10d created ... %.5e s\n", i * size, ht_total_sec); fflush(stdout); } #ifdef ENABLE_MPI @@ -215,7 +215,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to create %d obj/rank with %d ranks: %.6f\n", count, size, ht_total_sec); + printf("Time to create %d obj/rank with %d ranks: %.5e\n", count, size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/create_obj_scale.c b/src/tests/create_obj_scale.c index 61244a63d..ebd0d373e 100644 --- a/src/tests/create_obj_scale.c +++ b/src/tests/create_obj_scale.c @@ -201,7 +201,7 @@ main(int argc, char **argv) ht_total_end.tv_usec - ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("%10d created ... 
%.4f s\n", i * size, ht_total_sec); + printf("%10d created ... %.5e s\n", i * size, ht_total_sec); fflush(stdout); } #ifdef ENABLE_MPI @@ -218,7 +218,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to create %d obj/rank with %d ranks: %.6f\n", count, size, ht_total_sec); + printf("Time to create %d obj/rank with %d ranks: %.5e\n", count, size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/data_server_read.c b/src/tests/data_server_read.c index b57855a58..f56ac5f09 100644 --- a/src/tests/data_server_read.c +++ b/src/tests/data_server_read.c @@ -88,7 +88,7 @@ main(int argc, char **argv) ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to read data with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to read data with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/data_server_read_multi.c b/src/tests/data_server_read_multi.c index 2081547e2..9d6cdafda 100644 --- a/src/tests/data_server_read_multi.c +++ b/src/tests/data_server_read_multi.c @@ -197,7 +197,7 @@ main(int argc, char **argv) if (rank == 0) { printf( - "Total time read %d ts data each %luMB with %d ranks: %.6f, meta %.2f, wait %.2f, sleep %.2f\n", + "Total time read %d ts data each %luMB with %d ranks: %.5e, meta %.2f, wait %.2f, sleep %.2f\n", ntimestep, size_MB, size, total_elapsed / 1000000.0, total_meta_sec, total_wait_sec, sleepseconds * ntimestep); fflush(stdout); diff --git a/src/tests/data_server_read_vpic_multits.c b/src/tests/data_server_read_vpic_multits.c index 6222c1725..f260e8cfb 100644 --- a/src/tests/data_server_read_vpic_multits.c +++ b/src/tests/data_server_read_vpic_multits.c @@ -322,7 +322,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); #endif if (rank == 0) - printf("Timestep %d: query time %.4f, read time %.4f, wait time %.4f, compute time %.4f\n", ts, + printf("Timestep %d: query time %.5e, read time %.5e, wait time %.5e, compute time %.5e\n", ts, query_time, read_time, wait_time, true_sleep_time); } // end of for ts diff --git a/src/tests/data_server_read_vpic_spatial_multits.c b/src/tests/data_server_read_vpic_spatial_multits.c index 6b3c5a5ff..e667736cf 100644 --- a/src/tests/data_server_read_vpic_spatial_multits.c +++ b/src/tests/data_server_read_vpic_spatial_multits.c @@ -326,7 +326,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); #endif if (rank == 0) - printf("Timestep %d: query time %.4f, read time %.4f, wait time %.4f, compute time %.4f\n", ts, + printf("Timestep %d: query time %.5e, read time %.5e, wait time %.5e, compute time %.5e\n", ts, query_time, read_time, wait_time, true_sleep_time); } // end of for ts diff --git a/src/tests/data_server_write.c b/src/tests/data_server_write.c index b9c46527e..174d780fe 100644 --- a/src/tests/data_server_write.c +++ b/src/tests/data_server_write.c @@ -124,7 +124,7 @@ main(int argc, char **argv) ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to write data with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to write data with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/data_server_write_multi.c b/src/tests/data_server_write_multi.c index 541365069..fd5a02478 100644 --- a/src/tests/data_server_write_multi.c +++ b/src/tests/data_server_write_multi.c @@ -224,7 +224,7 @@ main(int argc, char **argv) if (rank == 0) { printf( - "Total time write %d ts data each %luMB with %d ranks: %.6f, meta %.2f, wait 
%.2f, sleep %.2f\n", + "Total time write %d ts data each %luMB with %d ranks: %.5e, meta %.2f, wait %.2f, sleep %.2f\n", ntimestep, size_MB, size, total_elapsed / 1000000.0, total_meta_sec, total_wait_sec, sleepseconds * ntimestep); fflush(stdout); diff --git a/src/tests/data_server_write_vpic_multits.c b/src/tests/data_server_write_vpic_multits.c index 6729c759f..6a943bb58 100644 --- a/src/tests/data_server_write_vpic_multits.c +++ b/src/tests/data_server_write_vpic_multits.c @@ -346,7 +346,7 @@ main(int argc, char **argv) } if (rank == 0) - printf("Timestep %d: create time %.6f, query time %.6f, write time %.6f, wait time %.6f\n", ts, + printf("Timestep %d: create time %.5e, query time %.5e, write time %.5e, wait time %.5e\n", ts, create_time, query_time, write_time, wait_time); } diff --git a/src/tests/delete_obj_scale.c b/src/tests/delete_obj_scale.c index ceff24141..e97aa409f 100644 --- a/src/tests/delete_obj_scale.c +++ b/src/tests/delete_obj_scale.c @@ -191,7 +191,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to create %d obj/rank with %d ranks: %.6f\n", count, size, ht_total_sec); + printf("Time to create %d obj/rank with %d ranks: %.5e\n", count, size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 5cda25433..ee5efdcbf 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -399,12 +399,12 @@ main(int argc, char *argv[]) if (my_rank == 0) { printf("Iteration %" PRIu64 " : Objects: %" PRIu64 - " , Time: %.4f sec. Object throughput in this iteration: " - "%.4f .\n", + " , Time: %.5e sec. Object throughput in this iteration: " + "%.5e .\n", k, n_obj_incr, step_elapse, ((double)n_obj_incr) / step_elapse); printf("Overall %" PRIu64 " : Objects: %" PRIu64 - " , Time: %.4f sec. Overall object throughput: " - "%.4f .\n", + " , Time: %.5e sec. Overall object throughput: " + "%.5e .\n", k, total_object_count, total_object_time, ((double)total_object_count) / total_object_time); } @@ -424,10 +424,10 @@ main(int argc, char *argv[]) #endif if (my_rank == 0) { printf("Iteration %" PRIu64 " : Tags: %" PRIu64 - " , Time: %.4f sec. Tag throughput in this iteration: %.4f .\n", + " , Time: %.5e sec. Tag throughput in this iteration: %.5e .\n", k, n_obj_incr * n_attr, step_elapse, (double)(n_obj_incr * n_attr) / step_elapse); printf("Overall %" PRIu64 " : Tags: %" PRIu64 - " , Time: %.4f sec. Overall tag throughput: %.4f .\n", + " , Time: %.5e sec. Overall tag throughput: %.5e .\n", k, total_tag_count, total_tag_time, ((double)total_tag_count) / total_tag_time); } @@ -448,12 +448,12 @@ main(int argc, char *argv[]) #endif if (my_rank == 0) { printf("Iteration %" PRIu64 " : Queries: %" PRIu64 - " , Time: %.4f sec. Query throughput in this iteration: " - "%.4f .\n", + " , Time: %.5e sec. Query throughput in this iteration: " + "%.5e .\n", k, n_query * n_attr, step_elapse, (double)(n_query * n_attr) / step_elapse); printf("Overall %" PRIu64 " : Queries: %" PRIu64 - " , Time: %.4f sec. Overall query throughput: " - "%.4f .\n", + " , Time: %.5e sec. 
Overall query throughput: " + "%.5e .\n", k, total_query_count, total_query_time, ((double)total_query_count) / total_query_time); } @@ -468,13 +468,13 @@ main(int argc, char *argv[]) if (my_rank == 0) { printf("Final Report: \n"); - printf("[Final Report 1] Servers: %" PRIu64 " , Clients: %" PRIu64 " , C/S ratio: %.4f \n", n_servers, + printf("[Final Report 1] Servers: %" PRIu64 " , Clients: %" PRIu64 " , C/S ratio: %.5e \n", n_servers, n_clients, (double)n_clients / (double)n_servers); printf("[Final Report 2] Iterations: %" PRIu64 " , Objects: %" PRIu64 " , Tags/Object: %" PRIu64 " , Queries/Iteration: " "%" PRIu64 " , \n", k, total_object_count, n_attr, n_query); - printf("[Final Report 3] Object throughput: %.4f , Tag Throughput: %.4f , Query Throughput: %.4f ,", + printf("[Final Report 3] Object throughput: %.5e , Tag Throughput: %.5e , Query Throughput: %.5e ,", (double)total_object_count / total_object_time, (double)(total_object_count * n_attr) / total_tag_time, (double)(total_query_count * n_attr) / total_query_time); diff --git a/src/tests/kvtag_query_scale.c b/src/tests/kvtag_query_scale.c index 80ca2a07c..a42cd5a5a 100644 --- a/src/tests/kvtag_query_scale.c +++ b/src/tests/kvtag_query_scale.c @@ -175,7 +175,7 @@ main(int argc, char *argv[]) #endif if (my_rank == 0) - printf("Total time to query %d objects with tag: %.4f\n", ntotal, total_time); + printf("Total time to query %d objects with tag: %.5e\n", ntotal, total_time); fflush(stdout); } diff --git a/src/tests/list_all.c b/src/tests/list_all.c index 66a282030..508d23db6 100644 --- a/src/tests/list_all.c +++ b/src/tests/list_all.c @@ -187,7 +187,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to create %d obj/rank with %d ranks: %.6f\n", count, size, ht_total_sec); + printf("Time to create %d obj/rank with %d ranks: %.5e\n", count, size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/obj_lock.c b/src/tests/obj_lock.c index bf7b365cc..8a3ea71d3 100644 --- a/src/tests/obj_lock.c +++ b/src/tests/obj_lock.c @@ -118,7 +118,7 @@ main(int argc, char **argv) total_lock_overhead = elapsed / 1000000.0; if (rank == 0) { - printf("Total lock overhead : %.6f\n", total_lock_overhead); + printf("Total lock overhead : %.5e\n", total_lock_overhead); } #ifdef ENABLE_MPI @@ -142,7 +142,7 @@ main(int argc, char **argv) total_lock_overhead = elapsed / 1000000.0; if (rank == 0) { - printf("Total lock release overhead: %.6f\n", total_lock_overhead); + printf("Total lock release overhead: %.5e\n", total_lock_overhead); } // close object diff --git a/src/tests/obj_map.c b/src/tests/obj_map.c index 88d7a8333..2efae2e43 100644 --- a/src/tests/obj_map.c +++ b/src/tests/obj_map.c @@ -168,7 +168,7 @@ main(int argc, char **argv) ht_total_elapsed = (ht_total_end.tv_sec - ht_total_start.tv_sec) * 1000000LL + ht_total_end.tv_usec - ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; - printf("Total map overhead : %.6f\n", ht_total_sec); + printf("Total map overhead : %.5e\n", ht_total_sec); fflush(stdout); gettimeofday(&ht_total_start, 0); @@ -182,7 +182,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; - printf("Total unmap overhead : %.6f\n", ht_total_sec); + printf("Total unmap overhead : %.5e\n", ht_total_sec); // close a container if (PDCcont_close(cont_id) < 0) diff --git a/src/tests/obj_transformation.c b/src/tests/obj_transformation.c index 1e9fe2036..a9e2a23d6 100644 --- 
a/src/tests/obj_transformation.c +++ b/src/tests/obj_transformation.c @@ -272,7 +272,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to map with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to map with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } @@ -321,7 +321,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to lock with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to lock with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } @@ -384,7 +384,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to update data with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to update data with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } #ifdef ENABLE_MPI diff --git a/src/tests/query_data.c b/src/tests/query_data.c index c7c28999f..62112f6ed 100644 --- a/src/tests/query_data.c +++ b/src/tests/query_data.c @@ -142,7 +142,7 @@ main(int argc, char **argv) ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to write data with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to write data with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/query_vpic.c b/src/tests/query_vpic.c index b41377c8c..5b3a43305 100644 --- a/src/tests/query_vpic.c +++ b/src/tests/query_vpic.c @@ -44,7 +44,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); double get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", get_sel_time); printf(" Query results:\n"); if (sel.nhits < 500) @@ -63,12 +63,12 @@ main(void) gettimeofday(&pdc_timer_end, 0); double get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); printf("Query result energy data (%" PRIu64 " hits):\n", sel.nhits); for (i = 0; i < sel.nhits; i++) { if (energy_data[i] > energy_hi0 || energy_data[i] < energy_lo0) { - printf("Error with result %" PRIu64 ": %.4f\n", i, energy_data[i]); + printf("Error with result %" PRIu64 ": %.5e\n", i, energy_data[i]); } } printf("Verified: all correct!\n"); diff --git a/src/tests/query_vpic_bin_sds1_nopreload.c b/src/tests/query_vpic_bin_sds1_nopreload.c index f741f1634..c2ce6e8d2 100644 --- a/src/tests/query_vpic_bin_sds1_nopreload.c +++ b/src/tests/query_vpic_bin_sds1_nopreload.c @@ -79,7 +79,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); printf("Query result in (%" PRIu64 " hits):\n", sel.nhits); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", get_sel_time); if (sel.nhits > 0) { energy_data = (float *)calloc(sel.nhits, sizeof(float)); @@ -95,7 +95,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); fflush(stdout); } diff --git a/src/tests/query_vpic_bin_sds1_preload.c b/src/tests/query_vpic_bin_sds1_preload.c index eff619513..b6e3dd5a3 100644 --- a/src/tests/query_vpic_bin_sds1_preload.c +++ b/src/tests/query_vpic_bin_sds1_preload.c @@ -92,7 
+92,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); printf("Query result in (%" PRIu64 " hits):\n", sel.nhits); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", get_sel_time); if (sel.nhits > 0) { energy_data = (float *)calloc(sel.nhits, sizeof(float)); @@ -108,7 +108,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); fflush(stdout); } diff --git a/src/tests/query_vpic_bin_sds_nopreload.c b/src/tests/query_vpic_bin_sds_nopreload.c index 2e6cea44f..d4a89dd44 100644 --- a/src/tests/query_vpic_bin_sds_nopreload.c +++ b/src/tests/query_vpic_bin_sds_nopreload.c @@ -98,7 +98,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); printf("Query result in (%" PRIu64 " hits):\n", sel.nhits); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", get_sel_time); if (sel.nhits > 0) { energy_data = (float *)calloc(sel.nhits, sizeof(float)); @@ -114,7 +114,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); fflush(stdout); } diff --git a/src/tests/query_vpic_bin_sds_preload.c b/src/tests/query_vpic_bin_sds_preload.c index 940bee35d..c0606f66d 100644 --- a/src/tests/query_vpic_bin_sds_preload.c +++ b/src/tests/query_vpic_bin_sds_preload.c @@ -94,7 +94,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); printf("Query result in (%" PRIu64 " hits):\n", sel.nhits); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", get_sel_time); if (sel.nhits > 0) { energy_data = (float *)calloc(sel.nhits, sizeof(float)); @@ -110,7 +110,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); fflush(stdout); } diff --git a/src/tests/query_vpic_exyz_nopreload.c b/src/tests/query_vpic_exyz_nopreload.c index b03167529..d3593b90a 100644 --- a/src/tests/query_vpic_exyz_nopreload.c +++ b/src/tests/query_vpic_exyz_nopreload.c @@ -111,7 +111,7 @@ main(int argc, char **argv) gettimeofday(&pdc_timer_end, 0); get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); printf("Query result in (%" PRIu64 " hits):\n", sel.nhits); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", get_sel_time); if (sel.nhits > 0) { energy_data = (float *)calloc(sel.nhits, sizeof(float)); @@ -127,7 +127,7 @@ main(int argc, char **argv) gettimeofday(&pdc_timer_end, 0); get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); for (i = 0; i < sel.nhits; i++) { if (energy_data[i] < energy_lo) { diff --git a/src/tests/query_vpic_exyz_preload.c b/src/tests/query_vpic_exyz_preload.c index 0fe32d6e5..80313d58d 100644 --- a/src/tests/query_vpic_exyz_preload.c +++ b/src/tests/query_vpic_exyz_preload.c @@ -95,7 +95,7 @@ main(int 
argc, char **argv) gettimeofday(&pdc_timer_end, 0); get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); printf("Query result in (%" PRIu64 " hits):\n", sel.nhits); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", get_sel_time); if (sel.nhits > 0) { energy_data = (float *)calloc(sel.nhits, sizeof(float)); @@ -109,7 +109,7 @@ main(int argc, char **argv) gettimeofday(&pdc_timer_end, 0); get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); fflush(stdout); } diff --git a/src/tests/query_vpic_multi.c b/src/tests/query_vpic_multi.c index 3bb013769..4704b3e1d 100644 --- a/src/tests/query_vpic_multi.c +++ b/src/tests/query_vpic_multi.c @@ -50,7 +50,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); printf("Querying Energy in [%.2f, %.2f]\n", energy_lo0, energy_hi0); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", get_sel_time); if (sel.nhits > 0) { energy_data = (float *)calloc(sel.nhits, sizeof(float)); @@ -62,12 +62,12 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); printf("Query result energy data (%" PRIu64 " hits):\n", sel.nhits); for (i = 0; i < sel.nhits; i++) { if (energy_data[i] > energy_hi0 || energy_data[i] < energy_lo0) { - printf("Error with result %" PRIu64 ": %.4f\n", i, energy_data[i]); + printf("Error with result %" PRIu64 ": %.5e\n", i, energy_data[i]); } } printf("Verified: all correct!\n"); diff --git a/src/tests/query_vpic_multi_nopreload.c b/src/tests/query_vpic_multi_nopreload.c index d8debbbbb..dc0da4494 100644 --- a/src/tests/query_vpic_multi_nopreload.c +++ b/src/tests/query_vpic_multi_nopreload.c @@ -50,7 +50,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); printf("Querying Energy in [%.2f, %.2f]\n", energy_lo0, energy_hi0); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", get_sel_time); if (sel.nhits > 0) { energy_data = (float *)calloc(sel.nhits, sizeof(float)); @@ -62,12 +62,12 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); printf("Query result energy data (%" PRIu64 " hits):\n", sel.nhits); for (i = 0; i < sel.nhits; i++) { if (energy_data[i] > energy_hi0 || energy_data[i] < energy_lo0) { - printf("Error with result %" PRIu64 ": %.4f\n", i, energy_data[i]); + printf("Error with result %" PRIu64 ": %.5e\n", i, energy_data[i]); } } printf("Verified: all correct!\n"); diff --git a/src/tests/query_vpic_multi_nopreload1.c b/src/tests/query_vpic_multi_nopreload1.c index 1cd7c56b1..f2c73f68b 100644 --- a/src/tests/query_vpic_multi_nopreload1.c +++ b/src/tests/query_vpic_multi_nopreload1.c @@ -50,7 +50,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); printf("Querying Energy in [%.2f, %.2f]\n", energy_lo0, energy_hi0); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", 
get_sel_time); if (sel.nhits > 0) { energy_data = (float *)calloc(sel.nhits, sizeof(float)); @@ -62,12 +62,12 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); printf("Query result energy data (%" PRIu64 " hits):\n", sel.nhits); for (i = 0; i < sel.nhits; i++) { if (energy_data[i] > energy_hi0 || energy_data[i] < energy_lo0) { - printf("Error with result %" PRIu64 ": %.4f\n", i, energy_data[i]); + printf("Error with result %" PRIu64 ": %.5e\n", i, energy_data[i]); } } printf("Verified: all correct!\n"); diff --git a/src/tests/query_vpic_multi_preload.c b/src/tests/query_vpic_multi_preload.c index 3bb013769..4704b3e1d 100644 --- a/src/tests/query_vpic_multi_preload.c +++ b/src/tests/query_vpic_multi_preload.c @@ -50,7 +50,7 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_sel_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); printf("Querying Energy in [%.2f, %.2f]\n", energy_lo0, energy_hi0); - printf("Get selection time: %.4f\n", get_sel_time); + printf("Get selection time: %.5e\n", get_sel_time); if (sel.nhits > 0) { energy_data = (float *)calloc(sel.nhits, sizeof(float)); @@ -62,12 +62,12 @@ main(void) gettimeofday(&pdc_timer_end, 0); get_data_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); - printf("Get data time: %.4f\n", get_data_time); + printf("Get data time: %.5e\n", get_data_time); printf("Query result energy data (%" PRIu64 " hits):\n", sel.nhits); for (i = 0; i < sel.nhits; i++) { if (energy_data[i] > energy_hi0 || energy_data[i] < energy_lo0) { - printf("Error with result %" PRIu64 ": %.4f\n", i, energy_data[i]); + printf("Error with result %" PRIu64 ": %.5e\n", i, energy_data[i]); } } printf("Verified: all correct!\n"); diff --git a/src/tests/read_obj.c b/src/tests/read_obj.c index aa1fc3c3c..e688219f3 100644 --- a/src/tests/read_obj.c +++ b/src/tests/read_obj.c @@ -208,7 +208,7 @@ main(int argc, char **argv) write_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); if (rank == 0) { - printf("Time to process write data with %d ranks: %.6f\n", size, write_time); + printf("Time to process write data with %d ranks: %.5e\n", size, write_time); fflush(stdout); } @@ -259,7 +259,7 @@ main(int argc, char **argv) write_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); if (rank == 0) { - printf("Time to process read data with %d ranks: %.6f\n", size, write_time); + printf("Time to process read data with %d ranks: %.5e\n", size, write_time); fflush(stdout); } diff --git a/src/tests/search_obj.c b/src/tests/search_obj.c index 2701d729d..465a48b6c 100644 --- a/src/tests/search_obj.c +++ b/src/tests/search_obj.c @@ -233,7 +233,7 @@ main(int argc, char **argv) ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("searched %10d ... %.2f\n", i * size, ht_total_sec); + printf("searched %10d ... 
%.5e\n", i * size, ht_total_sec); fflush(stdout); } @@ -251,8 +251,8 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - /* printf("Time to full query %d obj/rank with %d ranks: %.6f\n\n\n", count, size, ht_total_sec); */ - printf("Time to partial query %d obj/rank with %d ranks: %.6f\n\n\n", count, size, ht_total_sec); + /* printf("Time to full query %d obj/rank with %d ranks: %.5e\n\n\n", count, size, ht_total_sec); */ + printf("Time to partial query %d obj/rank with %d ranks: %.5e\n\n\n", count, size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/search_obj_scale.c b/src/tests/search_obj_scale.c index b964c6bc2..88506ffc7 100644 --- a/src/tests/search_obj_scale.c +++ b/src/tests/search_obj_scale.c @@ -180,7 +180,7 @@ main(int argc, char **argv) ht_total_end.tv_usec - ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; - printf("%10d queried ... %.2fs\n", i * size, ht_total_sec); + printf("%10d queried ... %.5es\n", i * size, ht_total_sec); fflush(stdout); } } @@ -193,7 +193,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to create %d obj/rank with %d ranks: %.6f\n", count, size, ht_total_sec); + printf("Time to create %d obj/rank with %d ranks: %.5e\n", count, size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/stat_obj.c b/src/tests/stat_obj.c index da8a5f927..6a068edc7 100644 --- a/src/tests/stat_obj.c +++ b/src/tests/stat_obj.c @@ -207,7 +207,7 @@ main(int argc, char **argv) ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("stated %10d ... %.2f\n", i * size, ht_total_sec); + printf("stated %10d ... %.5e\n", i * size, ht_total_sec); fflush(stdout); } @@ -225,7 +225,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to stat %d obj/rank with %d ranks: %.6f\n", count, size, ht_total_sec); + printf("Time to stat %d obj/rank with %d ranks: %.5e\n", count, size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/update_obj.c b/src/tests/update_obj.c index a589725ce..b63082361 100644 --- a/src/tests/update_obj.c +++ b/src/tests/update_obj.c @@ -211,7 +211,7 @@ main(int argc, char **argv) ht_total_end.tv_usec - ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; - printf("updated %10d ... %.2f\n", i * size, ht_total_sec); + printf("updated %10d ... 
%.5e\n", i * size, ht_total_sec); fflush(stdout); } } @@ -225,7 +225,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to update %d obj/rank with %d ranks: %.6f\n\n\n", count, size, ht_total_sec); + printf("Time to update %d obj/rank with %d ranks: %.5e\n\n\n", count, size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/vpicio.c b/src/tests/vpicio.c index 1a6692628..081376f43 100644 --- a/src/tests/vpicio.c +++ b/src/tests/vpicio.c @@ -241,7 +241,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t1 = MPI_Wtime(); if (rank == 0) { - printf("Obj create time: %.2f\n", t1 - t0); + printf("Obj create time: %.5e\n", t1 - t0); } #endif @@ -290,7 +290,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t0 = MPI_Wtime(); if (rank == 0) { - printf("Transfer create time: %.2f\n", t0 - t1); + printf("Transfer create time: %.5e\n", t0 - t1); } #endif @@ -339,7 +339,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t1 = MPI_Wtime(); if (rank == 0) { - printf("Transfer start time: %.2f\n", t1 - t0); + printf("Transfer start time: %.5e\n", t1 - t0); } #endif @@ -388,7 +388,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t0 = MPI_Wtime(); if (rank == 0) { - printf("Transfer wait time: %.2f\n", t0 - t1); + printf("Transfer wait time: %.5e\n", t0 - t1); } #endif @@ -438,7 +438,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t1 = MPI_Wtime(); if (rank == 0) { - printf("Transfer close time: %.2f\n", t1 - t0); + printf("Transfer close time: %.5e\n", t1 - t0); } #endif diff --git a/src/tests/vpicio_mts.c b/src/tests/vpicio_mts.c index 110febbfd..eec66215b 100644 --- a/src/tests/vpicio_mts.c +++ b/src/tests/vpicio_mts.c @@ -258,7 +258,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t1 = MPI_Wtime(); if (rank == 0) { - printf("Obj create time: %.2f\n", t1 - t0); + printf("Obj create time: %.5e\n", t1 - t0); } #endif @@ -309,7 +309,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t0 = MPI_Wtime(); if (rank == 0) { - printf("Transfer create time: %.2f\n", t0 - t1); + printf("Transfer create time: %.5e\n", t0 - t1); } #endif @@ -358,7 +358,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t1 = MPI_Wtime(); if (rank == 0) { - printf("Transfer start time: %.2f\n", t1 - t0); + printf("Transfer start time: %.5e\n", t1 - t0); } #endif @@ -407,7 +407,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t0 = MPI_Wtime(); if (rank == 0) { - printf("Transfer wait time: %.2f\n", t0 - t1); + printf("Transfer wait time: %.5e\n", t0 - t1); } #endif @@ -456,7 +456,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t1 = MPI_Wtime(); if (rank == 0) { - printf("Transfer close time: %.2f\n", t1 - t0); + printf("Transfer close time: %.5e\n", t1 - t0); } #endif @@ -498,7 +498,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t0 = MPI_Wtime(); if (rank == 0) { - printf("Obj close time: %.2f\n", t0 - t1); + printf("Obj close time: %.5e\n", t0 - t1); } #endif if (i != steps - 1) { diff --git a/src/tests/vpicio_v2.c b/src/tests/vpicio_v2.c index ee3f3512a..89839352b 100644 --- a/src/tests/vpicio_v2.c +++ b/src/tests/vpicio_v2.c @@ -299,7 +299,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to map with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to map with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } @@ -348,7 
+348,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to lock with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to lock with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } @@ -408,7 +408,7 @@ main(int argc, char **argv) ht_total_start.tv_usec; ht_total_sec = ht_total_elapsed / 1000000.0; if (rank == 0) { - printf("Time to update data with %d ranks: %.6f\n", size, ht_total_sec); + printf("Time to update data with %d ranks: %.5e\n", size, ht_total_sec); fflush(stdout); } diff --git a/src/tests/write_obj_shared.c b/src/tests/write_obj_shared.c index 9fa01107f..d8796bb61 100644 --- a/src/tests/write_obj_shared.c +++ b/src/tests/write_obj_shared.c @@ -209,7 +209,7 @@ main(int argc, char **argv) write_time = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); if (rank == 0) { - printf("Time to lock and release data with %d ranks: %.6f\n", size, write_time); + printf("Time to lock and release data with %d ranks: %.5e\n", size, write_time); fflush(stdout); } done: From e30f7b7d5c5360e23ac1b5849620f8305cd5d091 Mon Sep 17 00:00:00 2001 From: Zhang Wei Date: Thu, 15 Jun 2023 13:57:46 -0400 Subject: [PATCH 190/806] llsm_importer (#1) formatter on llsm_importer --- tools/CMakeLists.txt | 2 ++ tools/llsm/parallelReadTiff.c | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index e15e21992..b14402393 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -102,6 +102,8 @@ add_library(cjson cjson/cJSON.c) # endforeach(program) +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3 -fopenmp -DNDEBUG") + # Find LibTIFF option(USE_LIB_TIFF "Enable LibTiff." ON) if(USE_LIB_TIFF) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 4da885e61..c81928584 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -1,7 +1,7 @@ #include "parallelReadTiff.h" #include "tiffio.h" -// #define ENABLE_OPENMP +#define ENABLE_OPENMP #ifdef ENABLE_OPENMP #include "omp.h" @@ -543,7 +543,7 @@ readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char *fileName, TIFFClose(tif); lseek(fd, offset, SEEK_SET); uint64_t bytes = bits / 8; - //#pragma omp parallel for + // #pragma omp parallel for /* for(uint64_t i = 0; i < z; i++){ uint64_t cOffset = x*y*bytes*i; @@ -784,7 +784,7 @@ void parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_range, image_info_t **image_info) { - uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0, stripeSize = 0, is_imageJ = 0, imageJ_Z = 0; + uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0, stripeSize = 1, is_imageJ = 0, imageJ_Z = 0; get_tiff_info(fileName, strip_range, &x, &y, &z, &bits, &startSlice, &stripeSize, &is_imageJ, &imageJ_Z); From 917b4f5fa8f2b0f6e1d5d344fd55dd503a8c9a0d Mon Sep 17 00:00:00 2001 From: Zhang Wei Date: Thu, 15 Jun 2023 14:07:09 -0400 Subject: [PATCH 191/806] Tiff Parallel Reader sync to latest version (#89) * remove unnecessary install block from CMakeLists.txt * update output * Revert "update output" This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd. * build kvtag_add_get_scale * comment off free * update code * 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. moving work assigning block downwards right before creating objects \n 4. 
everything is tested working * do while loop added, tested with 1m object and works * 1m objects test works, 10m object test fail as the original also fails * add new executable to test set * enlarge PDC_SERVER_ID_INTERVAL * update code * update console args * add p search test * add console arg for changing number of attributes per object * free allocated memory * fix query count issue * fix attr length definition * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * fix data type * fix data type * fix data type * add client side statistics * add client side statistics * fix format * clang formatter * update CMake * update CMake * update CMake * free allocated memory properly * clang format * clang format * clang-format-10 * change file name * address review comments * update llsm importer * update llsm importer * update server checkpoint intervals * update gitignore * adding job scripts * adding one debugging msg * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update output for uint64_t * add scripts * update output for uint64_t * update output for uint64_t * update output for uint64_t * update scripts * update scripts * delete debugging message * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * update tag names * update tag names * update query startingpos * update query startingpos * update job scripts * add progressive timing for kvtag_add_get_scale * fix iteration count in final report * update job scripts and benckmark program * update message format * update message format * update message format * update message format * clang format * update job scripts * comment off object/container close procedure in benchmark to save node hours * change the max number of object to 1M * change the max length of attribute value * change the max length of attribute value * llsm tiff import test * llsm tiff import test * llsm tiff import test * llsm tiff import test * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update cmake and llsm_importer * update cmake and llsm_importer * close if in cmake * cmake fix tiff * cmake policy to suppress warning * add pdc include dir * update code * update code * update code * update code * update code * update code * update array generating method * update array generating method * update array generating method * update array generating method * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * fix return type * fix return type * add timing * add timing * fix output * llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader * fix vairable name * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * add scripts * add scripts * add scripts * debugging for nonMPI program * debugging for nonMPI program * debugging for nonMPI program * clang format, without PDC, everything works perfectly. 
program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created * enable MPI * enable MPI * enlarge BCase size * enlarge BCase size * enlarge BCase size * resolve bcast count * llsm data path in script * llsm data path in script * update csv reader * update csv reader * update csv reader * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * enlarge max write * update pdc * update pdc * update pdc * update pdc * update pdc_import.c * update pdc_import.c * update pdc_export.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update tools/cmake * clang format * clang format * added a tutorial for llsm_importer * added a tutorial for llsm_importer * make sure the line feed is included for string attribute * update timing for overall completion time * update formatting * llsm_importer (#1) formatter on llsm_importer --------- Co-authored-by: Houjun Tang --- tools/CMakeLists.txt | 2 ++ tools/llsm/parallelReadTiff.c | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index e15e21992..b14402393 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -102,6 +102,8 @@ add_library(cjson cjson/cJSON.c) # endforeach(program) +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3 -fopenmp -DNDEBUG") + # Find LibTIFF option(USE_LIB_TIFF "Enable LibTiff." ON) if(USE_LIB_TIFF) diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index 4da885e61..c81928584 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -1,7 +1,7 @@ #include "parallelReadTiff.h" #include "tiffio.h" -// #define ENABLE_OPENMP +#define ENABLE_OPENMP #ifdef ENABLE_OPENMP #include "omp.h" @@ -543,7 +543,7 @@ readTiffParallelImageJ(uint64_t x, uint64_t y, uint64_t z, const char *fileName, TIFFClose(tif); lseek(fd, offset, SEEK_SET); uint64_t bytes = bits / 8; - //#pragma omp parallel for + // #pragma omp parallel for /* for(uint64_t i = 0; i < z; i++){ uint64_t cOffset = x*y*bytes*i; @@ -784,7 +784,7 @@ void parallel_TIFF_load(char *fileName, uint8_t flipXY, parallel_tiff_range_t *strip_range, image_info_t **image_info) { - uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0, stripeSize = 0, is_imageJ = 0, imageJ_Z = 0; + uint64_t x = 1, y = 1, z = 1, bits = 1, startSlice = 0, stripeSize = 1, is_imageJ = 0, imageJ_Z = 0; get_tiff_info(fileName, strip_range, &x, &y, &z, &bits, &startSlice, &stripeSize, &is_imageJ, &imageJ_Z); From 8448043e83094c02da23cd1779b5ba9b0dca70e2 Mon Sep 17 00:00:00 2001 From: Jean Luca Bez Date: Fri, 16 Jun 2023 15:05:31 -0700 Subject: [PATCH 192/806] Update .gitlab-ci.yml --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 27e4d751c..39f940487 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -162,7 +162,7 @@ perlmutter-metrics: tags: - perlmutter variables: - PDC_N_NODES: 4 + PDC_N_NODES: 64 PDC_N_CLIENTS: 127 SCHEDULER_PARAMETERS: "-A m1248 --qos=debug --constraint=cpu --tasks-per-node=${PDC_N_CLIENTS} -N ${PDC_N_NODES} -t 00:30:00" SUPERCOMPUTER: "perlmutter" From 10e0bb1d37a03e428b803e557d753aecaf263833 Mon Sep 17 00:00:00 2001 From: Zhang Wei Date: Mon, 19 Jun 2023 17:11:10 -0400 Subject: [PATCH 193/806] add type for kvtag structure (#2) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding 
type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled --- CMakeLists.txt | 3 +- docs/readme.md | 154 ++--- docs/source/api.rst | 37 +- src/api/CMakeLists.txt | 27 +- src/api/include/pdc_public.h | 78 --- src/api/pdc_client_connect.c | 18 +- src/api/pdc_obj/include/pdc_cont.h | 4 +- src/api/pdc_obj/include/pdc_obj.h | 6 +- src/api/pdc_obj/include/pdc_prop_pkg.h | 7 +- src/api/pdc_obj/pdc_dt_conv.c | 8 +- src/api/pdc_obj/pdc_obj.c | 1 + src/api/profiling/CMakeLists.txt | 120 ---- src/api/profiling/include/pdc_hashtab.h | 198 ------- src/api/profiling/include/pdc_stack_ops.h | 70 --- src/api/profiling/pdc_hashtab.c | 540 ------------------ src/api/profiling/pdc_stack_ops.c | 264 --------- src/commons/CMakeLists.txt | 227 ++++++++ src/commons/generic/include/pdc_generic.h | 219 +++++++ src/commons/serde/include/pdc_serde.h | 155 +++++ src/commons/serde/pdc_serde.c | 346 +++++++++++ src/server/CMakeLists.txt | 5 +- src/server/include/pdc_client_server_common.h | 5 + src/server/pdc_client_server_common.c | 6 +- src/server/pdc_server.c | 7 +- .../pdc_server_region_request_handler.h | 4 +- src/tests/cont_tags.c | 17 +- src/tests/kvtag_add_get.c | 35 +- src/tests/kvtag_add_get_benchmark.c | 5 +- src/tests/kvtag_add_get_scale.c | 8 +- src/tests/kvtag_get.c | 15 +- src/tests/kvtag_query.c | 11 +- src/tests/kvtag_query_scale.c | 4 +- src/tests/obj_tags.c | 17 +- src/tests/pdc_transforms_lib.c | 35 +- src/utils/include/pdc_id_pkg.h | 57 -- src/utils/include/pdc_linkedlist.h | 120 ---- src/utils/include/pdc_malloc.h | 59 -- src/utils/include/pdc_private.h | 202 ------- src/utils/include/pdc_timing.h | 193 ------- src/utils/pdc_interface.c | 2 + src/utils/pdc_malloc.c | 76 --- src/utils/pdc_timing.c | 537 ----------------- tools/pdc_export.c | 41 +- tools/pdc_import.c | 9 +- tools/pdc_ls.c | 40 +- 45 files changed, 1219 insertions(+), 2773 deletions(-) delete mode 100644 src/api/include/pdc_public.h delete mode 100644 src/api/profiling/CMakeLists.txt delete mode 100644 src/api/profiling/include/pdc_hashtab.h delete mode 100644 src/api/profiling/include/pdc_stack_ops.h delete mode 100644 src/api/profiling/pdc_hashtab.c delete mode 100644 src/api/profiling/pdc_stack_ops.c create mode 100644 src/commons/CMakeLists.txt create mode 100644 src/commons/generic/include/pdc_generic.h create mode 100644 src/commons/serde/include/pdc_serde.h create mode 100644 src/commons/serde/pdc_serde.c delete mode 100644 src/utils/include/pdc_id_pkg.h delete mode 100644 src/utils/include/pdc_linkedlist.h delete mode 100644 src/utils/include/pdc_malloc.h delete mode 100644 src/utils/include/pdc_private.h delete mode 100644 src/utils/include/pdc_timing.h delete mode 100644 src/utils/pdc_malloc.c delete mode 100644 src/utils/pdc_timing.c diff --git a/CMakeLists.txt b/CMakeLists.txt index 2e353dbc2..5e7b65d94 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -257,7 +257,7 @@ endif() option(PDC_ENABLE_LUSTRE "Enable Lustre." 
OFF) if(PDC_ENABLE_LUSTRE) set(ENABLE_LUSTRE 1) - set(PDC_LUSTRE_TOTAL_OST "248" CACHE STRING "Number of Lustre OSTs") + set(PDC_LUSTRE_TOTAL_OST "256" CACHE STRING "Number of Lustre OSTs") endif() #----------------------------------------------------------------------------- @@ -384,6 +384,7 @@ configure_file( #----------------------------------------------------------------------------- # Source #----------------------------------------------------------------------------- +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src/commons) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src/api) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src/server) diff --git a/docs/readme.md b/docs/readme.md index df19eba94..74be2d0e4 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -1,58 +1,67 @@ # PDC Documentations - + [PDC user APIs](#pdc-user-apis) - - [PDC general APIs](#pdc-general-apis) - - [PDC container APIs](#pdc-container-apis) - - [PDC object APIs](#pdc-object-apis) - - [PDC region APIs](#pdc-region-apis) - - [PDC property APIs](#pdc-property-apis) - - [PDC query APIs](#pdc-query-apis) - + [PDC data types](#PDC-type-categories) - - [Basic types](#basic-types) - - [Histogram structure](#histogram-structure) - - [Container info](#container-info) - - [Container life time](#container-life-time) - - [Object property](#object-property) - - [Object info](#object-info) - - [Object structure](#object-structure) - - [Region info](#region-info) - - [Access type](#access-type) - - [Transfer request status](#transfer-request-status) - - [Query operators](#query-operators) - - [Query structures](#query-structures) - - [Selection structure](#selection-structure) - + [Developers notes](#developers-notes) - - [How to implement an RPC from client to server](#how-to-implement-an-rpc-from-client-to-server) - - [PDC Server metadata overview](#pdc-server-metadata-overview) - + [PDC metadata structure](#pdc-metadata-structure) - + [Metadata operations at client side](#metadata-operations-at-client-side) - - [PDC metadata management strategy](#pdc-metadata-management-strategy) - + [Managing metadata and data by the same server](#managing-metadata-and-data-by-the-same-server) - + [Separate metadata server from data server](#separate-metadata-server-from-data-server) - + [Static object region mappings](#static-object-region-mappings) - + [Dynamic object region mappings](#dynamic-object-region-mappings) - - [PDC metadata management implementation](#pdc-metadata-management-implementation) - + [Create metadata](#create-metadata) - + [Binding metadata to object](#binding-metadata-to-object) - + [Register object metadata at metadata server](#register-object-metadata-at-metadata-server) - + [Retrieve metadata from metadata server](#retrieve-metadata-from-metadata-server) - + [Object metadata at client](#object-metadata-at-client) - + [Metadata at data server](#metadata-at-data-server) - + [Object metadata update](#object-metadata-update) - + [Object region metadata](#object-region-metadata) - + [Metadata checkpoint](#object-metadata-update) - - [Region transfer request at client](#region-transfer-request-at-client) - + [Region transfer request create and close](#region-transfer-request-create-and-close) - + [Region transfer request start](#region-transfer-request-start) - + [Region transfer request wait](#region-transfer-request-wait) - - [Region transfer request at server](#region-transfer-request-at-server) - + [Server region transfer request RPC](#server-region-transfer-request-rpc) - - [Server nonblocking 
control](#server-nonblocking-control) - - [Server region transfer request start](#server-region-transfer-request-start) - - [Server region transfer request wait](#server-region-transfer-request-wait) - + [Server region storage](#server-region-storage) - - [Storage by file offset](#storage-by-file-offset) - - [Storage by region](#storage-by-region) - - [Open tasks for PDC](#open-tasks-for-pdc) +- [PDC Documentations](#pdc-documentations) +- [PDC user APIs](#pdc-user-apis) + - [PDC general APIs](#pdc-general-apis) + - [PDC container APIs](#pdc-container-apis) + - [PDC object APIs](#pdc-object-apis) + - [PDC region APIs](#pdc-region-apis) + - [PDC property APIs](#pdc-property-apis) + - [PDC query APIs](#pdc-query-apis) + - [PDC hist APIs](#pdc-hist-apis) +- [PDC Data types](#pdc-data-types) + - [Basic types](#basic-types) + - [region transfer partition type](#region-transfer-partition-type) + - [Object consistency semantics type](#object-consistency-semantics-type) + - [Histogram structure](#histogram-structure) + - [Container info](#container-info) + - [Container life time](#container-life-time) + - [Object property public](#object-property-public) + - [Object property](#object-property) + - [Object info](#object-info) + - [Object structure](#object-structure) + - [Region info](#region-info) + - [Access type](#access-type) + - [Transfer request status](#transfer-request-status) + - [Query operators](#query-operators) + - [Query structures](#query-structures) + - [Selection structure](#selection-structure) +- [Developers notes](#developers-notes) + - [How to implement an RPC from client to server](#how-to-implement-an-rpc-from-client-to-server) + - [PDC Server metadata overview](#pdc-server-metadata-overview) + - [PDC metadata structure](#pdc-metadata-structure) + - [Metadata operations at client side](#metadata-operations-at-client-side) + - [PDC metadata management strategy](#pdc-metadata-management-strategy) + - [Managing metadata and data by the same server](#managing-metadata-and-data-by-the-same-server) + - [Separate metadata server from data server](#separate-metadata-server-from-data-server) + - [Static object region mappings](#static-object-region-mappings) + - [Dynamic object region mappings](#dynamic-object-region-mappings) + - [PDC metadata management implementation](#pdc-metadata-management-implementation) + - [Create metadata](#create-metadata) + - [Binding metadata to object](#binding-metadata-to-object) + - [Register object metadata at metadata server](#register-object-metadata-at-metadata-server) + - [Retrieve metadata from metadata server](#retrieve-metadata-from-metadata-server) + - [Object metadata at client](#object-metadata-at-client) + - [Metadata at data server](#metadata-at-data-server) + - [Object metadata update](#object-metadata-update) + - [Object region metadata](#object-region-metadata) + - [Metadata checkpoint](#metadata-checkpoint) + - [Region transfer request at client](#region-transfer-request-at-client) + - [Region transfer request create and close](#region-transfer-request-create-and-close) + - [Region transfer request start](#region-transfer-request-start) + - [Region transfer request wait](#region-transfer-request-wait) + - [Region transfer request at server](#region-transfer-request-at-server) + - [Server region transfer request RPC](#server-region-transfer-request-rpc) + - [Server nonblocking control](#server-nonblocking-control) + - [Server region transfer request start](#server-region-transfer-request-start) + - [Server region transfer request 
wait](#server-region-transfer-request-wait) + - [Server region storage](#server-region-storage) + - [Storage by file offset](#storage-by-file-offset) + - [Storage by region](#storage-by-region) + - [Open tasks for PDC](#open-tasks-for-pdc) + - [Replacing individual modules with efficient Hash table data structures](#replacing-individual-modules-with-efficient-hash-table-data-structures) + - [Restarting pdc\_server.exe with different numbers of servers](#restarting-pdc_serverexe-with-different-numbers-of-servers) + - [Fast region search mechanisms](#fast-region-search-mechanisms) + - [Merge overlapping regions](#merge-overlapping-regions) # PDC user APIs ## PDC general APIs + pdcid_t PDCinit(const char *pdc_name) @@ -683,21 +692,28 @@ ## Basic types ``` typedef enum { - PDC_UNKNOWN = -1, /* error */ - PDC_INT = 0, /* integer types */ - PDC_FLOAT = 1, /* floating-point types */ - PDC_DOUBLE = 2, /* double types */ - PDC_CHAR = 3, /* character types */ - PDC_COMPOUND = 4, /* compound types */ - PDC_ENUM = 5, /* enumeration types */ - PDC_ARRAY = 6, /* Array types */ - PDC_UINT = 7, /* unsigned integer types */ - PDC_INT64 = 8, /* 64-bit integer types */ - PDC_UINT64 = 9, /* 64-bit unsigned integer types */ - PDC_INT16 = 10, - PDC_INT8 = 11, - NCLASSES = 12 /* this must be last */ - } pdc_var_type_t; + PDC_UNKNOWN = -1, /* error */ + PDC_INT = 0, /* integer types (identical to int32_t) */ + PDC_FLOAT = 1, /* floating-point types */ + PDC_DOUBLE = 2, /* double types */ + PDC_CHAR = 3, /* character types */ + PDC_STRING = 4, /* string types */ + PDC_BOOLEAN = 5, /* boolean types */ + PDC_SHORT = 6, /* short types */ + PDC_UINT = 7, /* unsigned integer types (identical to uint32_t) */ + PDC_INT64 = 8, /* 64-bit integer types */ + PDC_UINT64 = 9, /* 64-bit unsigned integer types */ + PDC_INT16 = 10, /* 16-bit integer types */ + PDC_INT8 = 11, /* 8-bit integer types */ + PDC_UINT8 = 12, /* 8-bit unsigned integer types */ + PDC_UINT16 = 13, /* 16-bit unsigned integer types */ + PDC_INT32 = 14, /* 32-bit integer types */ + PDC_UINT32 = 15, /* 32-bit unsigned integer types */ + PDC_LONG = 16, /* long types */ + PDC_VOID_PTR = 17, /* void pointer type */ + PDC_SIZE_T = 18, /* size_t type */ + PDC_TYPE_COUNT = 19 /* this is the number of var types and has to be the last */ + } pdc_c_var_type_t; ``` ## region transfer partition type ``` diff --git a/docs/source/api.rst b/docs/source/api.rst index e9b1e6567..ab058f10a 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -471,21 +471,28 @@ Basic types .. 
code-block:: c typedef enum { - PDC_UNKNOWN = -1, /* error */ - PDC_INT = 0, /* integer types */ - PDC_FLOAT = 1, /* floating-point types */ - PDC_DOUBLE = 2, /* double types */ - PDC_CHAR = 3, /* character types */ - PDC_COMPOUND = 4, /* compound types */ - PDC_ENUM = 5, /* enumeration types */ - PDC_ARRAY = 6, /* Array types */ - PDC_UINT = 7, /* unsigned integer types */ - PDC_INT64 = 8, /* 64-bit integer types */ - PDC_UINT64 = 9, /* 64-bit unsigned integer types */ - PDC_INT16 = 10, - PDC_INT8 = 11, - NCLASSES = 12 /* this must be last */ - } pdc_var_type_t; + PDC_UNKNOWN = -1, /* error */ + PDC_INT = 0, /* integer types (identical to int32_t) */ + PDC_FLOAT = 1, /* floating-point types */ + PDC_DOUBLE = 2, /* double types */ + PDC_CHAR = 3, /* character types */ + PDC_STRING = 4, /* string types */ + PDC_BOOLEAN = 5, /* boolean types */ + PDC_SHORT = 6, /* short types */ + PDC_UINT = 7, /* unsigned integer types (identical to uint32_t) */ + PDC_INT64 = 8, /* 64-bit integer types */ + PDC_UINT64 = 9, /* 64-bit unsigned integer types */ + PDC_INT16 = 10, /* 16-bit integer types */ + PDC_INT8 = 11, /* 8-bit integer types */ + PDC_UINT8 = 12, /* 8-bit unsigned integer types */ + PDC_UINT16 = 13, /* 16-bit unsigned integer types */ + PDC_INT32 = 14, /* 32-bit integer types */ + PDC_UINT32 = 15, /* 32-bit unsigned integer types */ + PDC_LONG = 16, /* long types */ + PDC_VOID_PTR = 17, /* void pointer type */ + PDC_SIZE_T = 18, /* size_t type */ + PDC_TYPE_COUNT = 19 /* this is the number of var types and has to be the last */ + } pdc_c_var_type_t; diff --git a/src/api/CMakeLists.txt b/src/api/CMakeLists.txt index 6d4e0b722..7ef5ec186 100644 --- a/src/api/CMakeLists.txt +++ b/src/api/CMakeLists.txt @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------ # Include source and build directories #------------------------------------------------------------------------------ -set( LOCAL_INCLUDE_DIR +set(LOCAL_INCLUDE_DIR ${PDC_INCLUDES_BUILD_TIME} ${PROJECT_BINARY_DIR} ${PDC_SOURCE_DIR} @@ -32,9 +32,9 @@ include_directories( # External dependencies #------------------------------------------------------------------------------ # profiling -#set(PDC_EXT_LIB_DEPENDENCIES pdcprof ${PDC_EXT_LIB_DEPENDENCIES}) -set(PDC_EXT_INCLUDE_DEPENDENCIES ${CMAKE_CURRENT_SOURCE_DIR}/profiling) -set(PDC_EXPORTED_LIBS pdcprof) +# set(PDC_EXT_LIB_DEPENDENCIES pdcprof ${PDC_EXT_LIB_DEPENDENCIES}) +# set(PDC_EXT_INCLUDE_DEPENDENCIES ${CMAKE_CURRENT_SOURCE_DIR}/profiling) +# set(PDC_EXPORTED_LIBS pdcprof) # Mercury find_package(MERCURY REQUIRED) @@ -87,22 +87,24 @@ set(PDC_SRCS ${PDC_SOURCE_DIR}/src/server/pdc_server_region/pdc_server_region_transfer.c ${PDC_SOURCE_DIR}/src/server/pdc_server_region/pdc_server_region_cache.c ${PDC_SOURCE_DIR}/src/server/pdc_server_region/pdc_server_region_transfer_metadata_query.c - ${PDC_SOURCE_DIR}/src/utils/pdc_timing.c - ${PDC_SOURCE_DIR}/src/utils/pdc_malloc.c ${PDC_SOURCE_DIR}/src/utils/pdc_interface.c ${PDC_SOURCE_DIR}/src/utils/pdc_region_utils.c ) - add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/profiling) + set(PDC_COMMON_INCLUDE_DIRS ${PDC_COMMON_INCLUDE_DIRS}) + # add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/profiling) #------------------------------------------------------------------------------ # Libraries #------------------------------------------------------------------------------ # PDC set(PDC_BUILD_INCLUDE_DEPENDENCIES + ${PDC_COMMON_INCLUDE_DIRS} ${LOCAL_INCLUDE_DIR} ) +message(STATUS "PDC_BUILD_INCLUDE_DEPENDENCIES: 
${PDC_BUILD_INCLUDE_DEPENDENCIES}") + add_library(pdc ${PDC_SRCS}) target_include_directories(pdc @@ -110,7 +112,12 @@ target_include_directories(pdc $ ) +message(STATUS "PDC_EXPORTED_LIBS: ${PDC_EXPORTED_LIBS}") +message(STATUS "PDC_EXT_LIB_DEPENDENCIES: ${PDC_EXT_LIB_DEPENDENCIES}") +message(STATUS "PDC_COMMONS_LIBRARIES: ${PDC_COMMONS_LIBRARIES}") + target_link_libraries(pdc + ${PDC_COMMONS_LIBRARIES} ${PDC_EXPORTED_LIBS} ${PDC_EXT_LIB_DEPENDENCIES} -ldl @@ -122,7 +129,7 @@ set(PDC_EXPORTED_LIBS pdc ${PDC_EXPORTED_LIBS}) add_executable(close_server close_server.c ) -target_link_libraries(close_server pdc) +target_link_libraries(close_server pdc ${PDC_COMMON_LIBRARIES}) install( TARGETS @@ -145,6 +152,7 @@ set(PDC_HEADERS ${PDC_SOURCE_DIR}/src/api/pdc_query/include/pdc_query.h ${PDC_SOURCE_DIR}/src/api/pdc_region/include/pdc_region.h ${PDC_SOURCE_DIR}/src/api/pdc_transform/include/pdc_transform.h + ${PDC_SOURCE_DIR}/src/utils/include/pdc_interface.h ${PROJECT_BINARY_DIR}/pdc_config_sys.h ${PROJECT_BINARY_DIR}/pdc_config.h ) @@ -173,9 +181,10 @@ install( #----------------------------------------------------------------------------- # Add Target(s) to CMake Install #----------------------------------------------------------------------------- + install( TARGETS - pdc + pdc EXPORT ${PDC_EXPORTED_TARGETS} LIBRARY DESTINATION ${PDC_INSTALL_LIB_DIR} diff --git a/src/api/include/pdc_public.h b/src/api/include/pdc_public.h deleted file mode 100644 index 8c47976f0..000000000 --- a/src/api/include/pdc_public.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright Notice for - * Proactive Data Containers (PDC) Software Library and Utilities - * ----------------------------------------------------------------------------- - - *** Copyright Notice *** - - * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the - * University of California, through Lawrence Berkeley National Laboratory, - * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF - * Group (subject to receipt of any required approvals from the U.S. Dept. of - * Energy). All rights reserved. - - * If you have questions about your rights to use or distribute this software, - * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. - - * NOTICE. This Software was developed under funding from the U.S. Department of - * Energy and the U.S. Government consequently retains certain rights. As such, the - * U.S. Government has been granted for itself and others acting on its behalf a - * paid-up, nonexclusive, irrevocable, worldwide license in the Software to - * reproduce, distribute copies to the public, prepare derivative works, and - * perform publicly and display publicly, and to permit other to do so. 
- */ - -#ifndef PDC_PUBLIC_H -#define PDC_PUBLIC_H - -#include -#include -#include - -/*******************/ -/* Public Typedefs */ -/*******************/ -typedef int perr_t; -typedef uint64_t pdcid_t; -typedef unsigned long long psize_t; -typedef bool pbool_t; - -typedef int PDC_int_t; -typedef float PDC_float_t; -typedef double PDC_double_t; - -typedef enum { - PDC_UNKNOWN = -1, /* error */ - PDC_INT = 0, /* integer types */ - PDC_FLOAT = 1, /* floating-point types */ - PDC_DOUBLE = 2, /* double types */ - PDC_CHAR = 3, /* character types */ - PDC_COMPOUND = 4, /* compound types */ - PDC_ENUM = 5, /* enumeration types */ - PDC_ARRAY = 6, /* Array types */ - PDC_UINT = 7, /* unsigned integer types */ - PDC_INT64 = 8, /* 64-bit integer types */ - PDC_UINT64 = 9, /* 64-bit unsigned integer types */ - PDC_INT16 = 10, - PDC_INT8 = 11, - PDC_UINT8 = 12, - PDC_UINT16 = 13, - NCLASSES = 14 /* this must be last */ -} pdc_var_type_t; - -typedef enum { PDC_PERSIST, PDC_TRANSIENT } pdc_lifetime_t; - -typedef enum { PDC_SERVER_DEFAULT = 0, PDC_SERVER_PER_CLIENT = 1 } pdc_server_selection_t; - -typedef struct pdc_histogram_t { //????????? - pdc_var_type_t dtype; - int nbin; - double incr; - double * range; - uint64_t * bin; -} pdc_histogram_t; - -#define SUCCEED 0 -#define FAIL (-1) - -#endif /* PDC_PUBLIC_H */ diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index bdf967885..b6b969f8a 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -33,6 +33,7 @@ #include "pdc_utlist.h" #include "pdc_id_pkg.h" +#include "pdc_cont_pkg.h" #include "pdc_prop_pkg.h" #include "pdc_obj_pkg.h" #include "pdc_cont.h" @@ -7018,6 +7019,7 @@ PDC_add_kvtag(pdcid_t obj_id, pdc_kvtag_t *kvtag, int is_cont) if (kvtag != NULL && kvtag != NULL && kvtag->size != 0) { in.kvtag.name = kvtag->name; in.kvtag.value = kvtag->value; + in.kvtag.type = kvtag->type; in.kvtag.size = kvtag->size; } else @@ -7061,6 +7063,7 @@ metadata_get_kvtag_rpc_cb(const struct hg_cb_info *callback_info) client_lookup_args->ret = output.ret; client_lookup_args->kvtag->name = strdup(output.kvtag.name); client_lookup_args->kvtag->size = output.kvtag.size; + client_lookup_args->kvtag->type = output.kvtag.type; client_lookup_args->kvtag->value = malloc(output.kvtag.size); memcpy(client_lookup_args->kvtag->value, output.kvtag.value, output.kvtag.size); /* PDC_kvtag_dup(&(output.kvtag), &client_lookup_args->kvtag); */ @@ -7315,10 +7318,12 @@ PDC_Client_query_kvtag_server(uint32_t server_id, const pdc_kvtag_t *kvtag, int if (kvtag->value == NULL) { in.value = " "; + in.type = PDC_STRING; in.size = 1; } else { in.value = kvtag->value; + in.type = kvtag->type; in.size = kvtag->size; } @@ -7576,7 +7581,7 @@ PDCcont_get_objids(pdcid_t cont_id ATTRIBUTE(unused), int *nobj ATTRIBUTE(unused } perr_t -PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, psize_t value_size) +PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, psize_t value_size) { perr_t ret_value = SUCCEED; pdc_kvtag_t kvtag; @@ -7585,6 +7590,7 @@ PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, psize_t value_ kvtag.name = tag_name; kvtag.value = (void *)tag_value; + kvtag.type = value_type; kvtag.size = (uint64_t)value_size; ret_value = PDC_add_kvtag(cont_id, &kvtag, 1); @@ -7598,7 +7604,7 @@ PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, psize_t value_ } perr_t -PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, psize_t *value_size) 
+PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, psize_t *value_size) { perr_t ret_value = SUCCEED; pdc_kvtag_t *kvtag = NULL; @@ -7610,6 +7616,7 @@ PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, psize_t *valu PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: Error with PDC_get_kvtag", pdc_client_mpi_rank_g); *tag_value = kvtag->value; + *value_type = kvtag->type; *value_size = kvtag->size; done: @@ -7772,7 +7779,7 @@ PDC_Client_del_metadata(pdcid_t obj_id, int is_cont) } perr_t -PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, psize_t value_size) +PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, psize_t value_size) { perr_t ret_value = SUCCEED; pdc_kvtag_t kvtag; @@ -7781,6 +7788,7 @@ PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, psize_t value_si kvtag.name = tag_name; kvtag.value = (void *)tag_value; + kvtag.type = value_type; kvtag.size = (uint64_t)value_size; ret_value = PDC_add_kvtag(obj_id, &kvtag, 0); @@ -7793,7 +7801,8 @@ PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, psize_t value_si } perr_t -PDCobj_get_tag(pdcid_t obj_id, char *tag_name, void **tag_value, psize_t *value_size) +PDCobj_get_tag(pdcid_t obj_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, + psize_t *value_size) { perr_t ret_value = SUCCEED; pdc_kvtag_t *kvtag = NULL; @@ -7805,6 +7814,7 @@ PDCobj_get_tag(pdcid_t obj_id, char *tag_name, void **tag_value, psize_t *value_ PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: Error with PDC_get_kvtag", pdc_client_mpi_rank_g); *tag_value = kvtag->value; + *value_type = kvtag->type; *value_size = kvtag->size; done: diff --git a/src/api/pdc_obj/include/pdc_cont.h b/src/api/pdc_obj/include/pdc_cont.h index 844b15425..3a6180b65 100644 --- a/src/api/pdc_obj/include/pdc_cont.h +++ b/src/api/pdc_obj/include/pdc_cont.h @@ -191,7 +191,7 @@ perr_t PDCcont_del(pdcid_t cont_id); * * \return Non-negative on success/Negative on failure */ -perr_t PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, psize_t value_size); +perr_t PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, psize_t value_size); /** * *********** @@ -203,7 +203,7 @@ perr_t PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, psize_t * * \return Non-negative on success/Negative on failure */ -perr_t PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, psize_t *value_size); +perr_t PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, psize_t *value_size); /** * Deleta a tag from a container diff --git a/src/api/pdc_obj/include/pdc_obj.h b/src/api/pdc_obj/include/pdc_obj.h index f678adf7f..8ad7a285a 100644 --- a/src/api/pdc_obj/include/pdc_obj.h +++ b/src/api/pdc_obj/include/pdc_obj.h @@ -409,7 +409,8 @@ perr_t PDCobj_del(pdcid_t obj_id); * * \return Non-negative on success/Negative on failure */ -perr_t PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, psize_t value_size); +perr_t PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, + psize_t value_size); /** * Get tag information @@ -421,7 +422,8 @@ perr_t PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, psize_t v * * \return Non-negative on success/Negative on failure */ -perr_t PDCobj_get_tag(pdcid_t obj_id, char *tag_name, void **tag_value, psize_t *value_size); +perr_t PDCobj_get_tag(pdcid_t obj_id, char *tag_name, void **tag_value, 
pdc_var_type_t *value_type, + psize_t *value_size); /** * Delete a tag from the object diff --git a/src/api/pdc_obj/include/pdc_prop_pkg.h b/src/api/pdc_obj/include/pdc_prop_pkg.h index db73120ac..52d80efa1 100644 --- a/src/api/pdc_obj/include/pdc_prop_pkg.h +++ b/src/api/pdc_obj/include/pdc_prop_pkg.h @@ -37,9 +37,10 @@ struct _pdc_cont_prop { }; typedef struct pdc_kvtag_t { - char * name; - uint32_t size; - void * value; + char * name; + uint32_t size; + pdc_var_type_t type; + void * value; } pdc_kvtag_t; struct _pdc_transform_state { diff --git a/src/api/pdc_obj/pdc_dt_conv.c b/src/api/pdc_obj/pdc_dt_conv.c index 0806919ee..f18e4aa43 100644 --- a/src/api/pdc_obj/pdc_dt_conv.c +++ b/src/api/pdc_obj/pdc_dt_conv.c @@ -34,10 +34,10 @@ PDC_UNKNOWN = -1, PDC_INT = 0, PDC_FLOAT = 1, PDC_DOUBLE = 2, -PDC_STRING = 3, -PDC_COMPOUND = 4, -PDC_ENUM = 5, -PDC_ARRAY = 6, +PDC_CHAR = 3, +PDC_STRING = 4, +PDC_BOOLEAN = 5, +PDC_SHORT = 6, */ /* Called if overflow is possible */ diff --git a/src/api/pdc_obj/pdc_obj.c b/src/api/pdc_obj/pdc_obj.c index 073ece24c..d402782f9 100644 --- a/src/api/pdc_obj/pdc_obj.c +++ b/src/api/pdc_obj/pdc_obj.c @@ -27,6 +27,7 @@ #include "pdc_malloc.h" #include "pdc_id_pkg.h" #include "pdc_cont.h" +#include "pdc_cont_pkg.h" #include "pdc_prop_pkg.h" #include "pdc_obj_pkg.h" #include "pdc_obj.h" diff --git a/src/api/profiling/CMakeLists.txt b/src/api/profiling/CMakeLists.txt deleted file mode 100644 index 1b2ee8254..000000000 --- a/src/api/profiling/CMakeLists.txt +++ /dev/null @@ -1,120 +0,0 @@ -#------------------------------------------------------------------------------ -# Include source and build directories -#------------------------------------------------------------------------------ -include_directories( - ${PDC_INCLUDES_BUILD_TIME} - ${PROJECT_SOURCE_DIR} - ${PROJECT_BINARY_DIR} - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_BINARY_DIR} - ${PDC_SOURCE_DIR}/src/server/include - ${PDC_SOURCE_DIR}/src/server/pdc_server_region/include - ${PDC_SOURCE_DIR}/src/server/dablooms - ${PDC_SOURCE_DIR}/src/api/include - ${PDC_SOURCE_DIR}/src/api/pdc_obj/include - ${PDC_SOURCE_DIR}/src/api/pdc_region/include - ${PDC_SOURCE_DIR}/src/api/pdc_query/include - ${PDC_SOURCE_DIR}/src/api/pdc_transform/include - ${PDC_SOURCE_DIR}/src/api/pdc_analysis/include - ${PDC_SOURCE_DIR}/src/api/profiling/include - ${PDC_SOURCE_DIR}/src/utils/include - ${MERCURY_INCLUDE_DIR} - ${FASTBIT_INCLUDE_DIR} -) - -install( - FILES - ${CMAKE_BINARY_DIR}/pdc_config.h - DESTINATION - ${PDC_INSTALL_INCLUDE_DIR} - COMPONENT - headers -) - -#------------------------------------------------------------------------------ -# Options -#------------------------------------------------------------------------------ -#add_definitions(-DPDC_ENABLE_MPI=1) -#add_definitions(-DPDC_TIMING=1) -#add_definitions(-DPDC_ENABLE_CHECKPOINT=1) -#add_definitions(-DENABLE_MULTITHREAD=1) - -#------------------------------------------------------------------------------ -# Configure module header files -#------------------------------------------------------------------------------ -# Set unique vars used in the autogenerated config file (symbol import/export) -if(BUILD_SHARED_LIBS) - set(PDC_BUILD_SHARED_LIBS 1) - set(PDC_LIBTYPE SHARED) -else() - set(PDC_BUILD_SHARED_LIBS 0) - set(PDC_LIBTYPE STATIC) -endif() - -#------------------------------------------------------------------------------ -# Set sources -#------------------------------------------------------------------------------ -set(PDC_PROF_SRCS - 
${CMAKE_CURRENT_SOURCE_DIR}/pdc_hashtab.c - ${CMAKE_CURRENT_SOURCE_DIR}/pdc_stack_ops.c - ) - -#------------------------------------------------------------------------------ -# Libraries -#------------------------------------------------------------------------------ -# PDCPROF -add_library(pdcprof ${PDC_PROF_SRCS}) -pdc_set_lib_options(pdcprof "pdcprof" ${PDC_LIBTYPE}) - -set(PDC_EXPORTED_LIBS pdcprof ${PDC_EXPORTED_LIBS}) - -#----------------------------------------------------------------------------- -# Specify project header files to be installed -#----------------------------------------------------------------------------- -set(PDC_PROF_HEADERS - ${CMAKE_CURRENT_SOURCE_DIR}/include/pdc_hashtab.h - ${CMAKE_CURRENT_SOURCE_DIR}/include/pdc_stack_ops.h - ) - -#----------------------------------------------------------------------------- -# Add file(s) to CMake Install -#----------------------------------------------------------------------------- -install( - FILES - ${PDC_PROF_HEADERS} - DESTINATION - ${PDC_INSTALL_INCLUDE_DIR} - COMPONENT - headers -) - -#----------------------------------------------------------------------------- -# Add Target(s) to CMake Install -#----------------------------------------------------------------------------- -install( - TARGETS - pdcprof - EXPORT - ${PDC_EXPORTED_TARGETS} - LIBRARY DESTINATION ${PDC_INSTALL_LIB_DIR} - ARCHIVE DESTINATION ${PDC_INSTALL_LIB_DIR} - RUNTIME DESTINATION ${PDC_INSTALL_BIN_DIR} -) - -#------------------------------------------------------------------------------ -# Set variables for parent scope -#------------------------------------------------------------------------------ -# Used by config.cmake.build.in and Testing -set(PDC_INCLUDES_BUILD_TIME - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_BINARY_DIR} - ${PDC_EXT_INCLUDE_DEPENDENCIES} - PARENT_SCOPE -) - -# Used by config.cmake.install.in -set(PDC_INCLUDES_INSTALL_TIME - ${PDC_INSTALL_INCLUDE_DIR} - ${PDC_EXT_INCLUDE_DEPENDENCIES} - PARENT_SCOPE -) diff --git a/src/api/profiling/include/pdc_hashtab.h b/src/api/profiling/include/pdc_hashtab.h deleted file mode 100644 index a664a8ce6..000000000 --- a/src/api/profiling/include/pdc_hashtab.h +++ /dev/null @@ -1,198 +0,0 @@ -/* An expandable hash tables datatype. - Copyright (C) 1999-2017 Free Software Foundation, Inc. - Contributed by Vladimir Makarov (vmakarov@cygnus.com). - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ - -/* This package implements basic hash table functionality. It is possible - to search for an entry, create an entry and destroy an entry. - - Elements in the table are generic pointers. - - The size of the table is not fixed; if the occupancy of the table - grows too high the hash table will be expanded. - - The abstract data implementation is based on generalized Algorithm D - from Knuth's book "The art of computer programming". 
Hash table is - expanded by creation of new hash table and transferring elements from - the old table to the new table. */ - -#ifndef __HASHTAB_H__ -#define __HASHTAB_H__ - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/* #include "ansidecl.h" */ -#define PTR void * - -/* The type for a hash code. */ -typedef unsigned int hashval_t; - -/* Callback function pointer types. */ - -/* Calculate hash of a table entry. */ -typedef hashval_t (*htab_hash)(const void *); - -/* Compare a table entry with a possible entry. The entry already in - the table always comes first, so the second element can be of a - different type (but in this case htab_find and htab_find_slot - cannot be used; instead the variants that accept a hash value - must be used). */ -typedef int (*htab_eq)(const void *, const void *); - -/* Cleanup function called whenever a live element is removed from - the hash table. */ -typedef void (*htab_del)(void *); - -/* Function called by htab_traverse for each live element. The first - arg is the slot of the element (which can be passed to htab_clear_slot - if desired), the second arg is the auxiliary pointer handed to - htab_traverse. Return 1 to continue scan, 0 to stop. */ -typedef int (*htab_trav)(void **, void *); - -/* Memory-allocation function, with the same functionality as calloc(). - Iff it returns NULL, the hash table implementation will pass an error - code back to the user, so if your code doesn't handle errors, - best if you use xcalloc instead. */ -typedef void *(*htab_alloc)(size_t, size_t); - -/* We also need a free() routine. */ -typedef void (*htab_free)(void *); - -/* Memory allocation and deallocation; variants which take an extra - argument. */ -typedef void *(*htab_alloc_with_arg)(void *, size_t, size_t); -typedef void (*htab_free_with_arg)(void *, void *); - -/* This macro defines reserved value for empty table entry. */ - -#define HTAB_EMPTY_ENTRY ((PTR)0) - -/* This macro defines reserved value for table entry which contained - a deleted element. */ - -#define HTAB_DELETED_ENTRY ((PTR)1) - -/* Hash tables are of the following type. The structure - (implementation) of this type is not needed for using the hash - tables. All work with hash table should be executed only through - functions mentioned below. The size of this structure is subject to - change. */ - -struct htab { - /* Pointer to hash function. */ - htab_hash hash_f; - - /* Pointer to comparison function. */ - htab_eq eq_f; - - /* Pointer to cleanup function. */ - htab_del del_f; - - /* Table itself. */ - void **entries; - - /* Current size (in entries) of the hash table. */ - size_t size; - - /* Current number of elements including also deleted elements. */ - size_t n_elements; - - /* Current number of deleted elements in the table. */ - size_t n_deleted; - - /* The following member is used for debugging. Its value is number - of all calls of `htab_find_slot' for the hash table. */ - unsigned int searches; - - /* The following member is used for debugging. Its value is number - of collisions fixed for time of work with the hash table. */ - unsigned int collisions; - - /* Pointers to allocate/free functions. */ - htab_alloc alloc_f; - htab_free free_f; - - /* Alternate allocate/free functions, which take an extra argument. */ - void * alloc_arg; - htab_alloc_with_arg alloc_with_arg_f; - htab_free_with_arg free_with_arg_f; - - /* Current size (in entries) of the hash table, as an index into the - table of primes. 
*/ - unsigned int size_prime_index; -}; - -typedef struct htab *htab_t; - -/* An enum saying whether we insert into the hash table or not. */ -enum insert_option { NO_INSERT, INSERT }; - -/* The prototypes of the package functions. */ - -extern htab_t htab_create_alloc(size_t, htab_hash, htab_eq, htab_del, htab_alloc, htab_free); - -extern htab_t htab_create_alloc_ex(size_t, htab_hash, htab_eq, htab_del, void *, htab_alloc_with_arg, - htab_free_with_arg); - -extern htab_t htab_create_typed_alloc(size_t, htab_hash, htab_eq, htab_del, htab_alloc, htab_alloc, - htab_free); - -/* Backward-compatibility functions. */ -extern htab_t htab_create(size_t, htab_hash, htab_eq, htab_del); -extern htab_t htab_try_create(size_t, htab_hash, htab_eq, htab_del); - -extern void htab_set_functions_ex(htab_t, htab_hash, htab_eq, htab_del, void *, htab_alloc_with_arg, - htab_free_with_arg); - -extern void htab_delete(htab_t); -extern void htab_empty(htab_t); - -extern void * htab_find(htab_t, const void *); -extern void **htab_find_slot(htab_t, const void *, enum insert_option); -extern void * htab_find_with_hash(htab_t, const void *, hashval_t); -extern void **htab_find_slot_with_hash(htab_t, const void *, hashval_t, enum insert_option); -extern void htab_clear_slot(htab_t, void **); -extern void htab_remove_elt(htab_t, void *); -extern void htab_remove_elt_with_hash(htab_t, void *, hashval_t); - -extern void htab_traverse(htab_t, htab_trav, void *); -extern void htab_traverse_noresize(htab_t, htab_trav, void *); - -extern size_t htab_size(htab_t); -extern size_t htab_elements(htab_t); -extern double htab_collisions(htab_t); - -/* A hash function for pointers. */ -extern htab_hash htab_hash_pointer; - -/* An equality function for pointers. */ -extern htab_eq htab_eq_pointer; - -/* A hash function for null-terminated strings. */ -extern hashval_t htab_hash_string(const void *); - -/* An iterative hash function for arbitrary data. */ -extern hashval_t iterative_hash(const void *, size_t, hashval_t); -/* Shorthand for hashing something with an intrinsic size. 
*/ -#define iterative_hash_object(OB, INIT) iterative_hash(&OB, sizeof(OB), INIT) - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* __HASHTAB_H */ diff --git a/src/api/profiling/include/pdc_stack_ops.h b/src/api/profiling/include/pdc_stack_ops.h deleted file mode 100644 index 110d03900..000000000 --- a/src/api/profiling/include/pdc_stack_ops.h +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef _STACK_OPS_H -#define _STACK_OPS_H - -#include "pdc_config.h" -#include "pdc_private.h" -#include -#include -#include -#include -#include - -typedef void *hash_table_t; - -typedef struct profileEntry { - struct profileEntry *next; - struct profileEntry *prev; - const char * ftnkey; - const char * tags; - int64_t count; - int64_t localTotal; - int64_t CumTotal; - int64_t locmin; - int64_t locmax; - double usecTotal; - struct timespec callTime; - struct timespec startTime; - struct timespec totalTime; - struct timespec selfTime; - - struct profileEntry *parent; -} profileEntry_t; - -// typedef enum _boolean {FALSE = 0, TRUE} bool_t; -extern pbool_t enableProfiling; - -#ifndef RESET_TIMER -#define RESET_TIMER(x) (x).tv_sec = (x).tv_nsec = 0; -#endif - -#ifndef TIMER_DIFF -/* t0 = t1 - t2 */ -#define TIMER_DIFF(t0, t1, t2) \ - { \ - if (t2.tv_nsec > (t1).tv_nsec) { \ - (t1).tv_nsec += 1000000000; \ - (t1).tv_sec -= 1; \ - } \ - (t0).tv_sec = (t1).tv_sec - (t2).tv_sec; \ - (t0).tv_nsec = (t1).tv_nsec - (t2).tv_nsec; \ - } -#endif - -#ifndef TIMER_ADD -/* t0 += t1 */ -#define TIMER_ADD(t0, t1) \ - { \ - (t0).tv_sec += (t1).tv_sec; \ - if (((t0).tv_nsec += (t1).tv_nsec) > 10000000000) { \ - (t0).tv_sec += 1; \ - (t0).tv_nsec -= 10000000000; \ - } \ - } -#endif - -void initialize_profile(void **table, size_t tabsize); -void finalize_profile(); -void push(const char *ftnkey, const char *tags); -void pop(); - -#endif diff --git a/src/api/profiling/pdc_hashtab.c b/src/api/profiling/pdc_hashtab.c deleted file mode 100644 index e59b7e3ba..000000000 --- a/src/api/profiling/pdc_hashtab.c +++ /dev/null @@ -1,540 +0,0 @@ -/* An expandable hash tables datatype. - Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc. - Contributed by Vladimir Makarov (vmakarov@cygnus.com). - -This file is part of the libiberty library. -Libiberty is free software; you can redistribute it and/or -modify it under the terms of the GNU Library General Public -License as published by the Free Software Foundation; either -version 2 of the License, or (at your option) any later version. - -Libiberty is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -Library General Public License for more details. - -You should have received a copy of the GNU Library General Public -License along with libiberty; see the file COPYING.LIB. If -not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, -Boston, MA 02111-1307, USA. */ - -/* This package implements basic hash table functionality. It is possible - to search for an entry, create an entry and destroy an entry. - - Elements in the table are generic pointers. - - The size of the table is not fixed; if the occupancy of the table - grows too high the hash table will be expanded. - - The abstract data implementation is based on generalized Algorithm D - from Knuth's book "The art of computer programming". Hash table is - expanded by creation of new hash table and transferring elements from - the old table to the new table. 
*/ - -#include -#include -#include -#include -#include "pdc_config.h" -#include "pdc_hashtab.h" - -/* This macro defines reserved value for empty table entry. */ - -#define EMPTY_ENTRY ((PTR)0) - -/* This macro defines reserved value for table entry which contained - a deleted element. */ - -#define DELETED_ENTRY ((PTR)1) - -static unsigned long higher_prime_number(unsigned long); -static hashval_t hash_pointer(const void *); -static int eq_pointer(const void *, const void *); -static int htab_expand(htab_t); -static PTR * find_empty_slot_for_expand(htab_t, hashval_t); - -/* At some point, we could make these be NULL, and modify the - hash-table routines to handle NULL specially; that would avoid - function-call overhead for the common case of hashing pointers. */ -htab_hash htab_hash_pointer = hash_pointer; -htab_eq htab_eq_pointer = eq_pointer; - -/* The following function returns a nearest prime number which is - greater than N, and near a power of two. */ - -static unsigned long higher_prime_number(n) unsigned long n; -{ - /* These are primes that are near, but slightly smaller than, a - power of two. */ - static const unsigned long primes[] = { - (unsigned long)7, - (unsigned long)13, - (unsigned long)31, - (unsigned long)61, - (unsigned long)127, - (unsigned long)251, - (unsigned long)509, - (unsigned long)1021, - (unsigned long)2039, - (unsigned long)4093, - (unsigned long)8191, - (unsigned long)16381, - (unsigned long)32749, - (unsigned long)65521, - (unsigned long)131071, - (unsigned long)262139, - (unsigned long)524287, - (unsigned long)1048573, - (unsigned long)2097143, - (unsigned long)4194301, - (unsigned long)8388593, - (unsigned long)16777213, - (unsigned long)33554393, - (unsigned long)67108859, - (unsigned long)134217689, - (unsigned long)268435399, - (unsigned long)536870909, - (unsigned long)1073741789, - (unsigned long)2147483647, - /* 4294967291L */ - ((unsigned long)2147483647) + ((unsigned long)2147483644), - }; - - const unsigned long *low = &primes[0]; - const unsigned long *high = &primes[sizeof(primes) / sizeof(primes[0])]; - - while (low != high) { - const unsigned long *mid = low + (high - low) / 2; - if (n > *mid) - low = mid + 1; - else - high = mid; - } - - /* If we've run out of primes, abort. */ - if (n > *low) { - fprintf(stderr, "Cannot find prime bigger than %lu\n", n); - abort(); - } - - return *low; -} - -/* Returns a hash code for P. */ - -static hashval_t hash_pointer(p) const PTR p; -{ - return (hashval_t)((long)p >> 3); -} - -/* Returns non-zero if P1 and P2 are equal. */ - -static int eq_pointer(p1, p2) const PTR p1; -const PTR p2; -{ - return p1 == p2; -} - -/* This function creates table with length slightly longer than given - source length. Created hash table is initiated as empty (all the - hash table entries are EMPTY_ENTRY). The function returns the - created hash table, or NULL if memory allocation fails. 
*/ - -htab_t htab_create_alloc(size, hash_f, eq_f, del_f, alloc_f, free_f) size_t size; -htab_hash hash_f; -htab_eq eq_f; -htab_del del_f; -htab_alloc alloc_f; -htab_free free_f; -{ - htab_t result; - size = higher_prime_number(size); - result = (htab_t)(*alloc_f)(1, sizeof(struct htab)); - if (result == NULL) - return NULL; - - result->entries = (PTR *)(*alloc_f)(size, sizeof(PTR)); - if (result->entries == NULL) { - if (free_f != NULL) - (*free_f)(result); - return NULL; - } - result->size = size; - result->hash_f = hash_f; - result->eq_f = eq_f; - result->del_f = del_f; - result->alloc_f = alloc_f; - result->free_f = free_f; - - return result; -} - -/* These functions exist solely for backward compatibility. */ - -#undef htab_create -htab_t htab_create(size, hash_f, eq_f, del_f) size_t size; -htab_hash hash_f; -htab_eq eq_f; -htab_del del_f; -{ - return htab_create_alloc(size, hash_f, eq_f, del_f, calloc, free); -} - -htab_t htab_try_create(size, hash_f, eq_f, del_f) size_t size; -htab_hash hash_f; -htab_eq eq_f; -htab_del del_f; -{ - return htab_create_alloc(size, hash_f, eq_f, del_f, calloc, free); -} - -/* This function frees all memory allocated for given hash table. - Naturally the hash table must already exist. */ - -void htab_delete(htab) htab_t htab; -{ - int i; - - if (htab->del_f) { - for (i = htab->size - 1; i >= 0; i--) - if (htab->entries[i] != EMPTY_ENTRY && htab->entries[i] != DELETED_ENTRY) - (*htab->del_f)(htab->entries[i]); - } - if (htab->free_f != NULL) { - (*htab->free_f)(htab->entries); - (*htab->free_f)(htab); - } -} - -/* This function clears all entries in the given hash table. */ - -void htab_empty(htab) htab_t htab; -{ - int i; - - if (htab->del_f) - for (i = htab->size - 1; i >= 0; i--) - if (htab->entries[i] != EMPTY_ENTRY && htab->entries[i] != DELETED_ENTRY) - (*htab->del_f)(htab->entries[i]); - - memset(htab->entries, 0, htab->size * sizeof(PTR)); -} - -/* Similar to htab_find_slot, but without several unwanted side effects: - - Does not call htab->eq_f when it finds an existing entry. - - Does not change the count of elements/searches/collisions in the - hash table. - This function also assumes there are no deleted entries in the table. - HASH is the hash value for the element to be inserted. */ - -static PTR *find_empty_slot_for_expand(htab, hash) htab_t htab; -hashval_t hash; -{ - size_t size = htab->size; - unsigned int index = hash % size; - PTR * slot = htab->entries + index; - hashval_t hash2; - - if (*slot == EMPTY_ENTRY) - return slot; - else if (*slot == DELETED_ENTRY) - abort(); - - hash2 = 1 + hash % (size - 2); - for (;;) { - index += hash2; - if (index >= size) - index -= size; - - slot = htab->entries + index; - if (*slot == EMPTY_ENTRY) - return slot; - else if (*slot == DELETED_ENTRY) - abort(); - } -} - -/* The following function changes size of memory allocated for the - entries and repeatedly inserts the table elements. The occupancy - of the table after the call will be about 50%. Naturally the hash - table must already exist. Remember also that the place of the - table entries is changed. If memory allocation failures are allowed, - this function will return zero, indicating that the table could not be - expanded. If all goes well, it will return a non-zero value. 
*/ - -static int htab_expand(htab) htab_t htab; -{ - PTR *oentries; - PTR *olimit; - PTR *p; - PTR *nentries; - - oentries = htab->entries; - olimit = oentries + htab->size; - - htab->size = higher_prime_number(htab->size * 2); - - nentries = (PTR *)(*htab->alloc_f)(htab->size, sizeof(PTR *)); - if (nentries == NULL) - return 0; - htab->entries = nentries; - htab->n_elements -= htab->n_deleted; - htab->n_deleted = 0; - - p = oentries; - do { - PTR x = *p; - - if (x != EMPTY_ENTRY && x != DELETED_ENTRY) { - PTR *q = find_empty_slot_for_expand(htab, (*htab->hash_f)(x)); - *q = x; - } - p++; - } while (p < olimit); - - if (htab->free_f != NULL) - (*htab->free_f)(oentries); - - return 1; -} - -/* This function searches for a hash table entry equal to the given - element. It cannot be used to insert or delete an element. */ - -PTR htab_find_with_hash(htab, element, hash) htab_t htab; -const PTR element; -hashval_t hash; -{ - unsigned int index; - hashval_t hash2; - size_t size; - PTR entry; - - htab->searches++; - size = htab->size; - index = hash % size; - - entry = htab->entries[index]; - if (entry == EMPTY_ENTRY || (entry != DELETED_ENTRY && (*htab->eq_f)(entry, element))) - return entry; - - hash2 = 1 + hash % (size - 2); - - for (;;) { - htab->collisions++; - index += hash2; - if (index >= size) - index -= size; - - entry = htab->entries[index]; - if (entry == EMPTY_ENTRY || (entry != DELETED_ENTRY && (*htab->eq_f)(entry, element))) - return entry; - } -} - -/* Like htab_find_slot_with_hash, but compute the hash value from the - element. */ - -PTR htab_find(htab, element) htab_t htab; -const PTR element; -{ - return htab_find_with_hash(htab, element, (*htab->hash_f)(element)); -} - -/* This function searches for a hash table slot containing an entry - equal to the given element. To delete an entry, call this with - INSERT = 0, then call htab_clear_slot on the slot returned (possibly - after doing some checks). To insert an entry, call this with - INSERT = 1, then write the value you want into the returned slot. - When inserting an entry, NULL may be returned if memory allocation - fails. */ - -PTR * htab_find_slot_with_hash(htab, element, hash, insert) htab_t htab; -const PTR element; -hashval_t hash; -enum insert_option insert; -{ - PTR * first_deleted_slot; - unsigned int index; - hashval_t hash2; - size_t size; - PTR entry; - - if (insert == INSERT && htab->size * 3 <= htab->n_elements * 4 && htab_expand(htab) == 0) - return NULL; - - size = htab->size; - index = hash % size; - - htab->searches++; - first_deleted_slot = NULL; - - entry = htab->entries[index]; - if (entry == EMPTY_ENTRY) - goto empty_entry; - else if (entry == DELETED_ENTRY) - first_deleted_slot = &htab->entries[index]; - else if ((*htab->eq_f)(entry, element)) - return &htab->entries[index]; - - hash2 = 1 + hash % (size - 2); - for (;;) { - htab->collisions++; - index += hash2; - if (index >= size) - index -= size; - - entry = htab->entries[index]; - if (entry == EMPTY_ENTRY) - goto empty_entry; - else if (entry == DELETED_ENTRY) { - if (!first_deleted_slot) - first_deleted_slot = &htab->entries[index]; - } - else if ((*htab->eq_f)(entry, element)) - return &htab->entries[index]; - } - -empty_entry: - if (insert == NO_INSERT) - return NULL; - - htab->n_elements++; - - if (first_deleted_slot) { - *first_deleted_slot = EMPTY_ENTRY; - return first_deleted_slot; - } - - return &htab->entries[index]; -} - -/* Like htab_find_slot_with_hash, but compute the hash value from the - element. 
*/ - -PTR * htab_find_slot(htab, element, insert) htab_t htab; -const PTR element; -enum insert_option insert; -{ - return htab_find_slot_with_hash(htab, element, (*htab->hash_f)(element), insert); -} - -/* This function deletes an element with the given value from hash - table. If there is no matching element in the hash table, this - function does nothing. */ - -void htab_remove_elt(htab, element) htab_t htab; -PTR element; -{ - PTR *slot; - - slot = htab_find_slot(htab, element, NO_INSERT); - if (*slot == EMPTY_ENTRY) - return; - - if (htab->del_f) - (*htab->del_f)(*slot); - - *slot = DELETED_ENTRY; - htab->n_deleted++; -} - -/* This function clears a specified slot in a hash table. It is - useful when you've already done the lookup and don't want to do it - again. */ - -void htab_clear_slot(htab, slot) htab_t htab; -PTR *slot; -{ - if (slot < htab->entries || slot >= htab->entries + htab->size || *slot == EMPTY_ENTRY || - *slot == DELETED_ENTRY) - abort(); - - if (htab->del_f) - (*htab->del_f)(*slot); - - *slot = DELETED_ENTRY; - htab->n_deleted++; -} - -/* This function scans over the entire hash table calling - CALLBACK for each live entry. If CALLBACK returns false, - the iteration stops. INFO is passed as CALLBACK's second - argument. */ - -void htab_traverse(htab, callback, info) htab_t htab; -htab_trav callback; -PTR info; -{ - PTR *slot = htab->entries; - PTR *limit = slot + htab->size; - - do { - PTR x = *slot; - if (x != EMPTY_ENTRY && x != DELETED_ENTRY) - if (!(*callback)(slot, info)) - break; - } while (++slot < limit); -} - -/* Return the current size of given hash table. */ - -size_t htab_size(htab) htab_t htab; -{ - return htab->size; -} - -/* Return the current number of elements in given hash table. */ - -size_t htab_elements(htab) htab_t htab; -{ - return htab->n_elements - htab->n_deleted; -} - -/* Return the fraction of fixed collisions during all work with given - hash table. */ - -double htab_collisions(htab) htab_t htab; -{ - if (htab->searches == 0) - return 0.0; - - return (double)htab->collisions / (double)htab->searches; -} - -/* Hash P as a null-terminated string. - - Copied from gcc/hashtable.c. Zack had the following to say with respect - to applicability, though note that unlike hashtable.c, this hash table - implementation re-hashes rather than chain buckets. - - http://gcc.gnu.org/ml/gcc-patches/2001-08/msg01021.html - From: Zack Weinberg - Date: Fri, 17 Aug 2001 02:15:56 -0400 - - I got it by extracting all the identifiers from all the source code - I had lying around in mid-1999, and testing many recurrences of - the form "H_n = H_{n-1} * K + c_n * L + M" where K, L, M were either - prime numbers or the appropriate identity. This was the best one. - I don't remember exactly what constituted "best", except I was - looking at bucket-length distributions mostly. - - So it should be very good at hashing identifiers, but might not be - as good at arbitrary strings. - - I'll add that it thoroughly trounces the hash functions recommended - for this use at http://burtleburtle.net/bob/hash/index.html, both - on speed and bucket distribution. I haven't tried it against the - function they just started using for Perl's hashes. 
*/ - -hashval_t htab_hash_string(p) const PTR p; -{ - const unsigned char *str = (const unsigned char *)p; - hashval_t r = 0; - unsigned char c; - - while ((c = *str++) != 0) - r = r * 67 + c - 113; - - return r; -} diff --git a/src/api/profiling/pdc_stack_ops.c b/src/api/profiling/pdc_stack_ops.c deleted file mode 100644 index fab9f274a..000000000 --- a/src/api/profiling/pdc_stack_ops.c +++ /dev/null @@ -1,264 +0,0 @@ -#include -#include -#include -#include "pdc_stack_ops.h" -#include "pdc_hashtab.h" - -profileEntry_t *calltree = NULL; -profileEntry_t *freelist = NULL; - -static int profilerrors = 0; - -hash_table_t hashtable; - -htab_t thisHashTable; - -/* For now we disable profiling (by default) - * Note that one can always ENABLE it by set the - * environment variable "PROFILE_ENABLE=true" - */ -pbool_t enableProfiling = FALSE; - -/* - * The idea of this implementation is to simulate the call stack - * of the running application. Each function that we care about - * begins with a FUNC_ENTER(x) declaration and finishes with - * FUNC_LEAVE(ret). These of course are macros and under - * the condition that we enable profiling, these expand into - * push and pop operations which we define below. - * - * Example: suppose that a user application is defined as follows - * int main() { - * a(); - * b(); - * c(); - * return 0; - * } - * - * void a() { - * aa(); - * aaa(); - * ab(); - * } - * - * void b() { - * bb(); - * bbb(); - * bc(); - * } - * - * void c() { - * cc(); - * ccc(); - * ca(); - * } - * - * Assume that all of the internal functions only make system calls - * or 3rd party libraries, i.e. the underlying functions will NOT - * be profiled. - * - * The implementation of stack_ops will maintain a call tree - * that mirrors that of the actual program, i.e. the alltree data - * structure will contain something like the following as we enter - * the first function contained by a(): - * - * ("main") --> ("a") --> ("aa") - * - * The entry for "main" has a /start_time and no /total_time - * Similarly, "a" has it's own /start_time and no /total_time - * The final entry: "aa" has a start-time and just prior to - * the return to it's parent ("a"), we sample the real-time - * clock as part of the POP functionality. Using the current - * time minus the start-time we establish the raw total elapsed - * time for the current function. - * NOTE: The actual runtime spent within the function is - * a calculation which subtracts out the total elapsed times - * of all of the lower-level functions, e.g. suppose ("a") - * has a total runtime of 10. If the total runtime of ("aa") - * in the simple call chain shown above is 5, then the actual - * profiled time spent in ("a") is 10 - 5 = 5. - * Ultimately, if were to execute the entire program and then - * sum all of the individual profile times, the total should - * match the execution time of the program. 
- */ - -void -push(const char *ftnkey, const char *tags) -{ - profileEntry_t *thisEntry; - if (freelist != NULL) { - thisEntry = freelist; - freelist = thisEntry->next; - } - else { - if ((thisEntry = (profileEntry_t *)malloc(sizeof(profileEntry_t))) == NULL) { - perror("malloc"); - profilerrors++; - } - } - - if (profilerrors) - return; - thisEntry->ftnkey = ftnkey; - thisEntry->tags = tags; - thisEntry->prev = calltree; - thisEntry->next = NULL; - calltree = thisEntry; - - /* Timing */ - clock_gettime(CLOCK_REALTIME, &thisEntry->startTime); - RESET_TIMER(thisEntry->callTime); - return; -} - -void -pop() -{ - struct timespec current_time; - profileEntry_t *master; - profileEntry_t *thisEntry = calltree; - int update_entry = TRUE; - if (thisEntry == NULL) - return; /* This shouldn't happen */ - - /* Timing */ - clock_gettime(CLOCK_REALTIME, ¤t_time); - TIMER_DIFF(thisEntry->totalTime, current_time, thisEntry->startTime); - TIMER_DIFF(thisEntry->selfTime, thisEntry->totalTime, thisEntry->callTime); - calltree = thisEntry->prev; - if (calltree != NULL) { - TIMER_ADD(calltree->callTime, thisEntry->totalTime); - } - /* Check to see if this function has already been added to the hashtable */ - void **tableEntry = htab_find_slot(thisHashTable, thisEntry, INSERT); - if (*tableEntry == NULL) { - /* No table entry found so add it now ... */ - master = (profileEntry_t *)malloc(sizeof(profileEntry_t)); - if (master) { - thisEntry->count = 1; - memcpy(master, thisEntry, sizeof(profileEntry_t)); - *tableEntry = master; - } - update_entry = FALSE; - } - - if (update_entry) { - master = *(profileEntry_t **)tableEntry; - master->count++; - TIMER_ADD(master->totalTime, thisEntry->totalTime); - TIMER_ADD(master->selfTime, thisEntry->selfTime); - } - - /* Rather than freeing the container, we add the - * current entry onto the freelist. - */ - thisEntry->next = freelist; - freelist = thisEntry; -} - -hashval_t -hash_profile_entry(const void *p) -{ - const profileEntry_t *thisEntry = (const profileEntry_t *)p; - return htab_hash_string(thisEntry->ftnkey); -} - -int -eq_profile_entry(const void *a, const void *b) -{ - const profileEntry_t *tp_a = (const profileEntry_t *)a; - const profileEntry_t *tp_b = (const profileEntry_t *)b; - return (tp_a->ftnkey == tp_b->ftnkey); -} - -void -initialize_profile(void **hashtab, size_t size) -{ - if (*hashtab == NULL) { - if ((thisHashTable = htab_try_create(size, hash_profile_entry, eq_profile_entry, free)) == NULL) { - return; - } - *hashtab = thisHashTable; - } -} - -int -show_profile_info(void **ht_live_entry, void *extraInfo ATTRIBUTE(unused)) -{ - static int count = 0; - char * LineBreak = "------------------------------------------------------------------------------"; - char * header = " item calls Time/call [Sec,nSec]\tftn_name"; - const profileEntry_t *thisEntry = *(const profileEntry_t **)ht_live_entry; - - if (thisEntry) { - struct timespec totalTime; - int64_t totalCalls = thisEntry->count; - if (count == 0) - puts(header); - totalTime = thisEntry->totalTime; - printf("%s\n %d\t%-6" PRId64 " %6" PRId64 ",%6" PRId64 "\t\t %s\n", LineBreak, ++count, totalCalls, - totalTime.tv_sec / totalCalls, totalTime.tv_nsec / totalCalls, thisEntry->ftnkey); - } - - return TRUE; -} - -/* Returns 1 if we set enableProfiling to TRUE - * otherwise returns 0. - */ -int -toggle_profile_enable() -{ - if (enableProfiling == FALSE) - enableProfiling = TRUE; - else - enableProfiling = FALSE; - - return (enableProfiling ? 
1 : 0); -} - -/* These functions should be used when we've actually built the profiler as a shared library. - * Note: One might check an environment variable to see if a non-default size - * for the hashtable initialization should be used... - * The profile_fini should probably be used to dump the contents of the profile - * hashtable. - */ - -void __attribute__((constructor)) profile_init(void) -{ - int default_HashtableSize = 128; - char *size_override = NULL; - char *profile_enable = getenv("PROFILE_ENABLE"); - if (profile_enable != NULL) { - if (strcasecmp(profile_enable, "true") == 0) { - enableProfiling = TRUE; - } - else if (strcasecmp(profile_enable, "false") == 0) { - enableProfiling = FALSE; - } - } - // While it is tempting to skip creating a hashtable - // if we've disabled profiling (see above), I want - // to give the user the ability at runtime to - // possibly enable everything... - // I don't currently include any APIs to enable - // or disable profiling at runtime, but that is - // on the TODO list. - - size_override = getenv("PROFILE_HASHTABLESIZE"); - if (size_override != NULL) { - int override_value = atoi(size_override); - if (override_value > 0) { - default_HashtableSize = override_value; - } - } - initialize_profile(&hashtable, default_HashtableSize); -} - -void __attribute__((destructor)) finalize_profile(void) -{ - int count = 1; - if (thisHashTable != NULL) { - htab_traverse(thisHashTable, show_profile_info, &count); - } -} diff --git a/src/commons/CMakeLists.txt b/src/commons/CMakeLists.txt new file mode 100644 index 000000000..f29c25a7c --- /dev/null +++ b/src/commons/CMakeLists.txt @@ -0,0 +1,227 @@ +#------------------------------------------------------------------------------ +# PDC Commons +#------------------------------------------------------------------------------ + +set(PDC_COMMON_LIBRARY_NAME pdc_commons CACHE INTERNAL "") + +#------------------------------------------------------------------------------ +# External dependencies +#------------------------------------------------------------------------------ + +# #set(PDC_EXT_LIB_DEPENDENCIES ${PDC_COMMON_LIBRARY_NAME} ${PDC_EXT_LIB_DEPENDENCIES}) +# set(PDC_EXT_INCLUDE_DEPENDENCIES ${CMAKE_CURRENT_SOURCE_DIR}/profiling) +# set(PDC_EXPORTED_LIBS pdcprof) + +if(THREADS_HAVE_PTHREAD_ARG) + set_property(TARGET ${PDC_COMMON_LIBRARY_NAME} PROPERTY COMPILE_OPTIONS "-pthread") + set_property(TARGET ${PDC_COMMON_LIBRARY_NAME} PROPERTY INTERFACE_COMPILE_OPTIONS "-pthread") +endif() + + +# Mercury +find_package(MERCURY REQUIRED) +if(MERCURY_FOUND) + message(STATUS "mercury dir ${MERCURY_DIR}") + if(DEFINED MERCURY_DIR AND NOT "${MERCURY_DIR}" STREQUAL "") + # If MERCURY_DIR contains "share/" (or "share" is at the end), truncate it along with everything following it + string(REGEX REPLACE "/share.*" "" trimmed_mercury_dir ${MERCURY_DIR}) + # Check if the trimmed_mercury_dir ends with "/mercury" + string(REGEX MATCH ".*/mercury$" is_mercury_home ${trimmed_mercury_dir}) + # If trimmed_mercury_dir ends with "/mercury", consider it as the home directory + if(is_mercury_home) + set(MERCURY_HOME ${trimmed_mercury_dir}) + else() + # If not ending with "mercury", it's likely an error + message(FATAL_ERROR "Cannot determine MERCURY_HOME from MERCURY_DIR.") + endif() + + message("Mercury home is set to ${MERCURY_HOME}") + + set(MERCURY_INCLUDE_DIR ${MERCURY_HOME}/include) + set(MERCURY_LIBRARY_DIR ${MERCURY_HOME}/lib) + endif() + message(STATUS "mercury include dir ${MERCURY_INCLUDE_DIR}") + message(STATUS "mercury lib 
dir ${MERCURY_LIBRARY_DIR}") + set(PDC_EXT_INCLUDE_DEPENDENCIES ${MERCURY_INCLUDE_DIR} + ${PDC_EXT_INCLUDE_DEPENDENCIES} + ) + set(PDC_EXT_LIB_DEPENDENCIES mercury ${PDC_EXT_LIB_DEPENDENCIES}) +endif() + +include_directories(${PDC_EXT_INCLUDE_DEPENDENCIES}) + +#------------------------------------------------------------------------------ +# Include directories +#------------------------------------------------------------------------------ + +# Get a list of all directories that contain header files +file(GLOB_RECURSE LOCAL_INCLUDE_DIRS "*.h") + +# Remove the /filename.h at the end of each directory +list(TRANSFORM LOCAL_INCLUDE_DIRS REPLACE "/[^/]*$" "") + +# Remove duplicates +list(REMOVE_DUPLICATES LOCAL_INCLUDE_DIRS) + +set(PDC_COMMONS_INCLUDE_DIRS + ${LOCAL_INCLUDE_DIRS} + ${PDC_INCLUDES_BUILD_TIME} + ${PROJECT_BINARY_DIR} + ${PDC_SOURCE_DIR} + ${PDC_EXT_INCLUDE_DEPENDENCIES} +) + +include_directories( + ${PDC_COMMONS_INCLUDE_DIRS} +) + +message(STATUS "PDC_COMMONS_INCLUDE_DIRS: ${PDC_COMMONS_INCLUDE_DIRS}") + +install( + FILES + ${CMAKE_BINARY_DIR}/pdc_config.h + DESTINATION + ${PDC_INSTALL_INCLUDE_DIR} + COMPONENT + headers +) + +#------------------------------------------------------------------------------ +# Configure module header files +#------------------------------------------------------------------------------ +# Set unique vars used in the autogenerated config file (symbol import/export) +if(BUILD_SHARED_LIBS) + set(PDC_BUILD_SHARED_LIBS 1) + set(PDC_LIBTYPE SHARED) +else() + set(PDC_BUILD_SHARED_LIBS 0) + set(PDC_LIBTYPE STATIC) +endif() + +if(PDC_ENABLE_TIMING) + add_definitions(-DPDC_TIMING=1) +endif() + +#------------------------------------------------------------------------------ +# Set sources +#------------------------------------------------------------------------------ + +# Collect all source files +file(GLOB_RECURSE PDC_COMMONS_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.c) +file(GLOB_RECURSE PDC_COMMONS_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.h) + + +#------------------------------------------------------------------------------ +# Libraries +#------------------------------------------------------------------------------ +# PDC COMMONS + + +add_library(${PDC_COMMON_LIBRARY_NAME} ${PDC_LIBTYPE} ${PDC_COMMONS_SOURCES} ${PDC_COMMONS_HEADERS}) + +target_include_directories(${PDC_COMMON_LIBRARY_NAME} + PUBLIC "$" + $ +) + +target_link_libraries(${PDC_COMMON_LIBRARY_NAME} INTERFACE + ${PDC_EXT_LIB_DEPENDENCIES} +) + +set(PDC_EXPORTED_LIBS ${PDC_COMMON_LIBRARY_NAME} ${PDC_EXPORTED_LIBS}) + +#----------------------------------------------------------------------------- +# Specify project header files to be installed +#----------------------------------------------------------------------------- + + +set(PDC_PUBLIC_HEADERS "") +set(PUBLIC_HEADER_DIR_LIST + ${CMAKE_CURRENT_SOURCE_DIR}/include + ${CMAKE_CURRENT_SOURCE_DIR}/generic/include + ${CMAKE_CURRENT_SOURCE_DIR}/profiling/include + ${CMAKE_CURRENT_SOURCE_DIR}/utils/include +) + +foreach(_header_dir ${PUBLIC_HEADER_DIR_LIST}) + file(GLOB_RECURSE _dir_headers ${_header_dir}/*.h) + list(APPEND PDC_PUBLIC_HEADERS ${_dir_headers}) +endforeach() + +set(PDC_COMMONS_HEADERS + ${PDC_PUBLIC_HEADERS} + ${PROJECT_BINARY_DIR}/pdc_config_sys.h + ${PROJECT_BINARY_DIR}/pdc_config.h + ) + +#----------------------------------------------------------------------------- +# Add file(s) to CMake Install +#----------------------------------------------------------------------------- +install( + FILES + ${PDC_COMMONS_HEADERS} + DESTINATION + 
${PDC_INSTALL_INCLUDE_DIR} + COMPONENT + headers +) + +#----------------------------------------------------------------------------- +# Add Target(s) to CMake Install +#----------------------------------------------------------------------------- + +install( + TARGETS + ${PDC_COMMON_LIBRARY_NAME} + EXPORT + ${PDC_EXPORTED_TARGETS} + LIBRARY DESTINATION ${PDC_INSTALL_LIB_DIR} + ARCHIVE DESTINATION ${PDC_INSTALL_LIB_DIR} + RUNTIME DESTINATION ${PDC_INSTALL_BIN_DIR} +) + +#----------------------------------------------------------------------------- +# Add Target(s) to CMake Install for import into other projects +#----------------------------------------------------------------------------- +install( + EXPORT + ${PDC_EXPORTED_TARGETS} + DESTINATION + ${PDC_INSTALL_DATA_DIR}/cmake/pdc + FILE + ${PDC_EXPORTED_TARGETS}.cmake +) + +#----------------------------------------------------------------------------- +# Export all exported targets to the build tree for use by parent project +#----------------------------------------------------------------------------- +if(NOT PDC_EXTERNALLY_CONFIGURED) +EXPORT ( + TARGETS + ${PDC_EXPORTED_LIBS} + FILE + ${PDC_EXPORTED_TARGETS}.cmake +) +endif() + +#------------------------------------------------------------------------------ +# Set variables for parent scope +#------------------------------------------------------------------------------ +# Used by config.cmake.build.in and Testing +set(PDC_INCLUDES_BUILD_TIME + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} + ${PDC_EXT_INCLUDE_DEPENDENCIES} + ${PDC_COMMONS_INCLUDE_DIRS} + PARENT_SCOPE +) + +# Used by config.cmake.install.in +set(PDC_INCLUDES_INSTALL_TIME + ${PDC_COMMONS_INCLUDE_DIRS} + ${PDC_INSTALL_INCLUDE_DIR} + ${PDC_EXT_INCLUDE_DEPENDENCIES} + PARENT_SCOPE +) + +set(PDC_COMMONS_LIBRARIES ${PDC_COMMON_LIBRARY_NAME} PARENT_SCOPE) \ No newline at end of file diff --git a/src/commons/generic/include/pdc_generic.h b/src/commons/generic/include/pdc_generic.h new file mode 100644 index 000000000..47322b553 --- /dev/null +++ b/src/commons/generic/include/pdc_generic.h @@ -0,0 +1,219 @@ +#ifndef PDC_GENERIC_H +#define PDC_GENERIC_H + +#include +#include +#include +#include +#include + +#ifndef __cplusplus +#if __STDC_VERSION__ >= 199901L +/* C99 or later */ +#include +#else +/* Pre-C99 */ +typedef enum { false = 0, true = 1 } bool; +#endif +#endif + +typedef enum { + PDC_UNKNOWN = -1, /* error */ + PDC_INT = 0, /* integer types (identical to int32_t) */ + PDC_FLOAT = 1, /* floating-point types */ + PDC_DOUBLE = 2, /* double types */ + PDC_CHAR = 3, /* character types */ + PDC_STRING = 4, /* string types */ + PDC_BOOLEAN = 5, /* boolean types */ + PDC_SHORT = 6, /* short types */ + PDC_UINT = 7, /* unsigned integer types (identical to uint32_t) */ + PDC_INT64 = 8, /* 64-bit integer types */ + PDC_UINT64 = 9, /* 64-bit unsigned integer types */ + PDC_INT16 = 10, /* 16-bit integer types */ + PDC_INT8 = 11, /* 8-bit integer types */ + PDC_UINT8 = 12, /* 8-bit unsigned integer types */ + PDC_UINT16 = 13, /* 16-bit unsigned integer types */ + PDC_INT32 = 14, /* 32-bit integer types */ + PDC_UINT32 = 15, /* 32-bit unsigned integer types */ + PDC_LONG = 16, /* long types */ + PDC_VOID_PTR = 17, /* void pointer type */ + PDC_SIZE_T = 18, /* size_t type */ + PDC_TYPE_COUNT = 19 /* this is the number of var types and has to be the last */ +} pdc_c_var_type_t; + +typedef pdc_c_var_type_t PDC_CType; + +typedef enum { + PDC_CLS_SCALAR, + PDC_CLS_ARRAY, + PDC_CLS_ENUM, // not implemented, users can use 
PDC_CT_INT + PDC_CLS_STRUCT, // not implemented, users can use embedded key value pairs for the members in a struct + PDC_CLS_UNION, // not implemented, users can use embedded key value pairs for the only one member value + // in a union. + PDC_CLS_POINTER, // not implemented, users can use PDC_CT_INT64_T to store the pointer address, but + // won't work for distributed memory. + PDC_CLS_FUNCTION, // not implemented, users can use PDC_CT_INT64_T to store the function address, but + // won't work for distributed memory. + PDC_CLS_COUNT // just the count of the enum. +} pdc_c_var_class_t; + +typedef pdc_c_var_class_t PDC_CType_Class; + +// clang-format off +static const size_t DataTypeSizes[PDC_TYPE_COUNT] = { + sizeof(int), + sizeof(float), + sizeof(double), + sizeof(char), + sizeof(char *), + sizeof(bool), + sizeof(short), + sizeof(unsigned int), + sizeof(int64_t), + sizeof(uint64_t), + sizeof(int16_t), + sizeof(int8_t), + sizeof(uint8_t), + sizeof(uint16_t), + sizeof(int32_t), + sizeof(uint32_t), + sizeof(long), + sizeof(void *), + sizeof(size_t) +}; + +static const char *DataTypeNames[PDC_TYPE_COUNT] = { + "int", + "float", + "double", + "char", + "char*", + "bool", + "short", + "unsigned int", + "int64_t", + "uint64_t", + "int16_t", + "int8_t", + "uint8_t", + "uint16_t", + "int32_t", + "uint32_t", + "long", + "void*", + "size_t" +}; + +static const char *DataTypeEnumNames[PDC_TYPE_COUNT] = { + "PDC_INT", + "PDC_FLOAT", + "PDC_DOUBLE", + "PDC_CHAR", + "PDC_STRING", + "PDC_BOOLEAN", + "PDC_SHORT", + "PDC_UINT", + "PDC_INT64", + "PDC_UINT64", + "PDC_INT16", + "PDC_INT8", + "PDC_UINT8", + "PDC_UINT16", + "PDC_INT32", + "PDC_UINT32", + "PDC_LONG", + "PDC_VOID_PTR", + "PDC_SIZE_T" +}; + +static const char *DataTypeFormat[PDC_TYPE_COUNT] = { + "%d", // int + "%f", // float + "%lf", // double + "%c", // char + "%s", // char* + "%d", // bool (represented as an integer) + "%hd", // short + "%u", // unsigned int + "%lld", // int64_t + "%llu", // uint64_t + "%hd", // int16_t + "%hhd", // int8_t + "%hhu", // uint8_t + "%hu", // uint16_t + "%d", // int32_t + "%u", // uint32_t + "%ld", // long + "%p", // void* (pointer) + "%zu" // size_t +}; + +// clang-format on + +static const char * +get_enum_name_by_dtype(pdc_c_var_type_t type) +{ + if (type < 0 || type >= PDC_TYPE_COUNT) { + return NULL; + } + return DataTypeEnumNames[type]; +} + +static const size_t +get_size_by_dtype(pdc_c_var_type_t type) +{ + if (type < 0 || type >= PDC_TYPE_COUNT) { + return 0; + } + return DataTypeSizes[type]; +} + +static const size_t +get_size_by_class_n_type(void *data, size_t item_count, pdc_c_var_class_t pdc_class, + pdc_c_var_type_t pdc_type) +{ + size_t size = 0; + if (pdc_class == PDC_CLS_SCALAR) { + if (pdc_type == PDC_STRING) { + size = (strlen((char *)data) + 1) * sizeof(char); + } + else { + size = get_size_by_dtype(pdc_type); + } + } + else if (pdc_class == PDC_CLS_ARRAY) { + if (pdc_type == PDC_STRING) { + char **str_arr = (char **)data; + int i = 0; + for (i = 0; i < item_count; i++) { + size = size + (strlen(str_arr[i]) + 1) * sizeof(char); + } + } + else { + size = item_count * get_size_by_dtype(pdc_type); + } + } + return size; +} + +static const char * +get_name_by_dtype(pdc_c_var_type_t type) +{ + if (type < 0 || type >= PDC_TYPE_COUNT) { + return NULL; + } + return DataTypeNames[type]; +} + +static pdc_c_var_type_t +get_dtype_by_enum_name(const char *enumName) +{ + for (int i = 0; i < PDC_TYPE_COUNT; i++) { + if (strcmp(DataTypeEnumNames[i], enumName) == 0) { + return (pdc_c_var_type_t)i; + } + } + 
return PDC_UNKNOWN; // assuming PDC_UNKNOWN is the enum value for "unknown" +} + +#endif /* PDC_GENERIC_H */ \ No newline at end of file diff --git a/src/commons/serde/include/pdc_serde.h b/src/commons/serde/include/pdc_serde.h new file mode 100644 index 000000000..6211e1917 --- /dev/null +++ b/src/commons/serde/include/pdc_serde.h @@ -0,0 +1,155 @@ +#ifndef PDC_SERDE_H +#define PDC_SERDE_H + +#include +#include +#include +#include "pdc_generic.h" + +#define MAX_KEYS 10 +#define MAX_BUFFER_SIZE 1000 + +typedef struct { + PDC_CType pdc_type; /**< Data type of the key */ + size_t size; /**< Size of the key */ + void * key; /**< Pointer to the key data */ +} PDC_SERDE_Key; + +typedef struct { + PDC_CType_Class pdc_class; /**< Class of the value */ + PDC_CType pdc_type; /**< Data type of the value */ + size_t size; // size of the data. If a string, it is strlen(data) + 1; + // if an array, it is the number of elements; + // if a struct, it is the totalSize of the data chunk of the struct, etc. + void *data; /**< Pointer to the value data */ +} PDC_SERDE_Value; + +typedef struct { + PDC_SERDE_Key *keys; /**< Array of keys */ + size_t numKeys; /**< Number of keys */ + size_t totalSize; /**< Total size of the header */ +} PDC_SERDE_Header; + +typedef struct { + size_t numValues; /**< Number of values */ + PDC_SERDE_Value *values; /**< Array of values */ + size_t totalSize; /**< Total size of the data */ +} PDC_SERDE_Data; + +typedef struct { + PDC_SERDE_Header *header; /**< Pointer to the header */ + PDC_SERDE_Data * data; /**< Pointer to the data */ + size_t totalSize; /**< Total size of the serialized data */ +} PDC_SERDE_SerializedData; + +/** + * @brief Initialize a serialized data structure + * + * @param initial_field_count Number of initial fields to allocate space for + * + * @return Pointer to the initialized PDC_SERDE_SerializedData structure + */ +PDC_SERDE_SerializedData *pdc_serde_init(int initial_field_count); + +/** + * @brief Append a key-value pair to the serialized data structure + * + * @param data Pointer to the PDC_SERDE_SerializedData structure + * @param key Pointer to the PDC_SERDE_Key structure representing the key + * @param value Pointer to the PDC_SERDE_Value structure representing the value + */ +void pdc_serde_append_key_value(PDC_SERDE_SerializedData *data, PDC_SERDE_Key *key, PDC_SERDE_Value *value); + +/** + * @brief get the total size of PDC_SERDE_SerializedData structure instance + * + * @param data Pointer to the PDC_SERDE_SerializedData structure instance + * + * @return total size of the PDC_SERDE_SerializedData structure instance +*/ +size_t get_total_size_for_serialized_data(PDC_SERDE_SerializedData *data); + +/** + * @brief Serialize the data in the serialized data structure and return the buffer + * + * @param data Pointer to the PDC_SERDE_SerializedData structure + * + * @return Pointer to the buffer containing the serialized data + */ +void *pdc_serde_serialize(PDC_SERDE_SerializedData *data); + +/** + * @brief Deserialize the buffer and return the deserialized data structure + * + * @param buffer Pointer to the buffer containing the serialized data + * + * @return Pointer to the deserialized PDC_SERDE_SerializedData structure + */ +PDC_SERDE_SerializedData *pdc_serde_deserialize(void *buffer); + +/** + * @brief Free the memory allocated for the serialized data structure + * + * @param data Pointer to the PDC_SERDE_SerializedData structure to be freed + */ +void pdc_serde_free(PDC_SERDE_SerializedData *data); + +/** + * @brief Print the contents of the 
serialized data structure + * + * @param data Pointer to the PDC_SERDE_SerializedData structure to be printed + */ +void pdc_serde_print(PDC_SERDE_SerializedData *data); + +/** + * @brief Create a PDC_SERDE_Key structure + * + * @param key Pointer to the key data + * @param pdc_type Data type of the key. For SERDE_Key, we only support PDC_CLS_SCALAR class. + * @param size Size of the key data + * + * @return Pointer to the created PDC_SERDE_Key structure + */ +static inline PDC_SERDE_Key * +PDC_SERDE_KEY(void *key, PDC_CType pdc_type, size_t size) +{ + PDC_SERDE_Key *pdc_key = (PDC_SERDE_Key *)malloc(sizeof(PDC_SERDE_Key)); + size_t key_size = (size_t) get_size_by_class_n_type(key, size, PDC_CLS_SCALAR, pdc_type); + pdc_key->key = malloc(key_size); + memcpy(pdc_key->key, key, key_size); + pdc_key->pdc_type = pdc_type; + pdc_key->size = key_size; + return pdc_key; +} + +/** + * @brief Create a PDC_SERDE_Value structure + * + * @param data Pointer to the value data + * @param pdc_type Data type of the value + * @param pdc_class Class of the value + * @param size Size of the value data + * + * @return Pointer to the created PDC_SERDE_Value structure + */ +static inline PDC_SERDE_Value * +PDC_SERDE_VALUE(void *data, PDC_CType pdc_type, PDC_CType_Class pdc_class, size_t size) +{ + PDC_SERDE_Value *pdc_value = (PDC_SERDE_Value *)malloc(sizeof(PDC_SERDE_Value)); + size_t value_size = 0; + if (pdc_class == PDC_CLS_STRUCT) { + // TODO: we need to check if data is a valid PDC_SERDE_SerializedData structure. + PDC_SERDE_SerializedData *struct_data = (PDC_SERDE_SerializedData *)data; + size = struct_data->totalSize; + } else { + value_size = (size_t) get_size_by_class_n_type(data, size, pdc_class, pdc_type); + } + pdc_value->data = malloc(value_size); + memcpy(pdc_value->data, data, value_size); + pdc_value->pdc_class = pdc_class; + pdc_value->pdc_type = pdc_type; + pdc_value->size = value_size; + return pdc_value; +} + +#endif /* PDC_SERDE_H */ \ No newline at end of file diff --git a/src/commons/serde/pdc_serde.c b/src/commons/serde/pdc_serde.c new file mode 100644 index 000000000..e0959b5cd --- /dev/null +++ b/src/commons/serde/pdc_serde.c @@ -0,0 +1,346 @@ +#include "pdc_serde.h" + +PDC_SERDE_SerializedData * +pdc_serde_init(int initial_field_count) +{ + PDC_SERDE_SerializedData *data = malloc(sizeof(PDC_SERDE_SerializedData)); + data->header = malloc(sizeof(PDC_SERDE_Header)); + data->header->keys = malloc(sizeof(PDC_SERDE_Key) * initial_field_count); + data->header->numKeys = 0; + data->header->totalSize = 0; + data->data = malloc(sizeof(PDC_SERDE_Data)); + data->data->values = malloc(sizeof(PDC_SERDE_Value) * initial_field_count); + data->data->numValues = 0; + data->data->totalSize = 0; + return data; +} + +void +pdc_serde_append_key_value(PDC_SERDE_SerializedData *data, PDC_SERDE_Key *key, PDC_SERDE_Value *value) +{ + data->header->keys[data->header->numKeys] = *key; + data->header->numKeys++; + // append type, size, and key + data->header->totalSize += (sizeof(int) + sizeof(size_t) + key->size); + data->data->values[data->data->numValues] = *value; + data->data->numValues++; + // append class, type, size, and data + data->data->totalSize += (sizeof(int) + sizeof(int) + sizeof(size_t) + value->size); +} + +size_t get_total_size_for_serialized_data(PDC_SERDE_SerializedData *data) { + if (data->totalSize <= 0) { + size_t total_size = data->header->totalSize + data->data->totalSize + sizeof(size_t) * 6; + data->totalSize = total_size; + } + return data->totalSize; +} + +// clang-format off +/** 
+ * This function serializes the entire PDC_SERDE_SerializedData structure. + * + * The overview of the serialized binary data layout is: + * +---------------------+---------------------+----------------------+---------------------+----------------------+----------------------+----------------------+----------------------+ + * | Size of the Header | Size of the Data | Number of Keys | Header Region | Data Offset | Number of Values | Data Region | Data Offset | + * | (size_t) | (size_t) | (size_t) | | (size_t) | (size_t) | | (size_t) | + * +---------------------+---------------------+----------------------+---------------------+----------------------+----------------------+----------------------+----------------------+ + * + * The first 2 field is called meta-header, which provides metadata about size of the header region and the size of the data region. + * Note that the size of the header region doesn't include the 'Number of Keys' field. + * Also, the size of the data region doesn't include the 'Data Offset' field. + * + * Then the following is the header region with two keys: + * +-----------------------+-------------------------+-----------------------------+---------------------------+--------------------------+-----------------------------+---------------------------+ + * | Number of Keys | Key 1 Type | Key 1 Size | Key 1 Data | Key 2 Type | Key 2 Size | Key 2 Data | + * | (size_t) | (int8_t) | (size_t) | (Variable size depending | (int8_t) | (size_t) | (Variable size depending | + * | | | | on Key 1 Size) | | | on Key 2 Size) | + * +-----------------------+-------------------------+-----------------------------+---------------------------+--------------------------+-----------------------------+---------------------------+ + * + * Then, the following is a header offset validation point and the data region with the final offset validation point. + * + * |----------------------------------------------------------------------------------------------------------| + * | Data Offset (size_t) | Number of Value Entries (size_t) | Value 1 Class (int8_t) | Value 1 Type (int8_t) | + * |----------------------------------------------------------------------------------------------------------| + * | Value 1 Size (size_t)| Value 1 Data (Variable size depending on Value 1 Size) | Value 2 Class (int8_t) | + * |----------------------------------------------------------------------------------------------------------| + * | Value 2 Type (int8_t)| Value 2 Size (size_t) | Value 2 Data (Variable size depending on Value 2 Size) | + * |----------------------------------------------------------------------------------------------------------| + * | ...repeated for the number of value entries in the data... | + * |----------------------------------------------------------------------------------------------------------| + * | Final Data Offset (size_t) | + * |----------------------------------------------------------------------------------------------------------| + * + * Please refer to `get_size_by_class_n_type` function in pdc_generic.h for size calculation on scalar values and array values. 
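+ *
+ * As a concrete illustration (a hypothetical example: it assumes a 64-bit platform where
+ * size_t is 8 bytes and int is 4 bytes), serializing a single pair whose key is the string
+ * "Group" (PDC_STRING, 6 bytes including the null terminator) and whose value is the int 42
+ * produces, in order:
+ *
+ *   [header size][data size][numKeys = 1]
+ *   [key type = PDC_STRING (int8_t)][key size = 6 (size_t)]["Group\0"]
+ *   [offset check (size_t)]
+ *   [numValues = 1][value class = PDC_CLS_SCALAR (int8_t)][value type = PDC_INT (int8_t)]
+ *   [value size = 4 (size_t)][the 4 value bytes]
+ *   [offset check (size_t)]
+ *
+ * so the class/type tags are written as int8_t while every size and offset field is a size_t.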
+ * + */ +// clang-format on +void * +pdc_serde_serialize(PDC_SERDE_SerializedData *data) +{ + // The buffer contains: + // the size of the header (size_t) + + // the size of the data (size_t) + + // the number of keys (size_t) + + // the header region + + // the data offset (size_t) + + // the number of value entries (size_t) + + // the data region + void *buffer = malloc(get_total_size_for_serialized_data(data)); + // serialize the meta header, which contains only the size of the header and the size of the data region. + memcpy(buffer, &data->header->totalSize, sizeof(size_t)); + memcpy(buffer + sizeof(size_t), &data->data->totalSize, sizeof(size_t)); + + // serialize the header + // start with the number of keys + memcpy(buffer + sizeof(size_t) * 2, &data->header->numKeys, sizeof(size_t)); + // then the keys + size_t offset = sizeof(size_t) * 3; + for (int i = 0; i < data->header->numKeys; i++) { + int8_t pdc_type = (int8_t)(data->header->keys[i].pdc_type); + memcpy(buffer + offset, &pdc_type, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(buffer + offset, &data->header->keys[i].size, sizeof(size_t)); + offset += sizeof(size_t); + memcpy(buffer + offset, data->header->keys[i].key, data->header->keys[i].size); + offset += data->header->keys[i].size; + } + + // serialize the data offset, this is for validation purpose to see if header region is corrupted. + memcpy(buffer + offset, &offset, sizeof(size_t)); + offset += sizeof(size_t); + + // serialize the data + // start with the number of value entries + memcpy(buffer + offset, &data->data->numValues, sizeof(size_t)); + offset += sizeof(size_t); + // then the values + for (int i = 0; i < data->data->numValues; i++) { + int8_t pdc_class = (int8_t)data->data->values[i].pdc_class; + int8_t pdc_type = (int8_t)data->data->values[i].pdc_type; + memcpy(buffer + offset, &pdc_class, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(buffer + offset, &pdc_type, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(buffer + offset, &data->data->values[i].size, sizeof(size_t)); + offset += sizeof(size_t); + memcpy(buffer + offset, data->data->values[i].data, data->data->values[i].size); + offset += data->data->values[i].size; + } + // serialize the data offset again, this is for validation purpose to see if data region is corrupted. 
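+    // (this trailing offset is the last field in the buffer; pdc_serde_deserialize reads it back and
+    // rejects the buffer if it does not match its own running offset, i.e. a corrupted data region)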
+ memcpy(buffer + offset, &offset, sizeof(size_t)); + offset += sizeof(size_t); + return buffer; +} + +PDC_SERDE_SerializedData * +pdc_serde_deserialize(void *buffer) +{ + size_t offset = 0; + // read the meta header + size_t headerSize; + size_t dataSize; + memcpy(&headerSize, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + memcpy(&dataSize, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + + // read the header + size_t numKeys; + memcpy(&numKeys, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + PDC_SERDE_Header *header = malloc(sizeof(PDC_SERDE_Header)); + header->keys = malloc(sizeof(PDC_SERDE_Key) * numKeys); + header->numKeys = numKeys; + header->totalSize = headerSize; + for (int i = 0; i < numKeys; i++) { + int8_t pdc_type; + size_t size; + memcpy(&pdc_type, buffer + offset, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(&size, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + void *key = malloc(size); + memcpy(key, buffer + offset, size); + offset += size; + header->keys[i].key = key; + header->keys[i].pdc_type = (PDC_CType)pdc_type; + header->keys[i].size = size; + } + + // read the data offset + size_t dataOffset; + memcpy(&dataOffset, buffer + offset, sizeof(size_t)); + // check the data offset + if (dataOffset != offset) { + printf("Error: data offset does not match the expected offset.\n"); + return NULL; + } + offset += sizeof(size_t); + + // read the data + size_t numValues; + memcpy(&numValues, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + PDC_SERDE_Data *data = malloc(sizeof(PDC_SERDE_Data)); + data->values = malloc(sizeof(PDC_SERDE_Value) * numValues); + data->numValues = numValues; + data->totalSize = dataSize; + for (int i = 0; i < numValues; i++) { + int8_t pdc_class; + int8_t pdc_type; + size_t size; + memcpy(&pdc_class, buffer + offset, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(&pdc_type, buffer + offset, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(&size, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + void *value = malloc(size); + memcpy(value, buffer + offset, size); + offset += size; + data->values[i].data = value; + data->values[i].pdc_class = (PDC_CType_Class)pdc_class; + data->values[i].pdc_type = (PDC_CType)pdc_type; + data->values[i].size = size; + } + // check the total size + memcpy(&dataOffset, buffer + offset, sizeof(size_t)); + // check the data offset + if (dataOffset != offset) { + printf("Error: data offset does not match the expected offset.\n"); + return NULL; + } + offset += sizeof(size_t); + if (offset != headerSize + sizeof(size_t) * 6 + dataSize) { + printf("Error: total size does not match the expected size.\n"); + return NULL; + } + // create the serialized data + PDC_SERDE_SerializedData *serializedData = malloc(sizeof(PDC_SERDE_SerializedData)); + serializedData->header = header; + serializedData->data = data; + serializedData->totalSize = headerSize + dataSize + sizeof(size_t) * 6; + + return serializedData; +} + +void +pdc_serde_free(PDC_SERDE_SerializedData *data) +{ + for (int i = 0; i < data->header->numKeys; i++) { + free(data->header->keys[i].key); + } + free(data->header->keys); + for (int i = 0; i < data->data->numValues; i++) { + free(data->data->values[i].data); + } + free(data->data->values); + free(data->header); + free(data->data); + free(data); +} + +void +pdc_serde_print(PDC_SERDE_SerializedData *data) +{ + printf("Header:\n"); + printf(" numKeys: %zu\n", data->header->numKeys); + printf(" totalSize: 
%zu\n", data->header->totalSize); + for (int i = 0; i < data->header->numKeys; i++) { + printf(" key %d:\n", i); + printf(" type: %d\n", data->header->keys[i].pdc_type); + printf(" size: %zu\n", data->header->keys[i].size); + printf(" key: %s\n", (char *)data->header->keys[i].key); + } + printf("Data:\n"); + printf(" numValues: %zu\n", data->data->numValues); + printf(" totalSize: %zu\n", data->data->totalSize); + for (int i = 0; i < data->data->numValues; i++) { + printf(" value %d:\n", i); + printf(" class: %d\n", data->data->values[i].pdc_class); + printf(" type: %d\n", data->data->values[i].pdc_type); + printf(" size: %zu\n", data->data->values[i].size); + printf(" data: "); + if (data->data->values[i].pdc_class == PDC_STRING) { + printf("%s\n", (char *)data->data->values[i].data); + } + else { + printf("\n"); + } + } +} + +int +test_serde_framework() +{ + // Initialize a serialized data structure + PDC_SERDE_SerializedData *data = pdc_serde_init(5); + + // Create and append key-value pairs for different data types + char * intKey_str = "int"; + int intVal = 42; + PDC_SERDE_Key * intKey = PDC_SERDE_KEY(intKey_str, PDC_STRING, sizeof(intKey_str)); + PDC_SERDE_Value *intValue = PDC_SERDE_VALUE(&intVal, PDC_INT, PDC_CLS_SCALAR, sizeof(int)); + pdc_serde_append_key_value(data, intKey, intValue); + + char * doubleKey_str = "double"; + double doubleVal = 3.14159; + PDC_SERDE_Key * doubleKey = PDC_SERDE_KEY(doubleKey_str, PDC_STRING, sizeof(doubleKey_str)); + PDC_SERDE_Value *doubleValue = + PDC_SERDE_VALUE(&doubleVal, PDC_DOUBLE, PDC_CLS_SCALAR, sizeof(double)); + pdc_serde_append_key_value(data, doubleKey, doubleValue); + + char * strKey_str = "string"; + char * strVal = "Hello, World!"; + PDC_SERDE_Key *strKey = PDC_SERDE_KEY(strKey_str, PDC_STRING, (strlen(strKey_str) + 1) * sizeof(char)); + PDC_SERDE_Value *strValue = + PDC_SERDE_VALUE(strVal, PDC_STRING, PDC_CLS_SCALAR, (strlen(strVal) + 1) * sizeof(char)); + pdc_serde_append_key_value(data, strKey, strValue); + + char * arrayKey_str = "array"; + int intArray[3] = {1, 2, 3}; + PDC_SERDE_Key * arrayKey = PDC_SERDE_KEY(arrayKey_str, PDC_STRING, sizeof(arrayKey_str)); + PDC_SERDE_Value *arrayValue = PDC_SERDE_VALUE(intArray, PDC_INT, PDC_CLS_ARRAY, sizeof(int) * 3); + pdc_serde_append_key_value(data, arrayKey, arrayValue); + + typedef struct { + int x; + int y; + } Point; + + char *pointKey = "point"; + Point pointVal = {10, 20}; + + PDC_SERDE_SerializedData *point_data = pdc_serde_init(2); + PDC_SERDE_Key * x_name = PDC_SERDE_KEY("x", PDC_STRING, sizeof(char *)); + PDC_SERDE_Value *x_value = PDC_SERDE_VALUE(&pointVal.x, PDC_INT, PDC_CLS_SCALAR, sizeof(int)); + + PDC_SERDE_Key * y_name = PDC_SERDE_KEY("y", PDC_STRING, sizeof(char *)); + PDC_SERDE_Value *y_value = PDC_SERDE_VALUE(&pointVal.y, PDC_INT, PDC_CLS_SCALAR, sizeof(int)); + + pdc_serde_append_key_value(point_data, x_name, x_value); + pdc_serde_append_key_value(point_data, y_name, y_value); + void *point_buffer = pdc_serde_serialize(point_data); + + PDC_SERDE_Key * structKey = PDC_SERDE_KEY(pointKey, PDC_STRING, sizeof(pointKey)); + PDC_SERDE_Value *structValue = + PDC_SERDE_VALUE(point_buffer, PDC_VOID_PTR, PDC_CLS_STRUCT, sizeof(Point)); + pdc_serde_append_key_value(data, structKey, structValue); + + // Serialize the data + void *buffer = pdc_serde_serialize(data); + + // Deserialize the buffer + PDC_SERDE_SerializedData *deserializedData = pdc_serde_deserialize(buffer); + + // Print the deserialized data + pdc_serde_print(deserializedData); + + // Free the memory + 
pdc_serde_free(data); + pdc_serde_free(deserializedData); + free(buffer); + + return 0; +} \ No newline at end of file diff --git a/src/server/CMakeLists.txt b/src/server/CMakeLists.txt index 449b64f10..abe3917c6 100644 --- a/src/server/CMakeLists.txt +++ b/src/server/CMakeLists.txt @@ -48,7 +48,6 @@ add_executable(pdc_server.exe ${PDC_SOURCE_DIR}/src/server/pdc_server_region/pdc_server_region_transfer.c ${PDC_SOURCE_DIR}/src/server/pdc_server_region/pdc_server_region_transfer_metadata_query.c ${PDC_SOURCE_DIR}/src/utils/pdc_region_utils.c - ${PDC_SOURCE_DIR}/src/utils/pdc_timing.c ${PDC_SOURCE_DIR}/src/api/pdc_analysis/pdc_analysis_common.c ${PDC_SOURCE_DIR}/src/api/pdc_transform/pdc_transforms_common.c ${PDC_SOURCE_DIR}/src/api/pdc_analysis/pdc_hist_pkg.c @@ -70,9 +69,9 @@ endif() if(PDC_ENABLE_FASTBIT) message(STATUS "Enabled fastbit") - target_link_libraries(pdc_server.exe mercury pdcprof -lm -ldl ${PDC_EXT_LIB_DEPENDENCIES} ${FASTBIT_LIBRARY}/libfastbit.so) + target_link_libraries(pdc_server.exe mercury ${PDC_COMMONS_LIBRARIES} -lm -ldl ${PDC_EXT_LIB_DEPENDENCIES} ${FASTBIT_LIBRARY}/libfastbit.so) else() - target_link_libraries(pdc_server.exe mercury pdcprof -lm -ldl ${PDC_EXT_LIB_DEPENDENCIES}) + target_link_libraries(pdc_server.exe mercury ${PDC_COMMONS_LIBRARIES} -lm -ldl ${PDC_EXT_LIB_DEPENDENCIES}) endif() diff --git a/src/server/include/pdc_client_server_common.h b/src/server/include/pdc_client_server_common.h index a57e2169f..e125af9a5 100644 --- a/src/server/include/pdc_client_server_common.h +++ b/src/server/include/pdc_client_server_common.h @@ -1190,6 +1190,11 @@ hg_proc_pdc_kvtag_t(hg_proc_t proc, void *data) // HG_LOG_ERROR("Proc error"); return ret; } + ret = hg_proc_int8_t(proc, &struct_data->type); + if (ret != HG_SUCCESS) { + // HG_LOG_ERROR("Proc error"); + return ret; + } if (struct_data->size) { switch (hg_proc_get_op(proc)) { case HG_DECODE: diff --git a/src/server/pdc_client_server_common.c b/src/server/pdc_client_server_common.c index a0a7845a4..dbbff7bfd 100644 --- a/src/server/pdc_client_server_common.c +++ b/src/server/pdc_client_server_common.c @@ -3096,7 +3096,7 @@ HG_TEST_RPC_CB(region_release, handle) size2 = HG_Bulk_get_size(remote_bulk_handle); if (size != size2) { error = 1; - printf("==PDC_SERVER: local size %lu, remote %lu\n", size, size2); + printf("==PDC_SERVER: local size %llu, remote %llu\n", size, size2); PGOTO_ERROR(HG_OTHER_ERROR, "===PDC SERVER: HG_TEST_RPC_CB(region_release, " "handle) local and remote bulk size does not match"); } @@ -3269,7 +3269,7 @@ HG_TEST_RPC_CB(region_release, handle) size2 = HG_Bulk_get_size(remote_bulk_handle); if (size != size2) { error = 1; - printf("==PDC_SERVER: local size %lu, remote %lu\n", size, size2); + printf("==PDC_SERVER: local size %llu, remote %llu\n", size, size2); /* PGOTO_ERROR(HG_OTHER_ERROR, "===PDC SERVER: HG_TEST_RPC_CB(region_release, * handle) local and remote bulk size does not match"); */ } @@ -6849,7 +6849,7 @@ PDC_kvtag_dup(pdc_kvtag_t *from, pdc_kvtag_t **to) (*to) = (pdc_kvtag_t *)calloc(1, sizeof(pdc_kvtag_t)); (*to)->name = (char *)malloc(strlen(from->name) + 1); (*to)->size = from->size; - + (*to)->type = from->type; (*to)->value = (void *)malloc(from->size); memcpy((void *)(*to)->name, (void *)from->name, strlen(from->name) + 1); memcpy((void *)(*to)->value, (void *)from->value, from->size); diff --git a/src/server/pdc_server.c b/src/server/pdc_server.c index a3020628a..449b31c06 100644 --- a/src/server/pdc_server.c +++ b/src/server/pdc_server.c @@ -1224,6 +1224,7 @@ 
PDC_Server_checkpoint() fwrite(&key_len, sizeof(int), 1, file); fwrite(kvlist_elt->kvtag->name, key_len, 1, file); fwrite(&kvlist_elt->kvtag->size, sizeof(uint32_t), 1, file); + fwrite(&kvlist_elt->kvtag->type, sizeof(int8_t), 1, file); fwrite(kvlist_elt->kvtag->value, kvlist_elt->kvtag->size, 1, file); } @@ -1403,7 +1404,8 @@ PDC_Server_restart(char *filename) } // init hash table - PDC_Server_init_hash_table(); + // FIXME: check if we need to init the hash table again. + // PDC_Server_init_hash_table(); if (fread(&n_cont, sizeof(int), 1, file) != 1) { printf("Read failed for n_count\n"); @@ -1492,6 +1494,9 @@ PDC_Server_restart(char *filename) if (fread(&kvtag_list->kvtag->size, sizeof(uint32_t), 1, file) != 1) { printf("Read failed for kvtag_list->kvtag->size\n"); } + if (fread(&kvtag_list->kvtag->type, sizeof(int8_t), 1, file) != 1) { + printf("Read failed for kvtag_list->kvtag->type\n"); + } kvtag_list->kvtag->value = malloc(kvtag_list->kvtag->size); if (fread(kvtag_list->kvtag->value, kvtag_list->kvtag->size, 1, file) != 1) { printf("Read failed for kvtag_list->kvtag->value\n"); diff --git a/src/server/pdc_server_region/pdc_server_region_request_handler.h b/src/server/pdc_server_region/pdc_server_region_request_handler.h index f3dc64cde..04ab48481 100644 --- a/src/server/pdc_server_region/pdc_server_region_request_handler.h +++ b/src/server/pdc_server_region/pdc_server_region_request_handler.h @@ -811,7 +811,7 @@ HG_TEST_RPC_CB(transfer_request, handle) ret_value = HG_Respond(handle, NULL, NULL, &out); if (in.access_type == PDC_WRITE) { ret_value = HG_Bulk_create(info->hg_class, 1, &(local_bulk_args->data_buf), - &(local_bulk_args->total_mem_size), HG_BULK_READWRITE, + (const hg_size_t *)&(local_bulk_args->total_mem_size), HG_BULK_READWRITE, &(local_bulk_args->bulk_handle)); if (ret_value != HG_SUCCESS) { printf("Error at HG_TEST_RPC_CB(transfer_request, handle): @ line %d \n", __LINE__); @@ -881,7 +881,7 @@ HG_TEST_RPC_CB(transfer_request, handle) *((int *)(local_bulk_args->data_buf + sizeof(int)))); */ ret_value = HG_Bulk_create(info->hg_class, 1, &(local_bulk_args->data_buf), - &(local_bulk_args->total_mem_size), HG_BULK_READWRITE, + (const hg_size_t *)&(local_bulk_args->total_mem_size), HG_BULK_READWRITE, &(local_bulk_args->bulk_handle)); if (ret_value != HG_SUCCESS) { printf("Error at HG_TEST_RPC_CB(transfer_request, handle): @ line %d \n", __LINE__); diff --git a/src/tests/cont_tags.c b/src/tests/cont_tags.c index 04132c332..f6c961a71 100644 --- a/src/tests/cont_tags.c +++ b/src/tests/cont_tags.c @@ -37,6 +37,7 @@ main(int argc, char **argv) int rank = 0, size = 1; char tag_value[128], tag_value2[128], *tag_value_ret; + pdc_var_type_t value_type; psize_t value_size; strcpy(tag_value, "some tag value"); strcpy(tag_value2, "some tag value 2 is longer than tag 1"); @@ -78,31 +79,31 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCcont_put_tag(cont, "some tag", tag_value, strlen(tag_value) + 1); + ret = PDCcont_put_tag(cont, "some tag", tag_value, PDC_STRING, strlen(tag_value) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 1\n"); ret_value = 1; } - ret = PDCcont_put_tag(cont, "some tag 2", tag_value2, strlen(tag_value2) + 1); + ret = PDCcont_put_tag(cont, "some tag 2", tag_value2, PDC_STRING, strlen(tag_value2) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 1\n"); ret_value = 1; } - ret = PDCcont_put_tag(cont2, "some tag", tag_value, strlen(tag_value) + 1); + ret = PDCcont_put_tag(cont2, "some tag", tag_value, PDC_STRING, 
strlen(tag_value) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 2\n"); ret_value = 1; } - ret = PDCcont_put_tag(cont2, "some tag 2", tag_value2, strlen(tag_value2) + 1); + ret = PDCcont_put_tag(cont2, "some tag 2", tag_value2, PDC_STRING, strlen(tag_value2) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 2\n"); ret_value = 1; } - ret = PDCcont_get_tag(cont, "some tag", (void **)&tag_value_ret, &value_size); + ret = PDCcont_get_tag(cont, "some tag", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 1\n"); ret_value = 1; @@ -112,7 +113,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCcont_get_tag(cont, "some tag 2", (void **)&tag_value_ret, &value_size); + ret = PDCcont_get_tag(cont, "some tag 2", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 1\n"); ret_value = 1; @@ -123,7 +124,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCcont_get_tag(cont2, "some tag", (void **)&tag_value_ret, &value_size); + ret = PDCcont_get_tag(cont2, "some tag", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 2\n"); ret_value = 1; @@ -134,7 +135,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCcont_get_tag(cont2, "some tag 2", (void **)&tag_value_ret, &value_size); + ret = PDCcont_get_tag(cont2, "some tag 2", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 2\n"); ret_value = 1; diff --git a/src/tests/kvtag_add_get.c b/src/tests/kvtag_add_get.c index 97eadffed..91686b9be 100644 --- a/src/tests/kvtag_add_get.c +++ b/src/tests/kvtag_add_get.c @@ -33,13 +33,14 @@ int main() { - pdcid_t pdc, cont_prop, cont, obj_prop1, obj_prop2, obj1, obj2; - pdc_kvtag_t kvtag1, kvtag2, kvtag3; - char * v1 = "value1"; - int v2 = 2; - double v3 = 3.45; - void * value1, *value2, *value3; - psize_t value_size; + pdcid_t pdc, cont_prop, cont, obj_prop1, obj_prop2, obj1, obj2; + pdc_kvtag_t kvtag1, kvtag2, kvtag3; + char * v1 = "value1"; + int v2 = 2; + double v3 = 3.45; + pdc_var_type_t type1, type2, type3; + void * value1, *value2, *value3; + psize_t value_size; // create a pdc pdc = PDCinit("pdc"); @@ -88,42 +89,45 @@ main() kvtag1.name = "key1string"; kvtag1.value = (void *)v1; + kvtag1.type = PDC_STRING; kvtag1.size = strlen(v1) + 1; kvtag2.name = "key2int"; kvtag2.value = (void *)&v2; + kvtag1.type = PDC_INT; kvtag2.size = sizeof(int); kvtag3.name = "key3double"; kvtag3.value = (void *)&v3; + kvtag1.type = PDC_DOUBLE; kvtag3.size = sizeof(double); - if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.size) < 0) + if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.type, kvtag1.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); - if (PDCobj_put_tag(obj2, kvtag2.name, kvtag2.value, kvtag2.size) < 0) + if (PDCobj_put_tag(obj2, kvtag2.name, kvtag2.value, kvtag2.type, kvtag2.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); - if (PDCobj_put_tag(obj2, kvtag3.name, kvtag3.value, kvtag3.size) < 0) + if (PDCobj_put_tag(obj2, kvtag3.name, kvtag3.value, kvtag3.type, kvtag3.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); - if (PDCobj_get_tag(obj1, kvtag1.name, (void *)&value1, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj1, kvtag1.name, (void *)&value1, (void 
*)&type1, (void *)&value_size) < 0) printf("fail to get a kvtag from o1\n"); else printf("successfully retrieved a kvtag [%s] = [%s] from o1\n", kvtag1.name, (char *)value1); - if (PDCobj_get_tag(obj2, kvtag2.name, (void *)&value2, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj2, kvtag2.name, (void *)&value2, (void *)&type2, (void *)&value_size) < 0) printf("fail to get a kvtag from o2\n"); else printf("successfully retrieved a kvtag [%s] = [%d] from o2\n", kvtag2.name, *(int *)value2); - if (PDCobj_get_tag(obj2, kvtag3.name, (void *)&value3, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj2, kvtag3.name, (void *)&value3, (void *)&type3, (void *)&value_size) < 0) printf("fail to get a kvtag from o2\n"); else printf("successfully retrieved a kvtag [%s] = [%f] from o2\n", kvtag3.name, *(double *)value3); @@ -135,15 +139,16 @@ main() v1 = "New Value After Delete"; kvtag1.value = (void *)v1; + kvtag1.type = PDC_STRING; kvtag1.size = strlen(v1) + 1; - if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.size) < 0) + if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.type, kvtag1.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); /* PDC_free_kvtag(&value1); */ - if (PDCobj_get_tag(obj1, kvtag1.name, (void *)&value1, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj1, kvtag1.name, (void *)&value1, (void *)&type1, (void *)&value_size) < 0) printf("fail to get a kvtag from o1\n"); else printf("successfully retrieved a kvtag [%s] = [%s] from o1\n", kvtag1.name, (char *)value1); diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index ee5efdcbf..2ccc67828 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -220,7 +220,7 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value v = i + my_obj_s; for (j = 0; j < n_attr; j++) { sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", v, j); - if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)tag_values[j], tag_value_len + 1) < 0) + if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)tag_values[j], PDC_STRING, tag_value_len + 1) < 0) printf("fail to add a kvtag to o%" PRIu64 "\n", v); } } @@ -240,10 +240,11 @@ get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, uint64_t n_attr, void **tag { uint64_t i; char tag_name[256]; + pdc_var_type_t tag_type; for (i = 0; i < n_attr; i++) { sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", obj_name_v, i); - if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[i], (void *)&value_size[i]) < 0) + if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[i], &tag_type, (void *)&value_size[i]) < 0) printf("fail to get a kvtag from o%" PRIu64 "\n", obj_name_v); } } diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index 280eda25b..d145560d0 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -76,7 +76,8 @@ main(int argc, char *argv[]) double stime, total_time, percent_time; pdc_kvtag_t kvtag; void ** values; - size_t value_size; + pdc_var_type_t value_type; + size_t value_size; #ifdef ENABLE_MPI MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &proc_num); @@ -167,6 +168,7 @@ main(int argc, char *argv[]) // Add tags kvtag.name = "Group"; kvtag.value = (void *)&v; + kvtag.type = PDC_INT; kvtag.size = sizeof(int); #ifdef ENABLE_MPI @@ -175,7 +177,7 @@ main(int argc, char *argv[]) #endif for (i = 0; i < my_add_tag; i++) { v = i + my_add_tag_s; - if (PDCobj_put_tag(obj_ids[i], kvtag.name, 
kvtag.value, kvtag.size) < 0) + if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.type, kvtag.size) < 0) printf("fail to add a kvtag to o%d\n", i + my_obj_s); if (i % tag_1percent == 0) { @@ -208,7 +210,7 @@ main(int argc, char *argv[]) stime = MPI_Wtime(); #endif for (i = 0; i < my_query; i++) { - if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_size) < 0) + if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_type, (void *)&value_size) < 0) printf("fail to get a kvtag from o%d\n", i + my_query_s); if (i % query_1percent == 0) { diff --git a/src/tests/kvtag_get.c b/src/tests/kvtag_get.c index a8fcf70f1..7ca1e7c0e 100644 --- a/src/tests/kvtag_get.c +++ b/src/tests/kvtag_get.c @@ -33,9 +33,10 @@ int main() { - pdcid_t pdc, cont_prop, cont, obj_prop1, obj_prop2, obj1, obj2; - pdc_kvtag_t *value1, *value2, *value3; - psize_t value_size; + pdcid_t pdc, cont_prop, cont, obj_prop1, obj_prop2, obj1, obj2; + pdc_kvtag_t * value1, *value2, *value3; + pdc_var_type_t type1, type2, type3; + psize_t value_size; // create a pdc pdc = PDCinit("pdc"); @@ -82,17 +83,17 @@ main() else printf("Fail to create object @ line %d!\n", __LINE__); - if (PDCobj_get_tag(obj1, "key1string", (void *)&value1, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj1, "key1string", (void *)&value1, (void *)&type1, (void *)&value_size) < 0) printf("fail to get a kvtag from o1\n"); else printf("successfully retrieved a kvtag [%s] = [%s] from o1\n", value1->name, (char *)value1->value); - if (PDCobj_get_tag(obj2, "key2int", (void *)&value2, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj2, "key2int", (void *)&value2, (void *)&type2, (void *)&value_size) < 0) printf("fail to get a kvtag from o2\n"); else printf("successfully retrieved a kvtag [%s] = [%d] from o2\n", value2->name, *(int *)value2->value); - if (PDCobj_get_tag(obj2, "key3double", (void *)&value3, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj2, "key3double", (void *)&value3, (void *)&type3, (void *)&value_size) < 0) printf("fail to get a kvtag from o2\n"); else printf("successfully retrieved a kvtag [%s] = [%f] from o2\n", value3->name, @@ -100,7 +101,7 @@ main() PDC_free_kvtag(&value1); - if (PDCobj_get_tag(obj1, "key1string", (void *)&value1, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj1, "key1string", (void *)&value1, (void *)&type1, (void *)&value_size) < 0) printf("fail to get a kvtag from o1\n"); else printf("successfully retrieved a kvtag [%s] = [%s] from o1\n", value1->name, (char *)value1->value); diff --git a/src/tests/kvtag_query.c b/src/tests/kvtag_query.c index 88e6621d7..cf1e80dcb 100644 --- a/src/tests/kvtag_query.c +++ b/src/tests/kvtag_query.c @@ -91,32 +91,35 @@ main() kvtag1.name = "key1string"; kvtag1.value = (void *)v1; + kvtag1.type = PDC_STRING; kvtag1.size = strlen(v1) + 1; kvtag2.name = "key2int"; kvtag2.value = (void *)&v2; + kvtag2.type = PDC_INT; kvtag2.size = sizeof(int); kvtag3.name = "key3double"; kvtag3.value = (void *)&v3; + kvtag3.type = PDC_DOUBLE; kvtag3.size = sizeof(double); - if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.size) < 0) + if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.type, kvtag1.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); - if (PDCobj_put_tag(obj1, kvtag2.name, kvtag2.value, kvtag2.size) < 0) + if (PDCobj_put_tag(obj1, kvtag2.name, kvtag2.value, kvtag2.type, kvtag2.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to 
o1\n"); - if (PDCobj_put_tag(obj2, kvtag2.name, kvtag2.value, kvtag2.size) < 0) + if (PDCobj_put_tag(obj2, kvtag2.name, kvtag2.value, kvtag2.type, kvtag2.size) < 0) printf("fail to add a kvtag to o2\n"); else printf("successfully added a kvtag to o2\n"); - if (PDCobj_put_tag(obj2, kvtag3.name, kvtag3.value, kvtag3.size) < 0) + if (PDCobj_put_tag(obj2, kvtag3.name, kvtag3.value, kvtag3.type, kvtag3.size) < 0) printf("fail to add a kvtag to o2\n"); else printf("successfully added a kvtag to o2\n"); diff --git a/src/tests/kvtag_query_scale.c b/src/tests/kvtag_query_scale.c index a42cd5a5a..b9e4a6ffa 100644 --- a/src/tests/kvtag_query_scale.c +++ b/src/tests/kvtag_query_scale.c @@ -129,6 +129,7 @@ main(int argc, char *argv[]) // Add tags kvtag.name = "Group"; kvtag.value = (void *)&v; + kvtag.type = PDC_INT; kvtag.size = sizeof(int); for (iter = 0; iter < round; iter++) { @@ -136,7 +137,7 @@ main(int argc, char *argv[]) v = iter; for (i = 0; i < my_add_tag; i++) { - if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.size) < 0) + if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.type, kvtag.size) < 0) printf("fail to add a kvtag to o%d\n", i + my_obj_s); } @@ -153,6 +154,7 @@ main(int argc, char *argv[]) kvtag.name = "Group"; kvtag.value = (void *)&v; + kvtag.type = PDC_INT; kvtag.size = sizeof(int); for (iter = 0; iter < round; iter++) { diff --git a/src/tests/obj_tags.c b/src/tests/obj_tags.c index 2651cf64e..db99bec1b 100644 --- a/src/tests/obj_tags.c +++ b/src/tests/obj_tags.c @@ -44,6 +44,7 @@ main(int argc, char **argv) dims[2] = 4; char tag_value[128], tag_value2[128], *tag_value_ret; char cont_name[128], obj_name1[128], obj_name2[128]; + pdc_var_type_t value_type; psize_t value_size; strcpy(tag_value, "some tag value"); @@ -119,30 +120,30 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCobj_put_tag(obj1, "some tag", tag_value, strlen(tag_value) + 1); + ret = PDCobj_put_tag(obj1, "some tag", tag_value, PDC_STRING, strlen(tag_value) + 1); if (ret != SUCCEED) { printf("Put tag failed at object 1\n"); ret_value = 1; } - ret = PDCobj_put_tag(obj1, "some tag 2", tag_value2, strlen(tag_value2) + 1); + ret = PDCobj_put_tag(obj1, "some tag 2", tag_value2, PDC_STRING, strlen(tag_value2) + 1); if (ret != SUCCEED) { printf("Put tag failed at object 1\n"); ret_value = 1; } - ret = PDCobj_put_tag(obj2, "some tag", tag_value, strlen(tag_value) + 1); + ret = PDCobj_put_tag(obj2, "some tag", tag_value, PDC_STRING, strlen(tag_value) + 1); if (ret != SUCCEED) { printf("Put tag failed at object 2\n"); ret_value = 1; } - ret = PDCobj_put_tag(obj2, "some tag 2", tag_value2, strlen(tag_value2) + 1); + ret = PDCobj_put_tag(obj2, "some tag 2", tag_value2, PDC_STRING, strlen(tag_value2) + 1); if (ret != SUCCEED) { printf("Put tag failed at object 2\n"); ret_value = 1; } - ret = PDCobj_get_tag(obj1, "some tag", (void **)&tag_value_ret, &value_size); + ret = PDCobj_get_tag(obj1, "some tag", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at object 1\n"); ret_value = 1; @@ -153,7 +154,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCobj_get_tag(obj1, "some tag 2", (void **)&tag_value_ret, &value_size); + ret = PDCobj_get_tag(obj1, "some tag 2", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at object 1\n"); ret_value = 1; @@ -164,7 +165,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCobj_get_tag(obj2, "some tag", (void **)&tag_value_ret, &value_size); + ret = 
PDCobj_get_tag(obj2, "some tag", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at object 2\n"); ret_value = 1; @@ -175,7 +176,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCobj_get_tag(obj2, "some tag 2", (void **)&tag_value_ret, &value_size); + ret = PDCobj_get_tag(obj2, "some tag 2", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at object 2\n"); ret_value = 1; diff --git a/src/tests/pdc_transforms_lib.c b/src/tests/pdc_transforms_lib.c index f1a83f742..6397bdc70 100644 --- a/src/tests/pdc_transforms_lib.c +++ b/src/tests/pdc_transforms_lib.c @@ -6,21 +6,26 @@ * >> pdc_public.h * * typedef enum { - * PDC_UNKNOWN = -1, - * PDC_INT = 0, - * PDC_FLOAT = 1, - * PDC_DOUBLE = 2, - * PDC_STRING = 3, - * PDC_COMPOUND = 4, - * PDC_ENUM = 5, - * PDC_ARRAY = 6, - * PDC_UINT = 7, - * PDC_INT64 = 8, - * PDC_UINT64 = 9, - * PDC_INT16 = 10, - * PDC_INT8 = 11, - * NCLASSES = 12 - * } PDC_var_type_t; + * PDC_UNKNOWN = -1, * error * + * PDC_INT = 0, * integer types (identical to int32_t) * + * PDC_FLOAT = 1, * floating-point types * + * PDC_DOUBLE = 2, * double types * + * PDC_CHAR = 3, * character types * + * PDC_STRING = 4, * string types * + * PDC_BOOLEAN = 5, * boolean types * + * PDC_SHORT = 6, * short types * + * PDC_UINT = 7, * unsigned integer types (identical to uint32_t) * + * PDC_INT64 = 8, * 64-bit integer types * + * PDC_UINT64 = 9, * 64-bit unsigned integer types * + * PDC_INT16 = 10, * 16-bit integer types * + * PDC_INT8 = 11, * 8-bit integer types * + * PDC_UINT8 = 12, * 8-bit unsigned integer types * + * PDC_UINT16 = 13, * 16-bit unsigned integer types * + * PDC_LONG = 14, * long types * + * PDC_VOID_PTR = 15, * void pointer type * + * PDC_SIZE_T = 16, * size_t type * + * TYPE_COUNT = 17 * this is the number of var types and has to be the last * + * } pdc_c_var_type_t; */ static int diff --git a/src/utils/include/pdc_id_pkg.h b/src/utils/include/pdc_id_pkg.h deleted file mode 100644 index 9623661fd..000000000 --- a/src/utils/include/pdc_id_pkg.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright Notice for - * Proactive Data Containers (PDC) Software Library and Utilities - * ----------------------------------------------------------------------------- - - *** Copyright Notice *** - - * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the - * University of California, through Lawrence Berkeley National Laboratory, - * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF - * Group (subject to receipt of any required approvals from the U.S. Dept. of - * Energy). All rights reserved. - - * If you have questions about your rights to use or distribute this software, - * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. - - * NOTICE. This Software was developed under funding from the U.S. Department of - * Energy and the U.S. Government consequently retains certain rights. As such, the - * U.S. Government has been granted for itself and others acting on its behalf a - * paid-up, nonexclusive, irrevocable, worldwide license in the Software to - * reproduce, distribute copies to the public, prepare derivative works, and - * perform publicly and display publicly, and to permit other to do so. - */ - -#ifndef PDC_ID_PKG_H -#define PDC_ID_PKG_H - -#include "pdc_private.h" -#include "pdc_linkedlist.h" -#include "mercury_atomic.h" -/* - * Number of bits to use for ID Type in each atom. 
Increase if more types - * are needed (though this will decrease the number of available IDs per - * type). This is the only number that must be changed since all other bit - * field sizes and masks are calculated from TYPE_BITS. - */ -#define TYPE_BITS 8 -#define TYPE_MASK (((pdcid_t)1 << TYPE_BITS) - 1) -#define PDC_MAX_NUM_TYPES TYPE_MASK -/* - * Number of bits to use for the Atom index in each atom (assumes 8-bit - * bytes). We don't use the sign bit. - */ -#define ID_BITS ((sizeof(pdcid_t) * 8) - (TYPE_BITS + 1)) -#define ID_MASK (((pdcid_t)1 << ID_BITS) - 1) - -/* Map an atom to an ID type number */ -#define PDC_TYPE(a) ((PDC_type_t)(((pdcid_t)(a) >> ID_BITS) & TYPE_MASK)) - -struct _pdc_id_info { - pdcid_t id; /* ID for this info */ - hg_atomic_int32_t count; /* ref. count for this atom */ - void * obj_ptr; /* pointer associated with the atom */ - PDC_LIST_ENTRY(_pdc_id_info) entry; -}; - -#endif /* PDC_ID_PKG_H */ diff --git a/src/utils/include/pdc_linkedlist.h b/src/utils/include/pdc_linkedlist.h deleted file mode 100644 index 6fa7ce08a..000000000 --- a/src/utils/include/pdc_linkedlist.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright (C) 2013-2016 Argonne National Laboratory, Department of Energy, - * UChicago Argonne, LLC and The HDF Group. - * All rights reserved. - * - * The full copyright notice, including terms governing use, modification, - * and redistribution, is contained in the COPYING file that can be - * found at the root of the source code distribution tree. - */ - -/* Code below is derived from sys/queue.h which follows the below notice: - * - * Copyright (c) 1991, 1993 - * The Regents of the University of California. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * @(#)queue.h 8.5 (Berkeley) 8/20/94 - */ - -#ifndef PDC_LINKEDLIST_H -#define PDC_LINKEDLIST_H - -#include "pdc_cont_pkg.h" -#include "pdc_cont.h" -#include "mercury_thread_mutex.h" -#include - -#define PDC_LIST_HEAD_INITIALIZER(name) \ - { \ - NULL \ - } - -#define PDC_LIST_HEAD_INIT(struct_head_name, var_name) \ - struct struct_head_name var_name = PDC_LIST_HEAD_INITIALIZER(var_name) - -#define PDC_LIST_HEAD_DECL(struct_head_name, struct_entry_name) \ - struct struct_head_name { \ - struct struct_entry_name *head; \ - } - -#define PDC_LIST_HEAD(struct_entry_name) \ - struct { \ - struct struct_entry_name *head; \ - hg_thread_mutex_t lock; \ - } - -#define PDC_LIST_ENTRY(struct_entry_name) \ - struct { \ - struct struct_entry_name * next; \ - struct struct_entry_name **prev; \ - } - -#define PDC_LIST_INIT(head_ptr) \ - do { \ - (head_ptr)->head = NULL; \ - hg_thread_mutex_init(&(head_ptr)->lock); \ - } while (/*CONSTCOND*/ 0) - -#define PDC_LIST_IS_EMPTY(head_ptr) ((head_ptr)->head == NULL) - -#define PDC_LIST_FIRST(head_ptr) ((head_ptr)->head) - -#define PDC_LIST_GET_FIRST(var, head_ptr) (var = (head_ptr)->head) - -#define PDC_LIST_NEXT(entry_ptr, entry_field_name) ((entry_ptr)->entry_field_name.next) - -#define PDC_LIST_TO_NEXT(entry_ptr, entry_field_name) ((entry_ptr) = (entry_ptr)->entry_field_name.next) - -#define PDC_LIST_INSERT_HEAD(head_ptr, entry_ptr, entry_field_name) \ - do { \ - if (((entry_ptr)->entry_field_name.next = (head_ptr)->head) != NULL) \ - (head_ptr)->head->entry_field_name.prev = &(entry_ptr)->entry_field_name.next; \ - (head_ptr)->head = (entry_ptr); \ - (entry_ptr)->entry_field_name.prev = &(head_ptr)->head; \ - } while (/*CONSTCOND*/ 0) - -/* TODO would be nice to not have any condition */ -#define PDC_LIST_REMOVE(entry_ptr, entry_field_name) \ - do { \ - if ((entry_ptr)->entry_field_name.next != NULL) \ - (entry_ptr)->entry_field_name.next->entry_field_name.prev = (entry_ptr)->entry_field_name.prev; \ - *(entry_ptr)->entry_field_name.prev = (entry_ptr)->entry_field_name.next; \ - } while (/*CONSTCOND*/ 0) - -#define PDC_LIST_FOREACH(var, head_ptr, entry_field_name) \ - for ((var) = ((head_ptr)->head); (var); (var) = ((var)->entry_field_name.next)) - -#define PDC_LIST_SEARCH(var, head_ptr, entry_field_name, item, value) \ - for ((var) = ((head_ptr)->head); (((var)->item != value) && (var)); \ - (var) = ((var)->entry_field_name.next)) - -#define PDC_LIST_SEARCH_CONT_NAME(var, head_ptr, entry_field_name, member, n, name) \ - for ((var) = ((head_ptr)->head); \ - ((var) && strcmp(((struct _pdc_cont_info *)((var)->member))->cont_info_pub->n, name) != 0); \ - (var) = ((var)->entry_field_name.next)) - -#endif /* PDC_LINKEDLIST_H */ diff --git a/src/utils/include/pdc_malloc.h b/src/utils/include/pdc_malloc.h deleted file mode 100644 index e8ea1941e..000000000 --- a/src/utils/include/pdc_malloc.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright Notice for - * Proactive Data Containers (PDC) Software Library and Utilities - * ----------------------------------------------------------------------------- - - *** Copyright Notice *** - - * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the - * University of California, through Lawrence Berkeley National Laboratory, - * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF - * Group (subject to receipt of any required approvals from the U.S. Dept. of - * Energy). All rights reserved. 
- - * If you have questions about your rights to use or distribute this software, - * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. - - * NOTICE. This Software was developed under funding from the U.S. Department of - * Energy and the U.S. Government consequently retains certain rights. As such, the - * U.S. Government has been granted for itself and others acting on its behalf a - * paid-up, nonexclusive, irrevocable, worldwide license in the Software to - * reproduce, distribute copies to the public, prepare derivative works, and - * perform publicly and display publicly, and to permit other to do so. - */ - -#ifndef PDC_MALLOC_H -#define PDC_MALLOC_H - -#include - -/***************************************/ -/* Library-private Function Prototypes */ -/***************************************/ -/** - * Create an object - * - * \param size [IN] Size of the struct to be malloced - */ -void *PDC_malloc(size_t size); - -/** - * Create an object - * - * \param size [IN] Size of the struct to be calloced - */ -void *PDC_calloc(size_t size); - -/** - * Create an object - * - * \param mem [IN] Starting address of memory - */ -void *PDC_free(void *mem); - -#define PDC_MALLOC(t) (t *)PDC_malloc(sizeof(t)) -#define PDC_CALLOC(t) (t *)PDC_calloc(sizeof(t)) - -#define PDC_FREE(t, obj) (t *)(intptr_t) PDC_free(obj) - -#endif /* PDC_MALLOC_H */ diff --git a/src/utils/include/pdc_private.h b/src/utils/include/pdc_private.h deleted file mode 100644 index b6ca3bcfd..000000000 --- a/src/utils/include/pdc_private.h +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright Notice for - * Proactive Data Containers (PDC) Software Library and Utilities - * ----------------------------------------------------------------------------- - - *** Copyright Notice *** - - * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the - * University of California, through Lawrence Berkeley National Laboratory, - * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF - * Group (subject to receipt of any required approvals from the U.S. Dept. of - * Energy). All rights reserved. - - * If you have questions about your rights to use or distribute this software, - * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. - - * NOTICE. This Software was developed under funding from the U.S. Department of - * Energy and the U.S. Government consequently retains certain rights. As such, the - * U.S. Government has been granted for itself and others acting on its behalf a - * paid-up, nonexclusive, irrevocable, worldwide license in the Software to - * reproduce, distribute copies to the public, prepare derivative works, and - * perform publicly and display publicly, and to permit other to do so. 
- */ - -#ifndef PDC_PRIVATE_H -#define PDC_PRIVATE_H - -#include "pdc_config.h" -#include "pdc_public.h" -#include -// #include /* gettimeofday() */ - -/****************************/ -/* Library Private Typedefs */ -/****************************/ -typedef enum { - UNKNOWN = 0, - SERVER_MEMORY = 1, - CLIENT_MEMORY = 2, - FLASH = 3, - DISK = 4, - FILESYSTEM = 5, - TAPE = 6 -} _pdc_loci_t; - -/* Query type */ -typedef enum { - PDC_Q_TYPE_DATA_ELEM, /* selects data elements */ - PDC_Q_TYPE_ATTR_VALUE, /* selects attribute values */ - PDC_Q_TYPE_ATTR_NAME, /* selects attributes */ - PDC_Q_TYPE_LINK_NAME, /* selects objects */ - PDC_Q_TYPE_MISC /* (for combine queries) selects misc objects */ -} _pdc_query_type_t; - -/* Query match conditions */ -typedef enum { - PDC_Q_MATCH_EQUAL, /* equal */ - PDC_Q_MATCH_NOT_EQUAL, /* not equal */ - PDC_Q_MATCH_LESS_THAN, /* less than */ - PDC_Q_MATCH_GREATER_THAN /* greater than */ -} _pdc_query_op_t; - -typedef enum { ROW_major, COL_major } _pdc_major_type_t; - -typedef enum { C_lang = 0, FORTRAN_lang, PYTHON_lang, JULIA_lang, N_LANGUAGES } _pdc_analysis_language_t; - -/***************************/ -/* Library Private Structs */ -/***************************/ -struct _pdc_class { - char * name; - pdcid_t local_id; -}; - -#ifdef __cplusplus -#define ATTRIBUTE(a) -#else /* __cplusplus */ -#if defined(HAVE_ATTRIBUTE) -#define ATTRIBUTE(a) __attribute__((a)) -#else -#define ATTRIBUTE(a) -#endif -#endif /* __cplusplus */ - -#ifdef __cplusplus -#define ATTR_UNUSED /*void*/ -#else /* __cplusplus */ -#if defined(HAVE_ATTRIBUTE) && !defined(__SUNPRO_C) -#define ATTR_UNUSED __attribute__((unused)) -#else -#define ATTR_UNUSED /*void*/ -#endif -#endif /* __cplusplus */ - -#define PDCmemset(X, C, Z) memset((void *)(X), C, Z) - -/* - * PDC Boolean type. - */ -#ifndef FALSE -#define FALSE 0 -#endif -#ifndef TRUE -#define TRUE 1 -#endif - -extern pbool_t err_occurred; - -/* - * PGOTO_DONE macro. The argument is the return value which is - * assigned to the `ret_value' variable. Control branches to - * the `done' label. - */ -#define PGOTO_DONE(ret_val) \ - do { \ - ret_value = ret_val; \ - goto done; \ - } while (0) - -#define PGOTO_DONE_VOID \ - do { \ - goto done; \ - } while (0) - -/* - * PGOTO_ERROR macro. The arguments are the return value and an - * error string. The return value is assigned to a variable `ret_value' and - * control branches to the `done' label. - */ -#define PGOTO_ERROR(ret_val, ...) \ - do { \ - fprintf(stderr, "Error in %s:%d\n", __FILE__, __LINE__); \ - fprintf(stderr, " # %s(): ", __func__); \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - PGOTO_DONE(ret_val); \ - } while (0) - -#define PGOTO_ERROR_VOID(...) 
\ - do { \ - fprintf(stderr, "Error in %s:%d\n", __FILE__, __LINE__); \ - fprintf(stderr, " # %s(): ", __func__); \ - fprintf(stderr, "\n"); \ - PGOTO_DONE_VOID; \ - } while (0) - -/* Include a basic profiling interface */ -#ifdef ENABLE_PROFILING -#include "stack_ops.h" - -#define FUNC_ENTER(X) \ - do { \ - if (enableProfiling) \ - push(__func__, (X)); \ - } while (0) - -#define FUNC_LEAVE(ret_value) \ - do { \ - if (enableProfiling) \ - pop(); \ - return (ret_value); \ - } while (0) - -#define FUNC_LEAVE_VOID \ - do { \ - if (enableProfiling) \ - pop(); \ - return; \ - } while (0) - -#else -/* #define FUNC_ENTER(X) \ */ -/* do { \ */ -/* time_t now; \ */ -/* time(&now); \ */ -/* fprintf(stderr, "%ld enter %s\n", now, __func__); \ */ -/* } while (0) */ - -/* #define FUNC_LEAVE(ret_value) \ */ -/* do { \ */ -/* time_t now; \ */ -/* time(&now); \ */ -/* fprintf(stderr, "%ld leave %s\n", now, __func__); \ */ -/* return (ret_value); \ */ -/* } while (0) */ - -#define FUNC_ENTER(X) \ - do { \ - } while (0) - -#define FUNC_LEAVE(ret_value) \ - do { \ - return (ret_value); \ - } while (0) - -#define FUNC_LEAVE_VOID \ - do { \ - return; \ - } while (0) -#endif - -#endif /* PDC_PRIVATE_H */ diff --git a/src/utils/include/pdc_timing.h b/src/utils/include/pdc_timing.h deleted file mode 100644 index 1ea2f475b..000000000 --- a/src/utils/include/pdc_timing.h +++ /dev/null @@ -1,193 +0,0 @@ -#ifndef PDC_TIMING_H -#define PDC_TIMING_H - -#ifndef HOST_NAME_MAX -#if defined(__APPLE__) -#define HOST_NAME_MAX 255 -#else -#define HOST_NAME_MAX 64 -#endif /* __APPLE__ */ -#endif /* HOST_NAME_MAX */ - -#include "pdc_config.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef PDC_TIMING -typedef struct pdc_timing { - double PDCbuf_obj_map_rpc; - double PDCbuf_obj_unmap_rpc; - - double PDCreg_obtain_lock_write_rpc; - double PDCreg_obtain_lock_read_rpc; - - double PDCreg_release_lock_write_rpc; - double PDCreg_release_lock_read_rpc; - - double PDCbuf_obj_map_rpc_wait; - double PDCbuf_obj_unmap_rpc_wait; - - double PDCreg_obtain_lock_write_rpc_wait; - double PDCreg_obtain_lock_read_rpc_wait; - double PDCreg_release_lock_write_rpc_wait; - double PDCreg_release_lock_read_rpc_wait; - - double PDCtransfer_request_start_write_rpc; - double PDCtransfer_request_wait_write_rpc; - double PDCtransfer_request_start_read_rpc; - double PDCtransfer_request_wait_read_rpc; - - double PDCtransfer_request_start_write_rpc_wait; - double PDCtransfer_request_start_read_rpc_wait; - double PDCtransfer_request_wait_write_rpc_wait; - double PDCtransfer_request_wait_read_rpc_wait; - - double PDCtransfer_request_start_all_write_rpc; - double PDCtransfer_request_start_all_read_rpc; - double PDCtransfer_request_wait_all_rpc; - - double PDCtransfer_request_start_all_write_rpc_wait; - double PDCtransfer_request_start_all_read_rpc_wait; - double PDCtransfer_request_wait_all_rpc_wait; - - double PDCtransfer_request_metadata_query_rpc; - - double PDCclient_obj_create_rpc; - double PDCclient_cont_create_rpc; - -} pdc_timing; - -pdc_timing pdc_timings; - -typedef struct pdc_server_timing { - double PDCbuf_obj_map_rpc; - double PDCbuf_obj_unmap_rpc; - - double PDCreg_obtain_lock_write_rpc; - double PDCreg_obtain_lock_read_rpc; - double PDCreg_release_lock_write_rpc; - double PDCreg_release_lock_read_rpc; - double PDCreg_release_lock_bulk_transfer_write_rpc; - double PDCreg_release_lock_bulk_transfer_read_rpc; - double 
PDCreg_release_lock_bulk_transfer_inner_write_rpc; - double PDCreg_release_lock_bulk_transfer_inner_read_rpc; - - double PDCreg_transfer_request_start_write_rpc; - double PDCreg_transfer_request_start_read_rpc; - double PDCreg_transfer_request_wait_write_rpc; - double PDCreg_transfer_request_wait_read_rpc; - double PDCreg_transfer_request_start_write_bulk_rpc; - double PDCreg_transfer_request_inner_write_bulk_rpc; - double PDCreg_transfer_request_start_read_bulk_rpc; - double PDCreg_transfer_request_inner_read_bulk_rpc; - - double PDCreg_transfer_request_start_all_write_rpc; - double PDCreg_transfer_request_start_all_read_rpc; - double PDCreg_transfer_request_start_all_write_bulk_rpc; - double PDCreg_transfer_request_start_all_read_bulk_rpc; - double PDCreg_transfer_request_inner_write_all_bulk_rpc; - double PDCreg_transfer_request_inner_read_all_bulk_rpc; - double PDCreg_transfer_request_wait_all_rpc; - double PDCreg_transfer_request_wait_all_bulk_rpc; - - double PDCdata_server_write_out; - double PDCdata_server_read_from; - double PDCcache_write; - double PDCcache_read; - double PDCcache_flush; - double PDCcache_clean; - double PDCdata_server_write_posix; - double PDCdata_server_read_posix; - - double PDCserver_obj_create_rpc; - double PDCserver_cont_create_rpc; - - double PDCserver_restart; - double PDCserver_checkpoint; - double PDCserver_start_total; -} pdc_server_timing; - -typedef struct pdc_timestamp { - double *start; - double *end; - size_t timestamp_max_size; - size_t timestamp_size; -} pdc_timestamp; - -pdc_server_timing *pdc_server_timings; -pdc_timestamp * pdc_buf_obj_map_timestamps; -pdc_timestamp * pdc_buf_obj_unmap_timestamps; - -pdc_timestamp *pdc_obtain_lock_write_timestamps; -pdc_timestamp *pdc_obtain_lock_read_timestamps; -pdc_timestamp *pdc_release_lock_write_timestamps; -pdc_timestamp *pdc_release_lock_read_timestamps; -pdc_timestamp *pdc_release_lock_bulk_transfer_write_timestamps; -pdc_timestamp *pdc_release_lock_bulk_transfer_inner_write_timestamps; -pdc_timestamp *pdc_release_lock_bulk_transfer_read_timestamps; -pdc_timestamp *pdc_release_lock_bulk_transfer_inner_read_timestamps; - -pdc_timestamp *pdc_transfer_request_start_write_timestamps; -pdc_timestamp *pdc_transfer_request_start_read_timestamps; -pdc_timestamp *pdc_transfer_request_wait_write_timestamps; -pdc_timestamp *pdc_transfer_request_wait_read_timestamps; -pdc_timestamp *pdc_transfer_request_start_write_bulk_timestamps; -pdc_timestamp *pdc_transfer_request_inner_write_bulk_timestamps; -pdc_timestamp *pdc_transfer_request_start_read_bulk_timestamps; -pdc_timestamp *pdc_transfer_request_inner_read_bulk_timestamps; - -pdc_timestamp *pdc_transfer_request_start_all_write_timestamps; -pdc_timestamp *pdc_transfer_request_start_all_read_timestamps; -pdc_timestamp *pdc_transfer_request_start_all_write_bulk_timestamps; -pdc_timestamp *pdc_transfer_request_start_all_read_bulk_timestamps; -pdc_timestamp *pdc_transfer_request_wait_all_timestamps; -pdc_timestamp *pdc_transfer_request_inner_write_all_bulk_timestamps; -pdc_timestamp *pdc_transfer_request_inner_read_all_bulk_timestamps; - -pdc_timestamp *pdc_client_buf_obj_map_timestamps; -pdc_timestamp *pdc_client_buf_obj_unmap_timestamps; -pdc_timestamp *pdc_client_obtain_lock_write_timestamps; -pdc_timestamp *pdc_client_obtain_lock_read_timestamps; -pdc_timestamp *pdc_client_release_lock_write_timestamps; -pdc_timestamp *pdc_client_release_lock_read_timestamps; - -pdc_timestamp *pdc_client_transfer_request_start_write_timestamps; -pdc_timestamp 
*pdc_client_transfer_request_start_read_timestamps; -pdc_timestamp *pdc_client_transfer_request_wait_write_timestamps; -pdc_timestamp *pdc_client_transfer_request_wait_read_timestamps; - -pdc_timestamp *pdc_client_transfer_request_start_all_write_timestamps; -pdc_timestamp *pdc_client_transfer_request_start_all_read_timestamps; -pdc_timestamp *pdc_client_transfer_request_wait_all_timestamps; - -pdc_timestamp *pdc_client_create_cont_timestamps; -pdc_timestamp *pdc_client_create_obj_timestamps; - -pdc_timestamp *pdc_client_transfer_request_metadata_query_timestamps; - -int PDC_timing_init(); -int PDC_timing_finalize(); -int PDC_timing_report(const char *prefix); -int PDC_server_timing_init(); -int pdc_timestamp_register(pdc_timestamp *timestamp, double start, double end); -int PDC_server_timing_report(); -#else -int PDC_timing_report(const char *prefix); -#endif - -#endif diff --git a/src/utils/pdc_interface.c b/src/utils/pdc_interface.c index 4cca6b0bc..218d6518e 100644 --- a/src/utils/pdc_interface.c +++ b/src/utils/pdc_interface.c @@ -25,6 +25,8 @@ #include "pdc_malloc.h" #include "pdc_id_pkg.h" #include "pdc_interface.h" +#include "pdc_cont_pkg.h" +#include "pdc_cont.h" #include #include diff --git a/src/utils/pdc_malloc.c b/src/utils/pdc_malloc.c deleted file mode 100644 index 40379d400..000000000 --- a/src/utils/pdc_malloc.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright Notice for - * Proactive Data Containers (PDC) Software Library and Utilities - * ----------------------------------------------------------------------------- - - *** Copyright Notice *** - - * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the - * University of California, through Lawrence Berkeley National Laboratory, - * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF - * Group (subject to receipt of any required approvals from the U.S. Dept. of - * Energy). All rights reserved. - - * If you have questions about your rights to use or distribute this software, - * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. - - * NOTICE. This Software was developed under funding from the U.S. Department of - * Energy and the U.S. Government consequently retains certain rights. As such, the - * U.S. Government has been granted for itself and others acting on its behalf a - * paid-up, nonexclusive, irrevocable, worldwide license in the Software to - * reproduce, distribute copies to the public, prepare derivative works, and - * perform publicly and display publicly, and to permit other to do so. 
- */ - -#include -#include -#include "pdc_malloc.h" -#include "pdc_private.h" - -void * -PDC_malloc(size_t size) -{ - void *ret_value; - - FUNC_ENTER(NULL); - - assert(size); - - if (size) - ret_value = malloc(size); - else - ret_value = NULL; - - FUNC_LEAVE(ret_value); -} - -void * -PDC_calloc(size_t size) -{ - void *ret_value; - - FUNC_ENTER(NULL); - - assert(size); - - if (size) - ret_value = calloc(1, size); - else - ret_value = NULL; - - FUNC_LEAVE(ret_value); -} - -void * -PDC_free(void *mem) -{ - void *ret_value = NULL; - - FUNC_ENTER(NULL); - - if (mem) { - free(mem); - } - - FUNC_LEAVE(ret_value); -} diff --git a/src/utils/pdc_timing.c b/src/utils/pdc_timing.c deleted file mode 100644 index 95826b56b..000000000 --- a/src/utils/pdc_timing.c +++ /dev/null @@ -1,537 +0,0 @@ -#include "pdc_timing.h" - -#ifdef PDC_TIMING -static double pdc_base_time; - -static int -pdc_timestamp_clean(pdc_timestamp *timestamp) -{ - if (timestamp->timestamp_size) { - free(timestamp->start); - } - return 0; -} - -static int -timestamp_log(FILE *stream, const char *header, pdc_timestamp *timestamp) -{ - size_t i; - double total = 0.0; - fprintf(stream, "%s", header); - for (i = 0; i < timestamp->timestamp_size; ++i) { - fprintf(stream, ",%4f-%4f", timestamp->start[i], timestamp->end[i]); - total += timestamp->end[i] - timestamp->start[i]; - } - fprintf(stream, "\n"); - - if (i > 0) - fprintf(stream, "%s_total, %f\n", header, total); - - return 0; -} - -int -PDC_timing_init() -{ - char hostname[HOST_NAME_MAX]; - int rank; - pdc_timestamp *ptr; - - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - gethostname(hostname, HOST_NAME_MAX); - if (!(rank % 31)) { - printf("client process rank %d, hostname = %s\n", rank, hostname); - } - MPI_Barrier(MPI_COMM_WORLD); - - memset(&pdc_timings, 0, sizeof(pdc_timing)); - - pdc_client_buf_obj_map_timestamps = calloc(16, sizeof(pdc_timestamp)); - ptr = pdc_client_buf_obj_map_timestamps + 1; - pdc_client_buf_obj_unmap_timestamps = ptr; - ptr++; - pdc_client_obtain_lock_write_timestamps = ptr; - ptr++; - pdc_client_obtain_lock_read_timestamps = ptr; - ptr++; - pdc_client_release_lock_write_timestamps = ptr; - ptr++; - pdc_client_release_lock_read_timestamps = ptr; - ptr++; - - pdc_client_transfer_request_start_write_timestamps = ptr; - ptr++; - pdc_client_transfer_request_start_read_timestamps = ptr; - ptr++; - pdc_client_transfer_request_wait_write_timestamps = ptr; - ptr++; - pdc_client_transfer_request_wait_read_timestamps = ptr; - ptr++; - - pdc_client_transfer_request_start_all_write_timestamps = ptr; - ptr++; - pdc_client_transfer_request_start_all_read_timestamps = ptr; - ptr++; - pdc_client_transfer_request_wait_all_timestamps = ptr; - ptr++; - - pdc_client_create_cont_timestamps = ptr; - ptr++; - pdc_client_create_obj_timestamps = ptr; - - ptr++; - pdc_client_transfer_request_metadata_query_timestamps = ptr; - - return 0; -} - -int -PDC_timing_finalize() -{ - pdc_timestamp_clean(pdc_client_buf_obj_map_timestamps); - pdc_timestamp_clean(pdc_client_buf_obj_unmap_timestamps); - - pdc_timestamp_clean(pdc_client_obtain_lock_write_timestamps); - pdc_timestamp_clean(pdc_client_obtain_lock_read_timestamps); - pdc_timestamp_clean(pdc_client_release_lock_write_timestamps); - pdc_timestamp_clean(pdc_client_release_lock_read_timestamps); - - pdc_timestamp_clean(pdc_client_transfer_request_start_write_timestamps); - pdc_timestamp_clean(pdc_client_transfer_request_start_read_timestamps); - pdc_timestamp_clean(pdc_client_transfer_request_wait_write_timestamps); - 
pdc_timestamp_clean(pdc_client_transfer_request_wait_read_timestamps); - pdc_timestamp_clean(pdc_client_create_cont_timestamps); - pdc_timestamp_clean(pdc_client_create_obj_timestamps); - pdc_timestamp_clean(pdc_client_transfer_request_start_all_write_timestamps); - pdc_timestamp_clean(pdc_client_transfer_request_start_all_read_timestamps); - pdc_timestamp_clean(pdc_client_transfer_request_wait_all_timestamps); - pdc_timestamp_clean(pdc_client_transfer_request_metadata_query_timestamps); - - free(pdc_client_buf_obj_map_timestamps); - return 0; -} - -int -PDC_timing_report(const char *prefix) -{ - pdc_timing max_timings; - int rank; - char filename[256], header[256]; - FILE * stream; - char hostname[HOST_NAME_MAX]; - time_t now; - - time(&now); - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - gethostname(hostname, HOST_NAME_MAX); - if (!(rank % 32)) { - printf("client process rank %d, hostname = %s\n", rank, hostname); - } - MPI_Reduce(&pdc_timings, &max_timings, sizeof(pdc_timing) / sizeof(double), MPI_DOUBLE, MPI_MAX, 0, - MPI_COMM_WORLD); - if (rank == 0) { - printf("PDCbuf_obj_map_rpc = %lf, wait = %lf\n", max_timings.PDCbuf_obj_map_rpc, - max_timings.PDCbuf_obj_map_rpc_wait); - printf("PDCreg_obtain_lock_write_rpc = %lf, wait = %lf\n", max_timings.PDCreg_obtain_lock_write_rpc, - max_timings.PDCreg_obtain_lock_write_rpc_wait); - printf("PDCreg_obtain_lock_read_rpc = %lf, wait = %lf\n", max_timings.PDCreg_obtain_lock_read_rpc, - max_timings.PDCreg_obtain_lock_read_rpc_wait); - - printf("PDCreg_release_lock_write_rpc = %lf, wait = %lf\n", max_timings.PDCreg_release_lock_write_rpc, - max_timings.PDCreg_release_lock_write_rpc_wait); - printf("PDCreg_release_lock_read_rpc = %lf, wait = %lf\n", max_timings.PDCreg_release_lock_read_rpc, - max_timings.PDCreg_release_lock_read_rpc_wait); - printf("PDCbuf_obj_unmap_rpc = %lf, wait = %lf\n", max_timings.PDCbuf_obj_unmap_rpc, - max_timings.PDCbuf_obj_unmap_rpc_wait); - - printf("PDCtransfer_request_start_write = %lf, wait = %lf\n", - max_timings.PDCtransfer_request_start_write_rpc, - max_timings.PDCtransfer_request_start_write_rpc_wait); - printf("PDCtransfer_request_start_read = %lf, wait = %lf\n", - max_timings.PDCtransfer_request_start_read_rpc, - max_timings.PDCtransfer_request_start_read_rpc_wait); - printf("PDCtransfer_request_wait_write = %lf, wait = %lf\n", - max_timings.PDCtransfer_request_wait_write_rpc, - max_timings.PDCtransfer_request_wait_write_rpc_wait); - printf("PDCtransfer_request_wait_read = %lf, wait = %lf\n", - max_timings.PDCtransfer_request_wait_read_rpc, - max_timings.PDCtransfer_request_wait_read_rpc_wait); - printf("PDCtransfer_request_start_all_write = %lf, wait = %lf\n", - max_timings.PDCtransfer_request_start_all_write_rpc, - max_timings.PDCtransfer_request_start_all_write_rpc_wait); - printf("PDCtransfer_request_start_all_read = %lf, wait = %lf\n", - max_timings.PDCtransfer_request_start_all_read_rpc, - max_timings.PDCtransfer_request_start_all_read_rpc_wait); - printf("PDCtransfer_request_wait_write = %lf, wait = %lf\n", - max_timings.PDCtransfer_request_wait_all_rpc, - max_timings.PDCtransfer_request_wait_all_rpc_wait); - } - - sprintf(filename, "pdc_client_log_rank_%d.csv", rank); - stream = fopen(filename, "r"); - if (stream) { - fclose(stream); - stream = fopen(filename, "a"); - } - else { - stream = fopen(filename, "w"); - } - - fprintf(stream, "%s", ctime(&now)); - - sprintf(header, "buf_obj_map_%s", prefix); - timestamp_log(stream, header, pdc_client_buf_obj_map_timestamps); - sprintf(header, "buf_obj_unmap_%s", 
prefix); - timestamp_log(stream, header, pdc_client_buf_obj_unmap_timestamps); - - sprintf(header, "obtain_lock_write_%s", prefix); - timestamp_log(stream, header, pdc_client_obtain_lock_write_timestamps); - sprintf(header, "obtain_lock_read_%s", prefix); - timestamp_log(stream, header, pdc_client_obtain_lock_read_timestamps); - - sprintf(header, "release_lock_write_%s", prefix); - timestamp_log(stream, header, pdc_client_release_lock_write_timestamps); - sprintf(header, "release_lock_read_%s", prefix); - timestamp_log(stream, header, pdc_client_release_lock_read_timestamps); - - sprintf(header, "transfer_request_start_write_%s", prefix); - timestamp_log(stream, header, pdc_client_transfer_request_start_write_timestamps); - - sprintf(header, "transfer_request_start_read_%s", prefix); - timestamp_log(stream, header, pdc_client_transfer_request_start_read_timestamps); - - sprintf(header, "transfer_request_wait_write_%s", prefix); - timestamp_log(stream, header, pdc_client_transfer_request_wait_write_timestamps); - - sprintf(header, "transfer_request_wait_read_%s", prefix); - timestamp_log(stream, header, pdc_client_transfer_request_wait_read_timestamps); - - sprintf(header, "transfer_request_start_all_write_%s", prefix); - timestamp_log(stream, header, pdc_client_transfer_request_start_all_write_timestamps); - - sprintf(header, "transfer_request_start_all_read_%s", prefix); - timestamp_log(stream, header, pdc_client_transfer_request_start_all_read_timestamps); - - sprintf(header, "transfer_request_wait_all_%s", prefix); - timestamp_log(stream, header, pdc_client_transfer_request_wait_all_timestamps); - - sprintf(header, "create_cont"); - timestamp_log(stream, header, pdc_client_create_cont_timestamps); - - sprintf(header, "create_obj"); - timestamp_log(stream, header, pdc_client_create_obj_timestamps); - - fprintf(stream, "\n"); - fclose(stream); - - pdc_client_buf_obj_map_timestamps->timestamp_size = 0; - pdc_client_buf_obj_unmap_timestamps->timestamp_size = 0; - - pdc_client_obtain_lock_write_timestamps->timestamp_size = 0; - pdc_client_obtain_lock_read_timestamps->timestamp_size = 0; - pdc_client_release_lock_write_timestamps->timestamp_size = 0; - pdc_client_release_lock_read_timestamps->timestamp_size = 0; - - pdc_client_transfer_request_start_write_timestamps->timestamp_size = 0; - pdc_client_transfer_request_start_read_timestamps->timestamp_size = 0; - pdc_client_transfer_request_wait_write_timestamps->timestamp_size = 0; - pdc_client_transfer_request_wait_read_timestamps->timestamp_size = 0; - - pdc_client_transfer_request_start_all_write_timestamps->timestamp_size = 0; - pdc_client_transfer_request_start_all_read_timestamps->timestamp_size = 0; - pdc_client_transfer_request_wait_all_timestamps->timestamp_size = 0; - - pdc_client_create_cont_timestamps->timestamp_size = 0; - pdc_client_create_obj_timestamps->timestamp_size = 0; - - pdc_client_transfer_request_metadata_query_timestamps->timestamp_size = 0; - - memset(&pdc_timings, 0, sizeof(pdc_timings)); - - return 0; -} - -int -PDC_server_timing_init() -{ - char hostname[HOST_NAME_MAX]; - int rank; - - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - gethostname(hostname, HOST_NAME_MAX); - - printf("server process rank %d, hostname = %s\n", rank, hostname); - /* - printf("rank = %d, hostname = %s, PDCbuf_obj_map_rpc = %lf, PDCreg_obtain_lock_rpc = %lf, " - "PDCreg_release_lock_write_rpc = " - "%lf, PDCreg_release_lock_read_rpc = %lf, PDCbuf_obj_unmap_rpc = %lf, " - "region_release_bulk_transfer_cb = %lf\n", - rank, hostname, 
server_timings->PDCbuf_obj_map_rpc, server_timings->PDCreg_obtain_lock_rpc, - server_timings->PDCreg_release_lock_write_rpc, server_timings->PDCreg_release_lock_read_rpc, - server_timings->PDCbuf_obj_unmap_rpc, server_timings->PDCreg_release_lock_bulk_transfer_rpc); - */ - MPI_Barrier(MPI_COMM_WORLD); - - pdc_server_timings = calloc(1, sizeof(pdc_server_timing)); - pdc_timestamp *ptr = calloc(25, sizeof(pdc_timestamp)); - pdc_buf_obj_map_timestamps = ptr; - ptr++; - pdc_buf_obj_unmap_timestamps = ptr; - ptr++; - pdc_obtain_lock_write_timestamps = ptr; - ptr++; - pdc_obtain_lock_read_timestamps = ptr; - ptr++; - pdc_release_lock_write_timestamps = ptr; - ptr++; - pdc_release_lock_read_timestamps = ptr; - ptr++; - pdc_release_lock_bulk_transfer_write_timestamps = ptr; - ptr++; - pdc_release_lock_bulk_transfer_read_timestamps = ptr; - ptr++; - pdc_release_lock_bulk_transfer_inner_write_timestamps = ptr; - ptr++; - pdc_release_lock_bulk_transfer_inner_read_timestamps = ptr; - ptr++; - - pdc_transfer_request_start_write_timestamps = ptr; - ptr++; - pdc_transfer_request_start_read_timestamps = ptr; - ptr++; - pdc_transfer_request_wait_write_timestamps = ptr; - ptr++; - pdc_transfer_request_wait_read_timestamps = ptr; - ptr++; - pdc_transfer_request_start_write_bulk_timestamps = ptr; - ptr++; - pdc_transfer_request_start_read_bulk_timestamps = ptr; - ptr++; - pdc_transfer_request_inner_write_bulk_timestamps = ptr; - ptr++; - pdc_transfer_request_inner_read_bulk_timestamps = ptr; - ptr++; - - pdc_transfer_request_start_all_write_timestamps = ptr; - ptr++; - pdc_transfer_request_start_all_read_timestamps = ptr; - ptr++; - pdc_transfer_request_wait_all_timestamps = ptr; - ptr++; - pdc_transfer_request_start_all_write_bulk_timestamps = ptr; - ptr++; - pdc_transfer_request_start_all_read_bulk_timestamps = ptr; - ptr++; - pdc_transfer_request_inner_write_all_bulk_timestamps = ptr; - ptr++; - pdc_transfer_request_inner_read_all_bulk_timestamps = ptr; - ptr++; - - // 25 timestamps - - pdc_base_time = MPI_Wtime(); - return 0; -} - -int -pdc_timestamp_register(pdc_timestamp *timestamp, double start, double end) -{ - double *temp; - - if (timestamp->timestamp_max_size == 0) { - timestamp->timestamp_max_size = 256; - timestamp->start = (double *)malloc(sizeof(double) * timestamp->timestamp_max_size * 2); - timestamp->end = timestamp->start + timestamp->timestamp_max_size; - timestamp->timestamp_size = 0; - } - else if (timestamp->timestamp_size == timestamp->timestamp_max_size) { - temp = (double *)malloc(sizeof(double) * timestamp->timestamp_max_size * 4); - memcpy(temp, timestamp->start, sizeof(double) * timestamp->timestamp_max_size); - memcpy(temp + timestamp->timestamp_max_size * 2, timestamp->end, - sizeof(double) * timestamp->timestamp_max_size); - timestamp->start = temp; - timestamp->end = temp + timestamp->timestamp_max_size * 2; - timestamp->timestamp_max_size *= 2; - } - timestamp->start[timestamp->timestamp_size] = start; - timestamp->end[timestamp->timestamp_size] = end; - timestamp->timestamp_size++; - return 0; -} - -int -PDC_server_timing_report() -{ - pdc_server_timing max_timings; - int rank; - char filename[256]; - FILE * stream; - - // char hostname[HOST_NAME_MAX]; - time_t now; - - time(&now); - - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - MPI_Reduce(pdc_server_timings, &max_timings, sizeof(pdc_server_timing) / sizeof(double), MPI_DOUBLE, - MPI_MAX, 0, MPI_COMM_WORLD); - sprintf(filename, "pdc_server_log_rank_%d.csv", rank); - - stream = fopen(filename, "w"); - - fprintf(stream, "%s", 
ctime(&now)); - timestamp_log(stream, "buf_obj_map", pdc_buf_obj_map_timestamps); - timestamp_log(stream, "buf_obj_unmap", pdc_buf_obj_unmap_timestamps); - - timestamp_log(stream, "obtain_lock_write", pdc_obtain_lock_write_timestamps); - timestamp_log(stream, "obtain_lock_read", pdc_obtain_lock_read_timestamps); - timestamp_log(stream, "release_lock_write", pdc_release_lock_write_timestamps); - timestamp_log(stream, "release_lock_read", pdc_release_lock_read_timestamps); - timestamp_log(stream, "release_lock_bulk_transfer_write", - pdc_release_lock_bulk_transfer_write_timestamps); - timestamp_log(stream, "release_lock_bulk_transfer_read", pdc_release_lock_bulk_transfer_read_timestamps); - timestamp_log(stream, "release_lock_bulk_transfer_inner_write", - pdc_release_lock_bulk_transfer_inner_write_timestamps); - timestamp_log(stream, "release_lock_bulk_transfer_inner_read", - pdc_release_lock_bulk_transfer_inner_read_timestamps); - - timestamp_log(stream, "transfer_request_start_write", pdc_transfer_request_start_write_timestamps); - timestamp_log(stream, "transfer_request_wait_write", pdc_transfer_request_wait_write_timestamps); - timestamp_log(stream, "transfer_request_start_write_bulk", - pdc_transfer_request_start_write_bulk_timestamps); - timestamp_log(stream, "transfer_request_inner_write_bulk", - pdc_transfer_request_inner_write_bulk_timestamps); - timestamp_log(stream, "transfer_request_start_read", pdc_transfer_request_start_read_timestamps); - timestamp_log(stream, "transfer_request_wait_read", pdc_transfer_request_wait_read_timestamps); - timestamp_log(stream, "transfer_request_start_read_bulk", - pdc_transfer_request_start_read_bulk_timestamps); - timestamp_log(stream, "transfer_request_inner_read_bulk", - pdc_transfer_request_inner_read_bulk_timestamps); - - timestamp_log(stream, "transfer_request_start_all_write", - pdc_transfer_request_start_all_write_timestamps); - timestamp_log(stream, "transfer_request_start_all_write_bulk", - pdc_transfer_request_start_all_write_bulk_timestamps); - timestamp_log(stream, "transfer_request_start_all_read", pdc_transfer_request_start_all_read_timestamps); - timestamp_log(stream, "transfer_request_start_all_read_bulk", - pdc_transfer_request_start_all_read_bulk_timestamps); - timestamp_log(stream, "transfer_request_inner_write_all_bulk", - pdc_transfer_request_inner_write_all_bulk_timestamps); - timestamp_log(stream, "transfer_request_inner_read_all_bulk", - pdc_transfer_request_inner_read_all_bulk_timestamps); - timestamp_log(stream, "transfer_request_wait_all", pdc_transfer_request_wait_all_timestamps); - - /* timestamp_log(stream, "create_obj", create_obj_timestamps); */ - /* timestamp_log(stream, "create_cont", create_cont_timestamps); */ - fclose(stream); - - sprintf(filename, "pdc_server_timings_%d.csv", rank); - stream = fopen(filename, "w"); - fprintf(stream, "%s", ctime(&now)); - fprintf(stream, "PDCbuf_obj_map_rpc, %lf\n", pdc_server_timings->PDCbuf_obj_map_rpc); - fprintf(stream, "PDCreg_obtain_lock_write_rpc, %lf\n", pdc_server_timings->PDCreg_obtain_lock_write_rpc); - fprintf(stream, "PDCreg_obtain_lock_read_rpc, %lf\n", pdc_server_timings->PDCreg_obtain_lock_read_rpc); - fprintf(stream, "PDCreg_release_lock_write_rpc, %lf\n", - pdc_server_timings->PDCreg_release_lock_write_rpc); - fprintf(stream, "PDCreg_release_lock_read_rpc, %lf\n", pdc_server_timings->PDCreg_release_lock_read_rpc); - fprintf(stream, "PDCbuf_obj_unmap_rpc, %lf\n", pdc_server_timings->PDCbuf_obj_unmap_rpc); - fprintf(stream, 
"PDCreg_release_lock_bulk_transfer_write_rpc, %lf\n", - pdc_server_timings->PDCreg_release_lock_bulk_transfer_write_rpc); - fprintf(stream, "PDCreg_release_lock_bulk_transfer_read_rpc, %lf\n", - pdc_server_timings->PDCreg_release_lock_bulk_transfer_read_rpc); - fprintf(stream, "PDCreg_release_lock_bulk_transfer_inner_write_rpc, %lf\n", - pdc_server_timings->PDCreg_release_lock_bulk_transfer_inner_write_rpc); - fprintf(stream, "PDCreg_release_lock_bulk_transfer_inner_read_rpc, %lf\n", - pdc_server_timings->PDCreg_release_lock_bulk_transfer_inner_read_rpc); - fprintf(stream, "PDCregion_transfer_start_write_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_start_write_rpc); - fprintf(stream, "PDCregion_transfer_wait_write_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_wait_write_rpc); - fprintf(stream, "PDCregion_transfer_start_write_bulk_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_start_write_bulk_rpc); - fprintf(stream, "PDCregion_transfer_request_inner_write_bulk_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_inner_write_bulk_rpc); - fprintf(stream, "PDCregion_transfer_start_read_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_start_read_rpc); - fprintf(stream, "PDCregion_transfer_wait_read_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_wait_read_rpc); - fprintf(stream, "PDCregion_transfer_start_read_bulk_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_start_read_bulk_rpc); - fprintf(stream, "PDCregion_transfer_request_inner_read_bulk_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_inner_read_bulk_rpc); - - fprintf(stream, "PDCregion_transfer_start_write_all_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_start_all_write_rpc); - fprintf(stream, "PDCregion_transfer_request_inner_write_all_bulk_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_inner_write_all_bulk_rpc); - fprintf(stream, "PDCregion_transfer_start_all_read_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_start_all_read_rpc); - fprintf(stream, "PDCregion_transfer_request_inner_read_all_bulk_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_inner_read_all_bulk_rpc); - fprintf(stream, "PDCregion_transfer_wait_all_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_wait_all_rpc); - fprintf(stream, "PDCregion_transfer_wait_all_bulk_rpc, %lf\n", - pdc_server_timings->PDCreg_transfer_request_wait_all_bulk_rpc); - - fprintf(stream, "PDCserver_obj_create_rpc, %lf\n", pdc_server_timings->PDCserver_obj_create_rpc); - fprintf(stream, "PDCserver_cont_create_rpc, %lf\n", pdc_server_timings->PDCserver_cont_create_rpc); - - fprintf(stream, "PDCdata_server_write_out, %lf\n", pdc_server_timings->PDCdata_server_write_out); - fprintf(stream, "PDCdata_server_read_from, %lf\n", pdc_server_timings->PDCdata_server_read_from); - fprintf(stream, "PDCcache_write, %lf\n", pdc_server_timings->PDCcache_write); - fprintf(stream, "PDCcache_read, %lf\n", pdc_server_timings->PDCcache_read); - fprintf(stream, "PDCcache_flush, %lf\n", pdc_server_timings->PDCcache_flush); - fprintf(stream, "PDCcache_clean, %lf\n", pdc_server_timings->PDCcache_clean); - fprintf(stream, "PDCdata_server_write_posix, %lf\n", pdc_server_timings->PDCdata_server_write_posix); - fprintf(stream, "PDCdata_server_read_posix, %lf\n", pdc_server_timings->PDCdata_server_read_posix); - - fprintf(stream, "PDCserver_restart, %lf\n", pdc_server_timings->PDCserver_restart); - fprintf(stream, "PDCserver_checkpoint, %lf\n", 
pdc_server_timings->PDCserver_checkpoint); - fprintf(stream, "PDCstart_server_total, %lf\n", pdc_server_timings->PDCserver_start_total); - - fclose(stream); - - free(pdc_server_timings); - pdc_timestamp_clean(pdc_buf_obj_map_timestamps); - pdc_timestamp_clean(pdc_buf_obj_unmap_timestamps); - - pdc_timestamp_clean(pdc_obtain_lock_write_timestamps); - pdc_timestamp_clean(pdc_obtain_lock_read_timestamps); - pdc_timestamp_clean(pdc_release_lock_write_timestamps); - pdc_timestamp_clean(pdc_release_lock_read_timestamps); - pdc_timestamp_clean(pdc_release_lock_bulk_transfer_write_timestamps); - pdc_timestamp_clean(pdc_release_lock_bulk_transfer_read_timestamps); - pdc_timestamp_clean(pdc_release_lock_bulk_transfer_inner_write_timestamps); - pdc_timestamp_clean(pdc_release_lock_bulk_transfer_inner_read_timestamps); - - pdc_timestamp_clean(pdc_transfer_request_start_write_timestamps); - pdc_timestamp_clean(pdc_transfer_request_start_read_timestamps); - pdc_timestamp_clean(pdc_transfer_request_wait_write_timestamps); - pdc_timestamp_clean(pdc_transfer_request_wait_read_timestamps); - pdc_timestamp_clean(pdc_transfer_request_start_write_bulk_timestamps); - pdc_timestamp_clean(pdc_transfer_request_start_read_bulk_timestamps); - pdc_timestamp_clean(pdc_transfer_request_inner_write_bulk_timestamps); - pdc_timestamp_clean(pdc_transfer_request_inner_read_bulk_timestamps); - - pdc_timestamp_clean(pdc_transfer_request_start_all_write_timestamps); - pdc_timestamp_clean(pdc_transfer_request_start_all_read_timestamps); - pdc_timestamp_clean(pdc_transfer_request_start_all_write_bulk_timestamps); - pdc_timestamp_clean(pdc_transfer_request_start_all_read_bulk_timestamps); - pdc_timestamp_clean(pdc_transfer_request_wait_all_timestamps); - pdc_timestamp_clean(pdc_transfer_request_inner_write_all_bulk_timestamps); - pdc_timestamp_clean(pdc_transfer_request_inner_read_all_bulk_timestamps); - - /* pdc_timestamp_clean(pdc_create_obj_timestamps); */ - /* pdc_timestamp_clean(pdc_create_cont_timestamps); */ - - free(pdc_buf_obj_map_timestamps); - return 0; -} - -#else -int -PDC_timing_report(const char *prefix __attribute__((unused))) -{ - return 0; -} -#endif diff --git a/tools/pdc_export.c b/tools/pdc_export.c index a67f444e3..9c0b572b2 100644 --- a/tools/pdc_export.c +++ b/tools/pdc_export.c @@ -7,6 +7,7 @@ #include #include #include "hdf5.h" +#include "pdc_generic.h" // #define ENABLE_MPI 1 @@ -240,45 +241,11 @@ get_data_type(int data_type) if (data_type == -1) { return "PDC_UNKNOWN"; } - else if (data_type == 0) { - return "PDC_INT"; - } - else if (data_type == 1) { - return "PDC_FLOAT"; - } - else if (data_type == 2) { - return "PDC_DOUBLE"; - } - else if (data_type == 3) { - return "PDC_CHAR"; - } - else if (data_type == 4) { - return "PDC_COMPOUND"; - } - else if (data_type == 5) { - return "PDC_ENUM"; - } - else if (data_type == 6) { - return "PDC_ARRAY"; - } - else if (data_type == 7) { - return "PDC_UINT"; - } - else if (data_type == 8) { - return "PDC_INT64"; - } - else if (data_type == 9) { - return "PDC_UINT64"; - } - else if (data_type == 10) { - return "PDC_INT16"; - } - else if (data_type == 11) { - return "PDC_INT16"; - } - else { + char *result = get_enum_name_by_dtype(data_type); + if (result == NULL) { return "NULL"; } + return result; } char * diff --git a/tools/pdc_import.c b/tools/pdc_import.c index f51f587c8..adf8cd46e 100644 --- a/tools/pdc_import.c +++ b/tools/pdc_import.c @@ -711,9 +711,10 @@ do_attr(hid_t aid, pdcid_t obj_id) char buf[MAX_NAME] = {0}; char read_buf[TAG_LEN_MAX] = {0}; // 
pdc_kvtag_t kvtag1; - char * tag_name; - void * tag_value; - size_t tag_size; + char * tag_name; + void * tag_value; + pdc_var_type_t value_type; + size_t tag_size; /* * Get the name of the attribute. @@ -735,7 +736,7 @@ do_attr(hid_t aid, pdcid_t obj_id) else { tag_size = H5Tget_size(atype); } - PDCobj_put_tag(obj_id, tag_name, tag_value, tag_size); + PDCobj_put_tag(obj_id, tag_name, tag_value, value_type, tag_size); /* * Get attribute information: dataspace, data type diff --git a/tools/pdc_ls.c b/tools/pdc_ls.c index 9d3db32fe..5963b33e2 100644 --- a/tools/pdc_ls.c +++ b/tools/pdc_ls.c @@ -201,45 +201,11 @@ get_data_type(int data_type) if (data_type == -1) { return "PDC_UNKNOWN"; } - else if (data_type == 0) { - return "PDC_INT"; - } - else if (data_type == 1) { - return "PDC_FLOAT"; - } - else if (data_type == 2) { - return "PDC_DOUBLE"; - } - else if (data_type == 3) { - return "PDC_CHAR"; - } - else if (data_type == 4) { - return "PDC_COMPOUND"; - } - else if (data_type == 5) { - return "PDC_ENUM"; - } - else if (data_type == 6) { - return "PDC_ARRAY"; - } - else if (data_type == 7) { - return "PDC_UINT"; - } - else if (data_type == 8) { - return "PDC_INT64"; - } - else if (data_type == 9) { - return "PDC_UINT64"; - } - else if (data_type == 10) { - return "PDC_INT16"; - } - else if (data_type == 11) { - return "PDC_INT16"; - } - else { + char *result = get_enum_name_by_dtype(data_type); + if (result == NULL) { return "NULL"; } + return result; } char * From 9fd90c29fd7aa61bdbc06e9ad6744b40346d9cae Mon Sep 17 00:00:00 2001 From: Zhang Wei Date: Mon, 19 Jun 2023 18:38:26 -0400 Subject: [PATCH 194/806] Feature/metadata type (#3) * update metadata type system * update serde framework to cope with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurrences are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * remove unnecessary header files from installation * resolve conflict * add important files * clang formatting * update cmake --- src/api/CMakeLists.txt | 4 +- src/api/pdc_client_connect.c | 6 +- src/api/pdc_obj/include/pdc_cont.h | 6 +- src/commons/CMakeLists.txt | 27 +- src/commons/include/pdc_public.h | 62 ++ src/commons/profiling/CMakeLists.txt | 127 ++++ src/commons/profiling/include/pdc_hashtab.h | 198 +++++++ src/commons/profiling/include/pdc_stack_ops.h | 70 +++ src/commons/profiling/pdc_hashtab.c | 540 ++++++++++++++++++ src/commons/profiling/pdc_stack_ops.c | 264 +++++++++ src/commons/serde/include/pdc_serde.h | 21 +- src/commons/serde/pdc_serde.c | 22 +- src/commons/utils/include/pdc_id_pkg.h | 57 ++ src/commons/utils/include/pdc_linkedlist.h | 120 ++++ src/commons/utils/include/pdc_malloc.h | 59 ++ src/commons/utils/include/pdc_private.h | 202 +++++++ src/commons/utils/include/pdc_timing.h | 193 +++++++ src/commons/utils/pdc_malloc.c | 76 +++ src/commons/utils/pdc_timing.c | 537 +++++++++++++++++ src/server/pdc_client_server_common.c | 8 +- src/tests/cont_tags.c | 4 +- src/tests/kvtag_add_get_benchmark.c | 7 +- src/tests/kvtag_add_get_scale.c | 3 +- src/tests/obj_tags.c | 6 +- src/tests/read_obj_shared.c | 4 +- src/tests/vpicio_mts.c | 2 +- 26 files changed, 2559 insertions(+), 66 deletions(-) create mode 100644 src/commons/include/pdc_public.h create mode 100644 src/commons/profiling/CMakeLists.txt create mode 100644 src/commons/profiling/include/pdc_hashtab.h create mode 100644 src/commons/profiling/include/pdc_stack_ops.h create mode 100644 src/commons/profiling/pdc_hashtab.c 
create mode 100644 src/commons/profiling/pdc_stack_ops.c create mode 100644 src/commons/utils/include/pdc_id_pkg.h create mode 100644 src/commons/utils/include/pdc_linkedlist.h create mode 100644 src/commons/utils/include/pdc_malloc.h create mode 100644 src/commons/utils/include/pdc_private.h create mode 100644 src/commons/utils/include/pdc_timing.h create mode 100644 src/commons/utils/pdc_malloc.c create mode 100644 src/commons/utils/pdc_timing.c diff --git a/src/api/CMakeLists.txt b/src/api/CMakeLists.txt index 7ef5ec186..1ba4b8389 100644 --- a/src/api/CMakeLists.txt +++ b/src/api/CMakeLists.txt @@ -142,7 +142,6 @@ install( #----------------------------------------------------------------------------- set(PDC_HEADERS ${PDC_SOURCE_DIR}/src/api/include/pdc.h - ${PDC_SOURCE_DIR}/src/api/include/pdc_public.h ${PDC_SOURCE_DIR}/src/api/pdc_analysis/include/pdc_analysis.h ${PDC_SOURCE_DIR}/src/api/pdc_obj/include/pdc_cont.h ${PDC_SOURCE_DIR}/src/api/pdc_obj/include/pdc_mpi.h @@ -152,7 +151,6 @@ set(PDC_HEADERS ${PDC_SOURCE_DIR}/src/api/pdc_query/include/pdc_query.h ${PDC_SOURCE_DIR}/src/api/pdc_region/include/pdc_region.h ${PDC_SOURCE_DIR}/src/api/pdc_transform/include/pdc_transform.h - ${PDC_SOURCE_DIR}/src/utils/include/pdc_interface.h ${PROJECT_BINARY_DIR}/pdc_config_sys.h ${PROJECT_BINARY_DIR}/pdc_config.h ) @@ -232,4 +230,4 @@ set(PDC_INCLUDES_INSTALL_TIME ${PDC_INSTALL_INCLUDE_DIR} ${PDC_EXT_INCLUDE_DEPENDENCIES} PARENT_SCOPE -) +) \ No newline at end of file diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index b6b969f8a..fff47f027 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -7581,7 +7581,8 @@ PDCcont_get_objids(pdcid_t cont_id ATTRIBUTE(unused), int *nobj ATTRIBUTE(unused } perr_t -PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, psize_t value_size) +PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, + psize_t value_size) { perr_t ret_value = SUCCEED; pdc_kvtag_t kvtag; @@ -7604,7 +7605,8 @@ PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, pdc_var_type_t } perr_t -PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, psize_t *value_size) +PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, + psize_t *value_size) { perr_t ret_value = SUCCEED; pdc_kvtag_t *kvtag = NULL; diff --git a/src/api/pdc_obj/include/pdc_cont.h b/src/api/pdc_obj/include/pdc_cont.h index 3a6180b65..33e924c14 100644 --- a/src/api/pdc_obj/include/pdc_cont.h +++ b/src/api/pdc_obj/include/pdc_cont.h @@ -191,7 +191,8 @@ perr_t PDCcont_del(pdcid_t cont_id); * * \return Non-negative on success/Negative on failure */ -perr_t PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, psize_t value_size); +perr_t PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, + psize_t value_size); /** * *********** @@ -203,7 +204,8 @@ perr_t PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, pdc_var * * \return Non-negative on success/Negative on failure */ -perr_t PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, psize_t *value_size); +perr_t PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, + psize_t *value_size); /** * Deleta a tag from a container diff --git a/src/commons/CMakeLists.txt 
b/src/commons/CMakeLists.txt index f29c25a7c..9f8abf690 100644 --- a/src/commons/CMakeLists.txt +++ b/src/commons/CMakeLists.txt @@ -21,27 +21,11 @@ endif() # Mercury find_package(MERCURY REQUIRED) if(MERCURY_FOUND) - message(STATUS "mercury dir ${MERCURY_DIR}") - if(DEFINED MERCURY_DIR AND NOT "${MERCURY_DIR}" STREQUAL "") - # If MERCURY_DIR contains "share/" (or "share" is at the end), truncate it along with everything following it - string(REGEX REPLACE "/share.*" "" trimmed_mercury_dir ${MERCURY_DIR}) - # Check if the trimmed_mercury_dir ends with "/mercury" - string(REGEX MATCH ".*/mercury$" is_mercury_home ${trimmed_mercury_dir}) - # If trimmed_mercury_dir ends with "/mercury", consider it as the home directory - if(is_mercury_home) - set(MERCURY_HOME ${trimmed_mercury_dir}) - else() - # If not ending with "mercury", it's likely an error - message(FATAL_ERROR "Cannot determine MERCURY_HOME from MERCURY_DIR.") - endif() - - message("Mercury home is set to ${MERCURY_HOME}") - - set(MERCURY_INCLUDE_DIR ${MERCURY_HOME}/include) - set(MERCURY_LIBRARY_DIR ${MERCURY_HOME}/lib) - endif() - message(STATUS "mercury include dir ${MERCURY_INCLUDE_DIR}") - message(STATUS "mercury lib dir ${MERCURY_LIBRARY_DIR}") + message(STATUS "mercury dir = ${MERCURY_DIR}") + find_path(MERCURY_INCLUDE_DIR mercury.h HINTS ${MERCURY_DIR}) + find_library(MERCURY_LIBRARY mercury HINTS ${MERCURY_DIR}) + message(STATUS "mercury include dir = ${MERCURY_INCLUDE_DIR}") + message(STATUS "mercury lib = ${MERCURY_LIBRARY}") set(PDC_EXT_INCLUDE_DEPENDENCIES ${MERCURY_INCLUDE_DIR} ${PDC_EXT_INCLUDE_DEPENDENCIES} ) @@ -140,7 +124,6 @@ set(PUBLIC_HEADER_DIR_LIST ${CMAKE_CURRENT_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR}/generic/include ${CMAKE_CURRENT_SOURCE_DIR}/profiling/include - ${CMAKE_CURRENT_SOURCE_DIR}/utils/include ) foreach(_header_dir ${PUBLIC_HEADER_DIR_LIST}) diff --git a/src/commons/include/pdc_public.h b/src/commons/include/pdc_public.h new file mode 100644 index 000000000..6c135f657 --- /dev/null +++ b/src/commons/include/pdc_public.h @@ -0,0 +1,62 @@ +/* + * Copyright Notice for + * Proactive Data Containers (PDC) Software Library and Utilities + * ----------------------------------------------------------------------------- + + *** Copyright Notice *** + + * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the + * University of California, through Lawrence Berkeley National Laboratory, + * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF + * Group (subject to receipt of any required approvals from the U.S. Dept. of + * Energy). All rights reserved. + + * If you have questions about your rights to use or distribute this software, + * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. + + * NOTICE. This Software was developed under funding from the U.S. Department of + * Energy and the U.S. Government consequently retains certain rights. As such, the + * U.S. Government has been granted for itself and others acting on its behalf a + * paid-up, nonexclusive, irrevocable, worldwide license in the Software to + * reproduce, distribute copies to the public, prepare derivative works, and + * perform publicly and display publicly, and to permit other to do so. 
+ */ + +#ifndef PDC_PUBLIC_H +#define PDC_PUBLIC_H + +#include +#include +#include +#include "pdc_generic.h" + +/*******************/ +/* Public Typedefs */ +/*******************/ +typedef int perr_t; +typedef uint64_t pdcid_t; +typedef unsigned long long psize_t; +typedef bool pbool_t; + +typedef int PDC_int_t; +typedef float PDC_float_t; +typedef double PDC_double_t; + +typedef pdc_c_var_type_t pdc_var_type_t; + +typedef enum { PDC_PERSIST, PDC_TRANSIENT } pdc_lifetime_t; + +typedef enum { PDC_SERVER_DEFAULT = 0, PDC_SERVER_PER_CLIENT = 1 } pdc_server_selection_t; + +typedef struct pdc_histogram_t { //????????? + pdc_var_type_t dtype; + int nbin; + double incr; + double * range; + uint64_t * bin; +} pdc_histogram_t; + +#define SUCCEED 0 +#define FAIL (-1) + +#endif /* PDC_PUBLIC_H */ diff --git a/src/commons/profiling/CMakeLists.txt b/src/commons/profiling/CMakeLists.txt new file mode 100644 index 000000000..05e2c90b9 --- /dev/null +++ b/src/commons/profiling/CMakeLists.txt @@ -0,0 +1,127 @@ +#------------------------------------------------------------------------------ +# Include source and build directories +#------------------------------------------------------------------------------ +set(PROFILING_INCLUDE_DIRS + ${PDC_COMMON_INCLUDE_DIRS} + ${PDC_INCLUDES_BUILD_TIME} + ${PROJECT_SOURCE_DIR} + ${PROJECT_BINARY_DIR} + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} + ${PDC_SOURCE_DIR}/src/server/include + ${PDC_SOURCE_DIR}/src/server/pdc_server_region/include + ${PDC_SOURCE_DIR}/src/server/dablooms + ${PDC_SOURCE_DIR}/src/api/include + ${PDC_SOURCE_DIR}/src/api/pdc_obj/include + ${PDC_SOURCE_DIR}/src/api/pdc_region/include + ${PDC_SOURCE_DIR}/src/api/pdc_query/include + ${PDC_SOURCE_DIR}/src/api/pdc_transform/include + ${PDC_SOURCE_DIR}/src/api/pdc_analysis/include + ${PDC_SOURCE_DIR}/src/api/profiling/include + ${PDC_SOURCE_DIR}/src/utils/include + ${MERCURY_INCLUDE_DIR} + ${FASTBIT_INCLUDE_DIR} +) +message(STATUS "PDC_COMMON_INCLUDE_DIRS: ${PDC_COMMON_INCLUDE_DIRS}") +include_directories( + ${PROFILING_INCLUDE_DIRS} +) + +install( + FILES + ${CMAKE_BINARY_DIR}/pdc_config.h + DESTINATION + ${PDC_INSTALL_INCLUDE_DIR} + COMPONENT + headers +) + +#------------------------------------------------------------------------------ +# Options +#------------------------------------------------------------------------------ +#add_definitions(-DPDC_ENABLE_MPI=1) +#add_definitions(-DPDC_TIMING=1) +#add_definitions(-DPDC_ENABLE_CHECKPOINT=1) +#add_definitions(-DENABLE_MULTITHREAD=1) + +#------------------------------------------------------------------------------ +# Configure module header files +#------------------------------------------------------------------------------ +# Set unique vars used in the autogenerated config file (symbol import/export) +if(BUILD_SHARED_LIBS) + set(PDC_BUILD_SHARED_LIBS 1) + set(PDC_LIBTYPE SHARED) +else() + set(PDC_BUILD_SHARED_LIBS 0) + set(PDC_LIBTYPE STATIC) +endif() + +#------------------------------------------------------------------------------ +# Set sources +#------------------------------------------------------------------------------ +set(PDC_PROF_SRCS + ${CMAKE_CURRENT_SOURCE_DIR}/pdc_hashtab.c + ${CMAKE_CURRENT_SOURCE_DIR}/pdc_stack_ops.c + ) + +#------------------------------------------------------------------------------ +# Libraries +#------------------------------------------------------------------------------ +# PDCPROF +add_library(pdcprof ${PDC_PROF_SRCS}) +pdc_set_lib_options(pdcprof "pdcprof" ${PDC_LIBTYPE}) 
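+
+# Illustrative usage note (comments only, nothing here is executed): a target
+# elsewhere in the build that wants the profiler would link it the usual CMake
+# way, e.g.
+#   target_link_libraries(my_tool pdcprof)
+# where "my_tool" stands for whichever executable or library needs profiling.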
+target_include_directories(pdcprof PUBLIC "$" + $) + +set(PDC_EXPORTED_LIBS pdcprof ${PDC_EXPORTED_LIBS}) + +#----------------------------------------------------------------------------- +# Specify project header files to be installed +#----------------------------------------------------------------------------- +set(PDC_PROF_HEADERS + ${CMAKE_CURRENT_SOURCE_DIR}/include/pdc_hashtab.h + ${CMAKE_CURRENT_SOURCE_DIR}/include/pdc_stack_ops.h + ) + +#----------------------------------------------------------------------------- +# Add file(s) to CMake Install +#----------------------------------------------------------------------------- +install( + FILES + ${PDC_PROF_HEADERS} + DESTINATION + ${PDC_INSTALL_INCLUDE_DIR} + COMPONENT + headers +) + +#----------------------------------------------------------------------------- +# Add Target(s) to CMake Install +#----------------------------------------------------------------------------- +install( + TARGETS + pdcprof + EXPORT + ${PDC_EXPORTED_TARGETS} + LIBRARY DESTINATION ${PDC_INSTALL_LIB_DIR} + ARCHIVE DESTINATION ${PDC_INSTALL_LIB_DIR} + RUNTIME DESTINATION ${PDC_INSTALL_BIN_DIR} +) + +#------------------------------------------------------------------------------ +# Set variables for parent scope +#------------------------------------------------------------------------------ +# Used by config.cmake.build.in and Testing +set(PDC_INCLUDES_BUILD_TIME + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} + ${PDC_EXT_INCLUDE_DEPENDENCIES} + PARENT_SCOPE +) + +# Used by config.cmake.install.in +set(PDC_INCLUDES_INSTALL_TIME + ${PDC_INSTALL_INCLUDE_DIR} + ${PDC_EXT_INCLUDE_DEPENDENCIES} + PARENT_SCOPE +) diff --git a/src/commons/profiling/include/pdc_hashtab.h b/src/commons/profiling/include/pdc_hashtab.h new file mode 100644 index 000000000..a664a8ce6 --- /dev/null +++ b/src/commons/profiling/include/pdc_hashtab.h @@ -0,0 +1,198 @@ +/* An expandable hash tables datatype. + Copyright (C) 1999-2017 Free Software Foundation, Inc. + Contributed by Vladimir Makarov (vmakarov@cygnus.com). + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ + +/* This package implements basic hash table functionality. It is possible + to search for an entry, create an entry and destroy an entry. + + Elements in the table are generic pointers. + + The size of the table is not fixed; if the occupancy of the table + grows too high the hash table will be expanded. + + The abstract data implementation is based on generalized Algorithm D + from Knuth's book "The art of computer programming". Hash table is + expanded by creation of new hash table and transferring elements from + the old table to the new table. */ + +#ifndef __HASHTAB_H__ +#define __HASHTAB_H__ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* #include "ansidecl.h" */ +#define PTR void * + +/* The type for a hash code. 
*/ +typedef unsigned int hashval_t; + +/* Callback function pointer types. */ + +/* Calculate hash of a table entry. */ +typedef hashval_t (*htab_hash)(const void *); + +/* Compare a table entry with a possible entry. The entry already in + the table always comes first, so the second element can be of a + different type (but in this case htab_find and htab_find_slot + cannot be used; instead the variants that accept a hash value + must be used). */ +typedef int (*htab_eq)(const void *, const void *); + +/* Cleanup function called whenever a live element is removed from + the hash table. */ +typedef void (*htab_del)(void *); + +/* Function called by htab_traverse for each live element. The first + arg is the slot of the element (which can be passed to htab_clear_slot + if desired), the second arg is the auxiliary pointer handed to + htab_traverse. Return 1 to continue scan, 0 to stop. */ +typedef int (*htab_trav)(void **, void *); + +/* Memory-allocation function, with the same functionality as calloc(). + Iff it returns NULL, the hash table implementation will pass an error + code back to the user, so if your code doesn't handle errors, + best if you use xcalloc instead. */ +typedef void *(*htab_alloc)(size_t, size_t); + +/* We also need a free() routine. */ +typedef void (*htab_free)(void *); + +/* Memory allocation and deallocation; variants which take an extra + argument. */ +typedef void *(*htab_alloc_with_arg)(void *, size_t, size_t); +typedef void (*htab_free_with_arg)(void *, void *); + +/* This macro defines reserved value for empty table entry. */ + +#define HTAB_EMPTY_ENTRY ((PTR)0) + +/* This macro defines reserved value for table entry which contained + a deleted element. */ + +#define HTAB_DELETED_ENTRY ((PTR)1) + +/* Hash tables are of the following type. The structure + (implementation) of this type is not needed for using the hash + tables. All work with hash table should be executed only through + functions mentioned below. The size of this structure is subject to + change. */ + +struct htab { + /* Pointer to hash function. */ + htab_hash hash_f; + + /* Pointer to comparison function. */ + htab_eq eq_f; + + /* Pointer to cleanup function. */ + htab_del del_f; + + /* Table itself. */ + void **entries; + + /* Current size (in entries) of the hash table. */ + size_t size; + + /* Current number of elements including also deleted elements. */ + size_t n_elements; + + /* Current number of deleted elements in the table. */ + size_t n_deleted; + + /* The following member is used for debugging. Its value is number + of all calls of `htab_find_slot' for the hash table. */ + unsigned int searches; + + /* The following member is used for debugging. Its value is number + of collisions fixed for time of work with the hash table. */ + unsigned int collisions; + + /* Pointers to allocate/free functions. */ + htab_alloc alloc_f; + htab_free free_f; + + /* Alternate allocate/free functions, which take an extra argument. */ + void * alloc_arg; + htab_alloc_with_arg alloc_with_arg_f; + htab_free_with_arg free_with_arg_f; + + /* Current size (in entries) of the hash table, as an index into the + table of primes. */ + unsigned int size_prime_index; +}; + +typedef struct htab *htab_t; + +/* An enum saying whether we insert into the hash table or not. */ +enum insert_option { NO_INSERT, INSERT }; + +/* The prototypes of the package functions. 
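+
+   A minimal usage sketch (illustrative only; "my_streq" stands for a
+   user-supplied strcmp()-based comparator and is not part of this header,
+   and the stored strings are assumed to outlive the table):
+
+     static int my_streq(const void *a, const void *b)
+     {
+         return strcmp((const char *)a, (const char *)b) == 0;
+     }
+
+     htab_t tab  = htab_create(31, htab_hash_string, my_streq, NULL);
+     void **slot = htab_find_slot(tab, "pdc_server", INSERT);
+     if (*slot == HTAB_EMPTY_ENTRY)
+         *slot = (void *)"pdc_server";
+     htab_delete(tab);
+
+   htab_find_slot() returns the address of the slot; writing the new element
+   into it completes the insertion.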
*/ + +extern htab_t htab_create_alloc(size_t, htab_hash, htab_eq, htab_del, htab_alloc, htab_free); + +extern htab_t htab_create_alloc_ex(size_t, htab_hash, htab_eq, htab_del, void *, htab_alloc_with_arg, + htab_free_with_arg); + +extern htab_t htab_create_typed_alloc(size_t, htab_hash, htab_eq, htab_del, htab_alloc, htab_alloc, + htab_free); + +/* Backward-compatibility functions. */ +extern htab_t htab_create(size_t, htab_hash, htab_eq, htab_del); +extern htab_t htab_try_create(size_t, htab_hash, htab_eq, htab_del); + +extern void htab_set_functions_ex(htab_t, htab_hash, htab_eq, htab_del, void *, htab_alloc_with_arg, + htab_free_with_arg); + +extern void htab_delete(htab_t); +extern void htab_empty(htab_t); + +extern void * htab_find(htab_t, const void *); +extern void **htab_find_slot(htab_t, const void *, enum insert_option); +extern void * htab_find_with_hash(htab_t, const void *, hashval_t); +extern void **htab_find_slot_with_hash(htab_t, const void *, hashval_t, enum insert_option); +extern void htab_clear_slot(htab_t, void **); +extern void htab_remove_elt(htab_t, void *); +extern void htab_remove_elt_with_hash(htab_t, void *, hashval_t); + +extern void htab_traverse(htab_t, htab_trav, void *); +extern void htab_traverse_noresize(htab_t, htab_trav, void *); + +extern size_t htab_size(htab_t); +extern size_t htab_elements(htab_t); +extern double htab_collisions(htab_t); + +/* A hash function for pointers. */ +extern htab_hash htab_hash_pointer; + +/* An equality function for pointers. */ +extern htab_eq htab_eq_pointer; + +/* A hash function for null-terminated strings. */ +extern hashval_t htab_hash_string(const void *); + +/* An iterative hash function for arbitrary data. */ +extern hashval_t iterative_hash(const void *, size_t, hashval_t); +/* Shorthand for hashing something with an intrinsic size. 
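+   For example (purely illustrative, with "struct my_key" being any
+   fixed-size value type):
+
+     struct my_key k = {0};
+     hashval_t     h = iterative_hash_object(k, 0);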
*/ +#define iterative_hash_object(OB, INIT) iterative_hash(&OB, sizeof(OB), INIT) + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __HASHTAB_H */ diff --git a/src/commons/profiling/include/pdc_stack_ops.h b/src/commons/profiling/include/pdc_stack_ops.h new file mode 100644 index 000000000..110d03900 --- /dev/null +++ b/src/commons/profiling/include/pdc_stack_ops.h @@ -0,0 +1,70 @@ +#ifndef _STACK_OPS_H +#define _STACK_OPS_H + +#include "pdc_config.h" +#include "pdc_private.h" +#include +#include +#include +#include +#include + +typedef void *hash_table_t; + +typedef struct profileEntry { + struct profileEntry *next; + struct profileEntry *prev; + const char * ftnkey; + const char * tags; + int64_t count; + int64_t localTotal; + int64_t CumTotal; + int64_t locmin; + int64_t locmax; + double usecTotal; + struct timespec callTime; + struct timespec startTime; + struct timespec totalTime; + struct timespec selfTime; + + struct profileEntry *parent; +} profileEntry_t; + +// typedef enum _boolean {FALSE = 0, TRUE} bool_t; +extern pbool_t enableProfiling; + +#ifndef RESET_TIMER +#define RESET_TIMER(x) (x).tv_sec = (x).tv_nsec = 0; +#endif + +#ifndef TIMER_DIFF +/* t0 = t1 - t2 */ +#define TIMER_DIFF(t0, t1, t2) \ + { \ + if (t2.tv_nsec > (t1).tv_nsec) { \ + (t1).tv_nsec += 1000000000; \ + (t1).tv_sec -= 1; \ + } \ + (t0).tv_sec = (t1).tv_sec - (t2).tv_sec; \ + (t0).tv_nsec = (t1).tv_nsec - (t2).tv_nsec; \ + } +#endif + +#ifndef TIMER_ADD +/* t0 += t1 */ +#define TIMER_ADD(t0, t1) \ + { \ + (t0).tv_sec += (t1).tv_sec; \ + if (((t0).tv_nsec += (t1).tv_nsec) > 10000000000) { \ + (t0).tv_sec += 1; \ + (t0).tv_nsec -= 10000000000; \ + } \ + } +#endif + +void initialize_profile(void **table, size_t tabsize); +void finalize_profile(); +void push(const char *ftnkey, const char *tags); +void pop(); + +#endif diff --git a/src/commons/profiling/pdc_hashtab.c b/src/commons/profiling/pdc_hashtab.c new file mode 100644 index 000000000..e59b7e3ba --- /dev/null +++ b/src/commons/profiling/pdc_hashtab.c @@ -0,0 +1,540 @@ +/* An expandable hash tables datatype. + Copyright (C) 1999, 2000, 2001, 2002 Free Software Foundation, Inc. + Contributed by Vladimir Makarov (vmakarov@cygnus.com). + +This file is part of the libiberty library. +Libiberty is free software; you can redistribute it and/or +modify it under the terms of the GNU Library General Public +License as published by the Free Software Foundation; either +version 2 of the License, or (at your option) any later version. + +Libiberty is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Library General Public License for more details. + +You should have received a copy of the GNU Library General Public +License along with libiberty; see the file COPYING.LIB. If +not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* This package implements basic hash table functionality. It is possible + to search for an entry, create an entry and destroy an entry. + + Elements in the table are generic pointers. + + The size of the table is not fixed; if the occupancy of the table + grows too high the hash table will be expanded. + + The abstract data implementation is based on generalized Algorithm D + from Knuth's book "The art of computer programming". Hash table is + expanded by creation of new hash table and transferring elements from + the old table to the new table. 
*/ + +#include +#include +#include +#include +#include "pdc_config.h" +#include "pdc_hashtab.h" + +/* This macro defines reserved value for empty table entry. */ + +#define EMPTY_ENTRY ((PTR)0) + +/* This macro defines reserved value for table entry which contained + a deleted element. */ + +#define DELETED_ENTRY ((PTR)1) + +static unsigned long higher_prime_number(unsigned long); +static hashval_t hash_pointer(const void *); +static int eq_pointer(const void *, const void *); +static int htab_expand(htab_t); +static PTR * find_empty_slot_for_expand(htab_t, hashval_t); + +/* At some point, we could make these be NULL, and modify the + hash-table routines to handle NULL specially; that would avoid + function-call overhead for the common case of hashing pointers. */ +htab_hash htab_hash_pointer = hash_pointer; +htab_eq htab_eq_pointer = eq_pointer; + +/* The following function returns a nearest prime number which is + greater than N, and near a power of two. */ + +static unsigned long higher_prime_number(n) unsigned long n; +{ + /* These are primes that are near, but slightly smaller than, a + power of two. */ + static const unsigned long primes[] = { + (unsigned long)7, + (unsigned long)13, + (unsigned long)31, + (unsigned long)61, + (unsigned long)127, + (unsigned long)251, + (unsigned long)509, + (unsigned long)1021, + (unsigned long)2039, + (unsigned long)4093, + (unsigned long)8191, + (unsigned long)16381, + (unsigned long)32749, + (unsigned long)65521, + (unsigned long)131071, + (unsigned long)262139, + (unsigned long)524287, + (unsigned long)1048573, + (unsigned long)2097143, + (unsigned long)4194301, + (unsigned long)8388593, + (unsigned long)16777213, + (unsigned long)33554393, + (unsigned long)67108859, + (unsigned long)134217689, + (unsigned long)268435399, + (unsigned long)536870909, + (unsigned long)1073741789, + (unsigned long)2147483647, + /* 4294967291L */ + ((unsigned long)2147483647) + ((unsigned long)2147483644), + }; + + const unsigned long *low = &primes[0]; + const unsigned long *high = &primes[sizeof(primes) / sizeof(primes[0])]; + + while (low != high) { + const unsigned long *mid = low + (high - low) / 2; + if (n > *mid) + low = mid + 1; + else + high = mid; + } + + /* If we've run out of primes, abort. */ + if (n > *low) { + fprintf(stderr, "Cannot find prime bigger than %lu\n", n); + abort(); + } + + return *low; +} + +/* Returns a hash code for P. */ + +static hashval_t hash_pointer(p) const PTR p; +{ + return (hashval_t)((long)p >> 3); +} + +/* Returns non-zero if P1 and P2 are equal. */ + +static int eq_pointer(p1, p2) const PTR p1; +const PTR p2; +{ + return p1 == p2; +} + +/* This function creates table with length slightly longer than given + source length. Created hash table is initiated as empty (all the + hash table entries are EMPTY_ENTRY). The function returns the + created hash table, or NULL if memory allocation fails. 
*/ + +htab_t htab_create_alloc(size, hash_f, eq_f, del_f, alloc_f, free_f) size_t size; +htab_hash hash_f; +htab_eq eq_f; +htab_del del_f; +htab_alloc alloc_f; +htab_free free_f; +{ + htab_t result; + size = higher_prime_number(size); + result = (htab_t)(*alloc_f)(1, sizeof(struct htab)); + if (result == NULL) + return NULL; + + result->entries = (PTR *)(*alloc_f)(size, sizeof(PTR)); + if (result->entries == NULL) { + if (free_f != NULL) + (*free_f)(result); + return NULL; + } + result->size = size; + result->hash_f = hash_f; + result->eq_f = eq_f; + result->del_f = del_f; + result->alloc_f = alloc_f; + result->free_f = free_f; + + return result; +} + +/* These functions exist solely for backward compatibility. */ + +#undef htab_create +htab_t htab_create(size, hash_f, eq_f, del_f) size_t size; +htab_hash hash_f; +htab_eq eq_f; +htab_del del_f; +{ + return htab_create_alloc(size, hash_f, eq_f, del_f, calloc, free); +} + +htab_t htab_try_create(size, hash_f, eq_f, del_f) size_t size; +htab_hash hash_f; +htab_eq eq_f; +htab_del del_f; +{ + return htab_create_alloc(size, hash_f, eq_f, del_f, calloc, free); +} + +/* This function frees all memory allocated for given hash table. + Naturally the hash table must already exist. */ + +void htab_delete(htab) htab_t htab; +{ + int i; + + if (htab->del_f) { + for (i = htab->size - 1; i >= 0; i--) + if (htab->entries[i] != EMPTY_ENTRY && htab->entries[i] != DELETED_ENTRY) + (*htab->del_f)(htab->entries[i]); + } + if (htab->free_f != NULL) { + (*htab->free_f)(htab->entries); + (*htab->free_f)(htab); + } +} + +/* This function clears all entries in the given hash table. */ + +void htab_empty(htab) htab_t htab; +{ + int i; + + if (htab->del_f) + for (i = htab->size - 1; i >= 0; i--) + if (htab->entries[i] != EMPTY_ENTRY && htab->entries[i] != DELETED_ENTRY) + (*htab->del_f)(htab->entries[i]); + + memset(htab->entries, 0, htab->size * sizeof(PTR)); +} + +/* Similar to htab_find_slot, but without several unwanted side effects: + - Does not call htab->eq_f when it finds an existing entry. + - Does not change the count of elements/searches/collisions in the + hash table. + This function also assumes there are no deleted entries in the table. + HASH is the hash value for the element to be inserted. */ + +static PTR *find_empty_slot_for_expand(htab, hash) htab_t htab; +hashval_t hash; +{ + size_t size = htab->size; + unsigned int index = hash % size; + PTR * slot = htab->entries + index; + hashval_t hash2; + + if (*slot == EMPTY_ENTRY) + return slot; + else if (*slot == DELETED_ENTRY) + abort(); + + hash2 = 1 + hash % (size - 2); + for (;;) { + index += hash2; + if (index >= size) + index -= size; + + slot = htab->entries + index; + if (*slot == EMPTY_ENTRY) + return slot; + else if (*slot == DELETED_ENTRY) + abort(); + } +} + +/* The following function changes size of memory allocated for the + entries and repeatedly inserts the table elements. The occupancy + of the table after the call will be about 50%. Naturally the hash + table must already exist. Remember also that the place of the + table entries is changed. If memory allocation failures are allowed, + this function will return zero, indicating that the table could not be + expanded. If all goes well, it will return a non-zero value. 
*/ + +static int htab_expand(htab) htab_t htab; +{ + PTR *oentries; + PTR *olimit; + PTR *p; + PTR *nentries; + + oentries = htab->entries; + olimit = oentries + htab->size; + + htab->size = higher_prime_number(htab->size * 2); + + nentries = (PTR *)(*htab->alloc_f)(htab->size, sizeof(PTR *)); + if (nentries == NULL) + return 0; + htab->entries = nentries; + htab->n_elements -= htab->n_deleted; + htab->n_deleted = 0; + + p = oentries; + do { + PTR x = *p; + + if (x != EMPTY_ENTRY && x != DELETED_ENTRY) { + PTR *q = find_empty_slot_for_expand(htab, (*htab->hash_f)(x)); + *q = x; + } + p++; + } while (p < olimit); + + if (htab->free_f != NULL) + (*htab->free_f)(oentries); + + return 1; +} + +/* This function searches for a hash table entry equal to the given + element. It cannot be used to insert or delete an element. */ + +PTR htab_find_with_hash(htab, element, hash) htab_t htab; +const PTR element; +hashval_t hash; +{ + unsigned int index; + hashval_t hash2; + size_t size; + PTR entry; + + htab->searches++; + size = htab->size; + index = hash % size; + + entry = htab->entries[index]; + if (entry == EMPTY_ENTRY || (entry != DELETED_ENTRY && (*htab->eq_f)(entry, element))) + return entry; + + hash2 = 1 + hash % (size - 2); + + for (;;) { + htab->collisions++; + index += hash2; + if (index >= size) + index -= size; + + entry = htab->entries[index]; + if (entry == EMPTY_ENTRY || (entry != DELETED_ENTRY && (*htab->eq_f)(entry, element))) + return entry; + } +} + +/* Like htab_find_slot_with_hash, but compute the hash value from the + element. */ + +PTR htab_find(htab, element) htab_t htab; +const PTR element; +{ + return htab_find_with_hash(htab, element, (*htab->hash_f)(element)); +} + +/* This function searches for a hash table slot containing an entry + equal to the given element. To delete an entry, call this with + INSERT = 0, then call htab_clear_slot on the slot returned (possibly + after doing some checks). To insert an entry, call this with + INSERT = 1, then write the value you want into the returned slot. + When inserting an entry, NULL may be returned if memory allocation + fails. */ + +PTR * htab_find_slot_with_hash(htab, element, hash, insert) htab_t htab; +const PTR element; +hashval_t hash; +enum insert_option insert; +{ + PTR * first_deleted_slot; + unsigned int index; + hashval_t hash2; + size_t size; + PTR entry; + + if (insert == INSERT && htab->size * 3 <= htab->n_elements * 4 && htab_expand(htab) == 0) + return NULL; + + size = htab->size; + index = hash % size; + + htab->searches++; + first_deleted_slot = NULL; + + entry = htab->entries[index]; + if (entry == EMPTY_ENTRY) + goto empty_entry; + else if (entry == DELETED_ENTRY) + first_deleted_slot = &htab->entries[index]; + else if ((*htab->eq_f)(entry, element)) + return &htab->entries[index]; + + hash2 = 1 + hash % (size - 2); + for (;;) { + htab->collisions++; + index += hash2; + if (index >= size) + index -= size; + + entry = htab->entries[index]; + if (entry == EMPTY_ENTRY) + goto empty_entry; + else if (entry == DELETED_ENTRY) { + if (!first_deleted_slot) + first_deleted_slot = &htab->entries[index]; + } + else if ((*htab->eq_f)(entry, element)) + return &htab->entries[index]; + } + +empty_entry: + if (insert == NO_INSERT) + return NULL; + + htab->n_elements++; + + if (first_deleted_slot) { + *first_deleted_slot = EMPTY_ENTRY; + return first_deleted_slot; + } + + return &htab->entries[index]; +} + +/* Like htab_find_slot_with_hash, but compute the hash value from the + element. 
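+
+   For completeness, the removal protocol described above for
+   htab_find_slot_with_hash() looks like this when the hash is computed
+   implicitly (sketch only; "tab" and "elem" are placeholders):
+
+     void **slot = htab_find_slot(tab, elem, NO_INSERT);
+     if (slot != NULL)
+         htab_clear_slot(tab, slot);
+
+   htab_clear_slot() invokes del_f on the live entry and marks the slot
+   deleted, which is what htab_remove_elt() does internally.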
*/ + +PTR * htab_find_slot(htab, element, insert) htab_t htab; +const PTR element; +enum insert_option insert; +{ + return htab_find_slot_with_hash(htab, element, (*htab->hash_f)(element), insert); +} + +/* This function deletes an element with the given value from hash + table. If there is no matching element in the hash table, this + function does nothing. */ + +void htab_remove_elt(htab, element) htab_t htab; +PTR element; +{ + PTR *slot; + + slot = htab_find_slot(htab, element, NO_INSERT); + if (*slot == EMPTY_ENTRY) + return; + + if (htab->del_f) + (*htab->del_f)(*slot); + + *slot = DELETED_ENTRY; + htab->n_deleted++; +} + +/* This function clears a specified slot in a hash table. It is + useful when you've already done the lookup and don't want to do it + again. */ + +void htab_clear_slot(htab, slot) htab_t htab; +PTR *slot; +{ + if (slot < htab->entries || slot >= htab->entries + htab->size || *slot == EMPTY_ENTRY || + *slot == DELETED_ENTRY) + abort(); + + if (htab->del_f) + (*htab->del_f)(*slot); + + *slot = DELETED_ENTRY; + htab->n_deleted++; +} + +/* This function scans over the entire hash table calling + CALLBACK for each live entry. If CALLBACK returns false, + the iteration stops. INFO is passed as CALLBACK's second + argument. */ + +void htab_traverse(htab, callback, info) htab_t htab; +htab_trav callback; +PTR info; +{ + PTR *slot = htab->entries; + PTR *limit = slot + htab->size; + + do { + PTR x = *slot; + if (x != EMPTY_ENTRY && x != DELETED_ENTRY) + if (!(*callback)(slot, info)) + break; + } while (++slot < limit); +} + +/* Return the current size of given hash table. */ + +size_t htab_size(htab) htab_t htab; +{ + return htab->size; +} + +/* Return the current number of elements in given hash table. */ + +size_t htab_elements(htab) htab_t htab; +{ + return htab->n_elements - htab->n_deleted; +} + +/* Return the fraction of fixed collisions during all work with given + hash table. */ + +double htab_collisions(htab) htab_t htab; +{ + if (htab->searches == 0) + return 0.0; + + return (double)htab->collisions / (double)htab->searches; +} + +/* Hash P as a null-terminated string. + + Copied from gcc/hashtable.c. Zack had the following to say with respect + to applicability, though note that unlike hashtable.c, this hash table + implementation re-hashes rather than chain buckets. + + http://gcc.gnu.org/ml/gcc-patches/2001-08/msg01021.html + From: Zack Weinberg + Date: Fri, 17 Aug 2001 02:15:56 -0400 + + I got it by extracting all the identifiers from all the source code + I had lying around in mid-1999, and testing many recurrences of + the form "H_n = H_{n-1} * K + c_n * L + M" where K, L, M were either + prime numbers or the appropriate identity. This was the best one. + I don't remember exactly what constituted "best", except I was + looking at bucket-length distributions mostly. + + So it should be very good at hashing identifiers, but might not be + as good at arbitrary strings. + + I'll add that it thoroughly trounces the hash functions recommended + for this use at http://burtleburtle.net/bob/hash/index.html, both + on speed and bucket distribution. I haven't tried it against the + function they just started using for Perl's hashes. 
*/ + +hashval_t htab_hash_string(p) const PTR p; +{ + const unsigned char *str = (const unsigned char *)p; + hashval_t r = 0; + unsigned char c; + + while ((c = *str++) != 0) + r = r * 67 + c - 113; + + return r; +} diff --git a/src/commons/profiling/pdc_stack_ops.c b/src/commons/profiling/pdc_stack_ops.c new file mode 100644 index 000000000..fab9f274a --- /dev/null +++ b/src/commons/profiling/pdc_stack_ops.c @@ -0,0 +1,264 @@ +#include +#include +#include +#include "pdc_stack_ops.h" +#include "pdc_hashtab.h" + +profileEntry_t *calltree = NULL; +profileEntry_t *freelist = NULL; + +static int profilerrors = 0; + +hash_table_t hashtable; + +htab_t thisHashTable; + +/* For now we disable profiling (by default) + * Note that one can always ENABLE it by set the + * environment variable "PROFILE_ENABLE=true" + */ +pbool_t enableProfiling = FALSE; + +/* + * The idea of this implementation is to simulate the call stack + * of the running application. Each function that we care about + * begins with a FUNC_ENTER(x) declaration and finishes with + * FUNC_LEAVE(ret). These of course are macros and under + * the condition that we enable profiling, these expand into + * push and pop operations which we define below. + * + * Example: suppose that a user application is defined as follows + * int main() { + * a(); + * b(); + * c(); + * return 0; + * } + * + * void a() { + * aa(); + * aaa(); + * ab(); + * } + * + * void b() { + * bb(); + * bbb(); + * bc(); + * } + * + * void c() { + * cc(); + * ccc(); + * ca(); + * } + * + * Assume that all of the internal functions only make system calls + * or 3rd party libraries, i.e. the underlying functions will NOT + * be profiled. + * + * The implementation of stack_ops will maintain a call tree + * that mirrors that of the actual program, i.e. the alltree data + * structure will contain something like the following as we enter + * the first function contained by a(): + * + * ("main") --> ("a") --> ("aa") + * + * The entry for "main" has a /start_time and no /total_time + * Similarly, "a" has it's own /start_time and no /total_time + * The final entry: "aa" has a start-time and just prior to + * the return to it's parent ("a"), we sample the real-time + * clock as part of the POP functionality. Using the current + * time minus the start-time we establish the raw total elapsed + * time for the current function. + * NOTE: The actual runtime spent within the function is + * a calculation which subtracts out the total elapsed times + * of all of the lower-level functions, e.g. suppose ("a") + * has a total runtime of 10. If the total runtime of ("aa") + * in the simple call chain shown above is 5, then the actual + * profiled time spent in ("a") is 10 - 5 = 5. + * Ultimately, if were to execute the entire program and then + * sum all of the individual profile times, the total should + * match the execution time of the program. 
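+ *
+ * A minimal sketch of the instrumentation this profiler expects (the
+ * function below is hypothetical; FUNC_ENTER/FUNC_LEAVE come from
+ * pdc_private.h and, when the profiler is built in, expand to push()/pop()
+ * guarded by enableProfiling, which is turned on at run time with
+ * PROFILE_ENABLE=true):
+ *
+ *   perr_t
+ *   PDC_do_something(int n)
+ *   {
+ *       perr_t ret_value = SUCCEED;
+ *       FUNC_ENTER(NULL);
+ *       // ... work attributed to "PDC_do_something" ...
+ *       FUNC_LEAVE(ret_value);
+ *   }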
+ */ + +void +push(const char *ftnkey, const char *tags) +{ + profileEntry_t *thisEntry; + if (freelist != NULL) { + thisEntry = freelist; + freelist = thisEntry->next; + } + else { + if ((thisEntry = (profileEntry_t *)malloc(sizeof(profileEntry_t))) == NULL) { + perror("malloc"); + profilerrors++; + } + } + + if (profilerrors) + return; + thisEntry->ftnkey = ftnkey; + thisEntry->tags = tags; + thisEntry->prev = calltree; + thisEntry->next = NULL; + calltree = thisEntry; + + /* Timing */ + clock_gettime(CLOCK_REALTIME, &thisEntry->startTime); + RESET_TIMER(thisEntry->callTime); + return; +} + +void +pop() +{ + struct timespec current_time; + profileEntry_t *master; + profileEntry_t *thisEntry = calltree; + int update_entry = TRUE; + if (thisEntry == NULL) + return; /* This shouldn't happen */ + + /* Timing */ + clock_gettime(CLOCK_REALTIME, ¤t_time); + TIMER_DIFF(thisEntry->totalTime, current_time, thisEntry->startTime); + TIMER_DIFF(thisEntry->selfTime, thisEntry->totalTime, thisEntry->callTime); + calltree = thisEntry->prev; + if (calltree != NULL) { + TIMER_ADD(calltree->callTime, thisEntry->totalTime); + } + /* Check to see if this function has already been added to the hashtable */ + void **tableEntry = htab_find_slot(thisHashTable, thisEntry, INSERT); + if (*tableEntry == NULL) { + /* No table entry found so add it now ... */ + master = (profileEntry_t *)malloc(sizeof(profileEntry_t)); + if (master) { + thisEntry->count = 1; + memcpy(master, thisEntry, sizeof(profileEntry_t)); + *tableEntry = master; + } + update_entry = FALSE; + } + + if (update_entry) { + master = *(profileEntry_t **)tableEntry; + master->count++; + TIMER_ADD(master->totalTime, thisEntry->totalTime); + TIMER_ADD(master->selfTime, thisEntry->selfTime); + } + + /* Rather than freeing the container, we add the + * current entry onto the freelist. + */ + thisEntry->next = freelist; + freelist = thisEntry; +} + +hashval_t +hash_profile_entry(const void *p) +{ + const profileEntry_t *thisEntry = (const profileEntry_t *)p; + return htab_hash_string(thisEntry->ftnkey); +} + +int +eq_profile_entry(const void *a, const void *b) +{ + const profileEntry_t *tp_a = (const profileEntry_t *)a; + const profileEntry_t *tp_b = (const profileEntry_t *)b; + return (tp_a->ftnkey == tp_b->ftnkey); +} + +void +initialize_profile(void **hashtab, size_t size) +{ + if (*hashtab == NULL) { + if ((thisHashTable = htab_try_create(size, hash_profile_entry, eq_profile_entry, free)) == NULL) { + return; + } + *hashtab = thisHashTable; + } +} + +int +show_profile_info(void **ht_live_entry, void *extraInfo ATTRIBUTE(unused)) +{ + static int count = 0; + char * LineBreak = "------------------------------------------------------------------------------"; + char * header = " item calls Time/call [Sec,nSec]\tftn_name"; + const profileEntry_t *thisEntry = *(const profileEntry_t **)ht_live_entry; + + if (thisEntry) { + struct timespec totalTime; + int64_t totalCalls = thisEntry->count; + if (count == 0) + puts(header); + totalTime = thisEntry->totalTime; + printf("%s\n %d\t%-6" PRId64 " %6" PRId64 ",%6" PRId64 "\t\t %s\n", LineBreak, ++count, totalCalls, + totalTime.tv_sec / totalCalls, totalTime.tv_nsec / totalCalls, thisEntry->ftnkey); + } + + return TRUE; +} + +/* Returns 1 if we set enableProfiling to TRUE + * otherwise returns 0. + */ +int +toggle_profile_enable() +{ + if (enableProfiling == FALSE) + enableProfiling = TRUE; + else + enableProfiling = FALSE; + + return (enableProfiling ? 
1 : 0); +} + +/* These functions should be used when we've actually built the profiler as a shared library. + * Note: One might check an environment variable to see if a non-default size + * for the hashtable initialization should be used... + * The profile_fini should probably be used to dump the contents of the profile + * hashtable. + */ + +void __attribute__((constructor)) profile_init(void) +{ + int default_HashtableSize = 128; + char *size_override = NULL; + char *profile_enable = getenv("PROFILE_ENABLE"); + if (profile_enable != NULL) { + if (strcasecmp(profile_enable, "true") == 0) { + enableProfiling = TRUE; + } + else if (strcasecmp(profile_enable, "false") == 0) { + enableProfiling = FALSE; + } + } + // While it is tempting to skip creating a hashtable + // if we've disabled profiling (see above), I want + // to give the user the ability at runtime to + // possibly enable everything... + // I don't currently include any APIs to enable + // or disable profiling at runtime, but that is + // on the TODO list. + + size_override = getenv("PROFILE_HASHTABLESIZE"); + if (size_override != NULL) { + int override_value = atoi(size_override); + if (override_value > 0) { + default_HashtableSize = override_value; + } + } + initialize_profile(&hashtable, default_HashtableSize); +} + +void __attribute__((destructor)) finalize_profile(void) +{ + int count = 1; + if (thisHashTable != NULL) { + htab_traverse(thisHashTable, show_profile_info, &count); + } +} diff --git a/src/commons/serde/include/pdc_serde.h b/src/commons/serde/include/pdc_serde.h index 6211e1917..8c5efae9e 100644 --- a/src/commons/serde/include/pdc_serde.h +++ b/src/commons/serde/include/pdc_serde.h @@ -62,11 +62,11 @@ void pdc_serde_append_key_value(PDC_SERDE_SerializedData *data, PDC_SERDE_Key *k /** * @brief get the total size of PDC_SERDE_SerializedData structure instance - * + * * @param data Pointer to the PDC_SERDE_SerializedData structure instance - * + * * @return total size of the PDC_SERDE_SerializedData structure instance -*/ + */ size_t get_total_size_for_serialized_data(PDC_SERDE_SerializedData *data); /** @@ -113,9 +113,9 @@ void pdc_serde_print(PDC_SERDE_SerializedData *data); static inline PDC_SERDE_Key * PDC_SERDE_KEY(void *key, PDC_CType pdc_type, size_t size) { - PDC_SERDE_Key *pdc_key = (PDC_SERDE_Key *)malloc(sizeof(PDC_SERDE_Key)); - size_t key_size = (size_t) get_size_by_class_n_type(key, size, PDC_CLS_SCALAR, pdc_type); - pdc_key->key = malloc(key_size); + PDC_SERDE_Key *pdc_key = (PDC_SERDE_Key *)malloc(sizeof(PDC_SERDE_Key)); + size_t key_size = (size_t)get_size_by_class_n_type(key, size, PDC_CLS_SCALAR, pdc_type); + pdc_key->key = malloc(key_size); memcpy(pdc_key->key, key, key_size); pdc_key->pdc_type = pdc_type; pdc_key->size = key_size; @@ -135,14 +135,15 @@ PDC_SERDE_KEY(void *key, PDC_CType pdc_type, size_t size) static inline PDC_SERDE_Value * PDC_SERDE_VALUE(void *data, PDC_CType pdc_type, PDC_CType_Class pdc_class, size_t size) { - PDC_SERDE_Value *pdc_value = (PDC_SERDE_Value *)malloc(sizeof(PDC_SERDE_Value)); - size_t value_size = 0; + PDC_SERDE_Value *pdc_value = (PDC_SERDE_Value *)malloc(sizeof(PDC_SERDE_Value)); + size_t value_size = 0; if (pdc_class == PDC_CLS_STRUCT) { // TODO: we need to check if data is a valid PDC_SERDE_SerializedData structure. 
PDC_SERDE_SerializedData *struct_data = (PDC_SERDE_SerializedData *)data; size = struct_data->totalSize; - } else { - value_size = (size_t) get_size_by_class_n_type(data, size, pdc_class, pdc_type); + } + else { + value_size = (size_t)get_size_by_class_n_type(data, size, pdc_class, pdc_type); } pdc_value->data = malloc(value_size); memcpy(pdc_value->data, data, value_size); diff --git a/src/commons/serde/pdc_serde.c b/src/commons/serde/pdc_serde.c index e0959b5cd..fe9e9aa64 100644 --- a/src/commons/serde/pdc_serde.c +++ b/src/commons/serde/pdc_serde.c @@ -28,10 +28,12 @@ pdc_serde_append_key_value(PDC_SERDE_SerializedData *data, PDC_SERDE_Key *key, P data->data->totalSize += (sizeof(int) + sizeof(int) + sizeof(size_t) + value->size); } -size_t get_total_size_for_serialized_data(PDC_SERDE_SerializedData *data) { +size_t +get_total_size_for_serialized_data(PDC_SERDE_SerializedData *data) +{ if (data->totalSize <= 0) { size_t total_size = data->header->totalSize + data->data->totalSize + sizeof(size_t) * 6; - data->totalSize = total_size; + data->totalSize = total_size; } return data->totalSize; } @@ -287,13 +289,12 @@ test_serde_framework() char * doubleKey_str = "double"; double doubleVal = 3.14159; PDC_SERDE_Key * doubleKey = PDC_SERDE_KEY(doubleKey_str, PDC_STRING, sizeof(doubleKey_str)); - PDC_SERDE_Value *doubleValue = - PDC_SERDE_VALUE(&doubleVal, PDC_DOUBLE, PDC_CLS_SCALAR, sizeof(double)); + PDC_SERDE_Value *doubleValue = PDC_SERDE_VALUE(&doubleVal, PDC_DOUBLE, PDC_CLS_SCALAR, sizeof(double)); pdc_serde_append_key_value(data, doubleKey, doubleValue); - char * strKey_str = "string"; - char * strVal = "Hello, World!"; - PDC_SERDE_Key *strKey = PDC_SERDE_KEY(strKey_str, PDC_STRING, (strlen(strKey_str) + 1) * sizeof(char)); + char * strKey_str = "string"; + char * strVal = "Hello, World!"; + PDC_SERDE_Key * strKey = PDC_SERDE_KEY(strKey_str, PDC_STRING, (strlen(strKey_str) + 1) * sizeof(char)); PDC_SERDE_Value *strValue = PDC_SERDE_VALUE(strVal, PDC_STRING, PDC_CLS_SCALAR, (strlen(strVal) + 1) * sizeof(char)); pdc_serde_append_key_value(data, strKey, strValue); @@ -314,7 +315,7 @@ test_serde_framework() PDC_SERDE_SerializedData *point_data = pdc_serde_init(2); PDC_SERDE_Key * x_name = PDC_SERDE_KEY("x", PDC_STRING, sizeof(char *)); - PDC_SERDE_Value *x_value = PDC_SERDE_VALUE(&pointVal.x, PDC_INT, PDC_CLS_SCALAR, sizeof(int)); + PDC_SERDE_Value * x_value = PDC_SERDE_VALUE(&pointVal.x, PDC_INT, PDC_CLS_SCALAR, sizeof(int)); PDC_SERDE_Key * y_name = PDC_SERDE_KEY("y", PDC_STRING, sizeof(char *)); PDC_SERDE_Value *y_value = PDC_SERDE_VALUE(&pointVal.y, PDC_INT, PDC_CLS_SCALAR, sizeof(int)); @@ -323,9 +324,8 @@ test_serde_framework() pdc_serde_append_key_value(point_data, y_name, y_value); void *point_buffer = pdc_serde_serialize(point_data); - PDC_SERDE_Key * structKey = PDC_SERDE_KEY(pointKey, PDC_STRING, sizeof(pointKey)); - PDC_SERDE_Value *structValue = - PDC_SERDE_VALUE(point_buffer, PDC_VOID_PTR, PDC_CLS_STRUCT, sizeof(Point)); + PDC_SERDE_Key * structKey = PDC_SERDE_KEY(pointKey, PDC_STRING, sizeof(pointKey)); + PDC_SERDE_Value *structValue = PDC_SERDE_VALUE(point_buffer, PDC_VOID_PTR, PDC_CLS_STRUCT, sizeof(Point)); pdc_serde_append_key_value(data, structKey, structValue); // Serialize the data diff --git a/src/commons/utils/include/pdc_id_pkg.h b/src/commons/utils/include/pdc_id_pkg.h new file mode 100644 index 000000000..9623661fd --- /dev/null +++ b/src/commons/utils/include/pdc_id_pkg.h @@ -0,0 +1,57 @@ +/* + * Copyright Notice for + * Proactive Data Containers (PDC) Software 
Library and Utilities + * ----------------------------------------------------------------------------- + + *** Copyright Notice *** + + * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the + * University of California, through Lawrence Berkeley National Laboratory, + * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF + * Group (subject to receipt of any required approvals from the U.S. Dept. of + * Energy). All rights reserved. + + * If you have questions about your rights to use or distribute this software, + * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. + + * NOTICE. This Software was developed under funding from the U.S. Department of + * Energy and the U.S. Government consequently retains certain rights. As such, the + * U.S. Government has been granted for itself and others acting on its behalf a + * paid-up, nonexclusive, irrevocable, worldwide license in the Software to + * reproduce, distribute copies to the public, prepare derivative works, and + * perform publicly and display publicly, and to permit other to do so. + */ + +#ifndef PDC_ID_PKG_H +#define PDC_ID_PKG_H + +#include "pdc_private.h" +#include "pdc_linkedlist.h" +#include "mercury_atomic.h" +/* + * Number of bits to use for ID Type in each atom. Increase if more types + * are needed (though this will decrease the number of available IDs per + * type). This is the only number that must be changed since all other bit + * field sizes and masks are calculated from TYPE_BITS. + */ +#define TYPE_BITS 8 +#define TYPE_MASK (((pdcid_t)1 << TYPE_BITS) - 1) +#define PDC_MAX_NUM_TYPES TYPE_MASK +/* + * Number of bits to use for the Atom index in each atom (assumes 8-bit + * bytes). We don't use the sign bit. + */ +#define ID_BITS ((sizeof(pdcid_t) * 8) - (TYPE_BITS + 1)) +#define ID_MASK (((pdcid_t)1 << ID_BITS) - 1) + +/* Map an atom to an ID type number */ +#define PDC_TYPE(a) ((PDC_type_t)(((pdcid_t)(a) >> ID_BITS) & TYPE_MASK)) + +struct _pdc_id_info { + pdcid_t id; /* ID for this info */ + hg_atomic_int32_t count; /* ref. count for this atom */ + void * obj_ptr; /* pointer associated with the atom */ + PDC_LIST_ENTRY(_pdc_id_info) entry; +}; + +#endif /* PDC_ID_PKG_H */ diff --git a/src/commons/utils/include/pdc_linkedlist.h b/src/commons/utils/include/pdc_linkedlist.h new file mode 100644 index 000000000..a9de691b0 --- /dev/null +++ b/src/commons/utils/include/pdc_linkedlist.h @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2013-2016 Argonne National Laboratory, Department of Energy, + * UChicago Argonne, LLC and The HDF Group. + * All rights reserved. + * + * The full copyright notice, including terms governing use, modification, + * and redistribution, is contained in the COPYING file that can be + * found at the root of the source code distribution tree. + */ + +/* Code below is derived from sys/queue.h which follows the below notice: + * + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + */ + +#ifndef PDC_LINKEDLIST_H +#define PDC_LINKEDLIST_H + +// #include "pdc_cont_pkg.h" +// #include "pdc_cont.h" +#include "mercury_thread_mutex.h" +#include + +#define PDC_LIST_HEAD_INITIALIZER(name) \ + { \ + NULL \ + } + +#define PDC_LIST_HEAD_INIT(struct_head_name, var_name) \ + struct struct_head_name var_name = PDC_LIST_HEAD_INITIALIZER(var_name) + +#define PDC_LIST_HEAD_DECL(struct_head_name, struct_entry_name) \ + struct struct_head_name { \ + struct struct_entry_name *head; \ + } + +#define PDC_LIST_HEAD(struct_entry_name) \ + struct { \ + struct struct_entry_name *head; \ + hg_thread_mutex_t lock; \ + } + +#define PDC_LIST_ENTRY(struct_entry_name) \ + struct { \ + struct struct_entry_name * next; \ + struct struct_entry_name **prev; \ + } + +#define PDC_LIST_INIT(head_ptr) \ + do { \ + (head_ptr)->head = NULL; \ + hg_thread_mutex_init(&(head_ptr)->lock); \ + } while (/*CONSTCOND*/ 0) + +#define PDC_LIST_IS_EMPTY(head_ptr) ((head_ptr)->head == NULL) + +#define PDC_LIST_FIRST(head_ptr) ((head_ptr)->head) + +#define PDC_LIST_GET_FIRST(var, head_ptr) (var = (head_ptr)->head) + +#define PDC_LIST_NEXT(entry_ptr, entry_field_name) ((entry_ptr)->entry_field_name.next) + +#define PDC_LIST_TO_NEXT(entry_ptr, entry_field_name) ((entry_ptr) = (entry_ptr)->entry_field_name.next) + +#define PDC_LIST_INSERT_HEAD(head_ptr, entry_ptr, entry_field_name) \ + do { \ + if (((entry_ptr)->entry_field_name.next = (head_ptr)->head) != NULL) \ + (head_ptr)->head->entry_field_name.prev = &(entry_ptr)->entry_field_name.next; \ + (head_ptr)->head = (entry_ptr); \ + (entry_ptr)->entry_field_name.prev = &(head_ptr)->head; \ + } while (/*CONSTCOND*/ 0) + +/* TODO would be nice to not have any condition */ +#define PDC_LIST_REMOVE(entry_ptr, entry_field_name) \ + do { \ + if ((entry_ptr)->entry_field_name.next != NULL) \ + (entry_ptr)->entry_field_name.next->entry_field_name.prev = (entry_ptr)->entry_field_name.prev; \ + *(entry_ptr)->entry_field_name.prev = (entry_ptr)->entry_field_name.next; \ + } while (/*CONSTCOND*/ 0) + +#define PDC_LIST_FOREACH(var, head_ptr, entry_field_name) \ + for ((var) = ((head_ptr)->head); (var); (var) = ((var)->entry_field_name.next)) + +#define PDC_LIST_SEARCH(var, head_ptr, entry_field_name, item, value) \ + for ((var) = ((head_ptr)->head); (((var)->item != value) && (var)); \ + (var) = ((var)->entry_field_name.next)) + +#define PDC_LIST_SEARCH_CONT_NAME(var, head_ptr, entry_field_name, member, n, name) \ + for ((var) = ((head_ptr)->head); \ + ((var) && strcmp(((struct 
_pdc_cont_info *)((var)->member))->cont_info_pub->n, name) != 0); \ + (var) = ((var)->entry_field_name.next)) + +#endif /* PDC_LINKEDLIST_H */ diff --git a/src/commons/utils/include/pdc_malloc.h b/src/commons/utils/include/pdc_malloc.h new file mode 100644 index 000000000..e8ea1941e --- /dev/null +++ b/src/commons/utils/include/pdc_malloc.h @@ -0,0 +1,59 @@ +/* + * Copyright Notice for + * Proactive Data Containers (PDC) Software Library and Utilities + * ----------------------------------------------------------------------------- + + *** Copyright Notice *** + + * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the + * University of California, through Lawrence Berkeley National Laboratory, + * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF + * Group (subject to receipt of any required approvals from the U.S. Dept. of + * Energy). All rights reserved. + + * If you have questions about your rights to use or distribute this software, + * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. + + * NOTICE. This Software was developed under funding from the U.S. Department of + * Energy and the U.S. Government consequently retains certain rights. As such, the + * U.S. Government has been granted for itself and others acting on its behalf a + * paid-up, nonexclusive, irrevocable, worldwide license in the Software to + * reproduce, distribute copies to the public, prepare derivative works, and + * perform publicly and display publicly, and to permit other to do so. + */ + +#ifndef PDC_MALLOC_H +#define PDC_MALLOC_H + +#include + +/***************************************/ +/* Library-private Function Prototypes */ +/***************************************/ +/** + * Create an object + * + * \param size [IN] Size of the struct to be malloced + */ +void *PDC_malloc(size_t size); + +/** + * Create an object + * + * \param size [IN] Size of the struct to be calloced + */ +void *PDC_calloc(size_t size); + +/** + * Create an object + * + * \param mem [IN] Starting address of memory + */ +void *PDC_free(void *mem); + +#define PDC_MALLOC(t) (t *)PDC_malloc(sizeof(t)) +#define PDC_CALLOC(t) (t *)PDC_calloc(sizeof(t)) + +#define PDC_FREE(t, obj) (t *)(intptr_t) PDC_free(obj) + +#endif /* PDC_MALLOC_H */ diff --git a/src/commons/utils/include/pdc_private.h b/src/commons/utils/include/pdc_private.h new file mode 100644 index 000000000..b0fa48f14 --- /dev/null +++ b/src/commons/utils/include/pdc_private.h @@ -0,0 +1,202 @@ +/* + * Copyright Notice for + * Proactive Data Containers (PDC) Software Library and Utilities + * ----------------------------------------------------------------------------- + + *** Copyright Notice *** + + * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the + * University of California, through Lawrence Berkeley National Laboratory, + * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF + * Group (subject to receipt of any required approvals from the U.S. Dept. of + * Energy). All rights reserved. + + * If you have questions about your rights to use or distribute this software, + * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. + + * NOTICE. This Software was developed under funding from the U.S. Department of + * Energy and the U.S. Government consequently retains certain rights. As such, the + * U.S. 
Government has been granted for itself and others acting on its behalf a + * paid-up, nonexclusive, irrevocable, worldwide license in the Software to + * reproduce, distribute copies to the public, prepare derivative works, and + * perform publicly and display publicly, and to permit other to do so. + */ + +#ifndef PDC_PRIVATE_H +#define PDC_PRIVATE_H + +#include "pdc_config.h" +#include "pdc_public.h" +#include +// #include /* gettimeofday() */ + +/****************************/ +/* Library Private Typedefs */ +/****************************/ +typedef enum { + UNKNOWN = 0, + SERVER_MEMORY = 1, + CLIENT_MEMORY = 2, + FLASH = 3, + DISK = 4, + FILESYSTEM = 5, + TAPE = 6 +} _pdc_loci_t; + +/* Query type */ +typedef enum { + PDC_Q_TYPE_DATA_ELEM, /* selects data elements */ + PDC_Q_TYPE_ATTR_VALUE, /* selects attribute values */ + PDC_Q_TYPE_ATTR_NAME, /* selects attributes */ + PDC_Q_TYPE_LINK_NAME, /* selects objects */ + PDC_Q_TYPE_MISC /* (for combine queries) selects misc objects */ +} _pdc_query_type_t; + +/* Query match conditions */ +typedef enum { + PDC_Q_MATCH_EQUAL, /* equal */ + PDC_Q_MATCH_NOT_EQUAL, /* not equal */ + PDC_Q_MATCH_LESS_THAN, /* less than */ + PDC_Q_MATCH_GREATER_THAN /* greater than */ +} _pdc_query_op_t; + +typedef enum { ROW_major, COL_major } _pdc_major_type_t; + +typedef enum { C_lang = 0, FORTRAN_lang, PYTHON_lang, JULIA_lang, N_LANGUAGES } _pdc_analysis_language_t; + +/***************************/ +/* Library Private Structs */ +/***************************/ +struct _pdc_class { + char * name; + pdcid_t local_id; +}; + +#ifdef __cplusplus +#define ATTRIBUTE(a) +#else /* __cplusplus */ +#if defined(HAVE_ATTRIBUTE) +#define ATTRIBUTE(a) __attribute__((a)) +#else +#define ATTRIBUTE(a) +#endif +#endif /* __cplusplus */ + +#ifdef __cplusplus +#define ATTR_UNUSED /*void*/ +#else /* __cplusplus */ +#if defined(HAVE_ATTRIBUTE) && !defined(__SUNPRO_C) +#define ATTR_UNUSED __attribute__((unused)) +#else +#define ATTR_UNUSED /*void*/ +#endif +#endif /* __cplusplus */ + +#define PDCmemset(X, C, Z) memset((void *)(X), C, Z) + +/* + * PDC Boolean type. + */ +#ifndef FALSE +#define FALSE 0 +#endif +#ifndef TRUE +#define TRUE 1 +#endif + +extern pbool_t err_occurred; + +/* + * PGOTO_DONE macro. The argument is the return value which is + * assigned to the `ret_value' variable. Control branches to + * the `done' label. + */ +#define PGOTO_DONE(ret_val) \ + do { \ + ret_value = ret_val; \ + goto done; \ + } while (0) + +#define PGOTO_DONE_VOID \ + do { \ + goto done; \ + } while (0) + +/* + * PGOTO_ERROR macro. The arguments are the return value and an + * error string. The return value is assigned to a variable `ret_value' and + * control branches to the `done' label. + */ +#define PGOTO_ERROR(ret_val, ...) \ + do { \ + fprintf(stderr, "Error in %s:%d\n", __FILE__, __LINE__); \ + fprintf(stderr, " # %s(): ", __func__); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + PGOTO_DONE(ret_val); \ + } while (0) + +#define PGOTO_ERROR_VOID(...) 
\ + do { \ + fprintf(stderr, "Error in %s:%d\n", __FILE__, __LINE__); \ + fprintf(stderr, " # %s(): ", __func__); \ + fprintf(stderr, "\n"); \ + PGOTO_DONE_VOID; \ + } while (0) + +/* Include a basic profiling interface */ +#ifdef ENABLE_PROFILING +#include "pdc_stack_ops.h" + +#define FUNC_ENTER(X) \ + do { \ + if (enableProfiling) \ + push(__func__, (X)); \ + } while (0) + +#define FUNC_LEAVE(ret_value) \ + do { \ + if (enableProfiling) \ + pop(); \ + return (ret_value); \ + } while (0) + +#define FUNC_LEAVE_VOID \ + do { \ + if (enableProfiling) \ + pop(); \ + return; \ + } while (0) + +#else +/* #define FUNC_ENTER(X) \ */ +/* do { \ */ +/* time_t now; \ */ +/* time(&now); \ */ +/* fprintf(stderr, "%ld enter %s\n", now, __func__); \ */ +/* } while (0) */ + +/* #define FUNC_LEAVE(ret_value) \ */ +/* do { \ */ +/* time_t now; \ */ +/* time(&now); \ */ +/* fprintf(stderr, "%ld leave %s\n", now, __func__); \ */ +/* return (ret_value); \ */ +/* } while (0) */ + +#define FUNC_ENTER(X) \ + do { \ + } while (0) + +#define FUNC_LEAVE(ret_value) \ + do { \ + return (ret_value); \ + } while (0) + +#define FUNC_LEAVE_VOID \ + do { \ + return; \ + } while (0) +#endif + +#endif /* PDC_PRIVATE_H */ diff --git a/src/commons/utils/include/pdc_timing.h b/src/commons/utils/include/pdc_timing.h new file mode 100644 index 000000000..1ea2f475b --- /dev/null +++ b/src/commons/utils/include/pdc_timing.h @@ -0,0 +1,193 @@ +#ifndef PDC_TIMING_H +#define PDC_TIMING_H + +#ifndef HOST_NAME_MAX +#if defined(__APPLE__) +#define HOST_NAME_MAX 255 +#else +#define HOST_NAME_MAX 64 +#endif /* __APPLE__ */ +#endif /* HOST_NAME_MAX */ + +#include "pdc_config.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef PDC_TIMING +typedef struct pdc_timing { + double PDCbuf_obj_map_rpc; + double PDCbuf_obj_unmap_rpc; + + double PDCreg_obtain_lock_write_rpc; + double PDCreg_obtain_lock_read_rpc; + + double PDCreg_release_lock_write_rpc; + double PDCreg_release_lock_read_rpc; + + double PDCbuf_obj_map_rpc_wait; + double PDCbuf_obj_unmap_rpc_wait; + + double PDCreg_obtain_lock_write_rpc_wait; + double PDCreg_obtain_lock_read_rpc_wait; + double PDCreg_release_lock_write_rpc_wait; + double PDCreg_release_lock_read_rpc_wait; + + double PDCtransfer_request_start_write_rpc; + double PDCtransfer_request_wait_write_rpc; + double PDCtransfer_request_start_read_rpc; + double PDCtransfer_request_wait_read_rpc; + + double PDCtransfer_request_start_write_rpc_wait; + double PDCtransfer_request_start_read_rpc_wait; + double PDCtransfer_request_wait_write_rpc_wait; + double PDCtransfer_request_wait_read_rpc_wait; + + double PDCtransfer_request_start_all_write_rpc; + double PDCtransfer_request_start_all_read_rpc; + double PDCtransfer_request_wait_all_rpc; + + double PDCtransfer_request_start_all_write_rpc_wait; + double PDCtransfer_request_start_all_read_rpc_wait; + double PDCtransfer_request_wait_all_rpc_wait; + + double PDCtransfer_request_metadata_query_rpc; + + double PDCclient_obj_create_rpc; + double PDCclient_cont_create_rpc; + +} pdc_timing; + +pdc_timing pdc_timings; + +typedef struct pdc_server_timing { + double PDCbuf_obj_map_rpc; + double PDCbuf_obj_unmap_rpc; + + double PDCreg_obtain_lock_write_rpc; + double PDCreg_obtain_lock_read_rpc; + double PDCreg_release_lock_write_rpc; + double PDCreg_release_lock_read_rpc; + double PDCreg_release_lock_bulk_transfer_write_rpc; + double 
PDCreg_release_lock_bulk_transfer_read_rpc; + double PDCreg_release_lock_bulk_transfer_inner_write_rpc; + double PDCreg_release_lock_bulk_transfer_inner_read_rpc; + + double PDCreg_transfer_request_start_write_rpc; + double PDCreg_transfer_request_start_read_rpc; + double PDCreg_transfer_request_wait_write_rpc; + double PDCreg_transfer_request_wait_read_rpc; + double PDCreg_transfer_request_start_write_bulk_rpc; + double PDCreg_transfer_request_inner_write_bulk_rpc; + double PDCreg_transfer_request_start_read_bulk_rpc; + double PDCreg_transfer_request_inner_read_bulk_rpc; + + double PDCreg_transfer_request_start_all_write_rpc; + double PDCreg_transfer_request_start_all_read_rpc; + double PDCreg_transfer_request_start_all_write_bulk_rpc; + double PDCreg_transfer_request_start_all_read_bulk_rpc; + double PDCreg_transfer_request_inner_write_all_bulk_rpc; + double PDCreg_transfer_request_inner_read_all_bulk_rpc; + double PDCreg_transfer_request_wait_all_rpc; + double PDCreg_transfer_request_wait_all_bulk_rpc; + + double PDCdata_server_write_out; + double PDCdata_server_read_from; + double PDCcache_write; + double PDCcache_read; + double PDCcache_flush; + double PDCcache_clean; + double PDCdata_server_write_posix; + double PDCdata_server_read_posix; + + double PDCserver_obj_create_rpc; + double PDCserver_cont_create_rpc; + + double PDCserver_restart; + double PDCserver_checkpoint; + double PDCserver_start_total; +} pdc_server_timing; + +typedef struct pdc_timestamp { + double *start; + double *end; + size_t timestamp_max_size; + size_t timestamp_size; +} pdc_timestamp; + +pdc_server_timing *pdc_server_timings; +pdc_timestamp * pdc_buf_obj_map_timestamps; +pdc_timestamp * pdc_buf_obj_unmap_timestamps; + +pdc_timestamp *pdc_obtain_lock_write_timestamps; +pdc_timestamp *pdc_obtain_lock_read_timestamps; +pdc_timestamp *pdc_release_lock_write_timestamps; +pdc_timestamp *pdc_release_lock_read_timestamps; +pdc_timestamp *pdc_release_lock_bulk_transfer_write_timestamps; +pdc_timestamp *pdc_release_lock_bulk_transfer_inner_write_timestamps; +pdc_timestamp *pdc_release_lock_bulk_transfer_read_timestamps; +pdc_timestamp *pdc_release_lock_bulk_transfer_inner_read_timestamps; + +pdc_timestamp *pdc_transfer_request_start_write_timestamps; +pdc_timestamp *pdc_transfer_request_start_read_timestamps; +pdc_timestamp *pdc_transfer_request_wait_write_timestamps; +pdc_timestamp *pdc_transfer_request_wait_read_timestamps; +pdc_timestamp *pdc_transfer_request_start_write_bulk_timestamps; +pdc_timestamp *pdc_transfer_request_inner_write_bulk_timestamps; +pdc_timestamp *pdc_transfer_request_start_read_bulk_timestamps; +pdc_timestamp *pdc_transfer_request_inner_read_bulk_timestamps; + +pdc_timestamp *pdc_transfer_request_start_all_write_timestamps; +pdc_timestamp *pdc_transfer_request_start_all_read_timestamps; +pdc_timestamp *pdc_transfer_request_start_all_write_bulk_timestamps; +pdc_timestamp *pdc_transfer_request_start_all_read_bulk_timestamps; +pdc_timestamp *pdc_transfer_request_wait_all_timestamps; +pdc_timestamp *pdc_transfer_request_inner_write_all_bulk_timestamps; +pdc_timestamp *pdc_transfer_request_inner_read_all_bulk_timestamps; + +pdc_timestamp *pdc_client_buf_obj_map_timestamps; +pdc_timestamp *pdc_client_buf_obj_unmap_timestamps; +pdc_timestamp *pdc_client_obtain_lock_write_timestamps; +pdc_timestamp *pdc_client_obtain_lock_read_timestamps; +pdc_timestamp *pdc_client_release_lock_write_timestamps; +pdc_timestamp *pdc_client_release_lock_read_timestamps; + +pdc_timestamp 
*pdc_client_transfer_request_start_write_timestamps; +pdc_timestamp *pdc_client_transfer_request_start_read_timestamps; +pdc_timestamp *pdc_client_transfer_request_wait_write_timestamps; +pdc_timestamp *pdc_client_transfer_request_wait_read_timestamps; + +pdc_timestamp *pdc_client_transfer_request_start_all_write_timestamps; +pdc_timestamp *pdc_client_transfer_request_start_all_read_timestamps; +pdc_timestamp *pdc_client_transfer_request_wait_all_timestamps; + +pdc_timestamp *pdc_client_create_cont_timestamps; +pdc_timestamp *pdc_client_create_obj_timestamps; + +pdc_timestamp *pdc_client_transfer_request_metadata_query_timestamps; + +int PDC_timing_init(); +int PDC_timing_finalize(); +int PDC_timing_report(const char *prefix); +int PDC_server_timing_init(); +int pdc_timestamp_register(pdc_timestamp *timestamp, double start, double end); +int PDC_server_timing_report(); +#else +int PDC_timing_report(const char *prefix); +#endif + +#endif diff --git a/src/commons/utils/pdc_malloc.c b/src/commons/utils/pdc_malloc.c new file mode 100644 index 000000000..40379d400 --- /dev/null +++ b/src/commons/utils/pdc_malloc.c @@ -0,0 +1,76 @@ +/* + * Copyright Notice for + * Proactive Data Containers (PDC) Software Library and Utilities + * ----------------------------------------------------------------------------- + + *** Copyright Notice *** + + * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the + * University of California, through Lawrence Berkeley National Laboratory, + * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF + * Group (subject to receipt of any required approvals from the U.S. Dept. of + * Energy). All rights reserved. + + * If you have questions about your rights to use or distribute this software, + * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. + + * NOTICE. This Software was developed under funding from the U.S. Department of + * Energy and the U.S. Government consequently retains certain rights. As such, the + * U.S. Government has been granted for itself and others acting on its behalf a + * paid-up, nonexclusive, irrevocable, worldwide license in the Software to + * reproduce, distribute copies to the public, prepare derivative works, and + * perform publicly and display publicly, and to permit other to do so. 
+ */
+
+#include
+#include
+#include "pdc_malloc.h"
+#include "pdc_private.h"
+
+void *
+PDC_malloc(size_t size)
+{
+    void *ret_value;
+
+    FUNC_ENTER(NULL);
+
+    assert(size);
+
+    if (size)
+        ret_value = malloc(size);
+    else
+        ret_value = NULL;
+
+    FUNC_LEAVE(ret_value);
+}
+
+void *
+PDC_calloc(size_t size)
+{
+    void *ret_value;
+
+    FUNC_ENTER(NULL);
+
+    assert(size);
+
+    if (size)
+        ret_value = calloc(1, size);
+    else
+        ret_value = NULL;
+
+    FUNC_LEAVE(ret_value);
+}
+
+void *
+PDC_free(void *mem)
+{
+    void *ret_value = NULL;
+
+    FUNC_ENTER(NULL);
+
+    if (mem) {
+        free(mem);
+    }
+
+    FUNC_LEAVE(ret_value);
+}
diff --git a/src/commons/utils/pdc_timing.c b/src/commons/utils/pdc_timing.c
new file mode 100644
index 000000000..95826b56b
--- /dev/null
+++ b/src/commons/utils/pdc_timing.c
@@ -0,0 +1,537 @@
+#include "pdc_timing.h"
+
+#ifdef PDC_TIMING
+static double pdc_base_time;
+
+static int
+pdc_timestamp_clean(pdc_timestamp *timestamp)
+{
+    if (timestamp->timestamp_size) {
+        free(timestamp->start);
+    }
+    return 0;
+}
+
+static int
+timestamp_log(FILE *stream, const char *header, pdc_timestamp *timestamp)
+{
+    size_t i;
+    double total = 0.0;
+    fprintf(stream, "%s", header);
+    for (i = 0; i < timestamp->timestamp_size; ++i) {
+        fprintf(stream, ",%4f-%4f", timestamp->start[i], timestamp->end[i]);
+        total += timestamp->end[i] - timestamp->start[i];
+    }
+    fprintf(stream, "\n");
+
+    if (i > 0)
+        fprintf(stream, "%s_total, %f\n", header, total);
+
+    return 0;
+}
+
+int
+PDC_timing_init()
+{
+    char hostname[HOST_NAME_MAX];
+    int rank;
+    pdc_timestamp *ptr;
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    gethostname(hostname, HOST_NAME_MAX);
+    if (!(rank % 31)) {
+        printf("client process rank %d, hostname = %s\n", rank, hostname);
+    }
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    memset(&pdc_timings, 0, sizeof(pdc_timing));
+
+    pdc_client_buf_obj_map_timestamps = calloc(16, sizeof(pdc_timestamp));
+    ptr = pdc_client_buf_obj_map_timestamps + 1;
+    pdc_client_buf_obj_unmap_timestamps = ptr;
+    ptr++;
+    pdc_client_obtain_lock_write_timestamps = ptr;
+    ptr++;
+    pdc_client_obtain_lock_read_timestamps = ptr;
+    ptr++;
+    pdc_client_release_lock_write_timestamps = ptr;
+    ptr++;
+    pdc_client_release_lock_read_timestamps = ptr;
+    ptr++;
+
+    pdc_client_transfer_request_start_write_timestamps = ptr;
+    ptr++;
+    pdc_client_transfer_request_start_read_timestamps = ptr;
+    ptr++;
+    pdc_client_transfer_request_wait_write_timestamps = ptr;
+    ptr++;
+    pdc_client_transfer_request_wait_read_timestamps = ptr;
+    ptr++;
+
+    pdc_client_transfer_request_start_all_write_timestamps = ptr;
+    ptr++;
+    pdc_client_transfer_request_start_all_read_timestamps = ptr;
+    ptr++;
+    pdc_client_transfer_request_wait_all_timestamps = ptr;
+    ptr++;
+
+    pdc_client_create_cont_timestamps = ptr;
+    ptr++;
+    pdc_client_create_obj_timestamps = ptr;
+
+    ptr++;
+    pdc_client_transfer_request_metadata_query_timestamps = ptr;
+
+    return 0;
+}
+
+int
+PDC_timing_finalize()
+{
+    pdc_timestamp_clean(pdc_client_buf_obj_map_timestamps);
+    pdc_timestamp_clean(pdc_client_buf_obj_unmap_timestamps);
+
+    pdc_timestamp_clean(pdc_client_obtain_lock_write_timestamps);
+    pdc_timestamp_clean(pdc_client_obtain_lock_read_timestamps);
+    pdc_timestamp_clean(pdc_client_release_lock_write_timestamps);
+    pdc_timestamp_clean(pdc_client_release_lock_read_timestamps);
+
+    pdc_timestamp_clean(pdc_client_transfer_request_start_write_timestamps);
+    pdc_timestamp_clean(pdc_client_transfer_request_start_read_timestamps);
+    pdc_timestamp_clean(pdc_client_transfer_request_wait_write_timestamps);
+
pdc_timestamp_clean(pdc_client_transfer_request_wait_read_timestamps); + pdc_timestamp_clean(pdc_client_create_cont_timestamps); + pdc_timestamp_clean(pdc_client_create_obj_timestamps); + pdc_timestamp_clean(pdc_client_transfer_request_start_all_write_timestamps); + pdc_timestamp_clean(pdc_client_transfer_request_start_all_read_timestamps); + pdc_timestamp_clean(pdc_client_transfer_request_wait_all_timestamps); + pdc_timestamp_clean(pdc_client_transfer_request_metadata_query_timestamps); + + free(pdc_client_buf_obj_map_timestamps); + return 0; +} + +int +PDC_timing_report(const char *prefix) +{ + pdc_timing max_timings; + int rank; + char filename[256], header[256]; + FILE * stream; + char hostname[HOST_NAME_MAX]; + time_t now; + + time(&now); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + gethostname(hostname, HOST_NAME_MAX); + if (!(rank % 32)) { + printf("client process rank %d, hostname = %s\n", rank, hostname); + } + MPI_Reduce(&pdc_timings, &max_timings, sizeof(pdc_timing) / sizeof(double), MPI_DOUBLE, MPI_MAX, 0, + MPI_COMM_WORLD); + if (rank == 0) { + printf("PDCbuf_obj_map_rpc = %lf, wait = %lf\n", max_timings.PDCbuf_obj_map_rpc, + max_timings.PDCbuf_obj_map_rpc_wait); + printf("PDCreg_obtain_lock_write_rpc = %lf, wait = %lf\n", max_timings.PDCreg_obtain_lock_write_rpc, + max_timings.PDCreg_obtain_lock_write_rpc_wait); + printf("PDCreg_obtain_lock_read_rpc = %lf, wait = %lf\n", max_timings.PDCreg_obtain_lock_read_rpc, + max_timings.PDCreg_obtain_lock_read_rpc_wait); + + printf("PDCreg_release_lock_write_rpc = %lf, wait = %lf\n", max_timings.PDCreg_release_lock_write_rpc, + max_timings.PDCreg_release_lock_write_rpc_wait); + printf("PDCreg_release_lock_read_rpc = %lf, wait = %lf\n", max_timings.PDCreg_release_lock_read_rpc, + max_timings.PDCreg_release_lock_read_rpc_wait); + printf("PDCbuf_obj_unmap_rpc = %lf, wait = %lf\n", max_timings.PDCbuf_obj_unmap_rpc, + max_timings.PDCbuf_obj_unmap_rpc_wait); + + printf("PDCtransfer_request_start_write = %lf, wait = %lf\n", + max_timings.PDCtransfer_request_start_write_rpc, + max_timings.PDCtransfer_request_start_write_rpc_wait); + printf("PDCtransfer_request_start_read = %lf, wait = %lf\n", + max_timings.PDCtransfer_request_start_read_rpc, + max_timings.PDCtransfer_request_start_read_rpc_wait); + printf("PDCtransfer_request_wait_write = %lf, wait = %lf\n", + max_timings.PDCtransfer_request_wait_write_rpc, + max_timings.PDCtransfer_request_wait_write_rpc_wait); + printf("PDCtransfer_request_wait_read = %lf, wait = %lf\n", + max_timings.PDCtransfer_request_wait_read_rpc, + max_timings.PDCtransfer_request_wait_read_rpc_wait); + printf("PDCtransfer_request_start_all_write = %lf, wait = %lf\n", + max_timings.PDCtransfer_request_start_all_write_rpc, + max_timings.PDCtransfer_request_start_all_write_rpc_wait); + printf("PDCtransfer_request_start_all_read = %lf, wait = %lf\n", + max_timings.PDCtransfer_request_start_all_read_rpc, + max_timings.PDCtransfer_request_start_all_read_rpc_wait); + printf("PDCtransfer_request_wait_write = %lf, wait = %lf\n", + max_timings.PDCtransfer_request_wait_all_rpc, + max_timings.PDCtransfer_request_wait_all_rpc_wait); + } + + sprintf(filename, "pdc_client_log_rank_%d.csv", rank); + stream = fopen(filename, "r"); + if (stream) { + fclose(stream); + stream = fopen(filename, "a"); + } + else { + stream = fopen(filename, "w"); + } + + fprintf(stream, "%s", ctime(&now)); + + sprintf(header, "buf_obj_map_%s", prefix); + timestamp_log(stream, header, pdc_client_buf_obj_map_timestamps); + sprintf(header, "buf_obj_unmap_%s", 
prefix); + timestamp_log(stream, header, pdc_client_buf_obj_unmap_timestamps); + + sprintf(header, "obtain_lock_write_%s", prefix); + timestamp_log(stream, header, pdc_client_obtain_lock_write_timestamps); + sprintf(header, "obtain_lock_read_%s", prefix); + timestamp_log(stream, header, pdc_client_obtain_lock_read_timestamps); + + sprintf(header, "release_lock_write_%s", prefix); + timestamp_log(stream, header, pdc_client_release_lock_write_timestamps); + sprintf(header, "release_lock_read_%s", prefix); + timestamp_log(stream, header, pdc_client_release_lock_read_timestamps); + + sprintf(header, "transfer_request_start_write_%s", prefix); + timestamp_log(stream, header, pdc_client_transfer_request_start_write_timestamps); + + sprintf(header, "transfer_request_start_read_%s", prefix); + timestamp_log(stream, header, pdc_client_transfer_request_start_read_timestamps); + + sprintf(header, "transfer_request_wait_write_%s", prefix); + timestamp_log(stream, header, pdc_client_transfer_request_wait_write_timestamps); + + sprintf(header, "transfer_request_wait_read_%s", prefix); + timestamp_log(stream, header, pdc_client_transfer_request_wait_read_timestamps); + + sprintf(header, "transfer_request_start_all_write_%s", prefix); + timestamp_log(stream, header, pdc_client_transfer_request_start_all_write_timestamps); + + sprintf(header, "transfer_request_start_all_read_%s", prefix); + timestamp_log(stream, header, pdc_client_transfer_request_start_all_read_timestamps); + + sprintf(header, "transfer_request_wait_all_%s", prefix); + timestamp_log(stream, header, pdc_client_transfer_request_wait_all_timestamps); + + sprintf(header, "create_cont"); + timestamp_log(stream, header, pdc_client_create_cont_timestamps); + + sprintf(header, "create_obj"); + timestamp_log(stream, header, pdc_client_create_obj_timestamps); + + fprintf(stream, "\n"); + fclose(stream); + + pdc_client_buf_obj_map_timestamps->timestamp_size = 0; + pdc_client_buf_obj_unmap_timestamps->timestamp_size = 0; + + pdc_client_obtain_lock_write_timestamps->timestamp_size = 0; + pdc_client_obtain_lock_read_timestamps->timestamp_size = 0; + pdc_client_release_lock_write_timestamps->timestamp_size = 0; + pdc_client_release_lock_read_timestamps->timestamp_size = 0; + + pdc_client_transfer_request_start_write_timestamps->timestamp_size = 0; + pdc_client_transfer_request_start_read_timestamps->timestamp_size = 0; + pdc_client_transfer_request_wait_write_timestamps->timestamp_size = 0; + pdc_client_transfer_request_wait_read_timestamps->timestamp_size = 0; + + pdc_client_transfer_request_start_all_write_timestamps->timestamp_size = 0; + pdc_client_transfer_request_start_all_read_timestamps->timestamp_size = 0; + pdc_client_transfer_request_wait_all_timestamps->timestamp_size = 0; + + pdc_client_create_cont_timestamps->timestamp_size = 0; + pdc_client_create_obj_timestamps->timestamp_size = 0; + + pdc_client_transfer_request_metadata_query_timestamps->timestamp_size = 0; + + memset(&pdc_timings, 0, sizeof(pdc_timings)); + + return 0; +} + +int +PDC_server_timing_init() +{ + char hostname[HOST_NAME_MAX]; + int rank; + + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + gethostname(hostname, HOST_NAME_MAX); + + printf("server process rank %d, hostname = %s\n", rank, hostname); + /* + printf("rank = %d, hostname = %s, PDCbuf_obj_map_rpc = %lf, PDCreg_obtain_lock_rpc = %lf, " + "PDCreg_release_lock_write_rpc = " + "%lf, PDCreg_release_lock_read_rpc = %lf, PDCbuf_obj_unmap_rpc = %lf, " + "region_release_bulk_transfer_cb = %lf\n", + rank, hostname, 
server_timings->PDCbuf_obj_map_rpc, server_timings->PDCreg_obtain_lock_rpc, + server_timings->PDCreg_release_lock_write_rpc, server_timings->PDCreg_release_lock_read_rpc, + server_timings->PDCbuf_obj_unmap_rpc, server_timings->PDCreg_release_lock_bulk_transfer_rpc); + */ + MPI_Barrier(MPI_COMM_WORLD); + + pdc_server_timings = calloc(1, sizeof(pdc_server_timing)); + pdc_timestamp *ptr = calloc(25, sizeof(pdc_timestamp)); + pdc_buf_obj_map_timestamps = ptr; + ptr++; + pdc_buf_obj_unmap_timestamps = ptr; + ptr++; + pdc_obtain_lock_write_timestamps = ptr; + ptr++; + pdc_obtain_lock_read_timestamps = ptr; + ptr++; + pdc_release_lock_write_timestamps = ptr; + ptr++; + pdc_release_lock_read_timestamps = ptr; + ptr++; + pdc_release_lock_bulk_transfer_write_timestamps = ptr; + ptr++; + pdc_release_lock_bulk_transfer_read_timestamps = ptr; + ptr++; + pdc_release_lock_bulk_transfer_inner_write_timestamps = ptr; + ptr++; + pdc_release_lock_bulk_transfer_inner_read_timestamps = ptr; + ptr++; + + pdc_transfer_request_start_write_timestamps = ptr; + ptr++; + pdc_transfer_request_start_read_timestamps = ptr; + ptr++; + pdc_transfer_request_wait_write_timestamps = ptr; + ptr++; + pdc_transfer_request_wait_read_timestamps = ptr; + ptr++; + pdc_transfer_request_start_write_bulk_timestamps = ptr; + ptr++; + pdc_transfer_request_start_read_bulk_timestamps = ptr; + ptr++; + pdc_transfer_request_inner_write_bulk_timestamps = ptr; + ptr++; + pdc_transfer_request_inner_read_bulk_timestamps = ptr; + ptr++; + + pdc_transfer_request_start_all_write_timestamps = ptr; + ptr++; + pdc_transfer_request_start_all_read_timestamps = ptr; + ptr++; + pdc_transfer_request_wait_all_timestamps = ptr; + ptr++; + pdc_transfer_request_start_all_write_bulk_timestamps = ptr; + ptr++; + pdc_transfer_request_start_all_read_bulk_timestamps = ptr; + ptr++; + pdc_transfer_request_inner_write_all_bulk_timestamps = ptr; + ptr++; + pdc_transfer_request_inner_read_all_bulk_timestamps = ptr; + ptr++; + + // 25 timestamps + + pdc_base_time = MPI_Wtime(); + return 0; +} + +int +pdc_timestamp_register(pdc_timestamp *timestamp, double start, double end) +{ + double *temp; + + if (timestamp->timestamp_max_size == 0) { + timestamp->timestamp_max_size = 256; + timestamp->start = (double *)malloc(sizeof(double) * timestamp->timestamp_max_size * 2); + timestamp->end = timestamp->start + timestamp->timestamp_max_size; + timestamp->timestamp_size = 0; + } + else if (timestamp->timestamp_size == timestamp->timestamp_max_size) { + temp = (double *)malloc(sizeof(double) * timestamp->timestamp_max_size * 4); + memcpy(temp, timestamp->start, sizeof(double) * timestamp->timestamp_max_size); + memcpy(temp + timestamp->timestamp_max_size * 2, timestamp->end, + sizeof(double) * timestamp->timestamp_max_size); + timestamp->start = temp; + timestamp->end = temp + timestamp->timestamp_max_size * 2; + timestamp->timestamp_max_size *= 2; + } + timestamp->start[timestamp->timestamp_size] = start; + timestamp->end[timestamp->timestamp_size] = end; + timestamp->timestamp_size++; + return 0; +} + +int +PDC_server_timing_report() +{ + pdc_server_timing max_timings; + int rank; + char filename[256]; + FILE * stream; + + // char hostname[HOST_NAME_MAX]; + time_t now; + + time(&now); + + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Reduce(pdc_server_timings, &max_timings, sizeof(pdc_server_timing) / sizeof(double), MPI_DOUBLE, + MPI_MAX, 0, MPI_COMM_WORLD); + sprintf(filename, "pdc_server_log_rank_%d.csv", rank); + + stream = fopen(filename, "w"); + + fprintf(stream, "%s", 
ctime(&now)); + timestamp_log(stream, "buf_obj_map", pdc_buf_obj_map_timestamps); + timestamp_log(stream, "buf_obj_unmap", pdc_buf_obj_unmap_timestamps); + + timestamp_log(stream, "obtain_lock_write", pdc_obtain_lock_write_timestamps); + timestamp_log(stream, "obtain_lock_read", pdc_obtain_lock_read_timestamps); + timestamp_log(stream, "release_lock_write", pdc_release_lock_write_timestamps); + timestamp_log(stream, "release_lock_read", pdc_release_lock_read_timestamps); + timestamp_log(stream, "release_lock_bulk_transfer_write", + pdc_release_lock_bulk_transfer_write_timestamps); + timestamp_log(stream, "release_lock_bulk_transfer_read", pdc_release_lock_bulk_transfer_read_timestamps); + timestamp_log(stream, "release_lock_bulk_transfer_inner_write", + pdc_release_lock_bulk_transfer_inner_write_timestamps); + timestamp_log(stream, "release_lock_bulk_transfer_inner_read", + pdc_release_lock_bulk_transfer_inner_read_timestamps); + + timestamp_log(stream, "transfer_request_start_write", pdc_transfer_request_start_write_timestamps); + timestamp_log(stream, "transfer_request_wait_write", pdc_transfer_request_wait_write_timestamps); + timestamp_log(stream, "transfer_request_start_write_bulk", + pdc_transfer_request_start_write_bulk_timestamps); + timestamp_log(stream, "transfer_request_inner_write_bulk", + pdc_transfer_request_inner_write_bulk_timestamps); + timestamp_log(stream, "transfer_request_start_read", pdc_transfer_request_start_read_timestamps); + timestamp_log(stream, "transfer_request_wait_read", pdc_transfer_request_wait_read_timestamps); + timestamp_log(stream, "transfer_request_start_read_bulk", + pdc_transfer_request_start_read_bulk_timestamps); + timestamp_log(stream, "transfer_request_inner_read_bulk", + pdc_transfer_request_inner_read_bulk_timestamps); + + timestamp_log(stream, "transfer_request_start_all_write", + pdc_transfer_request_start_all_write_timestamps); + timestamp_log(stream, "transfer_request_start_all_write_bulk", + pdc_transfer_request_start_all_write_bulk_timestamps); + timestamp_log(stream, "transfer_request_start_all_read", pdc_transfer_request_start_all_read_timestamps); + timestamp_log(stream, "transfer_request_start_all_read_bulk", + pdc_transfer_request_start_all_read_bulk_timestamps); + timestamp_log(stream, "transfer_request_inner_write_all_bulk", + pdc_transfer_request_inner_write_all_bulk_timestamps); + timestamp_log(stream, "transfer_request_inner_read_all_bulk", + pdc_transfer_request_inner_read_all_bulk_timestamps); + timestamp_log(stream, "transfer_request_wait_all", pdc_transfer_request_wait_all_timestamps); + + /* timestamp_log(stream, "create_obj", create_obj_timestamps); */ + /* timestamp_log(stream, "create_cont", create_cont_timestamps); */ + fclose(stream); + + sprintf(filename, "pdc_server_timings_%d.csv", rank); + stream = fopen(filename, "w"); + fprintf(stream, "%s", ctime(&now)); + fprintf(stream, "PDCbuf_obj_map_rpc, %lf\n", pdc_server_timings->PDCbuf_obj_map_rpc); + fprintf(stream, "PDCreg_obtain_lock_write_rpc, %lf\n", pdc_server_timings->PDCreg_obtain_lock_write_rpc); + fprintf(stream, "PDCreg_obtain_lock_read_rpc, %lf\n", pdc_server_timings->PDCreg_obtain_lock_read_rpc); + fprintf(stream, "PDCreg_release_lock_write_rpc, %lf\n", + pdc_server_timings->PDCreg_release_lock_write_rpc); + fprintf(stream, "PDCreg_release_lock_read_rpc, %lf\n", pdc_server_timings->PDCreg_release_lock_read_rpc); + fprintf(stream, "PDCbuf_obj_unmap_rpc, %lf\n", pdc_server_timings->PDCbuf_obj_unmap_rpc); + fprintf(stream, 
"PDCreg_release_lock_bulk_transfer_write_rpc, %lf\n", + pdc_server_timings->PDCreg_release_lock_bulk_transfer_write_rpc); + fprintf(stream, "PDCreg_release_lock_bulk_transfer_read_rpc, %lf\n", + pdc_server_timings->PDCreg_release_lock_bulk_transfer_read_rpc); + fprintf(stream, "PDCreg_release_lock_bulk_transfer_inner_write_rpc, %lf\n", + pdc_server_timings->PDCreg_release_lock_bulk_transfer_inner_write_rpc); + fprintf(stream, "PDCreg_release_lock_bulk_transfer_inner_read_rpc, %lf\n", + pdc_server_timings->PDCreg_release_lock_bulk_transfer_inner_read_rpc); + fprintf(stream, "PDCregion_transfer_start_write_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_start_write_rpc); + fprintf(stream, "PDCregion_transfer_wait_write_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_wait_write_rpc); + fprintf(stream, "PDCregion_transfer_start_write_bulk_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_start_write_bulk_rpc); + fprintf(stream, "PDCregion_transfer_request_inner_write_bulk_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_inner_write_bulk_rpc); + fprintf(stream, "PDCregion_transfer_start_read_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_start_read_rpc); + fprintf(stream, "PDCregion_transfer_wait_read_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_wait_read_rpc); + fprintf(stream, "PDCregion_transfer_start_read_bulk_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_start_read_bulk_rpc); + fprintf(stream, "PDCregion_transfer_request_inner_read_bulk_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_inner_read_bulk_rpc); + + fprintf(stream, "PDCregion_transfer_start_write_all_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_start_all_write_rpc); + fprintf(stream, "PDCregion_transfer_request_inner_write_all_bulk_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_inner_write_all_bulk_rpc); + fprintf(stream, "PDCregion_transfer_start_all_read_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_start_all_read_rpc); + fprintf(stream, "PDCregion_transfer_request_inner_read_all_bulk_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_inner_read_all_bulk_rpc); + fprintf(stream, "PDCregion_transfer_wait_all_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_wait_all_rpc); + fprintf(stream, "PDCregion_transfer_wait_all_bulk_rpc, %lf\n", + pdc_server_timings->PDCreg_transfer_request_wait_all_bulk_rpc); + + fprintf(stream, "PDCserver_obj_create_rpc, %lf\n", pdc_server_timings->PDCserver_obj_create_rpc); + fprintf(stream, "PDCserver_cont_create_rpc, %lf\n", pdc_server_timings->PDCserver_cont_create_rpc); + + fprintf(stream, "PDCdata_server_write_out, %lf\n", pdc_server_timings->PDCdata_server_write_out); + fprintf(stream, "PDCdata_server_read_from, %lf\n", pdc_server_timings->PDCdata_server_read_from); + fprintf(stream, "PDCcache_write, %lf\n", pdc_server_timings->PDCcache_write); + fprintf(stream, "PDCcache_read, %lf\n", pdc_server_timings->PDCcache_read); + fprintf(stream, "PDCcache_flush, %lf\n", pdc_server_timings->PDCcache_flush); + fprintf(stream, "PDCcache_clean, %lf\n", pdc_server_timings->PDCcache_clean); + fprintf(stream, "PDCdata_server_write_posix, %lf\n", pdc_server_timings->PDCdata_server_write_posix); + fprintf(stream, "PDCdata_server_read_posix, %lf\n", pdc_server_timings->PDCdata_server_read_posix); + + fprintf(stream, "PDCserver_restart, %lf\n", pdc_server_timings->PDCserver_restart); + fprintf(stream, "PDCserver_checkpoint, %lf\n", 
pdc_server_timings->PDCserver_checkpoint); + fprintf(stream, "PDCstart_server_total, %lf\n", pdc_server_timings->PDCserver_start_total); + + fclose(stream); + + free(pdc_server_timings); + pdc_timestamp_clean(pdc_buf_obj_map_timestamps); + pdc_timestamp_clean(pdc_buf_obj_unmap_timestamps); + + pdc_timestamp_clean(pdc_obtain_lock_write_timestamps); + pdc_timestamp_clean(pdc_obtain_lock_read_timestamps); + pdc_timestamp_clean(pdc_release_lock_write_timestamps); + pdc_timestamp_clean(pdc_release_lock_read_timestamps); + pdc_timestamp_clean(pdc_release_lock_bulk_transfer_write_timestamps); + pdc_timestamp_clean(pdc_release_lock_bulk_transfer_read_timestamps); + pdc_timestamp_clean(pdc_release_lock_bulk_transfer_inner_write_timestamps); + pdc_timestamp_clean(pdc_release_lock_bulk_transfer_inner_read_timestamps); + + pdc_timestamp_clean(pdc_transfer_request_start_write_timestamps); + pdc_timestamp_clean(pdc_transfer_request_start_read_timestamps); + pdc_timestamp_clean(pdc_transfer_request_wait_write_timestamps); + pdc_timestamp_clean(pdc_transfer_request_wait_read_timestamps); + pdc_timestamp_clean(pdc_transfer_request_start_write_bulk_timestamps); + pdc_timestamp_clean(pdc_transfer_request_start_read_bulk_timestamps); + pdc_timestamp_clean(pdc_transfer_request_inner_write_bulk_timestamps); + pdc_timestamp_clean(pdc_transfer_request_inner_read_bulk_timestamps); + + pdc_timestamp_clean(pdc_transfer_request_start_all_write_timestamps); + pdc_timestamp_clean(pdc_transfer_request_start_all_read_timestamps); + pdc_timestamp_clean(pdc_transfer_request_start_all_write_bulk_timestamps); + pdc_timestamp_clean(pdc_transfer_request_start_all_read_bulk_timestamps); + pdc_timestamp_clean(pdc_transfer_request_wait_all_timestamps); + pdc_timestamp_clean(pdc_transfer_request_inner_write_all_bulk_timestamps); + pdc_timestamp_clean(pdc_transfer_request_inner_read_all_bulk_timestamps); + + /* pdc_timestamp_clean(pdc_create_obj_timestamps); */ + /* pdc_timestamp_clean(pdc_create_cont_timestamps); */ + + free(pdc_buf_obj_map_timestamps); + return 0; +} + +#else +int +PDC_timing_report(const char *prefix __attribute__((unused))) +{ + return 0; +} +#endif diff --git a/src/server/pdc_client_server_common.c b/src/server/pdc_client_server_common.c index dbbff7bfd..859ce7bb8 100644 --- a/src/server/pdc_client_server_common.c +++ b/src/server/pdc_client_server_common.c @@ -6846,10 +6846,10 @@ PDC_kvtag_dup(pdc_kvtag_t *from, pdc_kvtag_t **to) if (from == NULL || to == NULL) PGOTO_DONE(FAIL); - (*to) = (pdc_kvtag_t *)calloc(1, sizeof(pdc_kvtag_t)); - (*to)->name = (char *)malloc(strlen(from->name) + 1); - (*to)->size = from->size; - (*to)->type = from->type; + (*to) = (pdc_kvtag_t *)calloc(1, sizeof(pdc_kvtag_t)); + (*to)->name = (char *)malloc(strlen(from->name) + 1); + (*to)->size = from->size; + (*to)->type = from->type; (*to)->value = (void *)malloc(from->size); memcpy((void *)(*to)->name, (void *)from->name, strlen(from->name) + 1); memcpy((void *)(*to)->value, (void *)from->value, from->size); diff --git a/src/tests/cont_tags.c b/src/tests/cont_tags.c index f6c961a71..caaf87b49 100644 --- a/src/tests/cont_tags.c +++ b/src/tests/cont_tags.c @@ -36,9 +36,9 @@ main(int argc, char **argv) int rank = 0, size = 1; - char tag_value[128], tag_value2[128], *tag_value_ret; + char tag_value[128], tag_value2[128], *tag_value_ret; pdc_var_type_t value_type; - psize_t value_size; + psize_t value_size; strcpy(tag_value, "some tag value"); strcpy(tag_value2, "some tag value 2 is longer than tag 1"); diff --git 
a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index 2ccc67828..a682e30ee 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -220,7 +220,8 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value v = i + my_obj_s; for (j = 0; j < n_attr; j++) { sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", v, j); - if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)tag_values[j], PDC_STRING, tag_value_len + 1) < 0) + if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)tag_values[j], PDC_STRING, tag_value_len + 1) < + 0) printf("fail to add a kvtag to o%" PRIu64 "\n", v); } } @@ -238,8 +239,8 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value void get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, uint64_t n_attr, void **tag_values, uint64_t *value_size) { - uint64_t i; - char tag_name[256]; + uint64_t i; + char tag_name[256]; pdc_var_type_t tag_type; for (i = 0; i < n_attr; i++) { diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index d145560d0..dc14c597e 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -210,7 +210,8 @@ main(int argc, char *argv[]) stime = MPI_Wtime(); #endif for (i = 0; i < my_query; i++) { - if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_type, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_type, + (void *)&value_size) < 0) printf("fail to get a kvtag from o%d\n", i + my_query_s); if (i % query_1percent == 0) { diff --git a/src/tests/obj_tags.c b/src/tests/obj_tags.c index db99bec1b..5906e3dd9 100644 --- a/src/tests/obj_tags.c +++ b/src/tests/obj_tags.c @@ -42,10 +42,10 @@ main(int argc, char **argv) dims[0] = 64; dims[1] = 3; dims[2] = 4; - char tag_value[128], tag_value2[128], *tag_value_ret; - char cont_name[128], obj_name1[128], obj_name2[128]; + char tag_value[128], tag_value2[128], *tag_value_ret; + char cont_name[128], obj_name1[128], obj_name2[128]; pdc_var_type_t value_type; - psize_t value_size; + psize_t value_size; strcpy(tag_value, "some tag value"); strcpy(tag_value2, "some tag value 2 is longer"); diff --git a/src/tests/read_obj_shared.c b/src/tests/read_obj_shared.c index 4b0058dbd..6f56965d8 100644 --- a/src/tests/read_obj_shared.c +++ b/src/tests/read_obj_shared.c @@ -163,7 +163,7 @@ main(int argc, char **argv) offset[0] = rank * my_data_size; local_offset[0] = 0; mysize[0] = my_data_size; - printf("rank %d offset = %lu, length = %lu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); + printf("rank %d offset = %llu, length = %llu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); local_region = PDCregion_create(ndim, local_offset, mysize); global_region = PDCregion_create(ndim, offset, mysize); @@ -220,7 +220,7 @@ main(int argc, char **argv) offset[0] = rank * my_data_size; local_offset[0] = 0; mysize[0] = my_data_size; - printf("rank %d offset = %lu, length = %lu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); + printf("rank %d offset = %llu, length = %llu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); local_region = PDCregion_create(ndim, local_offset, mysize); global_region = PDCregion_create(ndim, offset, mysize); diff --git a/src/tests/vpicio_mts.c b/src/tests/vpicio_mts.c index eec66215b..65b7b4dc8 100644 --- a/src/tests/vpicio_mts.c +++ b/src/tests/vpicio_mts.c @@ -199,7 +199,7 @@ main(int argc, char **argv) 
MPI_Barrier(MPI_COMM_WORLD); t0 = MPI_Wtime(); if (rank == 0) { - printf("\n#Step %d\n", i); + printf("\n#Step %llu\n", i); } #endif PDCprop_set_obj_time_step(obj_prop_xx, i); From a8cb2aaa39060e754762ca71338a58541893b12e Mon Sep 17 00:00:00 2001 From: Zhang Wei Date: Wed, 21 Jun 2023 18:23:56 -0400 Subject: [PATCH 195/806] Data type for new kvtag (including refactoring and serde framework) (#90) * remove unnecessary install block from CMakeLists.txt * update output * Revert "update output" This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd. * build kvtag_add_get_scale * comment off free * update code * 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. moving work assigning block downwards right before creating objects \n 4. everything is tested working * do while loop added, tested with 1m object and works * 1m objects test works, 10m object test fail as the original also fails * add new executable to test set * enlarge PDC_SERVER_ID_INTERVAL * update code * update console args * add p search test * add console arg for changing number of attributes per object * free allocated memory * fix query count issue * fix attr length definition * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * fix data type * fix data type * fix data type * add client side statistics * add client side statistics * fix format * clang formatter * update CMake * update CMake * update CMake * free allocated memory properly * clang format * clang format * clang-format-10 * change file name * address review comments * update llsm importer * update llsm importer * update server checkpoint intervals * update gitignore * adding job scripts * adding one debugging msg * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update output for uint64_t * add scripts * update output for uint64_t * update output for uint64_t * update output for uint64_t * update scripts * update scripts * delete debugging message * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * update tag names * update tag names * update query startingpos * update query startingpos * update job scripts * add progressive timing for kvtag_add_get_scale * fix iteration count in final report * update job scripts and benckmark program * update message format * update message format * update message format * update message format * clang format * update job scripts * comment off object/container close procedure in benchmark to save node hours * change the max number of object to 1M * change the max length of attribute value * change the max length of attribute value * llsm tiff import test * llsm tiff import test * llsm tiff import test * llsm tiff import test * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update cmake and llsm_importer * update cmake and llsm_importer * close if in cmake * cmake fix tiff * cmake policy to suppress warning * add pdc include dir * update code * update code * update code * update code * update code * update code * update array generating method * update 
array generating method * update array generating method * update array generating method * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * fix return type * fix return type * add timing * add timing * fix output * llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader * fix vairable name * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * add scripts * add scripts * add scripts * debugging for nonMPI program * debugging for nonMPI program * debugging for nonMPI program * clang format, without PDC, everything works perfectly. program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created * enable MPI * enable MPI * enlarge BCase size * enlarge BCase size * enlarge BCase size * resolve bcast count * llsm data path in script * llsm data path in script * update csv reader * update csv reader * update csv reader * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * enlarge max write * update pdc * update pdc * update pdc * update pdc * update pdc_import.c * update pdc_import.c * update pdc_export.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update tools/cmake * clang format * clang format * added a tutorial for llsm_importer * added a tutorial for llsm_importer * make sure the line feed is included for string attribute * update timing for overall completion time * update formatting * llsm_importer (#1) formatter on llsm_importer * add type for kvtag structure (#2) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * Feature/metadata type (#3) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * remove unnecessary header files from installation * resolve conflict * add important files * clang formatting * update cmake --------- Co-authored-by: Houjun Tang --- CMakeLists.txt | 3 +- docs/readme.md | 154 ++++---- docs/source/api.rst | 37 +- src/api/CMakeLists.txt | 29 +- src/api/pdc_client_connect.c | 20 +- src/api/pdc_obj/include/pdc_cont.h | 6 +- src/api/pdc_obj/include/pdc_obj.h | 6 +- src/api/pdc_obj/include/pdc_prop_pkg.h | 7 +- src/api/pdc_obj/pdc_dt_conv.c | 8 +- src/api/pdc_obj/pdc_obj.c | 1 + src/commons/CMakeLists.txt | 210 +++++++++++ src/commons/generic/include/pdc_generic.h | 219 +++++++++++ src/{api => commons}/include/pdc_public.h | 20 +- src/{api => commons}/profiling/CMakeLists.txt | 9 +- .../profiling/include/pdc_hashtab.h | 0 .../profiling/include/pdc_stack_ops.h | 0 src/{api => commons}/profiling/pdc_hashtab.c | 0 .../profiling/pdc_stack_ops.c | 0 src/commons/serde/include/pdc_serde.h | 156 ++++++++ src/commons/serde/pdc_serde.c | 346 ++++++++++++++++++ src/{ => commons}/utils/include/pdc_id_pkg.h | 0 .../utils/include/pdc_linkedlist.h | 4 +- src/{ => commons}/utils/include/pdc_malloc.h | 0 src/{ => commons}/utils/include/pdc_private.h | 2 +- src/{ => commons}/utils/include/pdc_timing.h | 0 src/{ => commons}/utils/pdc_malloc.c | 0 src/{ 
=> commons}/utils/pdc_timing.c | 0 src/server/CMakeLists.txt | 5 +- src/server/include/pdc_client_server_common.h | 5 + src/server/pdc_client_server_common.c | 12 +- src/server/pdc_server.c | 7 +- .../pdc_server_region_request_handler.h | 4 +- src/tests/cont_tags.c | 21 +- src/tests/kvtag_add_get.c | 35 +- src/tests/kvtag_add_get_benchmark.c | 10 +- src/tests/kvtag_add_get_scale.c | 9 +- src/tests/kvtag_get.c | 15 +- src/tests/kvtag_query.c | 11 +- src/tests/kvtag_query_scale.c | 4 +- src/tests/obj_tags.c | 23 +- src/tests/pdc_transforms_lib.c | 35 +- src/tests/read_obj_shared.c | 4 +- src/tests/vpicio_mts.c | 2 +- src/utils/pdc_interface.c | 2 + tools/pdc_export.c | 41 +-- tools/pdc_import.c | 9 +- tools/pdc_ls.c | 40 +- 47 files changed, 1235 insertions(+), 296 deletions(-) create mode 100644 src/commons/CMakeLists.txt create mode 100644 src/commons/generic/include/pdc_generic.h rename src/{api => commons}/include/pdc_public.h (66%) rename src/{api => commons}/profiling/CMakeLists.txt (92%) rename src/{api => commons}/profiling/include/pdc_hashtab.h (100%) rename src/{api => commons}/profiling/include/pdc_stack_ops.h (100%) rename src/{api => commons}/profiling/pdc_hashtab.c (100%) rename src/{api => commons}/profiling/pdc_stack_ops.c (100%) create mode 100644 src/commons/serde/include/pdc_serde.h create mode 100644 src/commons/serde/pdc_serde.c rename src/{ => commons}/utils/include/pdc_id_pkg.h (100%) rename src/{ => commons}/utils/include/pdc_linkedlist.h (99%) rename src/{ => commons}/utils/include/pdc_malloc.h (100%) rename src/{ => commons}/utils/include/pdc_private.h (99%) rename src/{ => commons}/utils/include/pdc_timing.h (100%) rename src/{ => commons}/utils/pdc_malloc.c (100%) rename src/{ => commons}/utils/pdc_timing.c (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2e353dbc2..5e7b65d94 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -257,7 +257,7 @@ endif() option(PDC_ENABLE_LUSTRE "Enable Lustre." 
OFF) if(PDC_ENABLE_LUSTRE) set(ENABLE_LUSTRE 1) - set(PDC_LUSTRE_TOTAL_OST "248" CACHE STRING "Number of Lustre OSTs") + set(PDC_LUSTRE_TOTAL_OST "256" CACHE STRING "Number of Lustre OSTs") endif() #----------------------------------------------------------------------------- @@ -384,6 +384,7 @@ configure_file( #----------------------------------------------------------------------------- # Source #----------------------------------------------------------------------------- +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src/commons) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src/api) add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src/server) diff --git a/docs/readme.md b/docs/readme.md index df19eba94..74be2d0e4 100644 --- a/docs/readme.md +++ b/docs/readme.md @@ -1,58 +1,67 @@ # PDC Documentations - + [PDC user APIs](#pdc-user-apis) - - [PDC general APIs](#pdc-general-apis) - - [PDC container APIs](#pdc-container-apis) - - [PDC object APIs](#pdc-object-apis) - - [PDC region APIs](#pdc-region-apis) - - [PDC property APIs](#pdc-property-apis) - - [PDC query APIs](#pdc-query-apis) - + [PDC data types](#PDC-type-categories) - - [Basic types](#basic-types) - - [Histogram structure](#histogram-structure) - - [Container info](#container-info) - - [Container life time](#container-life-time) - - [Object property](#object-property) - - [Object info](#object-info) - - [Object structure](#object-structure) - - [Region info](#region-info) - - [Access type](#access-type) - - [Transfer request status](#transfer-request-status) - - [Query operators](#query-operators) - - [Query structures](#query-structures) - - [Selection structure](#selection-structure) - + [Developers notes](#developers-notes) - - [How to implement an RPC from client to server](#how-to-implement-an-rpc-from-client-to-server) - - [PDC Server metadata overview](#pdc-server-metadata-overview) - + [PDC metadata structure](#pdc-metadata-structure) - + [Metadata operations at client side](#metadata-operations-at-client-side) - - [PDC metadata management strategy](#pdc-metadata-management-strategy) - + [Managing metadata and data by the same server](#managing-metadata-and-data-by-the-same-server) - + [Separate metadata server from data server](#separate-metadata-server-from-data-server) - + [Static object region mappings](#static-object-region-mappings) - + [Dynamic object region mappings](#dynamic-object-region-mappings) - - [PDC metadata management implementation](#pdc-metadata-management-implementation) - + [Create metadata](#create-metadata) - + [Binding metadata to object](#binding-metadata-to-object) - + [Register object metadata at metadata server](#register-object-metadata-at-metadata-server) - + [Retrieve metadata from metadata server](#retrieve-metadata-from-metadata-server) - + [Object metadata at client](#object-metadata-at-client) - + [Metadata at data server](#metadata-at-data-server) - + [Object metadata update](#object-metadata-update) - + [Object region metadata](#object-region-metadata) - + [Metadata checkpoint](#object-metadata-update) - - [Region transfer request at client](#region-transfer-request-at-client) - + [Region transfer request create and close](#region-transfer-request-create-and-close) - + [Region transfer request start](#region-transfer-request-start) - + [Region transfer request wait](#region-transfer-request-wait) - - [Region transfer request at server](#region-transfer-request-at-server) - + [Server region transfer request RPC](#server-region-transfer-request-rpc) - - [Server nonblocking 
control](#server-nonblocking-control) - - [Server region transfer request start](#server-region-transfer-request-start) - - [Server region transfer request wait](#server-region-transfer-request-wait) - + [Server region storage](#server-region-storage) - - [Storage by file offset](#storage-by-file-offset) - - [Storage by region](#storage-by-region) - - [Open tasks for PDC](#open-tasks-for-pdc) +- [PDC Documentations](#pdc-documentations) +- [PDC user APIs](#pdc-user-apis) + - [PDC general APIs](#pdc-general-apis) + - [PDC container APIs](#pdc-container-apis) + - [PDC object APIs](#pdc-object-apis) + - [PDC region APIs](#pdc-region-apis) + - [PDC property APIs](#pdc-property-apis) + - [PDC query APIs](#pdc-query-apis) + - [PDC hist APIs](#pdc-hist-apis) +- [PDC Data types](#pdc-data-types) + - [Basic types](#basic-types) + - [region transfer partition type](#region-transfer-partition-type) + - [Object consistency semantics type](#object-consistency-semantics-type) + - [Histogram structure](#histogram-structure) + - [Container info](#container-info) + - [Container life time](#container-life-time) + - [Object property public](#object-property-public) + - [Object property](#object-property) + - [Object info](#object-info) + - [Object structure](#object-structure) + - [Region info](#region-info) + - [Access type](#access-type) + - [Transfer request status](#transfer-request-status) + - [Query operators](#query-operators) + - [Query structures](#query-structures) + - [Selection structure](#selection-structure) +- [Developers notes](#developers-notes) + - [How to implement an RPC from client to server](#how-to-implement-an-rpc-from-client-to-server) + - [PDC Server metadata overview](#pdc-server-metadata-overview) + - [PDC metadata structure](#pdc-metadata-structure) + - [Metadata operations at client side](#metadata-operations-at-client-side) + - [PDC metadata management strategy](#pdc-metadata-management-strategy) + - [Managing metadata and data by the same server](#managing-metadata-and-data-by-the-same-server) + - [Separate metadata server from data server](#separate-metadata-server-from-data-server) + - [Static object region mappings](#static-object-region-mappings) + - [Dynamic object region mappings](#dynamic-object-region-mappings) + - [PDC metadata management implementation](#pdc-metadata-management-implementation) + - [Create metadata](#create-metadata) + - [Binding metadata to object](#binding-metadata-to-object) + - [Register object metadata at metadata server](#register-object-metadata-at-metadata-server) + - [Retrieve metadata from metadata server](#retrieve-metadata-from-metadata-server) + - [Object metadata at client](#object-metadata-at-client) + - [Metadata at data server](#metadata-at-data-server) + - [Object metadata update](#object-metadata-update) + - [Object region metadata](#object-region-metadata) + - [Metadata checkpoint](#metadata-checkpoint) + - [Region transfer request at client](#region-transfer-request-at-client) + - [Region transfer request create and close](#region-transfer-request-create-and-close) + - [Region transfer request start](#region-transfer-request-start) + - [Region transfer request wait](#region-transfer-request-wait) + - [Region transfer request at server](#region-transfer-request-at-server) + - [Server region transfer request RPC](#server-region-transfer-request-rpc) + - [Server nonblocking control](#server-nonblocking-control) + - [Server region transfer request start](#server-region-transfer-request-start) + - [Server region transfer request 
wait](#server-region-transfer-request-wait) + - [Server region storage](#server-region-storage) + - [Storage by file offset](#storage-by-file-offset) + - [Storage by region](#storage-by-region) + - [Open tasks for PDC](#open-tasks-for-pdc) + - [Replacing individual modules with efficient Hash table data structures](#replacing-individual-modules-with-efficient-hash-table-data-structures) + - [Restarting pdc\_server.exe with different numbers of servers](#restarting-pdc_serverexe-with-different-numbers-of-servers) + - [Fast region search mechanisms](#fast-region-search-mechanisms) + - [Merge overlapping regions](#merge-overlapping-regions) # PDC user APIs ## PDC general APIs + pdcid_t PDCinit(const char *pdc_name) @@ -683,21 +692,28 @@ ## Basic types ``` typedef enum { - PDC_UNKNOWN = -1, /* error */ - PDC_INT = 0, /* integer types */ - PDC_FLOAT = 1, /* floating-point types */ - PDC_DOUBLE = 2, /* double types */ - PDC_CHAR = 3, /* character types */ - PDC_COMPOUND = 4, /* compound types */ - PDC_ENUM = 5, /* enumeration types */ - PDC_ARRAY = 6, /* Array types */ - PDC_UINT = 7, /* unsigned integer types */ - PDC_INT64 = 8, /* 64-bit integer types */ - PDC_UINT64 = 9, /* 64-bit unsigned integer types */ - PDC_INT16 = 10, - PDC_INT8 = 11, - NCLASSES = 12 /* this must be last */ - } pdc_var_type_t; + PDC_UNKNOWN = -1, /* error */ + PDC_INT = 0, /* integer types (identical to int32_t) */ + PDC_FLOAT = 1, /* floating-point types */ + PDC_DOUBLE = 2, /* double types */ + PDC_CHAR = 3, /* character types */ + PDC_STRING = 4, /* string types */ + PDC_BOOLEAN = 5, /* boolean types */ + PDC_SHORT = 6, /* short types */ + PDC_UINT = 7, /* unsigned integer types (identical to uint32_t) */ + PDC_INT64 = 8, /* 64-bit integer types */ + PDC_UINT64 = 9, /* 64-bit unsigned integer types */ + PDC_INT16 = 10, /* 16-bit integer types */ + PDC_INT8 = 11, /* 8-bit integer types */ + PDC_UINT8 = 12, /* 8-bit unsigned integer types */ + PDC_UINT16 = 13, /* 16-bit unsigned integer types */ + PDC_INT32 = 14, /* 32-bit integer types */ + PDC_UINT32 = 15, /* 32-bit unsigned integer types */ + PDC_LONG = 16, /* long types */ + PDC_VOID_PTR = 17, /* void pointer type */ + PDC_SIZE_T = 18, /* size_t type */ + PDC_TYPE_COUNT = 19 /* this is the number of var types and has to be the last */ + } pdc_c_var_type_t; ``` ## region transfer partition type ``` diff --git a/docs/source/api.rst b/docs/source/api.rst index e9b1e6567..ab058f10a 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -471,21 +471,28 @@ Basic types .. 
code-block:: c typedef enum { - PDC_UNKNOWN = -1, /* error */ - PDC_INT = 0, /* integer types */ - PDC_FLOAT = 1, /* floating-point types */ - PDC_DOUBLE = 2, /* double types */ - PDC_CHAR = 3, /* character types */ - PDC_COMPOUND = 4, /* compound types */ - PDC_ENUM = 5, /* enumeration types */ - PDC_ARRAY = 6, /* Array types */ - PDC_UINT = 7, /* unsigned integer types */ - PDC_INT64 = 8, /* 64-bit integer types */ - PDC_UINT64 = 9, /* 64-bit unsigned integer types */ - PDC_INT16 = 10, - PDC_INT8 = 11, - NCLASSES = 12 /* this must be last */ - } pdc_var_type_t; + PDC_UNKNOWN = -1, /* error */ + PDC_INT = 0, /* integer types (identical to int32_t) */ + PDC_FLOAT = 1, /* floating-point types */ + PDC_DOUBLE = 2, /* double types */ + PDC_CHAR = 3, /* character types */ + PDC_STRING = 4, /* string types */ + PDC_BOOLEAN = 5, /* boolean types */ + PDC_SHORT = 6, /* short types */ + PDC_UINT = 7, /* unsigned integer types (identical to uint32_t) */ + PDC_INT64 = 8, /* 64-bit integer types */ + PDC_UINT64 = 9, /* 64-bit unsigned integer types */ + PDC_INT16 = 10, /* 16-bit integer types */ + PDC_INT8 = 11, /* 8-bit integer types */ + PDC_UINT8 = 12, /* 8-bit unsigned integer types */ + PDC_UINT16 = 13, /* 16-bit unsigned integer types */ + PDC_INT32 = 14, /* 32-bit integer types */ + PDC_UINT32 = 15, /* 32-bit unsigned integer types */ + PDC_LONG = 16, /* long types */ + PDC_VOID_PTR = 17, /* void pointer type */ + PDC_SIZE_T = 18, /* size_t type */ + PDC_TYPE_COUNT = 19 /* this is the number of var types and has to be the last */ + } pdc_c_var_type_t; diff --git a/src/api/CMakeLists.txt b/src/api/CMakeLists.txt index 6d4e0b722..1ba4b8389 100644 --- a/src/api/CMakeLists.txt +++ b/src/api/CMakeLists.txt @@ -1,7 +1,7 @@ #------------------------------------------------------------------------------ # Include source and build directories #------------------------------------------------------------------------------ -set( LOCAL_INCLUDE_DIR +set(LOCAL_INCLUDE_DIR ${PDC_INCLUDES_BUILD_TIME} ${PROJECT_BINARY_DIR} ${PDC_SOURCE_DIR} @@ -32,9 +32,9 @@ include_directories( # External dependencies #------------------------------------------------------------------------------ # profiling -#set(PDC_EXT_LIB_DEPENDENCIES pdcprof ${PDC_EXT_LIB_DEPENDENCIES}) -set(PDC_EXT_INCLUDE_DEPENDENCIES ${CMAKE_CURRENT_SOURCE_DIR}/profiling) -set(PDC_EXPORTED_LIBS pdcprof) +# set(PDC_EXT_LIB_DEPENDENCIES pdcprof ${PDC_EXT_LIB_DEPENDENCIES}) +# set(PDC_EXT_INCLUDE_DEPENDENCIES ${CMAKE_CURRENT_SOURCE_DIR}/profiling) +# set(PDC_EXPORTED_LIBS pdcprof) # Mercury find_package(MERCURY REQUIRED) @@ -87,22 +87,24 @@ set(PDC_SRCS ${PDC_SOURCE_DIR}/src/server/pdc_server_region/pdc_server_region_transfer.c ${PDC_SOURCE_DIR}/src/server/pdc_server_region/pdc_server_region_cache.c ${PDC_SOURCE_DIR}/src/server/pdc_server_region/pdc_server_region_transfer_metadata_query.c - ${PDC_SOURCE_DIR}/src/utils/pdc_timing.c - ${PDC_SOURCE_DIR}/src/utils/pdc_malloc.c ${PDC_SOURCE_DIR}/src/utils/pdc_interface.c ${PDC_SOURCE_DIR}/src/utils/pdc_region_utils.c ) - add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/profiling) + set(PDC_COMMON_INCLUDE_DIRS ${PDC_COMMON_INCLUDE_DIRS}) + # add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/profiling) #------------------------------------------------------------------------------ # Libraries #------------------------------------------------------------------------------ # PDC set(PDC_BUILD_INCLUDE_DEPENDENCIES + ${PDC_COMMON_INCLUDE_DIRS} ${LOCAL_INCLUDE_DIR} ) +message(STATUS "PDC_BUILD_INCLUDE_DEPENDENCIES: 
${PDC_BUILD_INCLUDE_DEPENDENCIES}") + add_library(pdc ${PDC_SRCS}) target_include_directories(pdc @@ -110,7 +112,12 @@ target_include_directories(pdc $ ) +message(STATUS "PDC_EXPORTED_LIBS: ${PDC_EXPORTED_LIBS}") +message(STATUS "PDC_EXT_LIB_DEPENDENCIES: ${PDC_EXT_LIB_DEPENDENCIES}") +message(STATUS "PDC_COMMONS_LIBRARIES: ${PDC_COMMONS_LIBRARIES}") + target_link_libraries(pdc + ${PDC_COMMONS_LIBRARIES} ${PDC_EXPORTED_LIBS} ${PDC_EXT_LIB_DEPENDENCIES} -ldl @@ -122,7 +129,7 @@ set(PDC_EXPORTED_LIBS pdc ${PDC_EXPORTED_LIBS}) add_executable(close_server close_server.c ) -target_link_libraries(close_server pdc) +target_link_libraries(close_server pdc ${PDC_COMMON_LIBRARIES}) install( TARGETS @@ -135,7 +142,6 @@ install( #----------------------------------------------------------------------------- set(PDC_HEADERS ${PDC_SOURCE_DIR}/src/api/include/pdc.h - ${PDC_SOURCE_DIR}/src/api/include/pdc_public.h ${PDC_SOURCE_DIR}/src/api/pdc_analysis/include/pdc_analysis.h ${PDC_SOURCE_DIR}/src/api/pdc_obj/include/pdc_cont.h ${PDC_SOURCE_DIR}/src/api/pdc_obj/include/pdc_mpi.h @@ -173,9 +179,10 @@ install( #----------------------------------------------------------------------------- # Add Target(s) to CMake Install #----------------------------------------------------------------------------- + install( TARGETS - pdc + pdc EXPORT ${PDC_EXPORTED_TARGETS} LIBRARY DESTINATION ${PDC_INSTALL_LIB_DIR} @@ -223,4 +230,4 @@ set(PDC_INCLUDES_INSTALL_TIME ${PDC_INSTALL_INCLUDE_DIR} ${PDC_EXT_INCLUDE_DEPENDENCIES} PARENT_SCOPE -) +) \ No newline at end of file diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index bdf967885..fff47f027 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -33,6 +33,7 @@ #include "pdc_utlist.h" #include "pdc_id_pkg.h" +#include "pdc_cont_pkg.h" #include "pdc_prop_pkg.h" #include "pdc_obj_pkg.h" #include "pdc_cont.h" @@ -7018,6 +7019,7 @@ PDC_add_kvtag(pdcid_t obj_id, pdc_kvtag_t *kvtag, int is_cont) if (kvtag != NULL && kvtag != NULL && kvtag->size != 0) { in.kvtag.name = kvtag->name; in.kvtag.value = kvtag->value; + in.kvtag.type = kvtag->type; in.kvtag.size = kvtag->size; } else @@ -7061,6 +7063,7 @@ metadata_get_kvtag_rpc_cb(const struct hg_cb_info *callback_info) client_lookup_args->ret = output.ret; client_lookup_args->kvtag->name = strdup(output.kvtag.name); client_lookup_args->kvtag->size = output.kvtag.size; + client_lookup_args->kvtag->type = output.kvtag.type; client_lookup_args->kvtag->value = malloc(output.kvtag.size); memcpy(client_lookup_args->kvtag->value, output.kvtag.value, output.kvtag.size); /* PDC_kvtag_dup(&(output.kvtag), &client_lookup_args->kvtag); */ @@ -7315,10 +7318,12 @@ PDC_Client_query_kvtag_server(uint32_t server_id, const pdc_kvtag_t *kvtag, int if (kvtag->value == NULL) { in.value = " "; + in.type = PDC_STRING; in.size = 1; } else { in.value = kvtag->value; + in.type = kvtag->type; in.size = kvtag->size; } @@ -7576,7 +7581,8 @@ PDCcont_get_objids(pdcid_t cont_id ATTRIBUTE(unused), int *nobj ATTRIBUTE(unused } perr_t -PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, psize_t value_size) +PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, + psize_t value_size) { perr_t ret_value = SUCCEED; pdc_kvtag_t kvtag; @@ -7585,6 +7591,7 @@ PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, psize_t value_ kvtag.name = tag_name; kvtag.value = (void *)tag_value; + kvtag.type = value_type; kvtag.size = (uint64_t)value_size; ret_value = 
PDC_add_kvtag(cont_id, &kvtag, 1); @@ -7598,7 +7605,8 @@ PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, psize_t value_ } perr_t -PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, psize_t *value_size) +PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, + psize_t *value_size) { perr_t ret_value = SUCCEED; pdc_kvtag_t *kvtag = NULL; @@ -7610,6 +7618,7 @@ PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, psize_t *valu PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: Error with PDC_get_kvtag", pdc_client_mpi_rank_g); *tag_value = kvtag->value; + *value_type = kvtag->type; *value_size = kvtag->size; done: @@ -7772,7 +7781,7 @@ PDC_Client_del_metadata(pdcid_t obj_id, int is_cont) } perr_t -PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, psize_t value_size) +PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, psize_t value_size) { perr_t ret_value = SUCCEED; pdc_kvtag_t kvtag; @@ -7781,6 +7790,7 @@ PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, psize_t value_si kvtag.name = tag_name; kvtag.value = (void *)tag_value; + kvtag.type = value_type; kvtag.size = (uint64_t)value_size; ret_value = PDC_add_kvtag(obj_id, &kvtag, 0); @@ -7793,7 +7803,8 @@ PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, psize_t value_si } perr_t -PDCobj_get_tag(pdcid_t obj_id, char *tag_name, void **tag_value, psize_t *value_size) +PDCobj_get_tag(pdcid_t obj_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, + psize_t *value_size) { perr_t ret_value = SUCCEED; pdc_kvtag_t *kvtag = NULL; @@ -7805,6 +7816,7 @@ PDCobj_get_tag(pdcid_t obj_id, char *tag_name, void **tag_value, psize_t *value_ PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: Error with PDC_get_kvtag", pdc_client_mpi_rank_g); *tag_value = kvtag->value; + *value_type = kvtag->type; *value_size = kvtag->size; done: diff --git a/src/api/pdc_obj/include/pdc_cont.h b/src/api/pdc_obj/include/pdc_cont.h index 844b15425..33e924c14 100644 --- a/src/api/pdc_obj/include/pdc_cont.h +++ b/src/api/pdc_obj/include/pdc_cont.h @@ -191,7 +191,8 @@ perr_t PDCcont_del(pdcid_t cont_id); * * \return Non-negative on success/Negative on failure */ -perr_t PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, psize_t value_size); +perr_t PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, + psize_t value_size); /** * *********** @@ -203,7 +204,8 @@ perr_t PDCcont_put_tag(pdcid_t cont_id, char *tag_name, void *tag_value, psize_t * * \return Non-negative on success/Negative on failure */ -perr_t PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, psize_t *value_size); +perr_t PDCcont_get_tag(pdcid_t cont_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, + psize_t *value_size); /** * Deleta a tag from a container diff --git a/src/api/pdc_obj/include/pdc_obj.h b/src/api/pdc_obj/include/pdc_obj.h index f678adf7f..8ad7a285a 100644 --- a/src/api/pdc_obj/include/pdc_obj.h +++ b/src/api/pdc_obj/include/pdc_obj.h @@ -409,7 +409,8 @@ perr_t PDCobj_del(pdcid_t obj_id); * * \return Non-negative on success/Negative on failure */ -perr_t PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, psize_t value_size); +perr_t PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void *tag_value, pdc_var_type_t value_type, + psize_t value_size); /** * Get tag information @@ -421,7 +422,8 @@ perr_t PDCobj_put_tag(pdcid_t obj_id, char *tag_name, void 
*tag_value, psize_t v * * \return Non-negative on success/Negative on failure */ -perr_t PDCobj_get_tag(pdcid_t obj_id, char *tag_name, void **tag_value, psize_t *value_size); +perr_t PDCobj_get_tag(pdcid_t obj_id, char *tag_name, void **tag_value, pdc_var_type_t *value_type, + psize_t *value_size); /** * Delete a tag from the object diff --git a/src/api/pdc_obj/include/pdc_prop_pkg.h b/src/api/pdc_obj/include/pdc_prop_pkg.h index db73120ac..52d80efa1 100644 --- a/src/api/pdc_obj/include/pdc_prop_pkg.h +++ b/src/api/pdc_obj/include/pdc_prop_pkg.h @@ -37,9 +37,10 @@ struct _pdc_cont_prop { }; typedef struct pdc_kvtag_t { - char * name; - uint32_t size; - void * value; + char * name; + uint32_t size; + pdc_var_type_t type; + void * value; } pdc_kvtag_t; struct _pdc_transform_state { diff --git a/src/api/pdc_obj/pdc_dt_conv.c b/src/api/pdc_obj/pdc_dt_conv.c index 0806919ee..f18e4aa43 100644 --- a/src/api/pdc_obj/pdc_dt_conv.c +++ b/src/api/pdc_obj/pdc_dt_conv.c @@ -34,10 +34,10 @@ PDC_UNKNOWN = -1, PDC_INT = 0, PDC_FLOAT = 1, PDC_DOUBLE = 2, -PDC_STRING = 3, -PDC_COMPOUND = 4, -PDC_ENUM = 5, -PDC_ARRAY = 6, +PDC_CHAR = 3, +PDC_STRING = 4, +PDC_BOOLEAN = 5, +PDC_SHORT = 6, */ /* Called if overflow is possible */ diff --git a/src/api/pdc_obj/pdc_obj.c b/src/api/pdc_obj/pdc_obj.c index 073ece24c..d402782f9 100644 --- a/src/api/pdc_obj/pdc_obj.c +++ b/src/api/pdc_obj/pdc_obj.c @@ -27,6 +27,7 @@ #include "pdc_malloc.h" #include "pdc_id_pkg.h" #include "pdc_cont.h" +#include "pdc_cont_pkg.h" #include "pdc_prop_pkg.h" #include "pdc_obj_pkg.h" #include "pdc_obj.h" diff --git a/src/commons/CMakeLists.txt b/src/commons/CMakeLists.txt new file mode 100644 index 000000000..9f8abf690 --- /dev/null +++ b/src/commons/CMakeLists.txt @@ -0,0 +1,210 @@ +#------------------------------------------------------------------------------ +# PDC Commons +#------------------------------------------------------------------------------ + +set(PDC_COMMON_LIBRARY_NAME pdc_commons CACHE INTERNAL "") + +#------------------------------------------------------------------------------ +# External dependencies +#------------------------------------------------------------------------------ + +# #set(PDC_EXT_LIB_DEPENDENCIES ${PDC_COMMON_LIBRARY_NAME} ${PDC_EXT_LIB_DEPENDENCIES}) +# set(PDC_EXT_INCLUDE_DEPENDENCIES ${CMAKE_CURRENT_SOURCE_DIR}/profiling) +# set(PDC_EXPORTED_LIBS pdcprof) + +if(THREADS_HAVE_PTHREAD_ARG) + set_property(TARGET ${PDC_COMMON_LIBRARY_NAME} PROPERTY COMPILE_OPTIONS "-pthread") + set_property(TARGET ${PDC_COMMON_LIBRARY_NAME} PROPERTY INTERFACE_COMPILE_OPTIONS "-pthread") +endif() + + +# Mercury +find_package(MERCURY REQUIRED) +if(MERCURY_FOUND) + message(STATUS "mercury dir = ${MERCURY_DIR}") + find_path(MERCURY_INCLUDE_DIR mercury.h HINTS ${MERCURY_DIR}) + find_library(MERCURY_LIBRARY mercury HINTS ${MERCURY_DIR}) + message(STATUS "mercury include dir = ${MERCURY_INCLUDE_DIR}") + message(STATUS "mercury lib = ${MERCURY_LIBRARY}") + set(PDC_EXT_INCLUDE_DEPENDENCIES ${MERCURY_INCLUDE_DIR} + ${PDC_EXT_INCLUDE_DEPENDENCIES} + ) + set(PDC_EXT_LIB_DEPENDENCIES mercury ${PDC_EXT_LIB_DEPENDENCIES}) +endif() + +include_directories(${PDC_EXT_INCLUDE_DEPENDENCIES}) + +#------------------------------------------------------------------------------ +# Include directories +#------------------------------------------------------------------------------ + +# Get a list of all directories that contain header files +file(GLOB_RECURSE LOCAL_INCLUDE_DIRS "*.h") + +# Remove the /filename.h at the end of each directory 
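The hunks above thread a `pdc_var_type_t` through `pdc_kvtag_t` and through the `PDCobj_put_tag`/`PDCobj_get_tag` (and `PDCcont_put_tag`/`PDCcont_get_tag`) signatures. A minimal usage sketch of the extended object-tag API follows; `obj_id` and the error handling are illustrative assumptions, not part of the patch.

```
/* Hypothetical sketch: attach a typed tag and read it back with the
 * extended signatures (obj_id is assumed to be a valid object handle). */
int            version = 7;
pdc_var_type_t type;
psize_t        size;
void          *value = NULL;

if (PDCobj_put_tag(obj_id, "version", &version, PDC_INT, sizeof(int)) < 0)
    printf("put_tag failed\n");

if (PDCobj_get_tag(obj_id, "version", &value, &type, &size) < 0)
    printf("get_tag failed\n");
else if (type == PDC_INT && size == sizeof(int))
    printf("version = %d\n", *(int *)value);
```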
+list(TRANSFORM LOCAL_INCLUDE_DIRS REPLACE "/[^/]*$" "") + +# Remove duplicates +list(REMOVE_DUPLICATES LOCAL_INCLUDE_DIRS) + +set(PDC_COMMONS_INCLUDE_DIRS + ${LOCAL_INCLUDE_DIRS} + ${PDC_INCLUDES_BUILD_TIME} + ${PROJECT_BINARY_DIR} + ${PDC_SOURCE_DIR} + ${PDC_EXT_INCLUDE_DEPENDENCIES} +) + +include_directories( + ${PDC_COMMONS_INCLUDE_DIRS} +) + +message(STATUS "PDC_COMMONS_INCLUDE_DIRS: ${PDC_COMMONS_INCLUDE_DIRS}") + +install( + FILES + ${CMAKE_BINARY_DIR}/pdc_config.h + DESTINATION + ${PDC_INSTALL_INCLUDE_DIR} + COMPONENT + headers +) + +#------------------------------------------------------------------------------ +# Configure module header files +#------------------------------------------------------------------------------ +# Set unique vars used in the autogenerated config file (symbol import/export) +if(BUILD_SHARED_LIBS) + set(PDC_BUILD_SHARED_LIBS 1) + set(PDC_LIBTYPE SHARED) +else() + set(PDC_BUILD_SHARED_LIBS 0) + set(PDC_LIBTYPE STATIC) +endif() + +if(PDC_ENABLE_TIMING) + add_definitions(-DPDC_TIMING=1) +endif() + +#------------------------------------------------------------------------------ +# Set sources +#------------------------------------------------------------------------------ + +# Collect all source files +file(GLOB_RECURSE PDC_COMMONS_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.c) +file(GLOB_RECURSE PDC_COMMONS_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.h) + + +#------------------------------------------------------------------------------ +# Libraries +#------------------------------------------------------------------------------ +# PDC COMMONS + + +add_library(${PDC_COMMON_LIBRARY_NAME} ${PDC_LIBTYPE} ${PDC_COMMONS_SOURCES} ${PDC_COMMONS_HEADERS}) + +target_include_directories(${PDC_COMMON_LIBRARY_NAME} + PUBLIC "$" + $ +) + +target_link_libraries(${PDC_COMMON_LIBRARY_NAME} INTERFACE + ${PDC_EXT_LIB_DEPENDENCIES} +) + +set(PDC_EXPORTED_LIBS ${PDC_COMMON_LIBRARY_NAME} ${PDC_EXPORTED_LIBS}) + +#----------------------------------------------------------------------------- +# Specify project header files to be installed +#----------------------------------------------------------------------------- + + +set(PDC_PUBLIC_HEADERS "") +set(PUBLIC_HEADER_DIR_LIST + ${CMAKE_CURRENT_SOURCE_DIR}/include + ${CMAKE_CURRENT_SOURCE_DIR}/generic/include + ${CMAKE_CURRENT_SOURCE_DIR}/profiling/include +) + +foreach(_header_dir ${PUBLIC_HEADER_DIR_LIST}) + file(GLOB_RECURSE _dir_headers ${_header_dir}/*.h) + list(APPEND PDC_PUBLIC_HEADERS ${_dir_headers}) +endforeach() + +set(PDC_COMMONS_HEADERS + ${PDC_PUBLIC_HEADERS} + ${PROJECT_BINARY_DIR}/pdc_config_sys.h + ${PROJECT_BINARY_DIR}/pdc_config.h + ) + +#----------------------------------------------------------------------------- +# Add file(s) to CMake Install +#----------------------------------------------------------------------------- +install( + FILES + ${PDC_COMMONS_HEADERS} + DESTINATION + ${PDC_INSTALL_INCLUDE_DIR} + COMPONENT + headers +) + +#----------------------------------------------------------------------------- +# Add Target(s) to CMake Install +#----------------------------------------------------------------------------- + +install( + TARGETS + ${PDC_COMMON_LIBRARY_NAME} + EXPORT + ${PDC_EXPORTED_TARGETS} + LIBRARY DESTINATION ${PDC_INSTALL_LIB_DIR} + ARCHIVE DESTINATION ${PDC_INSTALL_LIB_DIR} + RUNTIME DESTINATION ${PDC_INSTALL_BIN_DIR} +) + +#----------------------------------------------------------------------------- +# Add Target(s) to CMake Install for import into other projects 
+#----------------------------------------------------------------------------- +install( + EXPORT + ${PDC_EXPORTED_TARGETS} + DESTINATION + ${PDC_INSTALL_DATA_DIR}/cmake/pdc + FILE + ${PDC_EXPORTED_TARGETS}.cmake +) + +#----------------------------------------------------------------------------- +# Export all exported targets to the build tree for use by parent project +#----------------------------------------------------------------------------- +if(NOT PDC_EXTERNALLY_CONFIGURED) +EXPORT ( + TARGETS + ${PDC_EXPORTED_LIBS} + FILE + ${PDC_EXPORTED_TARGETS}.cmake +) +endif() + +#------------------------------------------------------------------------------ +# Set variables for parent scope +#------------------------------------------------------------------------------ +# Used by config.cmake.build.in and Testing +set(PDC_INCLUDES_BUILD_TIME + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} + ${PDC_EXT_INCLUDE_DEPENDENCIES} + ${PDC_COMMONS_INCLUDE_DIRS} + PARENT_SCOPE +) + +# Used by config.cmake.install.in +set(PDC_INCLUDES_INSTALL_TIME + ${PDC_COMMONS_INCLUDE_DIRS} + ${PDC_INSTALL_INCLUDE_DIR} + ${PDC_EXT_INCLUDE_DEPENDENCIES} + PARENT_SCOPE +) + +set(PDC_COMMONS_LIBRARIES ${PDC_COMMON_LIBRARY_NAME} PARENT_SCOPE) \ No newline at end of file diff --git a/src/commons/generic/include/pdc_generic.h b/src/commons/generic/include/pdc_generic.h new file mode 100644 index 000000000..47322b553 --- /dev/null +++ b/src/commons/generic/include/pdc_generic.h @@ -0,0 +1,219 @@ +#ifndef PDC_GENERIC_H +#define PDC_GENERIC_H + +#include +#include +#include +#include +#include + +#ifndef __cplusplus +#if __STDC_VERSION__ >= 199901L +/* C99 or later */ +#include +#else +/* Pre-C99 */ +typedef enum { false = 0, true = 1 } bool; +#endif +#endif + +typedef enum { + PDC_UNKNOWN = -1, /* error */ + PDC_INT = 0, /* integer types (identical to int32_t) */ + PDC_FLOAT = 1, /* floating-point types */ + PDC_DOUBLE = 2, /* double types */ + PDC_CHAR = 3, /* character types */ + PDC_STRING = 4, /* string types */ + PDC_BOOLEAN = 5, /* boolean types */ + PDC_SHORT = 6, /* short types */ + PDC_UINT = 7, /* unsigned integer types (identical to uint32_t) */ + PDC_INT64 = 8, /* 64-bit integer types */ + PDC_UINT64 = 9, /* 64-bit unsigned integer types */ + PDC_INT16 = 10, /* 16-bit integer types */ + PDC_INT8 = 11, /* 8-bit integer types */ + PDC_UINT8 = 12, /* 8-bit unsigned integer types */ + PDC_UINT16 = 13, /* 16-bit unsigned integer types */ + PDC_INT32 = 14, /* 32-bit integer types */ + PDC_UINT32 = 15, /* 32-bit unsigned integer types */ + PDC_LONG = 16, /* long types */ + PDC_VOID_PTR = 17, /* void pointer type */ + PDC_SIZE_T = 18, /* size_t type */ + PDC_TYPE_COUNT = 19 /* this is the number of var types and has to be the last */ +} pdc_c_var_type_t; + +typedef pdc_c_var_type_t PDC_CType; + +typedef enum { + PDC_CLS_SCALAR, + PDC_CLS_ARRAY, + PDC_CLS_ENUM, // not implemented, users can use PDC_CT_INT + PDC_CLS_STRUCT, // not implemented, users can use embedded key value pairs for the members in a struct + PDC_CLS_UNION, // not implemented, users can use embedded key value pairs for the only one member value + // in a union. + PDC_CLS_POINTER, // not implemented, users can use PDC_CT_INT64_T to store the pointer address, but + // won't work for distributed memory. + PDC_CLS_FUNCTION, // not implemented, users can use PDC_CT_INT64_T to store the function address, but + // won't work for distributed memory. + PDC_CLS_COUNT // just the count of the enum. 
+} pdc_c_var_class_t; + +typedef pdc_c_var_class_t PDC_CType_Class; + +// clang-format off +static const size_t DataTypeSizes[PDC_TYPE_COUNT] = { + sizeof(int), + sizeof(float), + sizeof(double), + sizeof(char), + sizeof(char *), + sizeof(bool), + sizeof(short), + sizeof(unsigned int), + sizeof(int64_t), + sizeof(uint64_t), + sizeof(int16_t), + sizeof(int8_t), + sizeof(uint8_t), + sizeof(uint16_t), + sizeof(int32_t), + sizeof(uint32_t), + sizeof(long), + sizeof(void *), + sizeof(size_t) +}; + +static const char *DataTypeNames[PDC_TYPE_COUNT] = { + "int", + "float", + "double", + "char", + "char*", + "bool", + "short", + "unsigned int", + "int64_t", + "uint64_t", + "int16_t", + "int8_t", + "uint8_t", + "uint16_t", + "int32_t", + "uint32_t", + "long", + "void*", + "size_t" +}; + +static const char *DataTypeEnumNames[PDC_TYPE_COUNT] = { + "PDC_INT", + "PDC_FLOAT", + "PDC_DOUBLE", + "PDC_CHAR", + "PDC_STRING", + "PDC_BOOLEAN", + "PDC_SHORT", + "PDC_UINT", + "PDC_INT64", + "PDC_UINT64", + "PDC_INT16", + "PDC_INT8", + "PDC_UINT8", + "PDC_UINT16", + "PDC_INT32", + "PDC_UINT32", + "PDC_LONG", + "PDC_VOID_PTR", + "PDC_SIZE_T" +}; + +static const char *DataTypeFormat[PDC_TYPE_COUNT] = { + "%d", // int + "%f", // float + "%lf", // double + "%c", // char + "%s", // char* + "%d", // bool (represented as an integer) + "%hd", // short + "%u", // unsigned int + "%lld", // int64_t + "%llu", // uint64_t + "%hd", // int16_t + "%hhd", // int8_t + "%hhu", // uint8_t + "%hu", // uint16_t + "%d", // int32_t + "%u", // uint32_t + "%ld", // long + "%p", // void* (pointer) + "%zu" // size_t +}; + +// clang-format on + +static const char * +get_enum_name_by_dtype(pdc_c_var_type_t type) +{ + if (type < 0 || type >= PDC_TYPE_COUNT) { + return NULL; + } + return DataTypeEnumNames[type]; +} + +static const size_t +get_size_by_dtype(pdc_c_var_type_t type) +{ + if (type < 0 || type >= PDC_TYPE_COUNT) { + return 0; + } + return DataTypeSizes[type]; +} + +static const size_t +get_size_by_class_n_type(void *data, size_t item_count, pdc_c_var_class_t pdc_class, + pdc_c_var_type_t pdc_type) +{ + size_t size = 0; + if (pdc_class == PDC_CLS_SCALAR) { + if (pdc_type == PDC_STRING) { + size = (strlen((char *)data) + 1) * sizeof(char); + } + else { + size = get_size_by_dtype(pdc_type); + } + } + else if (pdc_class == PDC_CLS_ARRAY) { + if (pdc_type == PDC_STRING) { + char **str_arr = (char **)data; + int i = 0; + for (i = 0; i < item_count; i++) { + size = size + (strlen(str_arr[i]) + 1) * sizeof(char); + } + } + else { + size = item_count * get_size_by_dtype(pdc_type); + } + } + return size; +} + +static const char * +get_name_by_dtype(pdc_c_var_type_t type) +{ + if (type < 0 || type >= PDC_TYPE_COUNT) { + return NULL; + } + return DataTypeNames[type]; +} + +static pdc_c_var_type_t +get_dtype_by_enum_name(const char *enumName) +{ + for (int i = 0; i < PDC_TYPE_COUNT; i++) { + if (strcmp(DataTypeEnumNames[i], enumName) == 0) { + return (pdc_c_var_type_t)i; + } + } + return PDC_UNKNOWN; // assuming PDC_UNKNOWN is the enum value for "unknown" +} + +#endif /* PDC_GENERIC_H */ \ No newline at end of file diff --git a/src/api/include/pdc_public.h b/src/commons/include/pdc_public.h similarity index 66% rename from src/api/include/pdc_public.h rename to src/commons/include/pdc_public.h index 8c47976f0..6c135f657 100644 --- a/src/api/include/pdc_public.h +++ b/src/commons/include/pdc_public.h @@ -28,6 +28,7 @@ #include #include #include +#include "pdc_generic.h" /*******************/ /* Public Typedefs */ @@ -41,24 +42,7 @@ typedef int 
PDC_int_t; typedef float PDC_float_t; typedef double PDC_double_t; -typedef enum { - PDC_UNKNOWN = -1, /* error */ - PDC_INT = 0, /* integer types */ - PDC_FLOAT = 1, /* floating-point types */ - PDC_DOUBLE = 2, /* double types */ - PDC_CHAR = 3, /* character types */ - PDC_COMPOUND = 4, /* compound types */ - PDC_ENUM = 5, /* enumeration types */ - PDC_ARRAY = 6, /* Array types */ - PDC_UINT = 7, /* unsigned integer types */ - PDC_INT64 = 8, /* 64-bit integer types */ - PDC_UINT64 = 9, /* 64-bit unsigned integer types */ - PDC_INT16 = 10, - PDC_INT8 = 11, - PDC_UINT8 = 12, - PDC_UINT16 = 13, - NCLASSES = 14 /* this must be last */ -} pdc_var_type_t; +typedef pdc_c_var_type_t pdc_var_type_t; typedef enum { PDC_PERSIST, PDC_TRANSIENT } pdc_lifetime_t; diff --git a/src/api/profiling/CMakeLists.txt b/src/commons/profiling/CMakeLists.txt similarity index 92% rename from src/api/profiling/CMakeLists.txt rename to src/commons/profiling/CMakeLists.txt index 1b2ee8254..05e2c90b9 100644 --- a/src/api/profiling/CMakeLists.txt +++ b/src/commons/profiling/CMakeLists.txt @@ -1,7 +1,8 @@ #------------------------------------------------------------------------------ # Include source and build directories #------------------------------------------------------------------------------ -include_directories( +set(PROFILING_INCLUDE_DIRS + ${PDC_COMMON_INCLUDE_DIRS} ${PDC_INCLUDES_BUILD_TIME} ${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR} @@ -21,6 +22,10 @@ include_directories( ${MERCURY_INCLUDE_DIR} ${FASTBIT_INCLUDE_DIR} ) +message(STATUS "PDC_COMMON_INCLUDE_DIRS: ${PDC_COMMON_INCLUDE_DIRS}") +include_directories( + ${PROFILING_INCLUDE_DIRS} +) install( FILES @@ -65,6 +70,8 @@ set(PDC_PROF_SRCS # PDCPROF add_library(pdcprof ${PDC_PROF_SRCS}) pdc_set_lib_options(pdcprof "pdcprof" ${PDC_LIBTYPE}) +target_include_directories(pdcprof PUBLIC "$" + $) set(PDC_EXPORTED_LIBS pdcprof ${PDC_EXPORTED_LIBS}) diff --git a/src/api/profiling/include/pdc_hashtab.h b/src/commons/profiling/include/pdc_hashtab.h similarity index 100% rename from src/api/profiling/include/pdc_hashtab.h rename to src/commons/profiling/include/pdc_hashtab.h diff --git a/src/api/profiling/include/pdc_stack_ops.h b/src/commons/profiling/include/pdc_stack_ops.h similarity index 100% rename from src/api/profiling/include/pdc_stack_ops.h rename to src/commons/profiling/include/pdc_stack_ops.h diff --git a/src/api/profiling/pdc_hashtab.c b/src/commons/profiling/pdc_hashtab.c similarity index 100% rename from src/api/profiling/pdc_hashtab.c rename to src/commons/profiling/pdc_hashtab.c diff --git a/src/api/profiling/pdc_stack_ops.c b/src/commons/profiling/pdc_stack_ops.c similarity index 100% rename from src/api/profiling/pdc_stack_ops.c rename to src/commons/profiling/pdc_stack_ops.c diff --git a/src/commons/serde/include/pdc_serde.h b/src/commons/serde/include/pdc_serde.h new file mode 100644 index 000000000..8c5efae9e --- /dev/null +++ b/src/commons/serde/include/pdc_serde.h @@ -0,0 +1,156 @@ +#ifndef PDC_SERDE_H +#define PDC_SERDE_H + +#include +#include +#include +#include "pdc_generic.h" + +#define MAX_KEYS 10 +#define MAX_BUFFER_SIZE 1000 + +typedef struct { + PDC_CType pdc_type; /**< Data type of the key */ + size_t size; /**< Size of the key */ + void * key; /**< Pointer to the key data */ +} PDC_SERDE_Key; + +typedef struct { + PDC_CType_Class pdc_class; /**< Class of the value */ + PDC_CType pdc_type; /**< Data type of the value */ + size_t size; // size of the data. 
If a string, it is strlen(data) + 1; + // if an array, it is the number of elements; + // if a struct, it is the totalSize of the data chunk of the struct, etc. + void *data; /**< Pointer to the value data */ +} PDC_SERDE_Value; + +typedef struct { + PDC_SERDE_Key *keys; /**< Array of keys */ + size_t numKeys; /**< Number of keys */ + size_t totalSize; /**< Total size of the header */ +} PDC_SERDE_Header; + +typedef struct { + size_t numValues; /**< Number of values */ + PDC_SERDE_Value *values; /**< Array of values */ + size_t totalSize; /**< Total size of the data */ +} PDC_SERDE_Data; + +typedef struct { + PDC_SERDE_Header *header; /**< Pointer to the header */ + PDC_SERDE_Data * data; /**< Pointer to the data */ + size_t totalSize; /**< Total size of the serialized data */ +} PDC_SERDE_SerializedData; + +/** + * @brief Initialize a serialized data structure + * + * @param initial_field_count Number of initial fields to allocate space for + * + * @return Pointer to the initialized PDC_SERDE_SerializedData structure + */ +PDC_SERDE_SerializedData *pdc_serde_init(int initial_field_count); + +/** + * @brief Append a key-value pair to the serialized data structure + * + * @param data Pointer to the PDC_SERDE_SerializedData structure + * @param key Pointer to the PDC_SERDE_Key structure representing the key + * @param value Pointer to the PDC_SERDE_Value structure representing the value + */ +void pdc_serde_append_key_value(PDC_SERDE_SerializedData *data, PDC_SERDE_Key *key, PDC_SERDE_Value *value); + +/** + * @brief get the total size of PDC_SERDE_SerializedData structure instance + * + * @param data Pointer to the PDC_SERDE_SerializedData structure instance + * + * @return total size of the PDC_SERDE_SerializedData structure instance + */ +size_t get_total_size_for_serialized_data(PDC_SERDE_SerializedData *data); + +/** + * @brief Serialize the data in the serialized data structure and return the buffer + * + * @param data Pointer to the PDC_SERDE_SerializedData structure + * + * @return Pointer to the buffer containing the serialized data + */ +void *pdc_serde_serialize(PDC_SERDE_SerializedData *data); + +/** + * @brief Deserialize the buffer and return the deserialized data structure + * + * @param buffer Pointer to the buffer containing the serialized data + * + * @return Pointer to the deserialized PDC_SERDE_SerializedData structure + */ +PDC_SERDE_SerializedData *pdc_serde_deserialize(void *buffer); + +/** + * @brief Free the memory allocated for the serialized data structure + * + * @param data Pointer to the PDC_SERDE_SerializedData structure to be freed + */ +void pdc_serde_free(PDC_SERDE_SerializedData *data); + +/** + * @brief Print the contents of the serialized data structure + * + * @param data Pointer to the PDC_SERDE_SerializedData structure to be printed + */ +void pdc_serde_print(PDC_SERDE_SerializedData *data); + +/** + * @brief Create a PDC_SERDE_Key structure + * + * @param key Pointer to the key data + * @param pdc_type Data type of the key. For SERDE_Key, we only support PDC_CLS_SCALAR class. 
+ * @param size Size of the key data + * + * @return Pointer to the created PDC_SERDE_Key structure + */ +static inline PDC_SERDE_Key * +PDC_SERDE_KEY(void *key, PDC_CType pdc_type, size_t size) +{ + PDC_SERDE_Key *pdc_key = (PDC_SERDE_Key *)malloc(sizeof(PDC_SERDE_Key)); + size_t key_size = (size_t)get_size_by_class_n_type(key, size, PDC_CLS_SCALAR, pdc_type); + pdc_key->key = malloc(key_size); + memcpy(pdc_key->key, key, key_size); + pdc_key->pdc_type = pdc_type; + pdc_key->size = key_size; + return pdc_key; +} + +/** + * @brief Create a PDC_SERDE_Value structure + * + * @param data Pointer to the value data + * @param pdc_type Data type of the value + * @param pdc_class Class of the value + * @param size Size of the value data + * + * @return Pointer to the created PDC_SERDE_Value structure + */ +static inline PDC_SERDE_Value * +PDC_SERDE_VALUE(void *data, PDC_CType pdc_type, PDC_CType_Class pdc_class, size_t size) +{ + PDC_SERDE_Value *pdc_value = (PDC_SERDE_Value *)malloc(sizeof(PDC_SERDE_Value)); + size_t value_size = 0; + if (pdc_class == PDC_CLS_STRUCT) { + // TODO: we need to check if data is a valid PDC_SERDE_SerializedData structure. + PDC_SERDE_SerializedData *struct_data = (PDC_SERDE_SerializedData *)data; + size = struct_data->totalSize; + } + else { + value_size = (size_t)get_size_by_class_n_type(data, size, pdc_class, pdc_type); + } + pdc_value->data = malloc(value_size); + memcpy(pdc_value->data, data, value_size); + pdc_value->pdc_class = pdc_class; + pdc_value->pdc_type = pdc_type; + pdc_value->size = value_size; + return pdc_value; +} + +#endif /* PDC_SERDE_H */ \ No newline at end of file diff --git a/src/commons/serde/pdc_serde.c b/src/commons/serde/pdc_serde.c new file mode 100644 index 000000000..fe9e9aa64 --- /dev/null +++ b/src/commons/serde/pdc_serde.c @@ -0,0 +1,346 @@ +#include "pdc_serde.h" + +PDC_SERDE_SerializedData * +pdc_serde_init(int initial_field_count) +{ + PDC_SERDE_SerializedData *data = malloc(sizeof(PDC_SERDE_SerializedData)); + data->header = malloc(sizeof(PDC_SERDE_Header)); + data->header->keys = malloc(sizeof(PDC_SERDE_Key) * initial_field_count); + data->header->numKeys = 0; + data->header->totalSize = 0; + data->data = malloc(sizeof(PDC_SERDE_Data)); + data->data->values = malloc(sizeof(PDC_SERDE_Value) * initial_field_count); + data->data->numValues = 0; + data->data->totalSize = 0; + return data; +} + +void +pdc_serde_append_key_value(PDC_SERDE_SerializedData *data, PDC_SERDE_Key *key, PDC_SERDE_Value *value) +{ + data->header->keys[data->header->numKeys] = *key; + data->header->numKeys++; + // append type, size, and key + data->header->totalSize += (sizeof(int) + sizeof(size_t) + key->size); + data->data->values[data->data->numValues] = *value; + data->data->numValues++; + // append class, type, size, and data + data->data->totalSize += (sizeof(int) + sizeof(int) + sizeof(size_t) + value->size); +} + +size_t +get_total_size_for_serialized_data(PDC_SERDE_SerializedData *data) +{ + if (data->totalSize <= 0) { + size_t total_size = data->header->totalSize + data->data->totalSize + sizeof(size_t) * 6; + data->totalSize = total_size; + } + return data->totalSize; +} + +// clang-format off +/** + * This function serializes the entire PDC_SERDE_SerializedData structure. 
+ * + * The overview of the serialized binary data layout is: + * +---------------------+---------------------+----------------------+---------------------+----------------------+----------------------+----------------------+----------------------+ + * | Size of the Header | Size of the Data | Number of Keys | Header Region | Data Offset | Number of Values | Data Region | Data Offset | + * | (size_t) | (size_t) | (size_t) | | (size_t) | (size_t) | | (size_t) | + * +---------------------+---------------------+----------------------+---------------------+----------------------+----------------------+----------------------+----------------------+ + * + * The first 2 field is called meta-header, which provides metadata about size of the header region and the size of the data region. + * Note that the size of the header region doesn't include the 'Number of Keys' field. + * Also, the size of the data region doesn't include the 'Data Offset' field. + * + * Then the following is the header region with two keys: + * +-----------------------+-------------------------+-----------------------------+---------------------------+--------------------------+-----------------------------+---------------------------+ + * | Number of Keys | Key 1 Type | Key 1 Size | Key 1 Data | Key 2 Type | Key 2 Size | Key 2 Data | + * | (size_t) | (int8_t) | (size_t) | (Variable size depending | (int8_t) | (size_t) | (Variable size depending | + * | | | | on Key 1 Size) | | | on Key 2 Size) | + * +-----------------------+-------------------------+-----------------------------+---------------------------+--------------------------+-----------------------------+---------------------------+ + * + * Then, the following is a header offset validation point and the data region with the final offset validation point. + * + * |----------------------------------------------------------------------------------------------------------| + * | Data Offset (size_t) | Number of Value Entries (size_t) | Value 1 Class (int8_t) | Value 1 Type (int8_t) | + * |----------------------------------------------------------------------------------------------------------| + * | Value 1 Size (size_t)| Value 1 Data (Variable size depending on Value 1 Size) | Value 2 Class (int8_t) | + * |----------------------------------------------------------------------------------------------------------| + * | Value 2 Type (int8_t)| Value 2 Size (size_t) | Value 2 Data (Variable size depending on Value 2 Size) | + * |----------------------------------------------------------------------------------------------------------| + * | ...repeated for the number of value entries in the data... | + * |----------------------------------------------------------------------------------------------------------| + * | Final Data Offset (size_t) | + * |----------------------------------------------------------------------------------------------------------| + * + * Please refer to `get_size_by_class_n_type` function in pdc_generic.h for size calculation on scalar values and array values. 
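Given the layout described above, a minimal round-trip sketch (mirroring the `test_serde_framework` routine later in this file) shows how the serde API declared in `pdc_serde.h` is meant to be driven; the key and value contents are illustrative only.

```
/* Hypothetical round-trip sketch for the serde API. */
PDC_SERDE_SerializedData *rec = pdc_serde_init(1);
int answer = 42;

pdc_serde_append_key_value(
    rec,
    PDC_SERDE_KEY("answer", PDC_STRING, (strlen("answer") + 1) * sizeof(char)),
    PDC_SERDE_VALUE(&answer, PDC_INT, PDC_CLS_SCALAR, sizeof(int)));

void *buf = pdc_serde_serialize(rec);                        /* header + data + offsets */
PDC_SERDE_SerializedData *copy = pdc_serde_deserialize(buf); /* checks both offset markers */
pdc_serde_print(copy);

pdc_serde_free(rec);
pdc_serde_free(copy);
free(buf);
```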
+ * + */ +// clang-format on +void * +pdc_serde_serialize(PDC_SERDE_SerializedData *data) +{ + // The buffer contains: + // the size of the header (size_t) + + // the size of the data (size_t) + + // the number of keys (size_t) + + // the header region + + // the data offset (size_t) + + // the number of value entries (size_t) + + // the data region + void *buffer = malloc(get_total_size_for_serialized_data(data)); + // serialize the meta header, which contains only the size of the header and the size of the data region. + memcpy(buffer, &data->header->totalSize, sizeof(size_t)); + memcpy(buffer + sizeof(size_t), &data->data->totalSize, sizeof(size_t)); + + // serialize the header + // start with the number of keys + memcpy(buffer + sizeof(size_t) * 2, &data->header->numKeys, sizeof(size_t)); + // then the keys + size_t offset = sizeof(size_t) * 3; + for (int i = 0; i < data->header->numKeys; i++) { + int8_t pdc_type = (int8_t)(data->header->keys[i].pdc_type); + memcpy(buffer + offset, &pdc_type, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(buffer + offset, &data->header->keys[i].size, sizeof(size_t)); + offset += sizeof(size_t); + memcpy(buffer + offset, data->header->keys[i].key, data->header->keys[i].size); + offset += data->header->keys[i].size; + } + + // serialize the data offset, this is for validation purpose to see if header region is corrupted. + memcpy(buffer + offset, &offset, sizeof(size_t)); + offset += sizeof(size_t); + + // serialize the data + // start with the number of value entries + memcpy(buffer + offset, &data->data->numValues, sizeof(size_t)); + offset += sizeof(size_t); + // then the values + for (int i = 0; i < data->data->numValues; i++) { + int8_t pdc_class = (int8_t)data->data->values[i].pdc_class; + int8_t pdc_type = (int8_t)data->data->values[i].pdc_type; + memcpy(buffer + offset, &pdc_class, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(buffer + offset, &pdc_type, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(buffer + offset, &data->data->values[i].size, sizeof(size_t)); + offset += sizeof(size_t); + memcpy(buffer + offset, data->data->values[i].data, data->data->values[i].size); + offset += data->data->values[i].size; + } + // serialize the data offset again, this is for validation purpose to see if data region is corrupted. 
+ memcpy(buffer + offset, &offset, sizeof(size_t)); + offset += sizeof(size_t); + return buffer; +} + +PDC_SERDE_SerializedData * +pdc_serde_deserialize(void *buffer) +{ + size_t offset = 0; + // read the meta header + size_t headerSize; + size_t dataSize; + memcpy(&headerSize, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + memcpy(&dataSize, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + + // read the header + size_t numKeys; + memcpy(&numKeys, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + PDC_SERDE_Header *header = malloc(sizeof(PDC_SERDE_Header)); + header->keys = malloc(sizeof(PDC_SERDE_Key) * numKeys); + header->numKeys = numKeys; + header->totalSize = headerSize; + for (int i = 0; i < numKeys; i++) { + int8_t pdc_type; + size_t size; + memcpy(&pdc_type, buffer + offset, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(&size, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + void *key = malloc(size); + memcpy(key, buffer + offset, size); + offset += size; + header->keys[i].key = key; + header->keys[i].pdc_type = (PDC_CType)pdc_type; + header->keys[i].size = size; + } + + // read the data offset + size_t dataOffset; + memcpy(&dataOffset, buffer + offset, sizeof(size_t)); + // check the data offset + if (dataOffset != offset) { + printf("Error: data offset does not match the expected offset.\n"); + return NULL; + } + offset += sizeof(size_t); + + // read the data + size_t numValues; + memcpy(&numValues, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + PDC_SERDE_Data *data = malloc(sizeof(PDC_SERDE_Data)); + data->values = malloc(sizeof(PDC_SERDE_Value) * numValues); + data->numValues = numValues; + data->totalSize = dataSize; + for (int i = 0; i < numValues; i++) { + int8_t pdc_class; + int8_t pdc_type; + size_t size; + memcpy(&pdc_class, buffer + offset, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(&pdc_type, buffer + offset, sizeof(int8_t)); + offset += sizeof(int8_t); + memcpy(&size, buffer + offset, sizeof(size_t)); + offset += sizeof(size_t); + void *value = malloc(size); + memcpy(value, buffer + offset, size); + offset += size; + data->values[i].data = value; + data->values[i].pdc_class = (PDC_CType_Class)pdc_class; + data->values[i].pdc_type = (PDC_CType)pdc_type; + data->values[i].size = size; + } + // check the total size + memcpy(&dataOffset, buffer + offset, sizeof(size_t)); + // check the data offset + if (dataOffset != offset) { + printf("Error: data offset does not match the expected offset.\n"); + return NULL; + } + offset += sizeof(size_t); + if (offset != headerSize + sizeof(size_t) * 6 + dataSize) { + printf("Error: total size does not match the expected size.\n"); + return NULL; + } + // create the serialized data + PDC_SERDE_SerializedData *serializedData = malloc(sizeof(PDC_SERDE_SerializedData)); + serializedData->header = header; + serializedData->data = data; + serializedData->totalSize = headerSize + dataSize + sizeof(size_t) * 6; + + return serializedData; +} + +void +pdc_serde_free(PDC_SERDE_SerializedData *data) +{ + for (int i = 0; i < data->header->numKeys; i++) { + free(data->header->keys[i].key); + } + free(data->header->keys); + for (int i = 0; i < data->data->numValues; i++) { + free(data->data->values[i].data); + } + free(data->data->values); + free(data->header); + free(data->data); + free(data); +} + +void +pdc_serde_print(PDC_SERDE_SerializedData *data) +{ + printf("Header:\n"); + printf(" numKeys: %zu\n", data->header->numKeys); + printf(" totalSize: 
%zu\n", data->header->totalSize); + for (int i = 0; i < data->header->numKeys; i++) { + printf(" key %d:\n", i); + printf(" type: %d\n", data->header->keys[i].pdc_type); + printf(" size: %zu\n", data->header->keys[i].size); + printf(" key: %s\n", (char *)data->header->keys[i].key); + } + printf("Data:\n"); + printf(" numValues: %zu\n", data->data->numValues); + printf(" totalSize: %zu\n", data->data->totalSize); + for (int i = 0; i < data->data->numValues; i++) { + printf(" value %d:\n", i); + printf(" class: %d\n", data->data->values[i].pdc_class); + printf(" type: %d\n", data->data->values[i].pdc_type); + printf(" size: %zu\n", data->data->values[i].size); + printf(" data: "); + if (data->data->values[i].pdc_class == PDC_STRING) { + printf("%s\n", (char *)data->data->values[i].data); + } + else { + printf("\n"); + } + } +} + +int +test_serde_framework() +{ + // Initialize a serialized data structure + PDC_SERDE_SerializedData *data = pdc_serde_init(5); + + // Create and append key-value pairs for different data types + char * intKey_str = "int"; + int intVal = 42; + PDC_SERDE_Key * intKey = PDC_SERDE_KEY(intKey_str, PDC_STRING, sizeof(intKey_str)); + PDC_SERDE_Value *intValue = PDC_SERDE_VALUE(&intVal, PDC_INT, PDC_CLS_SCALAR, sizeof(int)); + pdc_serde_append_key_value(data, intKey, intValue); + + char * doubleKey_str = "double"; + double doubleVal = 3.14159; + PDC_SERDE_Key * doubleKey = PDC_SERDE_KEY(doubleKey_str, PDC_STRING, sizeof(doubleKey_str)); + PDC_SERDE_Value *doubleValue = PDC_SERDE_VALUE(&doubleVal, PDC_DOUBLE, PDC_CLS_SCALAR, sizeof(double)); + pdc_serde_append_key_value(data, doubleKey, doubleValue); + + char * strKey_str = "string"; + char * strVal = "Hello, World!"; + PDC_SERDE_Key * strKey = PDC_SERDE_KEY(strKey_str, PDC_STRING, (strlen(strKey_str) + 1) * sizeof(char)); + PDC_SERDE_Value *strValue = + PDC_SERDE_VALUE(strVal, PDC_STRING, PDC_CLS_SCALAR, (strlen(strVal) + 1) * sizeof(char)); + pdc_serde_append_key_value(data, strKey, strValue); + + char * arrayKey_str = "array"; + int intArray[3] = {1, 2, 3}; + PDC_SERDE_Key * arrayKey = PDC_SERDE_KEY(arrayKey_str, PDC_STRING, sizeof(arrayKey_str)); + PDC_SERDE_Value *arrayValue = PDC_SERDE_VALUE(intArray, PDC_INT, PDC_CLS_ARRAY, sizeof(int) * 3); + pdc_serde_append_key_value(data, arrayKey, arrayValue); + + typedef struct { + int x; + int y; + } Point; + + char *pointKey = "point"; + Point pointVal = {10, 20}; + + PDC_SERDE_SerializedData *point_data = pdc_serde_init(2); + PDC_SERDE_Key * x_name = PDC_SERDE_KEY("x", PDC_STRING, sizeof(char *)); + PDC_SERDE_Value * x_value = PDC_SERDE_VALUE(&pointVal.x, PDC_INT, PDC_CLS_SCALAR, sizeof(int)); + + PDC_SERDE_Key * y_name = PDC_SERDE_KEY("y", PDC_STRING, sizeof(char *)); + PDC_SERDE_Value *y_value = PDC_SERDE_VALUE(&pointVal.y, PDC_INT, PDC_CLS_SCALAR, sizeof(int)); + + pdc_serde_append_key_value(point_data, x_name, x_value); + pdc_serde_append_key_value(point_data, y_name, y_value); + void *point_buffer = pdc_serde_serialize(point_data); + + PDC_SERDE_Key * structKey = PDC_SERDE_KEY(pointKey, PDC_STRING, sizeof(pointKey)); + PDC_SERDE_Value *structValue = PDC_SERDE_VALUE(point_buffer, PDC_VOID_PTR, PDC_CLS_STRUCT, sizeof(Point)); + pdc_serde_append_key_value(data, structKey, structValue); + + // Serialize the data + void *buffer = pdc_serde_serialize(data); + + // Deserialize the buffer + PDC_SERDE_SerializedData *deserializedData = pdc_serde_deserialize(buffer); + + // Print the deserialized data + pdc_serde_print(deserializedData); + + // Free the memory + 
pdc_serde_free(data); + pdc_serde_free(deserializedData); + free(buffer); + + return 0; +} \ No newline at end of file diff --git a/src/utils/include/pdc_id_pkg.h b/src/commons/utils/include/pdc_id_pkg.h similarity index 100% rename from src/utils/include/pdc_id_pkg.h rename to src/commons/utils/include/pdc_id_pkg.h diff --git a/src/utils/include/pdc_linkedlist.h b/src/commons/utils/include/pdc_linkedlist.h similarity index 99% rename from src/utils/include/pdc_linkedlist.h rename to src/commons/utils/include/pdc_linkedlist.h index 6fa7ce08a..a9de691b0 100644 --- a/src/utils/include/pdc_linkedlist.h +++ b/src/commons/utils/include/pdc_linkedlist.h @@ -43,8 +43,8 @@ #ifndef PDC_LINKEDLIST_H #define PDC_LINKEDLIST_H -#include "pdc_cont_pkg.h" -#include "pdc_cont.h" +// #include "pdc_cont_pkg.h" +// #include "pdc_cont.h" #include "mercury_thread_mutex.h" #include diff --git a/src/utils/include/pdc_malloc.h b/src/commons/utils/include/pdc_malloc.h similarity index 100% rename from src/utils/include/pdc_malloc.h rename to src/commons/utils/include/pdc_malloc.h diff --git a/src/utils/include/pdc_private.h b/src/commons/utils/include/pdc_private.h similarity index 99% rename from src/utils/include/pdc_private.h rename to src/commons/utils/include/pdc_private.h index b6ca3bcfd..b0fa48f14 100644 --- a/src/utils/include/pdc_private.h +++ b/src/commons/utils/include/pdc_private.h @@ -146,7 +146,7 @@ extern pbool_t err_occurred; /* Include a basic profiling interface */ #ifdef ENABLE_PROFILING -#include "stack_ops.h" +#include "pdc_stack_ops.h" #define FUNC_ENTER(X) \ do { \ diff --git a/src/utils/include/pdc_timing.h b/src/commons/utils/include/pdc_timing.h similarity index 100% rename from src/utils/include/pdc_timing.h rename to src/commons/utils/include/pdc_timing.h diff --git a/src/utils/pdc_malloc.c b/src/commons/utils/pdc_malloc.c similarity index 100% rename from src/utils/pdc_malloc.c rename to src/commons/utils/pdc_malloc.c diff --git a/src/utils/pdc_timing.c b/src/commons/utils/pdc_timing.c similarity index 100% rename from src/utils/pdc_timing.c rename to src/commons/utils/pdc_timing.c diff --git a/src/server/CMakeLists.txt b/src/server/CMakeLists.txt index 449b64f10..abe3917c6 100644 --- a/src/server/CMakeLists.txt +++ b/src/server/CMakeLists.txt @@ -48,7 +48,6 @@ add_executable(pdc_server.exe ${PDC_SOURCE_DIR}/src/server/pdc_server_region/pdc_server_region_transfer.c ${PDC_SOURCE_DIR}/src/server/pdc_server_region/pdc_server_region_transfer_metadata_query.c ${PDC_SOURCE_DIR}/src/utils/pdc_region_utils.c - ${PDC_SOURCE_DIR}/src/utils/pdc_timing.c ${PDC_SOURCE_DIR}/src/api/pdc_analysis/pdc_analysis_common.c ${PDC_SOURCE_DIR}/src/api/pdc_transform/pdc_transforms_common.c ${PDC_SOURCE_DIR}/src/api/pdc_analysis/pdc_hist_pkg.c @@ -70,9 +69,9 @@ endif() if(PDC_ENABLE_FASTBIT) message(STATUS "Enabled fastbit") - target_link_libraries(pdc_server.exe mercury pdcprof -lm -ldl ${PDC_EXT_LIB_DEPENDENCIES} ${FASTBIT_LIBRARY}/libfastbit.so) + target_link_libraries(pdc_server.exe mercury ${PDC_COMMONS_LIBRARIES} -lm -ldl ${PDC_EXT_LIB_DEPENDENCIES} ${FASTBIT_LIBRARY}/libfastbit.so) else() - target_link_libraries(pdc_server.exe mercury pdcprof -lm -ldl ${PDC_EXT_LIB_DEPENDENCIES}) + target_link_libraries(pdc_server.exe mercury ${PDC_COMMONS_LIBRARIES} -lm -ldl ${PDC_EXT_LIB_DEPENDENCIES}) endif() diff --git a/src/server/include/pdc_client_server_common.h b/src/server/include/pdc_client_server_common.h index a57e2169f..e125af9a5 100644 --- a/src/server/include/pdc_client_server_common.h +++ 
b/src/server/include/pdc_client_server_common.h @@ -1190,6 +1190,11 @@ hg_proc_pdc_kvtag_t(hg_proc_t proc, void *data) // HG_LOG_ERROR("Proc error"); return ret; } + ret = hg_proc_int8_t(proc, &struct_data->type); + if (ret != HG_SUCCESS) { + // HG_LOG_ERROR("Proc error"); + return ret; + } if (struct_data->size) { switch (hg_proc_get_op(proc)) { case HG_DECODE: diff --git a/src/server/pdc_client_server_common.c b/src/server/pdc_client_server_common.c index a0a7845a4..859ce7bb8 100644 --- a/src/server/pdc_client_server_common.c +++ b/src/server/pdc_client_server_common.c @@ -3096,7 +3096,7 @@ HG_TEST_RPC_CB(region_release, handle) size2 = HG_Bulk_get_size(remote_bulk_handle); if (size != size2) { error = 1; - printf("==PDC_SERVER: local size %lu, remote %lu\n", size, size2); + printf("==PDC_SERVER: local size %llu, remote %llu\n", size, size2); PGOTO_ERROR(HG_OTHER_ERROR, "===PDC SERVER: HG_TEST_RPC_CB(region_release, " "handle) local and remote bulk size does not match"); } @@ -3269,7 +3269,7 @@ HG_TEST_RPC_CB(region_release, handle) size2 = HG_Bulk_get_size(remote_bulk_handle); if (size != size2) { error = 1; - printf("==PDC_SERVER: local size %lu, remote %lu\n", size, size2); + printf("==PDC_SERVER: local size %llu, remote %llu\n", size, size2); /* PGOTO_ERROR(HG_OTHER_ERROR, "===PDC SERVER: HG_TEST_RPC_CB(region_release, * handle) local and remote bulk size does not match"); */ } @@ -6846,10 +6846,10 @@ PDC_kvtag_dup(pdc_kvtag_t *from, pdc_kvtag_t **to) if (from == NULL || to == NULL) PGOTO_DONE(FAIL); - (*to) = (pdc_kvtag_t *)calloc(1, sizeof(pdc_kvtag_t)); - (*to)->name = (char *)malloc(strlen(from->name) + 1); - (*to)->size = from->size; - + (*to) = (pdc_kvtag_t *)calloc(1, sizeof(pdc_kvtag_t)); + (*to)->name = (char *)malloc(strlen(from->name) + 1); + (*to)->size = from->size; + (*to)->type = from->type; (*to)->value = (void *)malloc(from->size); memcpy((void *)(*to)->name, (void *)from->name, strlen(from->name) + 1); memcpy((void *)(*to)->value, (void *)from->value, from->size); diff --git a/src/server/pdc_server.c b/src/server/pdc_server.c index a3020628a..449b31c06 100644 --- a/src/server/pdc_server.c +++ b/src/server/pdc_server.c @@ -1224,6 +1224,7 @@ PDC_Server_checkpoint() fwrite(&key_len, sizeof(int), 1, file); fwrite(kvlist_elt->kvtag->name, key_len, 1, file); fwrite(&kvlist_elt->kvtag->size, sizeof(uint32_t), 1, file); + fwrite(&kvlist_elt->kvtag->type, sizeof(int8_t), 1, file); fwrite(kvlist_elt->kvtag->value, kvlist_elt->kvtag->size, 1, file); } @@ -1403,7 +1404,8 @@ PDC_Server_restart(char *filename) } // init hash table - PDC_Server_init_hash_table(); + // FIXME: check if we need to init the hash table again. 
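For reference, the per-kvtag checkpoint record written by `PDC_Server_checkpoint()` above and read back by `PDC_Server_restart()` below, gathered into one hypothetical helper; the `key_len` computation is an assumption (the hunk only shows the writes), the rest mirrors the patch, including the single-byte write of the new type field.

```
/* Sketch of the per-kvtag checkpoint record; field order must match the
 * restart path below. */
static void
checkpoint_one_kvtag(FILE *file, pdc_kvtag_t *kvtag)
{
    int key_len = (int)strlen(kvtag->name) + 1;      /* assumed: name + NUL   */
    fwrite(&key_len, sizeof(int), 1, file);          /* 1. key length         */
    fwrite(kvtag->name, key_len, 1, file);           /* 2. key bytes          */
    fwrite(&kvtag->size, sizeof(uint32_t), 1, file); /* 3. value size         */
    fwrite(&kvtag->type, sizeof(int8_t), 1, file);   /* 4. value type (new)   */
    fwrite(kvtag->value, kvtag->size, 1, file);      /* 5. value bytes        */
}
```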
+ // PDC_Server_init_hash_table(); if (fread(&n_cont, sizeof(int), 1, file) != 1) { printf("Read failed for n_count\n"); @@ -1492,6 +1494,9 @@ PDC_Server_restart(char *filename) if (fread(&kvtag_list->kvtag->size, sizeof(uint32_t), 1, file) != 1) { printf("Read failed for kvtag_list->kvtag->size\n"); } + if (fread(&kvtag_list->kvtag->type, sizeof(int8_t), 1, file) != 1) { + printf("Read failed for kvtag_list->kvtag->type\n"); + } kvtag_list->kvtag->value = malloc(kvtag_list->kvtag->size); if (fread(kvtag_list->kvtag->value, kvtag_list->kvtag->size, 1, file) != 1) { printf("Read failed for kvtag_list->kvtag->value\n"); diff --git a/src/server/pdc_server_region/pdc_server_region_request_handler.h b/src/server/pdc_server_region/pdc_server_region_request_handler.h index f3dc64cde..04ab48481 100644 --- a/src/server/pdc_server_region/pdc_server_region_request_handler.h +++ b/src/server/pdc_server_region/pdc_server_region_request_handler.h @@ -811,7 +811,7 @@ HG_TEST_RPC_CB(transfer_request, handle) ret_value = HG_Respond(handle, NULL, NULL, &out); if (in.access_type == PDC_WRITE) { ret_value = HG_Bulk_create(info->hg_class, 1, &(local_bulk_args->data_buf), - &(local_bulk_args->total_mem_size), HG_BULK_READWRITE, + (const hg_size_t *)&(local_bulk_args->total_mem_size), HG_BULK_READWRITE, &(local_bulk_args->bulk_handle)); if (ret_value != HG_SUCCESS) { printf("Error at HG_TEST_RPC_CB(transfer_request, handle): @ line %d \n", __LINE__); @@ -881,7 +881,7 @@ HG_TEST_RPC_CB(transfer_request, handle) *((int *)(local_bulk_args->data_buf + sizeof(int)))); */ ret_value = HG_Bulk_create(info->hg_class, 1, &(local_bulk_args->data_buf), - &(local_bulk_args->total_mem_size), HG_BULK_READWRITE, + (const hg_size_t *)&(local_bulk_args->total_mem_size), HG_BULK_READWRITE, &(local_bulk_args->bulk_handle)); if (ret_value != HG_SUCCESS) { printf("Error at HG_TEST_RPC_CB(transfer_request, handle): @ line %d \n", __LINE__); diff --git a/src/tests/cont_tags.c b/src/tests/cont_tags.c index 04132c332..caaf87b49 100644 --- a/src/tests/cont_tags.c +++ b/src/tests/cont_tags.c @@ -36,8 +36,9 @@ main(int argc, char **argv) int rank = 0, size = 1; - char tag_value[128], tag_value2[128], *tag_value_ret; - psize_t value_size; + char tag_value[128], tag_value2[128], *tag_value_ret; + pdc_var_type_t value_type; + psize_t value_size; strcpy(tag_value, "some tag value"); strcpy(tag_value2, "some tag value 2 is longer than tag 1"); @@ -78,31 +79,31 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCcont_put_tag(cont, "some tag", tag_value, strlen(tag_value) + 1); + ret = PDCcont_put_tag(cont, "some tag", tag_value, PDC_STRING, strlen(tag_value) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 1\n"); ret_value = 1; } - ret = PDCcont_put_tag(cont, "some tag 2", tag_value2, strlen(tag_value2) + 1); + ret = PDCcont_put_tag(cont, "some tag 2", tag_value2, PDC_STRING, strlen(tag_value2) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 1\n"); ret_value = 1; } - ret = PDCcont_put_tag(cont2, "some tag", tag_value, strlen(tag_value) + 1); + ret = PDCcont_put_tag(cont2, "some tag", tag_value, PDC_STRING, strlen(tag_value) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 2\n"); ret_value = 1; } - ret = PDCcont_put_tag(cont2, "some tag 2", tag_value2, strlen(tag_value2) + 1); + ret = PDCcont_put_tag(cont2, "some tag 2", tag_value2, PDC_STRING, strlen(tag_value2) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 2\n"); ret_value = 1; } - ret = PDCcont_get_tag(cont, "some tag", 
(void **)&tag_value_ret, &value_size); + ret = PDCcont_get_tag(cont, "some tag", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 1\n"); ret_value = 1; @@ -112,7 +113,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCcont_get_tag(cont, "some tag 2", (void **)&tag_value_ret, &value_size); + ret = PDCcont_get_tag(cont, "some tag 2", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 1\n"); ret_value = 1; @@ -123,7 +124,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCcont_get_tag(cont2, "some tag", (void **)&tag_value_ret, &value_size); + ret = PDCcont_get_tag(cont2, "some tag", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 2\n"); ret_value = 1; @@ -134,7 +135,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCcont_get_tag(cont2, "some tag 2", (void **)&tag_value_ret, &value_size); + ret = PDCcont_get_tag(cont2, "some tag 2", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 2\n"); ret_value = 1; diff --git a/src/tests/kvtag_add_get.c b/src/tests/kvtag_add_get.c index 97eadffed..91686b9be 100644 --- a/src/tests/kvtag_add_get.c +++ b/src/tests/kvtag_add_get.c @@ -33,13 +33,14 @@ int main() { - pdcid_t pdc, cont_prop, cont, obj_prop1, obj_prop2, obj1, obj2; - pdc_kvtag_t kvtag1, kvtag2, kvtag3; - char * v1 = "value1"; - int v2 = 2; - double v3 = 3.45; - void * value1, *value2, *value3; - psize_t value_size; + pdcid_t pdc, cont_prop, cont, obj_prop1, obj_prop2, obj1, obj2; + pdc_kvtag_t kvtag1, kvtag2, kvtag3; + char * v1 = "value1"; + int v2 = 2; + double v3 = 3.45; + pdc_var_type_t type1, type2, type3; + void * value1, *value2, *value3; + psize_t value_size; // create a pdc pdc = PDCinit("pdc"); @@ -88,42 +89,45 @@ main() kvtag1.name = "key1string"; kvtag1.value = (void *)v1; + kvtag1.type = PDC_STRING; kvtag1.size = strlen(v1) + 1; kvtag2.name = "key2int"; kvtag2.value = (void *)&v2; + kvtag1.type = PDC_INT; kvtag2.size = sizeof(int); kvtag3.name = "key3double"; kvtag3.value = (void *)&v3; + kvtag1.type = PDC_DOUBLE; kvtag3.size = sizeof(double); - if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.size) < 0) + if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.type, kvtag1.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); - if (PDCobj_put_tag(obj2, kvtag2.name, kvtag2.value, kvtag2.size) < 0) + if (PDCobj_put_tag(obj2, kvtag2.name, kvtag2.value, kvtag2.type, kvtag2.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); - if (PDCobj_put_tag(obj2, kvtag3.name, kvtag3.value, kvtag3.size) < 0) + if (PDCobj_put_tag(obj2, kvtag3.name, kvtag3.value, kvtag3.type, kvtag3.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); - if (PDCobj_get_tag(obj1, kvtag1.name, (void *)&value1, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj1, kvtag1.name, (void *)&value1, (void *)&type1, (void *)&value_size) < 0) printf("fail to get a kvtag from o1\n"); else printf("successfully retrieved a kvtag [%s] = [%s] from o1\n", kvtag1.name, (char *)value1); - if (PDCobj_get_tag(obj2, kvtag2.name, (void *)&value2, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj2, kvtag2.name, (void *)&value2, (void *)&type2, (void *)&value_size) < 0) printf("fail to get a kvtag from o2\n"); else 
printf("successfully retrieved a kvtag [%s] = [%d] from o2\n", kvtag2.name, *(int *)value2); - if (PDCobj_get_tag(obj2, kvtag3.name, (void *)&value3, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj2, kvtag3.name, (void *)&value3, (void *)&type3, (void *)&value_size) < 0) printf("fail to get a kvtag from o2\n"); else printf("successfully retrieved a kvtag [%s] = [%f] from o2\n", kvtag3.name, *(double *)value3); @@ -135,15 +139,16 @@ main() v1 = "New Value After Delete"; kvtag1.value = (void *)v1; + kvtag1.type = PDC_STRING; kvtag1.size = strlen(v1) + 1; - if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.size) < 0) + if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.type, kvtag1.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); /* PDC_free_kvtag(&value1); */ - if (PDCobj_get_tag(obj1, kvtag1.name, (void *)&value1, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj1, kvtag1.name, (void *)&value1, (void *)&type1, (void *)&value_size) < 0) printf("fail to get a kvtag from o1\n"); else printf("successfully retrieved a kvtag [%s] = [%s] from o1\n", kvtag1.name, (char *)value1); diff --git a/src/tests/kvtag_add_get_benchmark.c b/src/tests/kvtag_add_get_benchmark.c index ee5efdcbf..a682e30ee 100644 --- a/src/tests/kvtag_add_get_benchmark.c +++ b/src/tests/kvtag_add_get_benchmark.c @@ -220,7 +220,8 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value v = i + my_obj_s; for (j = 0; j < n_attr; j++) { sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", v, j); - if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)tag_values[j], tag_value_len + 1) < 0) + if (PDCobj_put_tag(obj_ids[i], tag_name, (void *)tag_values[j], PDC_STRING, tag_value_len + 1) < + 0) printf("fail to add a kvtag to o%" PRIu64 "\n", v); } } @@ -238,12 +239,13 @@ add_n_tags(uint64_t my_obj, uint64_t my_obj_s, uint64_t n_attr, char **tag_value void get_object_tags(pdcid_t obj_id, uint64_t obj_name_v, uint64_t n_attr, void **tag_values, uint64_t *value_size) { - uint64_t i; - char tag_name[256]; + uint64_t i; + char tag_name[256]; + pdc_var_type_t tag_type; for (i = 0; i < n_attr; i++) { sprintf(tag_name, "tag%" PRIu64 ".%" PRIu64 "", obj_name_v, i); - if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[i], (void *)&value_size[i]) < 0) + if (PDCobj_get_tag(obj_id, tag_name, (void **)&tag_values[i], &tag_type, (void *)&value_size[i]) < 0) printf("fail to get a kvtag from o%" PRIu64 "\n", obj_name_v); } } diff --git a/src/tests/kvtag_add_get_scale.c b/src/tests/kvtag_add_get_scale.c index 280eda25b..dc14c597e 100644 --- a/src/tests/kvtag_add_get_scale.c +++ b/src/tests/kvtag_add_get_scale.c @@ -76,7 +76,8 @@ main(int argc, char *argv[]) double stime, total_time, percent_time; pdc_kvtag_t kvtag; void ** values; - size_t value_size; + pdc_var_type_t value_type; + size_t value_size; #ifdef ENABLE_MPI MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &proc_num); @@ -167,6 +168,7 @@ main(int argc, char *argv[]) // Add tags kvtag.name = "Group"; kvtag.value = (void *)&v; + kvtag.type = PDC_INT; kvtag.size = sizeof(int); #ifdef ENABLE_MPI @@ -175,7 +177,7 @@ main(int argc, char *argv[]) #endif for (i = 0; i < my_add_tag; i++) { v = i + my_add_tag_s; - if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.size) < 0) + if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.type, kvtag.size) < 0) printf("fail to add a kvtag to o%d\n", i + my_obj_s); if (i % tag_1percent == 0) { @@ -208,7 +210,8 @@ main(int argc, char *argv[]) 
stime = MPI_Wtime(); #endif for (i = 0; i < my_query; i++) { - if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_size) < 0) + if (PDCobj_get_tag(obj_ids[i], kvtag.name, (void *)&values[i], (void *)&value_type, + (void *)&value_size) < 0) printf("fail to get a kvtag from o%d\n", i + my_query_s); if (i % query_1percent == 0) { diff --git a/src/tests/kvtag_get.c b/src/tests/kvtag_get.c index a8fcf70f1..7ca1e7c0e 100644 --- a/src/tests/kvtag_get.c +++ b/src/tests/kvtag_get.c @@ -33,9 +33,10 @@ int main() { - pdcid_t pdc, cont_prop, cont, obj_prop1, obj_prop2, obj1, obj2; - pdc_kvtag_t *value1, *value2, *value3; - psize_t value_size; + pdcid_t pdc, cont_prop, cont, obj_prop1, obj_prop2, obj1, obj2; + pdc_kvtag_t * value1, *value2, *value3; + pdc_var_type_t type1, type2, type3; + psize_t value_size; // create a pdc pdc = PDCinit("pdc"); @@ -82,17 +83,17 @@ main() else printf("Fail to create object @ line %d!\n", __LINE__); - if (PDCobj_get_tag(obj1, "key1string", (void *)&value1, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj1, "key1string", (void *)&value1, (void *)&type1, (void *)&value_size) < 0) printf("fail to get a kvtag from o1\n"); else printf("successfully retrieved a kvtag [%s] = [%s] from o1\n", value1->name, (char *)value1->value); - if (PDCobj_get_tag(obj2, "key2int", (void *)&value2, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj2, "key2int", (void *)&value2, (void *)&type2, (void *)&value_size) < 0) printf("fail to get a kvtag from o2\n"); else printf("successfully retrieved a kvtag [%s] = [%d] from o2\n", value2->name, *(int *)value2->value); - if (PDCobj_get_tag(obj2, "key3double", (void *)&value3, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj2, "key3double", (void *)&value3, (void *)&type3, (void *)&value_size) < 0) printf("fail to get a kvtag from o2\n"); else printf("successfully retrieved a kvtag [%s] = [%f] from o2\n", value3->name, @@ -100,7 +101,7 @@ main() PDC_free_kvtag(&value1); - if (PDCobj_get_tag(obj1, "key1string", (void *)&value1, (void *)&value_size) < 0) + if (PDCobj_get_tag(obj1, "key1string", (void *)&value1, (void *)&type1, (void *)&value_size) < 0) printf("fail to get a kvtag from o1\n"); else printf("successfully retrieved a kvtag [%s] = [%s] from o1\n", value1->name, (char *)value1->value); diff --git a/src/tests/kvtag_query.c b/src/tests/kvtag_query.c index 88e6621d7..cf1e80dcb 100644 --- a/src/tests/kvtag_query.c +++ b/src/tests/kvtag_query.c @@ -91,32 +91,35 @@ main() kvtag1.name = "key1string"; kvtag1.value = (void *)v1; + kvtag1.type = PDC_STRING; kvtag1.size = strlen(v1) + 1; kvtag2.name = "key2int"; kvtag2.value = (void *)&v2; + kvtag2.type = PDC_INT; kvtag2.size = sizeof(int); kvtag3.name = "key3double"; kvtag3.value = (void *)&v3; + kvtag3.type = PDC_DOUBLE; kvtag3.size = sizeof(double); - if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.size) < 0) + if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.type, kvtag1.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); - if (PDCobj_put_tag(obj1, kvtag2.name, kvtag2.value, kvtag2.size) < 0) + if (PDCobj_put_tag(obj1, kvtag2.name, kvtag2.value, kvtag2.type, kvtag2.size) < 0) printf("fail to add a kvtag to o1\n"); else printf("successfully added a kvtag to o1\n"); - if (PDCobj_put_tag(obj2, kvtag2.name, kvtag2.value, kvtag2.size) < 0) + if (PDCobj_put_tag(obj2, kvtag2.name, kvtag2.value, kvtag2.type, kvtag2.size) < 0) printf("fail to add a kvtag to o2\n"); else printf("successfully added a kvtag to 
o2\n"); - if (PDCobj_put_tag(obj2, kvtag3.name, kvtag3.value, kvtag3.size) < 0) + if (PDCobj_put_tag(obj2, kvtag3.name, kvtag3.value, kvtag3.type, kvtag3.size) < 0) printf("fail to add a kvtag to o2\n"); else printf("successfully added a kvtag to o2\n"); diff --git a/src/tests/kvtag_query_scale.c b/src/tests/kvtag_query_scale.c index a42cd5a5a..b9e4a6ffa 100644 --- a/src/tests/kvtag_query_scale.c +++ b/src/tests/kvtag_query_scale.c @@ -129,6 +129,7 @@ main(int argc, char *argv[]) // Add tags kvtag.name = "Group"; kvtag.value = (void *)&v; + kvtag.type = PDC_INT; kvtag.size = sizeof(int); for (iter = 0; iter < round; iter++) { @@ -136,7 +137,7 @@ main(int argc, char *argv[]) v = iter; for (i = 0; i < my_add_tag; i++) { - if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.size) < 0) + if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.type, kvtag.size) < 0) printf("fail to add a kvtag to o%d\n", i + my_obj_s); } @@ -153,6 +154,7 @@ main(int argc, char *argv[]) kvtag.name = "Group"; kvtag.value = (void *)&v; + kvtag.type = PDC_INT; kvtag.size = sizeof(int); for (iter = 0; iter < round; iter++) { diff --git a/src/tests/obj_tags.c b/src/tests/obj_tags.c index 2651cf64e..5906e3dd9 100644 --- a/src/tests/obj_tags.c +++ b/src/tests/obj_tags.c @@ -42,9 +42,10 @@ main(int argc, char **argv) dims[0] = 64; dims[1] = 3; dims[2] = 4; - char tag_value[128], tag_value2[128], *tag_value_ret; - char cont_name[128], obj_name1[128], obj_name2[128]; - psize_t value_size; + char tag_value[128], tag_value2[128], *tag_value_ret; + char cont_name[128], obj_name1[128], obj_name2[128]; + pdc_var_type_t value_type; + psize_t value_size; strcpy(tag_value, "some tag value"); strcpy(tag_value2, "some tag value 2 is longer"); @@ -119,30 +120,30 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCobj_put_tag(obj1, "some tag", tag_value, strlen(tag_value) + 1); + ret = PDCobj_put_tag(obj1, "some tag", tag_value, PDC_STRING, strlen(tag_value) + 1); if (ret != SUCCEED) { printf("Put tag failed at object 1\n"); ret_value = 1; } - ret = PDCobj_put_tag(obj1, "some tag 2", tag_value2, strlen(tag_value2) + 1); + ret = PDCobj_put_tag(obj1, "some tag 2", tag_value2, PDC_STRING, strlen(tag_value2) + 1); if (ret != SUCCEED) { printf("Put tag failed at object 1\n"); ret_value = 1; } - ret = PDCobj_put_tag(obj2, "some tag", tag_value, strlen(tag_value) + 1); + ret = PDCobj_put_tag(obj2, "some tag", tag_value, PDC_STRING, strlen(tag_value) + 1); if (ret != SUCCEED) { printf("Put tag failed at object 2\n"); ret_value = 1; } - ret = PDCobj_put_tag(obj2, "some tag 2", tag_value2, strlen(tag_value2) + 1); + ret = PDCobj_put_tag(obj2, "some tag 2", tag_value2, PDC_STRING, strlen(tag_value2) + 1); if (ret != SUCCEED) { printf("Put tag failed at object 2\n"); ret_value = 1; } - ret = PDCobj_get_tag(obj1, "some tag", (void **)&tag_value_ret, &value_size); + ret = PDCobj_get_tag(obj1, "some tag", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at object 1\n"); ret_value = 1; @@ -153,7 +154,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCobj_get_tag(obj1, "some tag 2", (void **)&tag_value_ret, &value_size); + ret = PDCobj_get_tag(obj1, "some tag 2", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at object 1\n"); ret_value = 1; @@ -164,7 +165,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCobj_get_tag(obj2, "some tag", (void **)&tag_value_ret, &value_size); + ret = PDCobj_get_tag(obj2, "some 
tag", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at object 2\n"); ret_value = 1; @@ -175,7 +176,7 @@ main(int argc, char **argv) ret_value = 1; } - ret = PDCobj_get_tag(obj2, "some tag 2", (void **)&tag_value_ret, &value_size); + ret = PDCobj_get_tag(obj2, "some tag 2", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at object 2\n"); ret_value = 1; diff --git a/src/tests/pdc_transforms_lib.c b/src/tests/pdc_transforms_lib.c index f1a83f742..6397bdc70 100644 --- a/src/tests/pdc_transforms_lib.c +++ b/src/tests/pdc_transforms_lib.c @@ -6,21 +6,26 @@ * >> pdc_public.h * * typedef enum { - * PDC_UNKNOWN = -1, - * PDC_INT = 0, - * PDC_FLOAT = 1, - * PDC_DOUBLE = 2, - * PDC_STRING = 3, - * PDC_COMPOUND = 4, - * PDC_ENUM = 5, - * PDC_ARRAY = 6, - * PDC_UINT = 7, - * PDC_INT64 = 8, - * PDC_UINT64 = 9, - * PDC_INT16 = 10, - * PDC_INT8 = 11, - * NCLASSES = 12 - * } PDC_var_type_t; + * PDC_UNKNOWN = -1, * error * + * PDC_INT = 0, * integer types (identical to int32_t) * + * PDC_FLOAT = 1, * floating-point types * + * PDC_DOUBLE = 2, * double types * + * PDC_CHAR = 3, * character types * + * PDC_STRING = 4, * string types * + * PDC_BOOLEAN = 5, * boolean types * + * PDC_SHORT = 6, * short types * + * PDC_UINT = 7, * unsigned integer types (identical to uint32_t) * + * PDC_INT64 = 8, * 64-bit integer types * + * PDC_UINT64 = 9, * 64-bit unsigned integer types * + * PDC_INT16 = 10, * 16-bit integer types * + * PDC_INT8 = 11, * 8-bit integer types * + * PDC_UINT8 = 12, * 8-bit unsigned integer types * + * PDC_UINT16 = 13, * 16-bit unsigned integer types * + * PDC_LONG = 14, * long types * + * PDC_VOID_PTR = 15, * void pointer type * + * PDC_SIZE_T = 16, * size_t type * + * TYPE_COUNT = 17 * this is the number of var types and has to be the last * + * } pdc_c_var_type_t; */ static int diff --git a/src/tests/read_obj_shared.c b/src/tests/read_obj_shared.c index 4b0058dbd..6f56965d8 100644 --- a/src/tests/read_obj_shared.c +++ b/src/tests/read_obj_shared.c @@ -163,7 +163,7 @@ main(int argc, char **argv) offset[0] = rank * my_data_size; local_offset[0] = 0; mysize[0] = my_data_size; - printf("rank %d offset = %lu, length = %lu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); + printf("rank %d offset = %llu, length = %llu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); local_region = PDCregion_create(ndim, local_offset, mysize); global_region = PDCregion_create(ndim, offset, mysize); @@ -220,7 +220,7 @@ main(int argc, char **argv) offset[0] = rank * my_data_size; local_offset[0] = 0; mysize[0] = my_data_size; - printf("rank %d offset = %lu, length = %lu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); + printf("rank %d offset = %llu, length = %llu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); local_region = PDCregion_create(ndim, local_offset, mysize); global_region = PDCregion_create(ndim, offset, mysize); diff --git a/src/tests/vpicio_mts.c b/src/tests/vpicio_mts.c index eec66215b..65b7b4dc8 100644 --- a/src/tests/vpicio_mts.c +++ b/src/tests/vpicio_mts.c @@ -199,7 +199,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t0 = MPI_Wtime(); if (rank == 0) { - printf("\n#Step %d\n", i); + printf("\n#Step %llu\n", i); } #endif PDCprop_set_obj_time_step(obj_prop_xx, i); diff --git a/src/utils/pdc_interface.c b/src/utils/pdc_interface.c index 4cca6b0bc..218d6518e 100644 --- a/src/utils/pdc_interface.c +++ b/src/utils/pdc_interface.c @@ -25,6 +25,8 @@ 
#include "pdc_malloc.h" #include "pdc_id_pkg.h" #include "pdc_interface.h" +#include "pdc_cont_pkg.h" +#include "pdc_cont.h" #include #include diff --git a/tools/pdc_export.c b/tools/pdc_export.c index a67f444e3..9c0b572b2 100644 --- a/tools/pdc_export.c +++ b/tools/pdc_export.c @@ -7,6 +7,7 @@ #include #include #include "hdf5.h" +#include "pdc_generic.h" // #define ENABLE_MPI 1 @@ -240,45 +241,11 @@ get_data_type(int data_type) if (data_type == -1) { return "PDC_UNKNOWN"; } - else if (data_type == 0) { - return "PDC_INT"; - } - else if (data_type == 1) { - return "PDC_FLOAT"; - } - else if (data_type == 2) { - return "PDC_DOUBLE"; - } - else if (data_type == 3) { - return "PDC_CHAR"; - } - else if (data_type == 4) { - return "PDC_COMPOUND"; - } - else if (data_type == 5) { - return "PDC_ENUM"; - } - else if (data_type == 6) { - return "PDC_ARRAY"; - } - else if (data_type == 7) { - return "PDC_UINT"; - } - else if (data_type == 8) { - return "PDC_INT64"; - } - else if (data_type == 9) { - return "PDC_UINT64"; - } - else if (data_type == 10) { - return "PDC_INT16"; - } - else if (data_type == 11) { - return "PDC_INT16"; - } - else { + char *result = get_enum_name_by_dtype(data_type); + if (result == NULL) { return "NULL"; } + return result; } char * diff --git a/tools/pdc_import.c b/tools/pdc_import.c index f51f587c8..adf8cd46e 100644 --- a/tools/pdc_import.c +++ b/tools/pdc_import.c @@ -711,9 +711,10 @@ do_attr(hid_t aid, pdcid_t obj_id) char buf[MAX_NAME] = {0}; char read_buf[TAG_LEN_MAX] = {0}; // pdc_kvtag_t kvtag1; - char * tag_name; - void * tag_value; - size_t tag_size; + char * tag_name; + void * tag_value; + pdc_var_type_t value_type; + size_t tag_size; /* * Get the name of the attribute. @@ -735,7 +736,7 @@ do_attr(hid_t aid, pdcid_t obj_id) else { tag_size = H5Tget_size(atype); } - PDCobj_put_tag(obj_id, tag_name, tag_value, tag_size); + PDCobj_put_tag(obj_id, tag_name, tag_value, value_type, tag_size); /* * Get attribute information: dataspace, data type diff --git a/tools/pdc_ls.c b/tools/pdc_ls.c index 9d3db32fe..5963b33e2 100644 --- a/tools/pdc_ls.c +++ b/tools/pdc_ls.c @@ -201,45 +201,11 @@ get_data_type(int data_type) if (data_type == -1) { return "PDC_UNKNOWN"; } - else if (data_type == 0) { - return "PDC_INT"; - } - else if (data_type == 1) { - return "PDC_FLOAT"; - } - else if (data_type == 2) { - return "PDC_DOUBLE"; - } - else if (data_type == 3) { - return "PDC_CHAR"; - } - else if (data_type == 4) { - return "PDC_COMPOUND"; - } - else if (data_type == 5) { - return "PDC_ENUM"; - } - else if (data_type == 6) { - return "PDC_ARRAY"; - } - else if (data_type == 7) { - return "PDC_UINT"; - } - else if (data_type == 8) { - return "PDC_INT64"; - } - else if (data_type == 9) { - return "PDC_UINT64"; - } - else if (data_type == 10) { - return "PDC_INT16"; - } - else if (data_type == 11) { - return "PDC_INT16"; - } - else { + char *result = get_enum_name_by_dtype(data_type); + if (result == NULL) { return "NULL"; } + return result; } char * From ffaeb3e35ed2a6aa6b9ab31921b2f0fb19483f28 Mon Sep 17 00:00:00 2001 From: Zhang Wei Date: Wed, 21 Jun 2023 19:33:30 -0400 Subject: [PATCH 196/806] LLSM Importer update: new job script + new data type update on kvtags (#92) * remove unnecessary install block from CMakeLists.txt * update output * Revert "update output" This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd. * build kvtag_add_get_scale * comment off free * update code * 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. 
moving work assigning block downwards right before creating objects \n 4. everything is tested working * do while loop added, tested with 1m object and works * 1m objects test works, 10m object test fail as the original also fails * add new executable to test set * enlarge PDC_SERVER_ID_INTERVAL * update code * update console args * add p search test * add console arg for changing number of attributes per object * free allocated memory * fix query count issue * fix attr length definition * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * fix data type * fix data type * fix data type * add client side statistics * add client side statistics * fix format * clang formatter * update CMake * update CMake * update CMake * free allocated memory properly * clang format * clang format * clang-format-10 * change file name * address review comments * update llsm importer * update llsm importer * update server checkpoint intervals * update gitignore * adding job scripts * adding one debugging msg * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update output for uint64_t * add scripts * update output for uint64_t * update output for uint64_t * update output for uint64_t * update scripts * update scripts * delete debugging message * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * update tag names * update tag names * update query startingpos * update query startingpos * update job scripts * add progressive timing for kvtag_add_get_scale * fix iteration count in final report * update job scripts and benckmark program * update message format * update message format * update message format * update message format * clang format * update job scripts * comment off object/container close procedure in benchmark to save node hours * change the max number of object to 1M * change the max length of attribute value * change the max length of attribute value * llsm tiff import test * llsm tiff import test * llsm tiff import test * llsm tiff import test * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update cmake and llsm_importer * update cmake and llsm_importer * close if in cmake * cmake fix tiff * cmake policy to suppress warning * add pdc include dir * update code * update code * update code * update code * update code * update code * update array generating method * update array generating method * update array generating method * update array generating method * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * fix return type * fix return type * add timing * add timing * fix output * llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader * fix vairable name * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * add scripts * add scripts * add scripts * debugging for nonMPI program * debugging for nonMPI program * debugging for nonMPI program * clang format, without PDC, everything works perfectly. 
program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created * enable MPI * enable MPI * enlarge BCase size * enlarge BCase size * enlarge BCase size * resolve bcast count * llsm data path in script * llsm data path in script * update csv reader * update csv reader * update csv reader * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * enlarge max write * update pdc * update pdc * update pdc * update pdc * update pdc_import.c * update pdc_import.c * update pdc_export.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update tools/cmake * clang format * clang format * added a tutorial for llsm_importer * added a tutorial for llsm_importer * make sure the line feed is included for string attribute * update timing for overall completion time * update formatting * update metrics * forcibly enable openmp * adding C flags from the mex compiler * Update .gitlab-ci.yml * updated code * clang format * llsm_importer (#1) formatter on llsm_importer * add type for kvtag structure (#2) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * Feature/metadata type (#3) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * remove unnecessary header files from installation * resolve conflict * add important files * clang formatting * update cmake * update * print numWorkers * update scripts * update script * update script * formatting * update llsm_tools.c * remove unnecessary hash table init --------- Co-authored-by: Houjun Tang Co-authored-by: Jean Luca Bez --- scripts/llsm_importer/gen_script.sh | 24 ++++++++- scripts/llsm_importer/template.sh | 64 +++++++++++++++++++----- src/api/pdc_region/pdc_region_transfer.c | 3 +- src/server/pdc_server.c | 4 -- tools/llsm/parallelReadTiff.c | 8 +++ tools/llsm_importer.c | 22 ++++---- 6 files changed, 95 insertions(+), 30 deletions(-) diff --git a/scripts/llsm_importer/gen_script.sh b/scripts/llsm_importer/gen_script.sh index 9d310f2bb..efefd819f 100755 --- a/scripts/llsm_importer/gen_script.sh +++ b/scripts/llsm_importer/gen_script.sh @@ -1,5 +1,21 @@ #!/bin/bash -N_THREAD=NO + +# Per node configuration of your HPC system. 
+MAX_PYSICAL_CORE=128 +MAX_HYPERTHREADING=2 + +# Designated number of threads per process on each node +# (this should be associated with -c option in srun) +NUM_THREAD_PER_SERVER_PROC=128 +NUM_THREAD_PER_CLIENT_PROC=64 + + +# Designated number of processes for server anc client on each node +# (this should be associated with -n option in srun) +NUM_SERVER_PROC_PER_NODE=1 +NUM_CLIENT_PROC_PER_NODE=2 + + MAX_NODE=512 MAX_ATTR=1024 MAX_ATTRLEN=1000 @@ -13,6 +29,12 @@ for (( i = 1; i <= $MAX_NODE; i*=2 )); do cp template.sh $TARGET sed -i "s/JOBNAME/${JOBNAME}/g" $TARGET sed -i "s/NODENUM/${i}/g" $TARGET + sed -i "s/MPHYSICALCORE/${MAX_PYSICAL_CORE}/g" $TARGET + sed -i "s/MHYPERTHREADING/${MAX_HYPERTHREADING}/g" $TARGET + sed -i "s/N_SERVER_PROC/${NUM_SERVER_PROC_PER_NODE}/g" $TARGET + sed -i "s/N_CLIENT_PROC/${NUM_CLIENT_PROC_PER_NODE}/g" $TARGET + sed -i "s/NTHREAD_PER_SPROC/${NUM_THREAD_PER_SERVER_PROC}/g" $TARGET + sed -i "s/NTHREAD_PER_CPROC/${NUM_THREAD_PER_CLIENT_PROC}/g" $TARGET if [[ "$i" -gt "16" ]]; then sed -i "s/REG//g" $TARGET else diff --git a/scripts/llsm_importer/template.sh b/scripts/llsm_importer/template.sh index d736b4076..834b8e2e5 100755 --- a/scripts/llsm_importer/template.sh +++ b/scripts/llsm_importer/template.sh @@ -13,24 +13,52 @@ # export PDC_DEBUG=0 - -export PDC_TMPDIR=$SCRATCH/data/pdc/conf - -rm -rf $PDC_TMPDIR/* - +# This is a script for running PDC in shared mode on Perlmutter +# When running in Shared mode, the client processes and server processes are running on the same node. +# By alternating the number of server processes and the number client processes, you should be able to change the C/S ratio. +# You can simply set the number of server processes, and let the script to calculate the number of client processes. + +# Per node configuration of your HPC system. +MAX_PYSICAL_CORE=MPHYSICALCORE +MAX_HYPERTHREADING=MHYPERTHREADING + +# Designated number of threads per process on each node +# (this should be associated with -c option in srun) +NUM_THREAD_PER_SERVER_PROC=NTHREAD_PER_SPROC +NUM_THREAD_PER_CLIENT_PROC=NTHREAD_PER_CPROC + + +# Designated number of processes for server anc client on each node +# (this should be associated with -n option in srun) +NUM_SERVER_PROC_PER_NODE=N_SERVER_PROC +NUM_CLIENT_PROC_PER_NODE=N_CLIENT_PROC + +# test if the number of threads is no larger than the total number of logical cores +TOTAL_NUM_PROC_PER_NODE=$((NUM_THREAD_PER_SERVER_PROC * NUM_SERVER_PROC_PER_NODE + NUM_THREAD_PER_CLIENT_PROC * NUM_CLIENT_PROC_PER_NODE)) +TOTAL_NUM_LOGICAL_CORE_PER_NODE=$((MAX_PYSICAL_CORE * MAX_HYPERTHREADING)) +if [[ "$TOTAL_NUM_PROC_PER_NODE" -gt "$TOTAL_NUM_LOGICAL_CORE_PER_NODE" ]]; then + echo "Error: TOTAL_NUM_PROC_PER_NODE is larger than TOTAL_NUM_LOGICAL_CORE_PER_NODE" + TOTAL_AVAILABLE_CORE=$((TOTAL_NUM_LOGICAL_CORE_PER_NODE - NUM_THREAD_PER_SERVER_PROC * NUM_SERVER_PROC_PER_NODE)) + NUM_CLIENT_PROC_PER_NODE=$(( TOTAL_AVAILABLE_CORE / NUM_THREAD_PER_CLIENT_PROC)) + echo "fixing the number of client processes to $NUM_CLIENT_PROC_PER_NODE" +fi + +# Set the number of times the test should be repeated. REPEAT=1 +# calculate the number of total processes for both server side and client side. 
N_NODE=NODENUM -NCLIENT=1 -# NCLIENT=126 +NCLIENT=$((NUM_CLIENT_PROC_PER_NODE * N_NODE)) +NSERVER=$((NUM_SERVER_PROC_PER_NODE * N_NODE)) +# clean up the PDC tmp directory +export PDC_TMPDIR=$SCRATCH/data/pdc/conf +rm -rf $PDC_TMPDIR/* export PDC_TMPDIR=${PDC_TMPDIR}/$N_NODE mkdir -p $PDC_TMPDIR -let TOTALPROC=$NCLIENT*$N_NODE - EXECPATH=/global/cfs/cdirs/m2621/wzhang5/perlmutter/install/pdc/share/test/bin -TOOLPATH=/global/cfs/cdirs/m2621/wzhang5/perlmutter/source/pdc_llsm/tools/build +TOOLPATH=/global/cfs/cdirs/m2621/wzhang5/perlmutter/source/pdc/tools/build SERVER=$EXECPATH/pdc_server.exe CLIENT=$TOOLPATH/llsm_importer CLOSE=$EXECPATH/close_server @@ -44,19 +72,31 @@ IMGLIST_PATH=${LLSM_DATA_PATH}/ImageList_from_encoder.csv date +# OpenMP settings: +# set the OPENMP thread number to the smaller number between $NUM_THREAD_PER_SERVER_PROC and $NUM_THREAD_PER_CLIENT_PROC +export OMP_NUM_THREADS=$((NUM_THREAD_PER_SERVER_PROC < NUM_THREAD_PER_CLIENT_PROC ? NUM_THREAD_PER_SERVER_PROC : NUM_THREAD_PER_CLIENT_PROC)) +export OMP_PLACES=threads +export OMP_PROC_BIND=close + +echo "OMP_NUM_THREADS=$OMP_NUM_THREADS" +echo "NSERVER=$NSERVER" +echo "NUM_THREAD_PER_SERVER_PROC=$NUM_THREAD_PER_SERVER_PROC" +echo "NCLIENT=$NCLIENT" +echo "NUM_THREAD_PER_CLIENT_PROC=$NUM_THREAD_PER_CLIENT_PROC" + echo "" echo "=============" echo "$i Init server" echo "=============" -stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $((N_NODE*1)) -c 2 --cpu_bind=cores $SERVER & +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $NSERVER -c $NUM_THREAD_PER_SERVER_PROC --cpu_bind=cores $SERVER & sleep 5 echo "============================================" echo "KVTAGS with $N_NODE nodes" echo "============================================" -stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $TOTALPROC -c 2 --cpu_bind=cores $CLIENT -f $IMGLIST_PATH +stdbuf -i0 -o0 -e0 srun -N $N_NODE -n $NCLIENT -c $NUM_THREAD_PER_CLIENT_PROC --cpu_bind=cores $CLIENT -f $IMGLIST_PATH echo "" echo "=================" diff --git a/src/api/pdc_region/pdc_region_transfer.c b/src/api/pdc_region/pdc_region_transfer.c index 775947f5d..87f9f3625 100644 --- a/src/api/pdc_region/pdc_region_transfer.c +++ b/src/api/pdc_region/pdc_region_transfer.c @@ -1446,8 +1446,7 @@ PDCregion_transfer_start(pdcid_t transfer_request_id) // Pack local region to a contiguous memory buffer unit = transfer_request->unit; - // Convert user buf into a contiguous buffer called new_buf, which is determined by the shape of local - // objects. + // Convert user buf into a contiguous buffer called , which is determined by the shape of local objects. pack_region_buffer(transfer_request->buf, transfer_request->obj_dims, transfer_request->total_data_size, transfer_request->local_region_ndim, transfer_request->local_region_offset, transfer_request->local_region_size, unit, transfer_request->access_type, diff --git a/src/server/pdc_server.c b/src/server/pdc_server.c index 449b31c06..e08882120 100644 --- a/src/server/pdc_server.c +++ b/src/server/pdc_server.c @@ -1403,10 +1403,6 @@ PDC_Server_restart(char *filename) printf("Error getting slurm job id from SLURM_JOB_ID!\n"); } - // init hash table - // FIXME: check if we need to init the hash table again. 
- // PDC_Server_init_hash_table(); - if (fread(&n_cont, sizeof(int), 1, file) != 1) { printf("Read failed for n_count\n"); } diff --git a/tools/llsm/parallelReadTiff.c b/tools/llsm/parallelReadTiff.c index c81928584..cc4026ac9 100644 --- a/tools/llsm/parallelReadTiff.c +++ b/tools/llsm/parallelReadTiff.c @@ -31,6 +31,8 @@ readTiffParallelBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, vo int32_t batchSize = (z - 1) / numWorkers + 1; uint64_t bytes = bits / 8; + printf("numWorkers %d\n", numWorkers); + int32_t w; #ifdef ENABLE_OPENMP #pragma omp parallel for @@ -105,6 +107,8 @@ readTiffParallel(uint64_t x, uint64_t y, uint64_t z, const char *fileName, void int32_t batchSize = (z - 1) / numWorkers + 1; uint64_t bytes = bits / 8; + printf("numWorkers %d\n", numWorkers); + uint16_t compressed = 1; TIFF * tif = TIFFOpen(fileName, "r"); TIFFGetField(tif, TIFFTAG_COMPRESSION, &compressed); @@ -321,6 +325,8 @@ readTiffParallel2DBak(uint64_t x, uint64_t y, uint64_t z, const char *fileName, int32_t batchSize = (y - 1) / numWorkers + 1; uint64_t bytes = bits / 8; + printf("numWorkers %d\n", numWorkers); + int32_t w; #ifdef ENABLE_OPENMP #pragma omp parallel for @@ -403,6 +409,8 @@ readTiffParallel2D(uint64_t x, uint64_t y, uint64_t z, const char *fileName, voi uint8_t errBak = 0; char errString[10000]; + printf("numWorkers %d\n", numWorkers); + #ifdef ENABLE_OPENMP #pragma omp parallel for #endif diff --git a/tools/llsm_importer.c b/tools/llsm_importer.c index ff358fcd3..ea5097278 100644 --- a/tools/llsm_importer.c +++ b/tools/llsm_importer.c @@ -138,14 +138,14 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) switch (data_type) { case 'i': int ivalue = atoi(field_value); - PDCobj_put_tag(cur_obj_g, field_name, &ivalue, sizeof(int)); + PDCobj_put_tag(cur_obj_g, field_name, &ivalue, PDC_INT, sizeof(int)); break; case 'f': double fvalue = atof(field_value); - PDCobj_put_tag(cur_obj_g, field_name, &fvalue, sizeof(double)); + PDCobj_put_tag(cur_obj_g, field_name, &fvalue, PDC_DOUBLE, sizeof(double)); break; case 's': - PDCobj_put_tag(cur_obj_g, field_name, field_value, sizeof(char) * (strlen(field_value) + 1)); + PDCobj_put_tag(cur_obj_g, field_name, field_value, PDC_STRING, strlen(field_value)); break; default: break; @@ -154,14 +154,14 @@ import_to_pdc(image_info_t *image_info, csv_cell_t *fileName_cell) } // add extra metadata tags based on the image_info struct - PDCobj_put_tag(cur_obj_g, "x", &(image_info->x), sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "y", &(image_info->y), sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "z", &(image_info->z), sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "bits", &(image_info->bits), sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "startSlice", &(image_info->startSlice), sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "stripeSize", &(image_info->stripeSize), sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "is_imageJ", &(image_info->is_imageJ), sizeof(uint64_t)); - PDCobj_put_tag(cur_obj_g, "imageJ_Z", &(image_info->imageJ_Z), sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "x", &(image_info->x), PDC_UINT64, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "y", &(image_info->y), PDC_UINT64, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "z", &(image_info->z), PDC_UINT64, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "bits", &(image_info->bits), PDC_UINT64, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "startSlice", &(image_info->startSlice), PDC_UINT64, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "stripeSize", 
&(image_info->stripeSize), PDC_UINT64, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "is_imageJ", &(image_info->is_imageJ), PDC_UINT64, sizeof(uint64_t)); + PDCobj_put_tag(cur_obj_g, "imageJ_Z", &(image_info->imageJ_Z), PDC_UINT64, sizeof(uint64_t)); // close object PDCobj_close(cur_obj_g); From 39fcd7c2a6d50bd342973db58e1346cfb91f52d8 Mon Sep 17 00:00:00 2001 From: Jean Luca Bez Date: Fri, 23 Jun 2023 09:09:38 -0700 Subject: [PATCH 197/806] Update .gitlab-ci.yml --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 39f940487..90d60224c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -164,7 +164,7 @@ perlmutter-metrics: variables: PDC_N_NODES: 64 PDC_N_CLIENTS: 127 - SCHEDULER_PARAMETERS: "-A m1248 --qos=debug --constraint=cpu --tasks-per-node=${PDC_N_CLIENTS} -N ${PDC_N_NODES} -t 00:30:00" + SCHEDULER_PARAMETERS: "-A m1248 --qos=regular --constraint=cpu --tasks-per-node=${PDC_N_CLIENTS} -N ${PDC_N_NODES} -t 00:30:00" SUPERCOMPUTER: "perlmutter" MERCURY_DIR: "/global/cfs/cdirs/m1248/pdc-perlmutter/mercury/install" PDC_TMPDIR: "${PDC_BUILD_PATH}/pdc-tmp-metrics" From 46abeed40fbbfc2cbabfbf94b8d9eac46a52d9e1 Mon Sep 17 00:00:00 2001 From: Zhang Wei Date: Fri, 23 Jun 2023 17:27:19 -0400 Subject: [PATCH 198/806] fix warnings, commenting off 'find_path' and 'find_library' for Mercury in src/commons/CMakeLists.txt (#93) * Revert "update output" This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd. * build kvtag_add_get_scale * comment off free * update code * 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. moving work assigning block downwards right before creating objects \n 4. everything is tested working * do while loop added, tested with 1m object and works * 1m objects test works, 10m object test fail as the original also fails * add new executable to test set * enlarge PDC_SERVER_ID_INTERVAL * update code * update console args * add p search test * add console arg for changing number of attributes per object * free allocated memory * fix query count issue * fix attr length definition * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * fix data type * fix data type * fix data type * add client side statistics * add client side statistics * fix format * clang formatter * update CMake * update CMake * update CMake * free allocated memory properly * clang format * clang format * clang-format-10 * change file name * address review comments * update llsm importer * update llsm importer * update server checkpoint intervals * update gitignore * adding job scripts * adding one debugging msg * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update output for uint64_t * add scripts * update output for uint64_t * update output for uint64_t * update output for uint64_t * update scripts * update scripts * delete debugging message * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * update tag names * update tag names * update query startingpos * update query startingpos * update job scripts * add progressive timing for kvtag_add_get_scale * fix 
iteration count in final report * update job scripts and benckmark program * update message format * update message format * update message format * update message format * clang format * update job scripts * comment off object/container close procedure in benchmark to save node hours * change the max number of object to 1M * change the max length of attribute value * change the max length of attribute value * llsm tiff import test * llsm tiff import test * llsm tiff import test * llsm tiff import test * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update cmake and llsm_importer * update cmake and llsm_importer * close if in cmake * cmake fix tiff * cmake policy to suppress warning * add pdc include dir * update code * update code * update code * update code * update code * update code * update array generating method * update array generating method * update array generating method * update array generating method * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * fix return type * fix return type * add timing * add timing * fix output * llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader * fix vairable name * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * add scripts * add scripts * add scripts * debugging for nonMPI program * debugging for nonMPI program * debugging for nonMPI program * clang format, without PDC, everything works perfectly. program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created * enable MPI * enable MPI * enlarge BCase size * enlarge BCase size * enlarge BCase size * resolve bcast count * llsm data path in script * llsm data path in script * update csv reader * update csv reader * update csv reader * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * enlarge max write * update pdc * update pdc * update pdc * update pdc * update pdc_import.c * update pdc_import.c * update pdc_export.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update tools/cmake * clang format * clang format * added a tutorial for llsm_importer * added a tutorial for llsm_importer * make sure the line feed is included for string attribute * update timing for overall completion time * update formatting * update metrics * forcibly enable openmp * adding C flags from the mex compiler * Update .gitlab-ci.yml * updated code * clang format * llsm_importer (#1) formatter on llsm_importer * add type for kvtag structure (#2) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * Feature/metadata type (#3) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * remove unnecessary header files from installation * resolve conflict * add important files * clang formatting * update cmake * update * print numWorkers * update scripts * update script * update script 
* formatting * update llsm_tools.c * remove unnecessary hash table init * update script * fix some warnings * fix some warnings * update * update * fix warning * update * update * update * update * update * update * update * update * update * fix warnings * fix warnings * fix warnings * fix warnings * fix warnings * fix warnings * fix warnings * update * update * update * server address and file paths using 1024, TMP_DIR path using 1024/2, NAME_MAX for appname and objname takes 1024/2, HOSTNAME takes 1024/8, NA_INFO_STRING takes 1024/4 * update * update * update * update --------- Co-authored-by: Houjun Tang Co-authored-by: Jean Luca Bez --- CMakeLists.txt | 40 +++++++++++++++++++ README.md | 5 ++- scripts/llsm_importer/gen_script.sh | 6 +-- src/api/CMakeLists.txt | 9 +++++ src/api/pdc_client_connect.c | 4 +- src/commons/CMakeLists.txt | 8 +++- src/commons/generic/include/pdc_generic.h | 36 ++++++++--------- src/commons/serde/include/pdc_serde.h | 27 +++++++------ src/commons/serde/pdc_serde.c | 30 +++++++------- src/server/CMakeLists.txt | 4 ++ src/server/include/pdc_client_server_common.h | 28 ++++++++----- src/server/include/pdc_server_metadata.h | 2 +- src/server/pdc_client_server_common.c | 10 ++--- src/server/pdc_server.c | 14 +++---- .../include/pdc_server_data.h | 2 +- .../pdc_server_region/pdc_server_data.c | 29 ++++++++------ src/tests/CMakeLists.txt | 3 ++ src/tests/kvtag_add_get.c | 4 +- src/tests/read_obj_shared.c | 4 +- src/tests/region_transfer_query.c | 4 +- src/tests/vpicio_mts.c | 10 ++--- 21 files changed, 176 insertions(+), 103 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5e7b65d94..8e7727e69 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -131,6 +131,46 @@ if("${PDC_SOURCE_DIR}" STREQUAL "${PDC_BINARY_DIR}") "Please create a separate binary directory and run CMake there.") endif() +#------------------------------------------------------------------------------ +# Set whether or not to disable compiler warnings +#------------------------------------------------------------------------------ +set(SUPPRESSED_LIST "") +# Disable warnings for unused parameters +option(PDC_SUPPRESS_UNUSED_ARG "Disable unused argument warnings" ON) +if(PDC_SUPPRESS_UNUSED_ARG) + set(SUPPRESSED_LIST "-Wno-unused-parameter" ${SUPPRESSED_LIST}) +endif() + +# Disable warnings for unused variables +option(PDC_SUPPRESS_UNUSED_VAR "Disable unused variable warnings" ON) +if(PDC_SUPPRESS_UNUSED_VAR) + set(SUPPRESSED_LIST "-Wno-unused-variable" ${SUPPRESSED_LIST}) +endif() + +# Disable warnings for unused functions +option(PDC_SUPPRESS_UNUSED_FUNC "Disable unused function warnings" ON) +if(PDC_SUPPRESS_UNUSED_FUNC) + set(SUPPRESSED_LIST "-Wno-unused-function" ${SUPPRESSED_LIST}) +endif() + +# Disable warnings for -Wunused-result +option(PDC_SUPPRESS_UNUSED_RESULT "Disable unused result warnings" ON) +if(PDC_SUPPRESS_UNUSED_RESULT) + set(SUPPRESSED_LIST "-Wno-unused-result" ${SUPPRESSED_LIST}) +endif() + +# Disable warnings for -Wmaybe-uninitialized +option(PDC_SUPPRESS_MAYBE_UNINIT "Disable maybe-uninitialized warnings" ON) +if(PDC_SUPPRESS_MAYBE_UNINIT) + set(SUPPRESSED_LIST "-Wno-maybe-uninitialized" ${SUPPRESSED_LIST}) +endif() + +# Disable warnings for -Wunused-but-set-variable +option(PDC_SUPPRESS_UNUSED_BUT_SET "Disable unused-but-set-variable warnings" ON) +if(PDC_SUPPRESS_UNUSED_BUT_SET) + set(SUPPRESSED_LIST "-Wno-unused-but-set-variable" ${SUPPRESSED_LIST}) +endif() + #------------------------------------------------------------------------------ # Set a default build 
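Note (illustrative only, not part of the diff below): this patch reworks the server's fixed-size string buffers around a single ADDR_MAX = 1024 base, deriving NA_STRING_INFO_LEN (ADDR_MAX / 2), HOSTNAME_LEN (ADDR_MAX / 8), TMP_DIR_STRING_LEN (ADDR_MAX / 2), and OBJ_NAME_MAX (TAG_LEN_MAX / 2) from it. The minimal C sketch below shows how these macros are meant to bound the Mercury listen-address string the server composes at startup; compose_na_info is a hypothetical helper named here only for illustration, mirroring the snprintf pattern used in PDC_Server_init.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Length macros as introduced in pdc_client_server_common.h by this patch. */
    #define ADDR_MAX           1024
    #define NA_STRING_INFO_LEN (ADDR_MAX / 2)
    #define HOSTNAME_LEN       (ADDR_MAX / 8)

    /* Hypothetical helper: build the "transport://host:port" listen address,
     * bounding every buffer by its length macro instead of a magic number. */
    static void
    compose_na_info(const char *hg_transport, int port, char na_info_string[NA_STRING_INFO_LEN])
    {
        char hostname[HOSTNAME_LEN];

        memset(hostname, 0, HOSTNAME_LEN);
        gethostname(hostname, HOSTNAME_LEN - 1);
        snprintf(na_info_string, NA_STRING_INFO_LEN, "%s://%s:%d", hg_transport, hostname, port);
    }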
type if none was specified #------------------------------------------------------------------------------ diff --git a/README.md b/README.md index 54ab218f6..5c5e6cbec 100644 --- a/README.md +++ b/README.md @@ -133,13 +133,16 @@ Now, it's time to compile and install PDC. * One can replace `mpicc` to other available MPI compilers. For example, on Cori, `cc` can be used to replace `mpicc`. * `ctest` contains both sequential and MPI tests for the PDC settings. These can be used to perform regression tests. +* Make sure MERCURY_HOME is added to CMAKE_PREFIX_PATH or PATH. + ```bash cd $PDC_SRC_DIR git checkout develop mkdir build cd build -cmake ../ -DBUILD_MPI_TESTING=ON -DBUILD_SHARED_LIBS=ON -DBUILD_TESTING=ON -DCMAKE_INSTALL_PREFIX=$PDC_DIR -DPDC_ENABLE_MPI=ON -DMERCURY_DIR=$MERCURY_DIR -DCMAKE_C_COMPILER=cc -DMPI_RUN_CMD=srun + +cmake ../ -DBUILD_MPI_TESTING=ON -DBUILD_SHARED_LIBS=ON -DBUILD_TESTING=ON -DCMAKE_INSTALL_PREFIX=$PDC_DIR -DPDC_ENABLE_MPI=ON -DMERCURY_DIR=$MERCURY_DIR -DCMAKE_PREFIX_PATH=$MERCURY_DIR -DCMAKE_C_COMPILER=cc -DMPI_RUN_CMD=srun make -j 32 && make install ``` diff --git a/scripts/llsm_importer/gen_script.sh b/scripts/llsm_importer/gen_script.sh index efefd819f..ffc5b6d3b 100755 --- a/scripts/llsm_importer/gen_script.sh +++ b/scripts/llsm_importer/gen_script.sh @@ -6,14 +6,14 @@ MAX_HYPERTHREADING=2 # Designated number of threads per process on each node # (this should be associated with -c option in srun) -NUM_THREAD_PER_SERVER_PROC=128 +NUM_THREAD_PER_SERVER_PROC=64 NUM_THREAD_PER_CLIENT_PROC=64 # Designated number of processes for server anc client on each node # (this should be associated with -n option in srun) NUM_SERVER_PROC_PER_NODE=1 -NUM_CLIENT_PROC_PER_NODE=2 +NUM_CLIENT_PROC_PER_NODE=1 MAX_NODE=512 @@ -35,7 +35,7 @@ for (( i = 1; i <= $MAX_NODE; i*=2 )); do sed -i "s/N_CLIENT_PROC/${NUM_CLIENT_PROC_PER_NODE}/g" $TARGET sed -i "s/NTHREAD_PER_SPROC/${NUM_THREAD_PER_SERVER_PROC}/g" $TARGET sed -i "s/NTHREAD_PER_CPROC/${NUM_THREAD_PER_CLIENT_PROC}/g" $TARGET - if [[ "$i" -gt "16" ]]; then + if [[ "$i" -gt "4" ]]; then sed -i "s/REG//g" $TARGET else sed -i "s/DBG//g" $TARGET diff --git a/src/api/CMakeLists.txt b/src/api/CMakeLists.txt index 1ba4b8389..cfbb8ac21 100644 --- a/src/api/CMakeLists.txt +++ b/src/api/CMakeLists.txt @@ -107,6 +107,10 @@ message(STATUS "PDC_BUILD_INCLUDE_DEPENDENCIES: ${PDC_BUILD_INCLUDE_DEPENDENCIES add_library(pdc ${PDC_SRCS}) +if(CMAKE_C_COMPILER_ID MATCHES "GNU|Clang") + target_compile_options(pdc PRIVATE ${SUPPRESSED_LIST}) +endif() + target_include_directories(pdc PUBLIC "$" $ @@ -129,6 +133,11 @@ set(PDC_EXPORTED_LIBS pdc ${PDC_EXPORTED_LIBS}) add_executable(close_server close_server.c ) + +if(CMAKE_C_COMPILER_ID MATCHES "GNU|Clang") + target_compile_options(close_server PRIVATE ${SUPPRESSED_LIST}) +endif() + target_link_libraries(close_server pdc ${PDC_COMMON_LIBRARIES}) install( diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index fff47f027..467de4bca 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -1126,8 +1126,8 @@ perr_t PDC_Client_mercury_init(hg_class_t **hg_class, hg_context_t **hg_context, int port) { perr_t ret_value = SUCCEED; - char na_info_string[PATH_MAX]; - char hostname[ADDR_MAX]; + char na_info_string[NA_STRING_INFO_LEN]; + char hostname[HOSTNAME_LEN]; int local_server_id; /* Set the default mercury transport * but enable overriding that to any of: diff --git a/src/commons/CMakeLists.txt b/src/commons/CMakeLists.txt index 9f8abf690..485ec4b8a 100644 --- 
a/src/commons/CMakeLists.txt +++ b/src/commons/CMakeLists.txt @@ -22,6 +22,8 @@ endif() find_package(MERCURY REQUIRED) if(MERCURY_FOUND) message(STATUS "mercury dir = ${MERCURY_DIR}") + # NOTE: enable the following if you need ${MERCURY_INCLUDE_DIR} in the future + # NOTE: remember to add MERCURY_HOME to PATH or CMAKE_PREFIX_PATH if you enable the following. find_path(MERCURY_INCLUDE_DIR mercury.h HINTS ${MERCURY_DIR}) find_library(MERCURY_LIBRARY mercury HINTS ${MERCURY_DIR}) message(STATUS "mercury include dir = ${MERCURY_INCLUDE_DIR}") @@ -103,6 +105,10 @@ file(GLOB_RECURSE PDC_COMMONS_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.h) add_library(${PDC_COMMON_LIBRARY_NAME} ${PDC_LIBTYPE} ${PDC_COMMONS_SOURCES} ${PDC_COMMONS_HEADERS}) +if(CMAKE_C_COMPILER_ID MATCHES "GNU|Clang") + target_compile_options(${PDC_COMMON_LIBRARY_NAME} PRIVATE ${SUPPRESSED_LIST}) +endif() + target_include_directories(${PDC_COMMON_LIBRARY_NAME} PUBLIC "$" $ @@ -207,4 +213,4 @@ set(PDC_INCLUDES_INSTALL_TIME PARENT_SCOPE ) -set(PDC_COMMONS_LIBRARIES ${PDC_COMMON_LIBRARY_NAME} PARENT_SCOPE) \ No newline at end of file +set(PDC_COMMONS_LIBRARIES ${PDC_COMMON_LIBRARY_NAME} PARENT_SCOPE) diff --git a/src/commons/generic/include/pdc_generic.h b/src/commons/generic/include/pdc_generic.h index 47322b553..fc9925ea9 100644 --- a/src/commons/generic/include/pdc_generic.h +++ b/src/commons/generic/include/pdc_generic.h @@ -17,7 +17,7 @@ typedef enum { false = 0, true = 1 } bool; #endif #endif -typedef enum { +typedef enum pdc_c_var_type_t { PDC_UNKNOWN = -1, /* error */ PDC_INT = 0, /* integer types (identical to int32_t) */ PDC_FLOAT = 1, /* floating-point types */ @@ -41,9 +41,9 @@ typedef enum { PDC_TYPE_COUNT = 19 /* this is the number of var types and has to be the last */ } pdc_c_var_type_t; -typedef pdc_c_var_type_t PDC_CType; +// typedef pdc_c_var_type_t PDC_CType; -typedef enum { +typedef enum pdc_c_var_class_t { PDC_CLS_SCALAR, PDC_CLS_ARRAY, PDC_CLS_ENUM, // not implemented, users can use PDC_CT_INT @@ -57,10 +57,10 @@ typedef enum { PDC_CLS_COUNT // just the count of the enum. 
} pdc_c_var_class_t; -typedef pdc_c_var_class_t PDC_CType_Class; +// typedef pdc_c_var_class_t PDC_CType_Class; // clang-format off -static const size_t DataTypeSizes[PDC_TYPE_COUNT] = { +static size_t DataTypeSizes[PDC_TYPE_COUNT] = { sizeof(int), sizeof(float), sizeof(double), @@ -82,7 +82,7 @@ static const size_t DataTypeSizes[PDC_TYPE_COUNT] = { sizeof(size_t) }; -static const char *DataTypeNames[PDC_TYPE_COUNT] = { +static char *DataTypeNames[PDC_TYPE_COUNT] = { "int", "float", "double", @@ -104,7 +104,7 @@ static const char *DataTypeNames[PDC_TYPE_COUNT] = { "size_t" }; -static const char *DataTypeEnumNames[PDC_TYPE_COUNT] = { +static char *DataTypeEnumNames[PDC_TYPE_COUNT] = { "PDC_INT", "PDC_FLOAT", "PDC_DOUBLE", @@ -126,7 +126,8 @@ static const char *DataTypeEnumNames[PDC_TYPE_COUNT] = { "PDC_SIZE_T" }; -static const char *DataTypeFormat[PDC_TYPE_COUNT] = { +__attribute__((unused)) +static char *DataTypeFormat[PDC_TYPE_COUNT] = { "%d", // int "%f", // float "%lf", // double @@ -149,8 +150,7 @@ static const char *DataTypeFormat[PDC_TYPE_COUNT] = { }; // clang-format on - -static const char * +__attribute__((unused)) static char * get_enum_name_by_dtype(pdc_c_var_type_t type) { if (type < 0 || type >= PDC_TYPE_COUNT) { @@ -158,8 +158,7 @@ get_enum_name_by_dtype(pdc_c_var_type_t type) } return DataTypeEnumNames[type]; } - -static const size_t +__attribute__((unused)) static size_t get_size_by_dtype(pdc_c_var_type_t type) { if (type < 0 || type >= PDC_TYPE_COUNT) { @@ -167,8 +166,7 @@ get_size_by_dtype(pdc_c_var_type_t type) } return DataTypeSizes[type]; } - -static const size_t +__attribute__((unused)) static size_t get_size_by_class_n_type(void *data, size_t item_count, pdc_c_var_class_t pdc_class, pdc_c_var_type_t pdc_type) { @@ -184,7 +182,7 @@ get_size_by_class_n_type(void *data, size_t item_count, pdc_c_var_class_t pdc_cl else if (pdc_class == PDC_CLS_ARRAY) { if (pdc_type == PDC_STRING) { char **str_arr = (char **)data; - int i = 0; + size_t i = 0; for (i = 0; i < item_count; i++) { size = size + (strlen(str_arr[i]) + 1) * sizeof(char); } @@ -195,8 +193,7 @@ get_size_by_class_n_type(void *data, size_t item_count, pdc_c_var_class_t pdc_cl } return size; } - -static const char * +__attribute__((unused)) static char * get_name_by_dtype(pdc_c_var_type_t type) { if (type < 0 || type >= PDC_TYPE_COUNT) { @@ -204,8 +201,7 @@ get_name_by_dtype(pdc_c_var_type_t type) } return DataTypeNames[type]; } - -static pdc_c_var_type_t +__attribute__((unused)) static pdc_c_var_type_t get_dtype_by_enum_name(const char *enumName) { for (int i = 0; i < PDC_TYPE_COUNT; i++) { @@ -216,4 +212,4 @@ get_dtype_by_enum_name(const char *enumName) return PDC_UNKNOWN; // assuming PDC_UNKNOWN is the enum value for "unknown" } -#endif /* PDC_GENERIC_H */ \ No newline at end of file +#endif /* PDC_GENERIC_H */ diff --git a/src/commons/serde/include/pdc_serde.h b/src/commons/serde/include/pdc_serde.h index 8c5efae9e..c65c70b33 100644 --- a/src/commons/serde/include/pdc_serde.h +++ b/src/commons/serde/include/pdc_serde.h @@ -10,18 +10,18 @@ #define MAX_BUFFER_SIZE 1000 typedef struct { - PDC_CType pdc_type; /**< Data type of the key */ - size_t size; /**< Size of the key */ - void * key; /**< Pointer to the key data */ + pdc_c_var_type_t pdc_type; /**< Data type of the key */ + size_t size; /**< Size of the key */ + void * key; /**< Pointer to the key data */ } PDC_SERDE_Key; typedef struct { - PDC_CType_Class pdc_class; /**< Class of the value */ - PDC_CType pdc_type; /**< Data type of the value */ - size_t size; 
// size of the data. If a string, it is strlen(data) + 1; - // if an array, it is the number of elements; - // if a struct, it is the totalSize of the data chunk of the struct, etc. - void *data; /**< Pointer to the value data */ + pdc_c_var_class_t pdc_class; /**< Class of the value */ + pdc_c_var_type_t pdc_type; /**< Data type of the value */ + size_t size; // size of the data. If a string, it is strlen(data) + 1; + // if an array, it is the number of elements; + // if a struct, it is the totalSize of the data chunk of the struct, etc. + void *data; /**< Pointer to the value data */ } PDC_SERDE_Value; typedef struct { @@ -111,7 +111,7 @@ void pdc_serde_print(PDC_SERDE_SerializedData *data); * @return Pointer to the created PDC_SERDE_Key structure */ static inline PDC_SERDE_Key * -PDC_SERDE_KEY(void *key, PDC_CType pdc_type, size_t size) +PDC_SERDE_KEY(void *key, pdc_c_var_type_t pdc_type, size_t size) { PDC_SERDE_Key *pdc_key = (PDC_SERDE_Key *)malloc(sizeof(PDC_SERDE_Key)); size_t key_size = (size_t)get_size_by_class_n_type(key, size, PDC_CLS_SCALAR, pdc_type); @@ -128,12 +128,15 @@ PDC_SERDE_KEY(void *key, PDC_CType pdc_type, size_t size) * @param data Pointer to the value data * @param pdc_type Data type of the value * @param pdc_class Class of the value - * @param size Size of the value data + * @param size Size of the value data. + * For scalar value, it is the result of sizeof(type) function; + * for array, it is the number of elements; + * for struct, it is the totalSize of the data chunk of the struct, etc. * * @return Pointer to the created PDC_SERDE_Value structure */ static inline PDC_SERDE_Value * -PDC_SERDE_VALUE(void *data, PDC_CType pdc_type, PDC_CType_Class pdc_class, size_t size) +PDC_SERDE_VALUE(void *data, pdc_c_var_type_t pdc_type, pdc_c_var_class_t pdc_class, size_t size) { PDC_SERDE_Value *pdc_value = (PDC_SERDE_Value *)malloc(sizeof(PDC_SERDE_Value)); size_t value_size = 0; diff --git a/src/commons/serde/pdc_serde.c b/src/commons/serde/pdc_serde.c index fe9e9aa64..21a9e9967 100644 --- a/src/commons/serde/pdc_serde.c +++ b/src/commons/serde/pdc_serde.c @@ -98,7 +98,7 @@ pdc_serde_serialize(PDC_SERDE_SerializedData *data) memcpy(buffer + sizeof(size_t) * 2, &data->header->numKeys, sizeof(size_t)); // then the keys size_t offset = sizeof(size_t) * 3; - for (int i = 0; i < data->header->numKeys; i++) { + for (size_t i = 0; i < data->header->numKeys; i++) { int8_t pdc_type = (int8_t)(data->header->keys[i].pdc_type); memcpy(buffer + offset, &pdc_type, sizeof(int8_t)); offset += sizeof(int8_t); @@ -117,7 +117,7 @@ pdc_serde_serialize(PDC_SERDE_SerializedData *data) memcpy(buffer + offset, &data->data->numValues, sizeof(size_t)); offset += sizeof(size_t); // then the values - for (int i = 0; i < data->data->numValues; i++) { + for (size_t i = 0; i < data->data->numValues; i++) { int8_t pdc_class = (int8_t)data->data->values[i].pdc_class; int8_t pdc_type = (int8_t)data->data->values[i].pdc_type; memcpy(buffer + offset, &pdc_class, sizeof(int8_t)); @@ -155,7 +155,7 @@ pdc_serde_deserialize(void *buffer) header->keys = malloc(sizeof(PDC_SERDE_Key) * numKeys); header->numKeys = numKeys; header->totalSize = headerSize; - for (int i = 0; i < numKeys; i++) { + for (size_t i = 0; i < numKeys; i++) { int8_t pdc_type; size_t size; memcpy(&pdc_type, buffer + offset, sizeof(int8_t)); @@ -166,7 +166,7 @@ pdc_serde_deserialize(void *buffer) memcpy(key, buffer + offset, size); offset += size; header->keys[i].key = key; - header->keys[i].pdc_type = (PDC_CType)pdc_type; + 
header->keys[i].pdc_type = (pdc_c_var_type_t)pdc_type; header->keys[i].size = size; } @@ -188,7 +188,7 @@ pdc_serde_deserialize(void *buffer) data->values = malloc(sizeof(PDC_SERDE_Value) * numValues); data->numValues = numValues; data->totalSize = dataSize; - for (int i = 0; i < numValues; i++) { + for (size_t i = 0; i < numValues; i++) { int8_t pdc_class; int8_t pdc_type; size_t size; @@ -202,8 +202,8 @@ pdc_serde_deserialize(void *buffer) memcpy(value, buffer + offset, size); offset += size; data->values[i].data = value; - data->values[i].pdc_class = (PDC_CType_Class)pdc_class; - data->values[i].pdc_type = (PDC_CType)pdc_type; + data->values[i].pdc_class = (pdc_c_var_class_t)pdc_class; + data->values[i].pdc_type = (pdc_c_var_type_t)pdc_type; data->values[i].size = size; } // check the total size @@ -230,11 +230,11 @@ pdc_serde_deserialize(void *buffer) void pdc_serde_free(PDC_SERDE_SerializedData *data) { - for (int i = 0; i < data->header->numKeys; i++) { + for (size_t i = 0; i < data->header->numKeys; i++) { free(data->header->keys[i].key); } free(data->header->keys); - for (int i = 0; i < data->data->numValues; i++) { + for (size_t i = 0; i < data->data->numValues; i++) { free(data->data->values[i].data); } free(data->data->values); @@ -249,8 +249,8 @@ pdc_serde_print(PDC_SERDE_SerializedData *data) printf("Header:\n"); printf(" numKeys: %zu\n", data->header->numKeys); printf(" totalSize: %zu\n", data->header->totalSize); - for (int i = 0; i < data->header->numKeys; i++) { - printf(" key %d:\n", i); + for (size_t i = 0; i < data->header->numKeys; i++) { + printf(" key %ld:\n", i); printf(" type: %d\n", data->header->keys[i].pdc_type); printf(" size: %zu\n", data->header->keys[i].size); printf(" key: %s\n", (char *)data->header->keys[i].key); @@ -258,13 +258,13 @@ pdc_serde_print(PDC_SERDE_SerializedData *data) printf("Data:\n"); printf(" numValues: %zu\n", data->data->numValues); printf(" totalSize: %zu\n", data->data->totalSize); - for (int i = 0; i < data->data->numValues; i++) { - printf(" value %d:\n", i); + for (size_t i = 0; i < data->data->numValues; i++) { + printf(" value %ld:\n", i); printf(" class: %d\n", data->data->values[i].pdc_class); printf(" type: %d\n", data->data->values[i].pdc_type); printf(" size: %zu\n", data->data->values[i].size); printf(" data: "); - if (data->data->values[i].pdc_class == PDC_STRING) { + if (data->data->values[i].pdc_type == PDC_STRING) { printf("%s\n", (char *)data->data->values[i].data); } else { @@ -302,7 +302,7 @@ test_serde_framework() char * arrayKey_str = "array"; int intArray[3] = {1, 2, 3}; PDC_SERDE_Key * arrayKey = PDC_SERDE_KEY(arrayKey_str, PDC_STRING, sizeof(arrayKey_str)); - PDC_SERDE_Value *arrayValue = PDC_SERDE_VALUE(intArray, PDC_INT, PDC_CLS_ARRAY, sizeof(int) * 3); + PDC_SERDE_Value *arrayValue = PDC_SERDE_VALUE(intArray, PDC_INT, PDC_CLS_ARRAY, 3); pdc_serde_append_key_value(data, arrayKey, arrayValue); typedef struct { diff --git a/src/server/CMakeLists.txt b/src/server/CMakeLists.txt index abe3917c6..5d73b6b12 100644 --- a/src/server/CMakeLists.txt +++ b/src/server/CMakeLists.txt @@ -53,6 +53,10 @@ add_executable(pdc_server.exe ${PDC_SOURCE_DIR}/src/api/pdc_analysis/pdc_hist_pkg.c ) +if(CMAKE_C_COMPILER_ID MATCHES "GNU|Clang") + target_compile_options(pdc_server.exe PRIVATE ${SUPPRESSED_LIST}) +endif() + #install( # TARGETS # pdc_server.exe diff --git a/src/server/include/pdc_client_server_common.h b/src/server/include/pdc_client_server_common.h index e125af9a5..e3ea447ea 100644 --- 
a/src/server/include/pdc_client_server_common.h +++ b/src/server/include/pdc_client_server_common.h @@ -57,18 +57,14 @@ hg_thread_mutex_t meta_buf_map_mutex_g; hg_thread_mutex_t meta_obj_map_mutex_g; #endif -#ifndef HOST_NAME_MAX -#if defined(__APPLE__) -#define HOST_NAME_MAX 255 -#else -#define HOST_NAME_MAX 64 -#endif /* __APPLE__ */ -#endif /* HOST_NAME_MAX */ - #define PAGE_SIZE 4096 -#define ADDR_MAX 512 +#define ADDR_MAX 1024 +#define NA_STRING_INFO_LEN ADDR_MAX / 2 +#define HOSTNAME_LEN ADDR_MAX / 8 +#define TMP_DIR_STRING_LEN ADDR_MAX / 2 #define DIM_MAX 4 #define TAG_LEN_MAX 2048 +#define OBJ_NAME_MAX TAG_LEN_MAX / 2 #define PDC_SERVER_ID_INTERVEL 1000000000ull #define PDC_SERVER_MAX_PROC_PER_NODE 32 #define PDC_SERIALIZE_MAX_SIZE 256 @@ -79,6 +75,16 @@ hg_thread_mutex_t meta_obj_map_mutex_g; #define PDC_UPDATE_CACHE 111 #define PDC_UPDATE_STORAGE 101 +#ifndef HOST_NAME_MAX +#if defined(__APPLE__) +#define HOST_NAME_MAX ADDR_MAX / 4 +#define HOSTNAME_LEN HOST_NAME_MAX +#else +#define HOST_NAME_MAX ADDR_MAX / 8 +#define HOSTNAME_LEN HOST_NAME_MAX +#endif /* __APPLE__ */ +#endif /* HOST_NAME_MAX */ + #define pdc_server_cfg_name_g "server.cfg" #define ADD_OBJ 1 @@ -391,8 +397,8 @@ typedef struct data_server_region_unmap_t { // For storing metadata typedef struct pdc_metadata_t { int user_id; // Both server and client gets it and do security check - char app_name[ADDR_MAX]; - char obj_name[ADDR_MAX]; + char app_name[OBJ_NAME_MAX]; + char obj_name[OBJ_NAME_MAX]; int time_step; // Above four are the unique identifier for objects diff --git a/src/server/include/pdc_server_metadata.h b/src/server/include/pdc_server_metadata.h index f81225a7d..c65ff59bc 100644 --- a/src/server/include/pdc_server_metadata.h +++ b/src/server/include/pdc_server_metadata.h @@ -44,7 +44,7 @@ /*****************************/ extern int pdc_server_rank_g; extern int pdc_server_size_g; -extern char pdc_server_tmp_dir_g[ADDR_MAX]; +extern char pdc_server_tmp_dir_g[TMP_DIR_STRING_LEN]; extern uint32_t n_metadata_g; extern HashTable * metadata_hash_table_g; extern HashTable * container_hash_table_g; diff --git a/src/server/pdc_client_server_common.c b/src/server/pdc_client_server_common.c index 859ce7bb8..e3e8597b1 100644 --- a/src/server/pdc_client_server_common.c +++ b/src/server/pdc_client_server_common.c @@ -401,7 +401,7 @@ PDC_metadata_cmp(pdc_metadata_t *a, pdc_metadata_t *b) void PDC_mkdir(const char *dir) { - char tmp[ADDR_MAX]; + char tmp[TMP_DIR_STRING_LEN]; char *p = NULL; FUNC_ENTER(NULL); @@ -476,8 +476,8 @@ PDC_metadata_init(pdc_metadata_t *a) a->ndim = 0; a->data_server_id = 0; - memset(a->app_name, 0, sizeof(char) * ADDR_MAX); - memset(a->obj_name, 0, sizeof(char) * ADDR_MAX); + memset(a->app_name, 0, sizeof(char) * NAME_MAX); + memset(a->obj_name, 0, sizeof(char) * NAME_MAX); memset(a->tags, 0, sizeof(char) * TAG_LEN_MAX); memset(a->data_location, 0, sizeof(char) * ADDR_MAX); memset(a->dims, 0, sizeof(uint64_t) * DIM_MAX); @@ -3096,7 +3096,7 @@ HG_TEST_RPC_CB(region_release, handle) size2 = HG_Bulk_get_size(remote_bulk_handle); if (size != size2) { error = 1; - printf("==PDC_SERVER: local size %llu, remote %llu\n", size, size2); + printf("==PDC_SERVER: local size %lu, remote %lu\n", size, size2); PGOTO_ERROR(HG_OTHER_ERROR, "===PDC SERVER: HG_TEST_RPC_CB(region_release, " "handle) local and remote bulk size does not match"); } @@ -3269,7 +3269,7 @@ HG_TEST_RPC_CB(region_release, handle) size2 = HG_Bulk_get_size(remote_bulk_handle); if (size != size2) { error = 1; - printf("==PDC_SERVER: local size 
%llu, remote %llu\n", size, size2); + printf("==PDC_SERVER: local size %lu, remote %lu\n", size, size2); /* PGOTO_ERROR(HG_OTHER_ERROR, "===PDC SERVER: HG_TEST_RPC_CB(region_release, * handle) local and remote bulk size does not match"); */ } diff --git a/src/server/pdc_server.c b/src/server/pdc_server.c index e08882120..f461c31d5 100644 --- a/src/server/pdc_server.c +++ b/src/server/pdc_server.c @@ -111,7 +111,7 @@ extern hg_thread_pool_t *hg_test_thread_pool_g; extern hg_thread_pool_t *hg_test_thread_pool_fs_g; hg_atomic_int32_t close_server_g; -char pdc_server_tmp_dir_g[ADDR_MAX]; +char pdc_server_tmp_dir_g[TMP_DIR_STRING_LEN]; int is_restart_g = 0; int pdc_server_rank_g = 0; int pdc_server_size_g = 1; @@ -693,8 +693,8 @@ PDC_Server_init(int port, hg_class_t **hg_class, hg_context_t **hg_context) perr_t ret_value = SUCCEED; int i = 0; char self_addr_string[ADDR_MAX]; - char na_info_string[ADDR_MAX]; - char hostname[1024]; + char na_info_string[NA_STRING_INFO_LEN]; + char hostname[HOSTNAME_LEN]; struct hg_init_info init_info = {0}; /* Set the default mercury transport @@ -728,9 +728,9 @@ PDC_Server_init(int port, hg_class_t **hg_class, hg_context_t **hg_context) if ((hg_transport = getenv("HG_TRANSPORT")) == NULL) { hg_transport = default_hg_transport; } - memset(hostname, 0, 1024); - gethostname(hostname, 1023); - snprintf(na_info_string, ADDR_MAX, "%s://%s:%d", hg_transport, hostname, port); + memset(hostname, 0, HOSTNAME_LEN); + gethostname(hostname, HOSTNAME_LEN - 1); + snprintf(na_info_string, NA_STRING_INFO_LEN, "%s://%s:%d", hg_transport, hostname, port); if (pdc_server_rank_g == 0) printf("==PDC_SERVER[%d]: using %.7s\n", pdc_server_rank_g, na_info_string); @@ -1990,7 +1990,7 @@ PDC_Server_get_env() if (tmp_env_char == NULL) tmp_env_char = "./pdc_tmp"; - snprintf(pdc_server_tmp_dir_g, ADDR_MAX, "%s/", tmp_env_char); + snprintf(pdc_server_tmp_dir_g, TMP_DIR_STRING_LEN, "%s/", tmp_env_char); lustre_total_ost_g = 1; #ifdef ENABLE_LUSTRE diff --git a/src/server/pdc_server_region/include/pdc_server_data.h b/src/server/pdc_server_region/include/pdc_server_data.h index 4bcdac9dd..a204aa437 100644 --- a/src/server/pdc_server_region/include/pdc_server_data.h +++ b/src/server/pdc_server_region/include/pdc_server_data.h @@ -244,7 +244,7 @@ typedef struct cache_storage_region_t { /*****************************/ extern int pdc_server_rank_g; extern int pdc_server_size_g; -extern char pdc_server_tmp_dir_g[ADDR_MAX]; +extern char pdc_server_tmp_dir_g[TMP_DIR_STRING_LEN]; extern double server_write_time_g; extern double server_read_time_g; extern double server_get_storage_info_time_g; diff --git a/src/server/pdc_server_region/pdc_server_data.c b/src/server/pdc_server_region/pdc_server_data.c index 6f7974b5d..12747ee5c 100644 --- a/src/server/pdc_server_region/pdc_server_data.c +++ b/src/server/pdc_server_region/pdc_server_data.c @@ -130,7 +130,7 @@ PDC_Server_set_lustre_stripe(const char *path, int stripe_count, int stripe_size perr_t ret_value = SUCCEED; size_t len; int i, index; - char tmp[ADDR_MAX]; + char tmp[TMP_DIR_STRING_LEN]; char cmd[TAG_LEN_MAX]; FUNC_ENTER(NULL); @@ -4695,7 +4695,7 @@ PDC_Server_posix_write(int fd, void *buf, uint64_t write_size) while (write_size > max_write_size) { ret = write(fd, buf, max_write_size); if (ret < 0 || ret != (ssize_t)max_write_size) { - printf("==PDC_SERVER[%d]: in-loop: write %d failed, ret = %d, max_write_size = %llu\n", + printf("==PDC_SERVER[%d]: in-loop: write %d failed, ret = %ld, max_write_size = %lu\n", pdc_server_rank_g, fd, ret, 
max_write_size); ret_value = FAIL; goto done; @@ -4737,7 +4737,7 @@ PDC_Server_data_write_out(uint64_t obj_id, struct pdc_region_info *region_info, double start = MPI_Wtime(), start_posix; #endif - uint64_t write_size; + uint64_t write_size = 0; if (region_info->ndim >= 1) write_size = unit * region_info->size[0]; if (region_info->ndim >= 2) @@ -7253,15 +7253,18 @@ PDC_Server_query_evaluate_merge_opt(pdc_query_t *query, query_task_t *task, pdc_ pdc_selection_t *sel = query->sel; uint64_t nelem; size_t i, j, unit_size; - pdc_query_op_t op = PDC_QUERY_OR, lop = PDC_QUERY_OR, rop = PDC_QUERY_OR; - float flo = .0, fhi = .0; - double dlo = .0, dhi = .0; - int ilo = 0, ihi = 0, ndim, count = 0; - uint32_t ulo = 0, uhi = 0; - int64_t i64lo = 0, i64hi = 0; - uint64_t ui64lo = 0, ui64hi = 0; - void * value = NULL, *buf = NULL; - int n_eval_region = 0, can_skip, region_iter = 0; + // FIXME: need to check the types of these 'op's. I think they should be of the following (or don't even + // need to be initilized): + pdc_query_op_t op = PDC_EQ, lop = PDC_EQ, rop = PDC_EQ; + // pdc_query_op_t op = PDC_QUERY_OR, lop = PDC_QUERY_OR, rop = PDC_QUERY_OR; + float flo = .0, fhi = .0; + double dlo = .0, dhi = .0; + int ilo = 0, ihi = 0, ndim, count = 0; + uint32_t ulo = 0, uhi = 0; + int64_t i64lo = 0, i64hi = 0; + uint64_t ui64lo = 0, ui64hi = 0; + void * value = NULL, *buf = NULL; + int n_eval_region = 0, can_skip, region_iter = 0; printf("==PDC_SERVER[%d]: %s - start query evaluation!\n", pdc_server_rank_g, __func__); fflush(stdout); @@ -9484,7 +9487,7 @@ PDC_Server_recv_get_sel_data(const struct hg_cb_info *callback_info) get_sel_data_rpc_in_t *in = (get_sel_data_rpc_in_t *)callback_info->arg; query_task_t * task_elt, *task = NULL; pdc_metadata_t * meta; - struct hg_cb_info fake_callback_info; + struct hg_cb_info fake_callback_info = {0}; DL_FOREACH(query_task_list_head_g, task_elt) { diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index 499421880..2fd2d0839 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -107,6 +107,9 @@ set(PROGRAMS foreach(program ${PROGRAMS}) add_executable(${program} ${program}.c) + if(CMAKE_C_COMPILER_ID MATCHES "GNU|Clang") + target_compile_options(${program} PRIVATE ${SUPPRESSED_LIST}) + endif() target_link_libraries(${program} pdc) endforeach(program) diff --git a/src/tests/kvtag_add_get.c b/src/tests/kvtag_add_get.c index 91686b9be..7d53da6a6 100644 --- a/src/tests/kvtag_add_get.c +++ b/src/tests/kvtag_add_get.c @@ -94,12 +94,12 @@ main() kvtag2.name = "key2int"; kvtag2.value = (void *)&v2; - kvtag1.type = PDC_INT; + kvtag2.type = PDC_INT; kvtag2.size = sizeof(int); kvtag3.name = "key3double"; kvtag3.value = (void *)&v3; - kvtag1.type = PDC_DOUBLE; + kvtag3.type = PDC_DOUBLE; kvtag3.size = sizeof(double); if (PDCobj_put_tag(obj1, kvtag1.name, kvtag1.value, kvtag1.type, kvtag1.size) < 0) diff --git a/src/tests/read_obj_shared.c b/src/tests/read_obj_shared.c index 6f56965d8..4b0058dbd 100644 --- a/src/tests/read_obj_shared.c +++ b/src/tests/read_obj_shared.c @@ -163,7 +163,7 @@ main(int argc, char **argv) offset[0] = rank * my_data_size; local_offset[0] = 0; mysize[0] = my_data_size; - printf("rank %d offset = %llu, length = %llu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); + printf("rank %d offset = %lu, length = %lu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); local_region = PDCregion_create(ndim, local_offset, mysize); global_region = PDCregion_create(ndim, offset, mysize); @@ -220,7 +220,7 @@ main(int 
argc, char **argv) offset[0] = rank * my_data_size; local_offset[0] = 0; mysize[0] = my_data_size; - printf("rank %d offset = %llu, length = %llu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); + printf("rank %d offset = %lu, length = %lu, unit size = %ld\n", rank, offset[0], mysize[0], type_size); local_region = PDCregion_create(ndim, local_offset, mysize); global_region = PDCregion_create(ndim, offset, mysize); diff --git a/src/tests/region_transfer_query.c b/src/tests/region_transfer_query.c index e2c7dcfe6..dede54aaa 100644 --- a/src/tests/region_transfer_query.c +++ b/src/tests/region_transfer_query.c @@ -45,9 +45,9 @@ main(int argc, char **argv) int rank = 0, size = 1, i; int ret_value = 0; - uint64_t offset[3], offset_length[3], local_offset[1]; + uint64_t offset[3], offset_length[3]; //, local_offset[1]; uint64_t dims[1]; - local_offset[0] = 0; + // local_offset[0] = 0; offset[0] = 0; offset[1] = 2; offset[2] = 5; diff --git a/src/tests/vpicio_mts.c b/src/tests/vpicio_mts.c index 65b7b4dc8..c80d11c03 100644 --- a/src/tests/vpicio_mts.c +++ b/src/tests/vpicio_mts.c @@ -76,7 +76,7 @@ main(int argc, char **argv) uint64_t *offset_remote; uint64_t *mysize; double t0, t1; - int steps = 1, sleeptime = 0; + uint64_t steps = 1, sleeptime = 0; pdcid_t transfer_request_x, transfer_request_y, transfer_request_z, transfer_request_px, transfer_request_py, transfer_request_pz, transfer_request_id1, transfer_request_id2; @@ -95,8 +95,8 @@ main(int argc, char **argv) sleeptime = atoi(argv[3]); } if (rank == 0) - printf("Writing %" PRIu64 " number of particles for %d steps with %d clients.\n", numparticles, steps, - size); + printf("Writing %" PRIu64 " number of particles for %ld steps with %d clients.\n", numparticles, + steps, size); dims[0] = numparticles; @@ -199,7 +199,7 @@ main(int argc, char **argv) MPI_Barrier(MPI_COMM_WORLD); t0 = MPI_Wtime(); if (rank == 0) { - printf("\n#Step %llu\n", i); + printf("\n#Step %lu\n", i); } #endif PDCprop_set_obj_time_step(obj_prop_xx, i); @@ -504,7 +504,7 @@ main(int argc, char **argv) if (i != steps - 1) { sleep(sleeptime); if (rank == 0) { - printf("Sleep time: %d.00\n", sleeptime); + printf("Sleep time: %ld.00\n", sleeptime); } } } // End for steps From af02e66a4bad87d92254bce76b0241f4b12bdcdb Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Fri, 23 Jun 2023 14:29:16 -0700 Subject: [PATCH 199/806] Update clang-format-fix.yml --- .github/workflows/clang-format-fix.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/clang-format-fix.yml b/.github/workflows/clang-format-fix.yml index 9145f65ca..5ca34c945 100644 --- a/.github/workflows/clang-format-fix.yml +++ b/.github/workflows/clang-format-fix.yml @@ -1,15 +1,15 @@ -name: clang-format Check +name: clang-format autofix on: workflow_dispatch: push: jobs: formatting-check: - name: Formatting Check + name: Formatting Fix runs-on: ubuntu-latest if: "!contains(github.event.head_commit.message, 'skip-ci')" steps: - uses: actions/checkout@v2 - - name: Run clang-format style check for C programs. + - name: Run clang-format style fix for C programs. uses: DoozyX/clang-format-lint-action@v0.11 with: source: '.' 
From a7e7ecfb02fe38ecc64aa4244a29f662ebe0f58a Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Fri, 23 Jun 2023 14:33:43 -0700 Subject: [PATCH 200/806] Update clang-format-fix.yml --- .github/workflows/clang-format-fix.yml | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/workflows/clang-format-fix.yml b/.github/workflows/clang-format-fix.yml index 5ca34c945..686218650 100644 --- a/.github/workflows/clang-format-fix.yml +++ b/.github/workflows/clang-format-fix.yml @@ -1,16 +1,18 @@ -name: clang-format autofix +name: clang-format Commit Chanages on: workflow_dispatch: push: jobs: formatting-check: - name: Formatting Fix + name: Commit Format Changes runs-on: ubuntu-latest if: "!contains(github.event.head_commit.message, 'skip-ci')" + permissions: + contents: write # In order to allow EndBug/add-and-commit to commit changes steps: - - uses: actions/checkout@v2 - - name: Run clang-format style fix for C programs. - uses: DoozyX/clang-format-lint-action@v0.11 + - uses: actions/checkout@v3 + - name: Fix C formatting issues detected by clang-format + uses: DoozyX/clang-format-lint-action@v0.13 with: source: '.' extensions: 'c,h,cpp,hpp' @@ -18,7 +20,7 @@ jobs: inplace: True style: file # exclude: './config ' - - uses: EndBug/add-and-commit@v7 + - uses: EndBug/add-and-commit@v9 with: author_name: github-actions author_email: github-actions[bot]@users.noreply.github.com From cd5b3057399bfcb66476103fd8a6add1445e60d5 Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Thu, 29 Jun 2023 15:24:03 -0700 Subject: [PATCH 201/806] Increase the default server cache size to 32GB and flush frequency to 30s --- src/server/pdc_server_region/pdc_server_region_cache.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/server/pdc_server_region/pdc_server_region_cache.c b/src/server/pdc_server_region/pdc_server_region_cache.c index 6bcf5217b..5414eff85 100644 --- a/src/server/pdc_server_region/pdc_server_region_cache.c +++ b/src/server/pdc_server_region/pdc_server_region_cache.c @@ -3,7 +3,8 @@ #ifdef PDC_SERVER_CACHE -#define MAX_CACHE_SIZE 1610612736 +#define MAX_CACHE_SIZE 34359738368 +#define PDC_CACHE_FLUSH_TIME_INT 30 typedef struct pdc_region_cache { struct pdc_region_info * region_cache_info; @@ -825,7 +826,7 @@ PDC_region_cache_clock_cycle(void *ptr) struct timeval current_time; struct timeval finish_time; int nflush = 0; - double flush_frequency_s = 2.0, elapsed_time; + double flush_frequency_s = PDC_CACHE_FLUSH_TIME_INT, elapsed_time; int server_rank = 0; char *p = getenv("PDC_SERVER_CACHE_FLUSH_FREQUENCY_S"); From 1578f00f6c259a35f42144d154de49844beffc61 Mon Sep 17 00:00:00 2001 From: github-actions Date: Thu, 29 Jun 2023 22:24:41 +0000 Subject: [PATCH 202/806] Committing clang-format changes --- src/server/pdc_server_region/pdc_server_region_cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/pdc_server_region/pdc_server_region_cache.c b/src/server/pdc_server_region/pdc_server_region_cache.c index 5414eff85..92c408fa6 100644 --- a/src/server/pdc_server_region/pdc_server_region_cache.c +++ b/src/server/pdc_server_region/pdc_server_region_cache.c @@ -3,7 +3,7 @@ #ifdef PDC_SERVER_CACHE -#define MAX_CACHE_SIZE 34359738368 +#define MAX_CACHE_SIZE 34359738368 #define PDC_CACHE_FLUSH_TIME_INT 30 typedef struct pdc_region_cache { From 10189bf20ddafb714b6f91b4d29a2a8b50444ffa Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 10 Jul 2023 15:41:24 -0400 Subject: [PATCH 203/806] add FindMERCURY.cmake --- 
CMake/FindMERCURY.cmake | 46 +++++++++++++++++++++++++++++++++++++++++ CMakeLists.txt | 25 ++++++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 CMake/FindMERCURY.cmake diff --git a/CMake/FindMERCURY.cmake b/CMake/FindMERCURY.cmake new file mode 100644 index 000000000..869d9a647 --- /dev/null +++ b/CMake/FindMERCURY.cmake @@ -0,0 +1,46 @@ +# FindMERCURY.cmake + +# Find the system's MERCURY library +# This will define: +# +# MERCURY_FOUND - System has MERCURY +# MERCURY_INCLUDE_DIRS - The MERCURY include directory +# MERCURY_LIBRARIES - The libraries needed to use MERCURY + +find_package(MERCURY QUIET HINTS $ENV{MERCURY_HOME} $ENV{MERCURY_DIR} $ENV{MERCURY_ROOT} $ENV{MERCURYPATH} $ENV{MERCURY_PATH}) +if(MERCURY_FOUND) + message(STATUS "mercury dir = ${MERCURY_DIR}") + # NOTE: enable the following if you need ${MERCURY_INCLUDE_DIR} in the future + # NOTE: remember to add MERCURY_HOME to PATH or CMAKE_PREFIX_PATH if you enable the following. + find_path(MERCURY_INCLUDE_DIR mercury.h HINTS ${MERCURY_DIR}) + find_library(MERCURY_LIBRARY + NAMES + mercury + mercury_debug + HINTS ${MERCURY_DIR} + ) + + find_library(MERCURY_NA_LIBRARY + NAMES + na + na_debug + HINTS ${MERCURY_DIR} + ) + + find_library(MERCURY_UTIL_LIBRARY + NAMES + mercury_util + HINTS ${MERCURY_DIR} + ) + + set(MERCURY_LIBRARIES ${MERCURY_LIBRARY} ${MERCURY_NA_LIBRARY} ${MERCURY_UTIL_LIBRARY}) + set(MERCURY_INCLUDE_DIRS ${MERCURY_INCLUDE_DIR}) + message(STATUS "mercury include dir = ${MERCURY_INCLUDE_DIRS}") + message(STATUS "mercury lib = ${MERCURY_LIBRARIES}") + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(MERCURY DEFAULT_MSG MERCURY_LIBRARY MERCURY_INCLUDE_DIR) +else(MERCURY_FOUND) + set(MERCURY_LIBRARIES "") +endif() + +mark_as_advanced(MERCURY_INCLUDE_DIR MERCURY_LIBRARY) \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 5e7b65d94..d878252fe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -51,6 +51,18 @@ set(PDC_PACKAGE_TARNAME "${PDC_PACKAGE}") if(NOT MPI_RUN_CMD) set(MPI_RUN_CMD mpiexec) endif() + +#------------------------------------------------------------------------------ +# general cmake flags: +# -DCMAKE_INSTALL_PREFIX=/usr/local -- the prefix for installing +# -DCMAKE_BUILD_TYPE=type -- type can be Debug, Release, ... +# -DCMAKE_PREFIX_PATH=/dir -- external packages +# +# note that CMAKE_PREFIX_PATH can be a list of directories: +# -DCMAKE_PREFIX_PATH='/dir1;/dir2;/dir3' +#------------------------------------------------------------------------------ + + #------------------------------------------------------------------------------ # Setup install and output Directories #------------------------------------------------------------------------------ @@ -79,6 +91,19 @@ if(NOT CMAKE_INSTALL_RPATH_USE_LINK_PATH) set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) endif() +#------------------------------------------------------------------------------ +# Setup CMake Prefix Paths for searching external libraries. +#------------------------------------------------------------------------------ +# note that CMAKE_PREFIX_PATH can be a list of directories: +# -DCMAKE_PREFIX_PATH='/dir1;/dir2;/dir3' + +if(NOT CMAKE_PREFIX_PATH) + set(CMAKE_PREFIX_PATH ${CMAKE_INSTALL_PREFIX}) +endif() +# MERCURY +set(CMAKE_PREFIX_PATH ${CMAKE_PREFIX_PATH} $ENV{MERCURY_DIR}) +# ANY Future external package goes here... 
+ #------------------------------------------------------------------------------ # Setup CMake Environment #------------------------------------------------------------------------------ From 398c0b900e3e25be94357ae075d9628cc48635e1 Mon Sep 17 00:00:00 2001 From: Wei Zhang Date: Mon, 10 Jul 2023 15:55:17 -0400 Subject: [PATCH 204/806] update commons/CMakeLists.txt --- src/commons/CMakeLists.txt | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/src/commons/CMakeLists.txt b/src/commons/CMakeLists.txt index 485ec4b8a..d0f83bb54 100644 --- a/src/commons/CMakeLists.txt +++ b/src/commons/CMakeLists.txt @@ -19,19 +19,12 @@ endif() # Mercury -find_package(MERCURY REQUIRED) +find_package(MERCURY CONFIG REQUIRED) if(MERCURY_FOUND) - message(STATUS "mercury dir = ${MERCURY_DIR}") - # NOTE: enable the following if you need ${MERCURY_INCLUDE_DIR} in the future - # NOTE: remember to add MERCURY_HOME to PATH or CMAKE_PREFIX_PATH if you enable the following. - find_path(MERCURY_INCLUDE_DIR mercury.h HINTS ${MERCURY_DIR}) - find_library(MERCURY_LIBRARY mercury HINTS ${MERCURY_DIR}) - message(STATUS "mercury include dir = ${MERCURY_INCLUDE_DIR}") - message(STATUS "mercury lib = ${MERCURY_LIBRARY}") - set(PDC_EXT_INCLUDE_DEPENDENCIES ${MERCURY_INCLUDE_DIR} + set(PDC_EXT_INCLUDE_DEPENDENCIES ${MERCURY_INCLUDE_DIRS} ${PDC_EXT_INCLUDE_DEPENDENCIES} ) - set(PDC_EXT_LIB_DEPENDENCIES mercury ${PDC_EXT_LIB_DEPENDENCIES}) + set(PDC_EXT_LIB_DEPENDENCIES ${MERCURY_LIBRARIES} ${PDC_EXT_LIB_DEPENDENCIES}) endif() include_directories(${PDC_EXT_INCLUDE_DEPENDENCIES}) From 702d6058b6b2ab4f76e018ea8a754861a07d8ca7 Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Tue, 18 Jul 2023 13:44:07 -0700 Subject: [PATCH 205/806] Fix unnecessary memory allocation (#103) --- src/api/pdc_obj/pdc_obj.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/api/pdc_obj/pdc_obj.c b/src/api/pdc_obj/pdc_obj.c index d402782f9..f16a71275 100644 --- a/src/api/pdc_obj/pdc_obj.c +++ b/src/api/pdc_obj/pdc_obj.c @@ -1237,9 +1237,9 @@ PDCobj_get_info(pdcid_t obj_id) tmp = PDC_obj_get_info(obj_id); - ret_value = PDC_CALLOC(struct pdc_obj_info); - if (!ret_value) - PGOTO_ERROR(NULL, "failed to allocate memory"); + /* ret_value = PDC_CALLOC(struct pdc_obj_info); */ + /* if (!ret_value) */ + /* PGOTO_ERROR(NULL, "failed to allocate memory"); */ ret_value = tmp->obj_info_pub; From 98dae84a5b580a22de4343e24ff46beca67682c9 Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Wed, 19 Jul 2023 09:15:32 -0700 Subject: [PATCH 206/806] Fix an issue with opening a deleted container, added test (#101) * Fix an issue with opening a deleted container, added test * Refactor the query aggregation process --- src/api/pdc_client_connect.c | 21 +++++++++++---------- src/api/pdc_obj/pdc_cont.c | 3 +++ src/server/pdc_client_server_common.c | 5 ++++- src/tests/cont_del.c | 5 +++++ 4 files changed, 23 insertions(+), 11 deletions(-) diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index 467de4bca..e6ebe726f 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -2361,21 +2361,22 @@ PDC_Client_query_metadata_name_timestep_agg(const char *obj_name, int time_step, FUNC_ENTER(NULL); #ifdef ENABLE_MPI - if (pdc_client_mpi_rank_g == 0) { + if (pdc_client_mpi_rank_g == 0) ret_value = PDC_Client_query_metadata_name_timestep(obj_name, time_step, out, metadata_server_id); - if (ret_value != SUCCEED || NULL == *out) { - *out = (pdc_metadata_t *)calloc(1, sizeof(pdc_metadata_t)); - 
PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: - ERROR with query [%s]", pdc_client_mpi_rank_g, obj_name); - } - } - else + + MPI_Bcast(&ret_value, 1, MPI_INT, 0, PDC_CLIENT_COMM_WORLD_g); + if (ret_value != SUCCEED) + PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: - ERROR with query [%s]", pdc_client_mpi_rank_g, obj_name); + + if (pdc_client_mpi_rank_g != 0) *out = (pdc_metadata_t *)calloc(1, sizeof(pdc_metadata_t)); MPI_Bcast(*out, sizeof(pdc_metadata_t), MPI_CHAR, 0, PDC_CLIENT_COMM_WORLD_g); - MPI_Bcast(metadata_server_id, 1, MPI_UINT32_T, 0, PDC_CLIENT_COMM_WORLD_g); #else ret_value = PDC_Client_query_metadata_name_timestep(obj_name, time_step, out, metadata_server_id); + if (ret_value != SUCCEED) + PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: - ERROR with query [%s]", pdc_client_mpi_rank_g, obj_name); #endif done: @@ -5606,8 +5607,8 @@ PDC_Client_query_container_name_col(const char *cont_name, uint64_t *cont_meta_i MPI_Bcast(cont_meta_id, 1, MPI_LONG_LONG, 0, PDC_CLIENT_COMM_WORLD_g); #else - printf("==PDC_CLIENT[%d]: Calling MPI collective operation without enabling MPI!\n", - pdc_client_mpi_rank_g); + PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: Calling MPI collective operation without enabling MPI!", + pdc_client_mpi_rank_g); #endif done: diff --git a/src/api/pdc_obj/pdc_cont.c b/src/api/pdc_obj/pdc_cont.c index 444596729..23f539390 100644 --- a/src/api/pdc_obj/pdc_cont.c +++ b/src/api/pdc_obj/pdc_cont.c @@ -274,6 +274,9 @@ PDCcont_open(const char *cont_name, pdcid_t pdc) ret = PDC_Client_query_container_name(cont_name, &cont_meta_id); if (ret == FAIL) PGOTO_ERROR(0, "query container name failed"); + if (cont_meta_id == 0) + PGOTO_ERROR(0, "query container not found"); + cont_id = PDC_cont_create_local(pdc, cont_name, cont_meta_id); ret_value = cont_id; diff --git a/src/server/pdc_client_server_common.c b/src/server/pdc_client_server_common.c index e3e8597b1..859c7234a 100644 --- a/src/server/pdc_client_server_common.c +++ b/src/server/pdc_client_server_common.c @@ -1685,7 +1685,10 @@ HG_TEST_RPC_CB(container_query, handle) HG_Get_input(handle, &in); PDC_Server_find_container_by_name(in.cont_name, &cont_entry); - out.cont_id = cont_entry->cont_id; + if (cont_entry) + out.cont_id = cont_entry->cont_id; + else + out.cont_id = 0; HG_Respond(handle, NULL, NULL, &out); diff --git a/src/tests/cont_del.c b/src/tests/cont_del.c index ab708685f..5ecdf2e6b 100644 --- a/src/tests/cont_del.c +++ b/src/tests/cont_del.c @@ -79,6 +79,11 @@ main(int argc, char **argv) printf("successfully close container c1\n"); } + printf("trying to open a deleted container, should fail\n"); + cont = PDCcont_open("VPIC_cont", pdc); + if (cont > 0) + printf("Error: opened a container that was just deleted @ line %d!\n", __LINE__); + // close a container property if (PDCprop_close(create_prop) < 0) { printf("Fail to close property @ line %d\n", __LINE__); From d1bdc1626aec2006bad1fa92a5ef703fd42a2d5c Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Wed, 19 Jul 2023 11:16:13 -0700 Subject: [PATCH 207/806] Fix container tag delete error (#102) * Fix container tag delete error * Committing clang-format changes * Update tag delete function * Refactor metdata lookup process for tag deletion * Committing clang-format changes * Formatting and comment * Committing clang-format changes --------- Co-authored-by: github-actions --- src/api/pdc_client_connect.c | 145 ++++++++++++++++------------- src/api/pdc_obj/include/pdc_prop.h | 10 -- src/server/pdc_server_metadata.c | 44 ++++----- src/tests/cont_tags.c | 71 ++++++++++---- src/tests/kvtag_add_get.c | 2 +- 5 
files changed, 156 insertions(+), 116 deletions(-) diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index e6ebe726f..34cef9ad8 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -7058,16 +7058,20 @@ metadata_get_kvtag_rpc_cb(const struct hg_cb_info *callback_info) ret_value = HG_Get_output(handle, &output); if (ret_value != HG_SUCCESS) { client_lookup_args->ret = -1; - PGOTO_ERROR(ret_value, "==PDC_CLIENT[%d]: metadata_add_tag_rpc_cb error with HG_Get_output", - pdc_client_mpi_rank_g); + PGOTO_ERROR(ret_value, "==PDC_CLIENT[%d]: %s error with HG_Get_output", pdc_client_mpi_rank_g, + __func__); + } + client_lookup_args->ret = output.ret; + if (output.kvtag.name) + client_lookup_args->kvtag->name = strdup(output.kvtag.name); + client_lookup_args->kvtag->size = output.kvtag.size; + client_lookup_args->kvtag->type = output.kvtag.type; + if (output.kvtag.size > 0) { + client_lookup_args->kvtag->value = malloc(output.kvtag.size); + memcpy(client_lookup_args->kvtag->value, output.kvtag.value, output.kvtag.size); } - client_lookup_args->ret = output.ret; - client_lookup_args->kvtag->name = strdup(output.kvtag.name); - client_lookup_args->kvtag->size = output.kvtag.size; - client_lookup_args->kvtag->type = output.kvtag.type; - client_lookup_args->kvtag->value = malloc(output.kvtag.size); - memcpy(client_lookup_args->kvtag->value, output.kvtag.value, output.kvtag.size); - /* PDC_kvtag_dup(&(output.kvtag), &client_lookup_args->kvtag); */ + else + client_lookup_args->kvtag->value = NULL; done: fflush(stdout); @@ -7118,13 +7122,13 @@ PDC_get_kvtag(pdcid_t obj_id, char *tag_name, pdc_kvtag_t **kvtag, int is_cont) in.key = tag_name; } else - PGOTO_ERROR(FAIL, "==PDC_Client_get_kvtag(): invalid tag content!"); + PGOTO_ERROR(FAIL, "PDC_get_kvtag: invalid tag content!"); *kvtag = (pdc_kvtag_t *)malloc(sizeof(pdc_kvtag_t)); lookup_args.kvtag = *kvtag; hg_ret = HG_Forward(metadata_get_kvtag_handle, metadata_get_kvtag_rpc_cb, &lookup_args, &in); if (hg_ret != HG_SUCCESS) - PGOTO_ERROR(FAIL, "PDC_Client_get_kvtag_metadata_with_name(): Could not start HG_Forward()"); + PGOTO_ERROR(FAIL, "PDC_get_kvtag: Could not start HG_Forward()"); // Wait for response from server work_todo_g = 1; @@ -7140,55 +7144,6 @@ PDC_get_kvtag(pdcid_t obj_id, char *tag_name, pdc_kvtag_t **kvtag, int is_cont) FUNC_LEAVE(ret_value); } -perr_t -PDCtag_delete(pdcid_t obj_id, char *tag_name) -{ - perr_t ret_value = SUCCEED; - hg_return_t hg_ret = 0; - uint64_t meta_id; - uint32_t server_id; - hg_handle_t metadata_del_kvtag_handle; - metadata_get_kvtag_in_t in; - struct _pdc_obj_info * obj_prop; - struct _pdc_client_lookup_args lookup_args; - - FUNC_ENTER(NULL); - - obj_prop = PDC_obj_get_info(obj_id); - meta_id = obj_prop->obj_info_pub->meta_id; - server_id = PDC_get_server_by_obj_id(meta_id, pdc_server_num_g); - - debug_server_id_count[server_id]++; - - if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) - PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); - - HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_del_kvtag_register_id_g, - &metadata_del_kvtag_handle); - - // Fill input structure - in.obj_id = meta_id; - in.hash_value = PDC_get_hash_by_name(obj_prop->obj_info_pub->name); - in.key = tag_name; - - hg_ret = HG_Forward(metadata_del_kvtag_handle, metadata_add_tag_rpc_cb /*reuse*/, &lookup_args, &in); - if (hg_ret != HG_SUCCESS) - PGOTO_ERROR(FAIL, "PDC_Client_del_kvtag_metadata_with_name(): Could not start 
HG_Forward()"); - - // Wait for response from server - work_todo_g = 1; - PDC_Client_check_response(&send_context_g); - - if (lookup_args.ret != 1) - printf("PDC_CLIENT: del kvtag NOT successful ... ret_value = %d\n", lookup_args.ret); - -done: - fflush(stdout); - HG_Destroy(metadata_del_kvtag_handle); - - FUNC_LEAVE(ret_value); -} - static hg_return_t kvtag_query_bulk_cb(const struct hg_cb_info *hg_cb_info) { @@ -7455,6 +7410,69 @@ PDC_Client_query_kvtag_col(const pdc_kvtag_t *kvtag, int *n_res, uint64_t **pdc_ FUNC_LEAVE(ret_value); } +// Delete a tag specified by a name, and whether it is from a container or an object +static perr_t +PDCtag_delete(pdcid_t obj_id, char *tag_name, int is_cont) +{ + perr_t ret_value = SUCCEED; + hg_return_t hg_ret = 0; + uint64_t meta_id; + uint32_t server_id; + hg_handle_t metadata_del_kvtag_handle; + metadata_get_kvtag_in_t in; + struct _pdc_obj_info * obj_prop; + struct _pdc_cont_info * cont_prop; + struct _pdc_client_lookup_args lookup_args; + + FUNC_ENTER(NULL); + + if (is_cont) { + cont_prop = PDC_cont_get_info(obj_id); + meta_id = cont_prop->cont_info_pub->meta_id; + } + else { + obj_prop = PDC_obj_get_info(obj_id); + meta_id = obj_prop->obj_info_pub->meta_id; + } + + server_id = PDC_get_server_by_obj_id(meta_id, pdc_server_num_g); + + debug_server_id_count[server_id]++; + + if (PDC_Client_try_lookup_server(server_id, 0) != SUCCEED) + PGOTO_ERROR(FAIL, "==CLIENT[%d]: ERROR with PDC_Client_try_lookup_server", pdc_client_mpi_rank_g); + + HG_Create(send_context_g, pdc_server_info_g[server_id].addr, metadata_del_kvtag_register_id_g, + &metadata_del_kvtag_handle); + + // Fill input structure + in.obj_id = meta_id; + + if (is_cont) + in.hash_value = PDC_get_hash_by_name(cont_prop->cont_info_pub->name); + else + in.hash_value = PDC_get_hash_by_name(obj_prop->obj_info_pub->name); + in.key = tag_name; + + // reuse metadata_add_tag_rpc_cb here since it only checks the return value + hg_ret = HG_Forward(metadata_del_kvtag_handle, metadata_add_tag_rpc_cb /*reuse*/, &lookup_args, &in); + if (hg_ret != HG_SUCCESS) + PGOTO_ERROR(FAIL, "PDC_Client_del_kvtag_metadata_with_name(): Could not start HG_Forward()"); + + // Wait for response from server + work_todo_g = 1; + PDC_Client_check_response(&send_context_g); + + if (lookup_args.ret != 1) + printf("PDC_CLIENT: del kvtag NOT successful ... 
ret_value = %d\n", lookup_args.ret); + +done: + fflush(stdout); + HG_Destroy(metadata_del_kvtag_handle); + + FUNC_LEAVE(ret_value); +} + /* - -------------------------------- */ /* New Simple Object Access Interface */ /* - -------------------------------- */ @@ -7484,7 +7502,6 @@ PDCcont_put(const char *cont_name, pdcid_t pdc) } pdcid_t - PDCcont_get_id(const char *cont_name, pdcid_t pdc_id) { pdcid_t cont_id; @@ -7634,9 +7651,9 @@ PDCcont_del_tag(pdcid_t cont_id, char *tag_name) FUNC_ENTER(NULL); - ret_value = PDCobj_del_tag(cont_id, tag_name); + ret_value = PDCtag_delete(cont_id, tag_name, 1); if (ret_value != SUCCEED) - PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: error with PDCobj_del_tag", pdc_client_mpi_rank_g); + PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: error with PDCtag_delete", pdc_client_mpi_rank_g); done: fflush(stdout); @@ -7832,7 +7849,7 @@ PDCobj_del_tag(pdcid_t obj_id, char *tag_name) FUNC_ENTER(NULL); - ret_value = PDCtag_delete(obj_id, tag_name); + ret_value = PDCtag_delete(obj_id, tag_name, 0); if (ret_value != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: Error with PDC_del_kvtag", pdc_client_mpi_rank_g); diff --git a/src/api/pdc_obj/include/pdc_prop.h b/src/api/pdc_obj/include/pdc_prop.h index 37ff599eb..0459e0283 100644 --- a/src/api/pdc_obj/include/pdc_prop.h +++ b/src/api/pdc_obj/include/pdc_prop.h @@ -108,16 +108,6 @@ struct pdc_obj_prop *PDCobj_prop_get_info(pdcid_t prop_id); */ perr_t PDCprop_update(pdcid_t obj_id, pdcid_t prop_id); -/** - * Delete a tag with a specific name and value - * - * \param obj_id[IN] Object ID - * \param tag_name [IN] Metadta field name - * - * \return Non-negative on success/Negative on failure - */ -perr_t PDCtag_delete(pdcid_t obj_id, char *tag_name); - /** * ********** * diff --git a/src/server/pdc_server_metadata.c b/src/server/pdc_server_metadata.c index 2331df3b1..6997adf8c 100644 --- a/src/server/pdc_server_metadata.c +++ b/src/server/pdc_server_metadata.c @@ -2778,7 +2778,8 @@ PDC_Server_del_kvtag(metadata_get_kvtag_in_t *in, metadata_add_tag_out_t *out) #ifdef ENABLE_MULTITHREAD int unlocked; #endif - pdc_hash_table_entry_head *lookup_value; + pdc_hash_table_entry_head * lookup_value; + pdc_cont_hash_table_entry_t *cont_lookup_value; FUNC_ENTER(NULL); @@ -2794,62 +2795,63 @@ PDC_Server_del_kvtag(metadata_get_kvtag_in_t *in, metadata_add_tag_out_t *out) #ifdef ENABLE_MULTITHREAD // Obtain lock for hash table - unlocked = 0; hg_thread_mutex_lock(&pdc_metadata_hash_table_mutex_g); #endif + // Look obj tags first lookup_value = hash_table_lookup(metadata_hash_table_g, &hash_key); if (lookup_value != NULL) { pdc_metadata_t *target; target = find_metadata_by_id_from_list(lookup_value->metadata, obj_id); if (target != NULL) { - PDC_del_kvtag_value_from_list(&target->kvtag_list_head, in->key); - out->ret = 1; + ret_value = PDC_del_kvtag_value_from_list(&target->kvtag_list_head, in->key); + out->ret = 1; } else { ret_value = FAIL; out->ret = -1; + printf("==PDC_SERVER[%d]: %s - failed to find requested kvtag [%s]\n", pdc_server_rank_g, + __func__, in->key); + goto done; } } else { - ret_value = FAIL; - out->ret = -1; - } - - if (ret_value != SUCCEED) { - printf("==PDC_SERVER[%d]: %s - error \n", pdc_server_rank_g, __func__); - goto done; + cont_lookup_value = hash_table_lookup(container_hash_table_g, &hash_key); + if (cont_lookup_value != NULL) { + PDC_del_kvtag_value_from_list(&cont_lookup_value->kvtag_list_head, in->key); + out->ret = 1; + } + else { + ret_value = FAIL; + out->ret = -1; + printf("==PDC_SERVER[%d]: %s - failed to find requested 
kvtag [%s]\n", pdc_server_rank_g, + __func__, in->key); + goto done; + } } +done: #ifdef ENABLE_MULTITHREAD - // ^ Release hash table lock hg_thread_mutex_unlock(&pdc_metadata_hash_table_mutex_g); - unlocked = 1; #endif #ifdef ENABLE_TIMING // Timing gettimeofday(&pdc_timer_end, 0); ht_total_sec = PDC_get_elapsed_time_double(&pdc_timer_start, &pdc_timer_end); -#endif #ifdef ENABLE_MULTITHREAD hg_thread_mutex_lock(&pdc_time_mutex_g); #endif -#ifdef ENABLE_TIMING server_update_time_g += ht_total_sec; -#endif #ifdef ENABLE_MULTITHREAD hg_thread_mutex_unlock(&pdc_time_mutex_g); #endif -done: -#ifdef ENABLE_MULTITHREAD - if (unlocked == 0) - hg_thread_mutex_unlock(&pdc_metadata_hash_table_mutex_g); -#endif +#endif // End ENABLE_TIMING + fflush(stdout); FUNC_LEAVE(ret_value); diff --git a/src/tests/cont_tags.c b/src/tests/cont_tags.c index caaf87b49..2f801acc4 100644 --- a/src/tests/cont_tags.c +++ b/src/tests/cont_tags.c @@ -32,7 +32,6 @@ main(int argc, char **argv) { pdcid_t pdc, cont_prop, cont, cont2; perr_t ret; - int ret_value = 0; int rank = 0, size = 1; @@ -58,7 +57,7 @@ main(int argc, char **argv) } else { printf("Fail to create container property @ line %d!\n", __LINE__); - ret_value = 1; + return -1; } // create a container cont = PDCcont_create("c1", cont_prop); @@ -67,7 +66,7 @@ main(int argc, char **argv) } else { printf("Fail to create container @ line %d!\n", __LINE__); - ret_value = 1; + return -1; } cont2 = PDCcont_create("c2", cont_prop); @@ -76,88 +75,120 @@ main(int argc, char **argv) } else { printf("Fail to create container @ line %d!\n", __LINE__); - ret_value = 1; + return -1; } ret = PDCcont_put_tag(cont, "some tag", tag_value, PDC_STRING, strlen(tag_value) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 1\n"); - ret_value = 1; + return -1; } ret = PDCcont_put_tag(cont, "some tag 2", tag_value2, PDC_STRING, strlen(tag_value2) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 1\n"); - ret_value = 1; + return -1; } ret = PDCcont_put_tag(cont2, "some tag", tag_value, PDC_STRING, strlen(tag_value) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 2\n"); - ret_value = 1; + return -1; } ret = PDCcont_put_tag(cont2, "some tag 2", tag_value2, PDC_STRING, strlen(tag_value2) + 1); if (ret != SUCCEED) { printf("Put tag failed at container 2\n"); - ret_value = 1; + return -1; } ret = PDCcont_get_tag(cont, "some tag", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 1\n"); - ret_value = 1; + return -1; } if (strcmp(tag_value, tag_value_ret) != 0) { printf("Wrong tag value at container 1, expected = [%s], get [%s]\n", tag_value, tag_value_ret); - ret_value = 1; + return -1; } ret = PDCcont_get_tag(cont, "some tag 2", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 1\n"); - ret_value = 1; + return -1; } if (strcmp(tag_value2, tag_value_ret) != 0) { printf("Wrong tag value at container 1, expected = [%s], get [%s]\n", tag_value2, tag_value_ret); - ret_value = 1; + return -1; } ret = PDCcont_get_tag(cont2, "some tag", (void **)&tag_value_ret, &value_type, &value_size); if (ret != SUCCEED) { printf("Get tag failed at container 2\n"); - ret_value = 1; + return -1; } if (strcmp(tag_value, tag_value_ret) != 0) { printf("Wrong tag value at container 2, expected = [%s], get [%s]\n", tag_value, tag_value_ret); - ret_value = 1; + return -1; } ret = PDCcont_get_tag(cont2, "some tag 2", (void **)&tag_value_ret, &value_type, 
&value_size); if (ret != SUCCEED) { printf("Get tag failed at container 2\n"); - ret_value = 1; + return -1; } if (strcmp(tag_value2, tag_value_ret) != 0) { printf("Wrong tag value at container 2, expected = [%s], get [%s]\n", tag_value2, tag_value_ret); - ret_value = 1; + return -1; + } + + ret = PDCcont_del_tag(cont2, "some tag 2"); + if (ret != SUCCEED) { + printf("Delete tag failed at container 2\n"); + return -1; + } + else { + printf("successfully deleted a tag from container c2\n"); + } + + ret = PDCcont_get_tag(cont2, "some tag 2", (void **)&tag_value_ret, &value_type, &value_size); + if (ret != SUCCEED) { + printf("Get tag failed at container 2\n"); + return -1; + } + + if (tag_value_ret != NULL || value_size != 0) { + printf("Error: got non-empty tag after deletion\n"); + return -1; + } + else { + printf("verified the tag has been deleted successfully\n"); } // close a container if (PDCcont_close(cont) < 0) { printf("fail to close container c1\n"); - ret_value = 1; + return -1; + } + else { + printf("successfully close container c1\n"); + } + // close a container + if (PDCcont_close(cont2) < 0) { + printf("fail to close container c1\n"); + return -1; } else { printf("successfully close container c1\n"); } + // close a container property if (PDCprop_close(cont_prop) < 0) { printf("Fail to close property @ line %d\n", __LINE__); - ret_value = 1; + return -1; } else { printf("successfully close container property\n"); @@ -165,10 +196,10 @@ main(int argc, char **argv) // close pdc if (PDCclose(pdc) < 0) { printf("fail to close PDC\n"); - ret_value = 1; + return -1; } #ifdef ENABLE_MPI MPI_Finalize(); #endif - return ret_value; + return 0; } diff --git a/src/tests/kvtag_add_get.c b/src/tests/kvtag_add_get.c index 7d53da6a6..ad06b506b 100644 --- a/src/tests/kvtag_add_get.c +++ b/src/tests/kvtag_add_get.c @@ -132,7 +132,7 @@ main() else printf("successfully retrieved a kvtag [%s] = [%f] from o2\n", kvtag3.name, *(double *)value3); - if (PDCtag_delete(obj1, kvtag1.name) < 0) + if (PDCobj_del_tag(obj1, kvtag1.name) < 0) printf("fail to delete a kvtag from o1\n"); else printf("successfully deleted a kvtag [%s] from o1\n", kvtag1.name); From 55a5628eb009bee71171978df8dae2d140f2fd21 Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Thu, 27 Jul 2023 10:49:22 -0700 Subject: [PATCH 208/806] Fix the server cache issue when cache becomes full and needs flush (#113) * Fix the server cache issue when cache becomes full and need flush * Committing clang-format changes --------- Co-authored-by: github-actions --- .../pdc_server_region_cache.c | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/server/pdc_server_region/pdc_server_region_cache.c b/src/server/pdc_server_region/pdc_server_region_cache.c index 92c408fa6..d2f2abe56 100644 --- a/src/server/pdc_server_region/pdc_server_region_cache.c +++ b/src/server/pdc_server_region/pdc_server_region_cache.c @@ -393,8 +393,11 @@ PDC_region_cache_register(uint64_t obj_id, int obj_ndim, const uint64_t *obj_dim struct pdc_region_info *region_cache_info; if (obj_ndim != ndim && obj_ndim > 0) { printf("PDC_region_cache_register reports obj_ndim != ndim, %d != %d\n", obj_ndim, ndim); + return FAIL; } + pthread_mutex_lock(&pdc_obj_cache_list_mutex); + obj_cache_iter = obj_cache_list; while (obj_cache_iter != NULL) { if (obj_cache_iter->obj_id == obj_id) { @@ -460,7 +463,15 @@ PDC_region_cache_register(uint64_t obj_id, int obj_ndim, const uint64_t *obj_dim memcpy(region_cache_info->buf, buf, sizeof(char) * buf_size); 
total_cache_size += buf_size; + pthread_mutex_unlock(&pdc_obj_cache_list_mutex); + if (total_cache_size > maximum_cache_size) { + int server_rank = 0; +#ifdef ENABLE_MPI + MPI_Comm_rank(MPI_COMM_WORLD, &server_rank); +#endif + printf("==PDC_SERVER[%d]: server cache full %.1f / %.1f MB, will flush to storage\n", server_rank, + total_cache_size / 1048576.0, maximum_cache_size / 1048576.0); PDC_region_cache_flush_all(); } @@ -571,11 +582,11 @@ PDC_transfer_request_data_write_out(uint64_t obj_id, int obj_ndim, const uint64_ region_cache_iter = region_cache_iter->next; } } + pthread_mutex_unlock(&pdc_obj_cache_list_mutex); if (!flag) { PDC_region_cache_register(obj_id, obj_ndim, obj_dims, buf, write_size, region_info->offset, region_info->size, region_info->ndim, unit); } - pthread_mutex_unlock(&pdc_obj_cache_list_mutex); // PDC_Server_data_write_out2(obj_id, region_info, buf, unit); #ifdef PDC_TIMING @@ -862,10 +873,9 @@ PDC_region_cache_clock_cycle(void *ptr) gettimeofday(&finish_time, NULL); elapsed_time = finish_time.tv_sec - current_time.tv_sec + (finish_time.tv_usec - current_time.tv_usec) / 1000000.0; - fprintf( - stderr, - "==PDC_SERVER[%d]: flushed %d regions from cache to storage (every %.1fs), took %.4fs\n", - server_rank, nflush, flush_frequency_s, elapsed_time); + fprintf(stderr, + "==PDC_SERVER[%d]: flushed %d regions to storage (full/every %.0fs), took %.4fs\n", + server_rank, nflush, flush_frequency_s, elapsed_time); } pthread_mutex_unlock(&pdc_obj_cache_list_mutex); } From 0cc87e7acc13d9584c47bde601e1e6d49d0664d8 Mon Sep 17 00:00:00 2001 From: Zhang Wei Date: Thu, 27 Jul 2023 13:50:41 -0400 Subject: [PATCH 209/806] Fix a wrong option description. (#115) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * remove unnecessary header files from installation * resolve conflict * add important files * clang formatting * update cmake * update option message --------- Co-authored-by: Houjun Tang --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2deac8837..715766747 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -336,7 +336,7 @@ endif() #----------------------------------------------------------------------------- # SERVER CACHE option #----------------------------------------------------------------------------- -option(PDC_SERVER_CACHE "Enable timing." OFF) +option(PDC_SERVER_CACHE "Enable Server Caching." 
OFF) if(PDC_SERVER_CACHE) set(PDC_SERVER_CACHE 1) endif() From bc245e1b3869d11fa3f30fcc4862e466803aa33a Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Mon, 31 Jul 2023 10:19:26 -0700 Subject: [PATCH 210/806] Install header files needed by PDCpy (#114) --- src/api/CMakeLists.txt | 5 ++++- src/api/pdc_obj/pdc_obj.c | 6 ++---- src/commons/CMakeLists.txt | 2 ++ src/commons/utils/include/pdc_id_pkg.h | 5 +++++ src/commons/utils/include/pdc_private.h | 5 ----- 5 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/api/CMakeLists.txt b/src/api/CMakeLists.txt index cfbb8ac21..8a6b6492e 100644 --- a/src/api/CMakeLists.txt +++ b/src/api/CMakeLists.txt @@ -157,6 +157,9 @@ set(PDC_HEADERS ${PDC_SOURCE_DIR}/src/api/pdc_obj/include/pdc_obj.h ${PDC_SOURCE_DIR}/src/api/pdc_region/include/pdc_region.h ${PDC_SOURCE_DIR}/src/api/pdc_obj/include/pdc_prop.h + ${PDC_SOURCE_DIR}/src/api/pdc_obj/include/pdc_cont_pkg.h + ${PDC_SOURCE_DIR}/src/api/pdc_obj/include/pdc_obj_pkg.h + ${PDC_SOURCE_DIR}/src/api/pdc_obj/include/pdc_prop_pkg.h ${PDC_SOURCE_DIR}/src/api/pdc_query/include/pdc_query.h ${PDC_SOURCE_DIR}/src/api/pdc_region/include/pdc_region.h ${PDC_SOURCE_DIR}/src/api/pdc_transform/include/pdc_transform.h @@ -239,4 +242,4 @@ set(PDC_INCLUDES_INSTALL_TIME ${PDC_INSTALL_INCLUDE_DIR} ${PDC_EXT_INCLUDE_DEPENDENCIES} PARENT_SCOPE -) \ No newline at end of file +) diff --git a/src/api/pdc_obj/pdc_obj.c b/src/api/pdc_obj/pdc_obj.c index f16a71275..83325ba29 100644 --- a/src/api/pdc_obj/pdc_obj.c +++ b/src/api/pdc_obj/pdc_obj.c @@ -1236,10 +1236,8 @@ PDCobj_get_info(pdcid_t obj_id) /* obj_id = PDC_find_byname(PDC_OBJ, obj_name); */ tmp = PDC_obj_get_info(obj_id); - - /* ret_value = PDC_CALLOC(struct pdc_obj_info); */ - /* if (!ret_value) */ - /* PGOTO_ERROR(NULL, "failed to allocate memory"); */ + if (NULL == tmp) + PGOTO_ERROR(NULL, "failed to allocate memory"); ret_value = tmp->obj_info_pub; diff --git a/src/commons/CMakeLists.txt b/src/commons/CMakeLists.txt index d0f83bb54..04fcd7056 100644 --- a/src/commons/CMakeLists.txt +++ b/src/commons/CMakeLists.txt @@ -59,6 +59,8 @@ message(STATUS "PDC_COMMONS_INCLUDE_DIRS: ${PDC_COMMONS_INCLUDE_DIRS}") install( FILES ${CMAKE_BINARY_DIR}/pdc_config.h + ${PDC_SOURCE_DIR}/src/commons/utils/include/pdc_id_pkg.h + ${PDC_SOURCE_DIR}/src/commons/utils/include/pdc_malloc.h DESTINATION ${PDC_INSTALL_INCLUDE_DIR} COMPONENT diff --git a/src/commons/utils/include/pdc_id_pkg.h b/src/commons/utils/include/pdc_id_pkg.h index 9623661fd..c0ac3afce 100644 --- a/src/commons/utils/include/pdc_id_pkg.h +++ b/src/commons/utils/include/pdc_id_pkg.h @@ -47,6 +47,11 @@ /* Map an atom to an ID type number */ #define PDC_TYPE(a) ((PDC_type_t)(((pdcid_t)(a) >> ID_BITS) & TYPE_MASK)) +struct _pdc_class { + char * name; + pdcid_t local_id; +}; + struct _pdc_id_info { pdcid_t id; /* ID for this info */ hg_atomic_int32_t count; /* ref. 
count for this atom */ diff --git a/src/commons/utils/include/pdc_private.h b/src/commons/utils/include/pdc_private.h index b0fa48f14..3fee1baaa 100644 --- a/src/commons/utils/include/pdc_private.h +++ b/src/commons/utils/include/pdc_private.h @@ -67,11 +67,6 @@ typedef enum { C_lang = 0, FORTRAN_lang, PYTHON_lang, JULIA_lang, N_LANGUAGES } /***************************/ /* Library Private Structs */ /***************************/ -struct _pdc_class { - char * name; - pdcid_t local_id; -}; - #ifdef __cplusplus #define ATTRIBUTE(a) #else /* __cplusplus */ From e9abc584a2b29e9351410f79ada0bc39698b8b4d Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Wed, 2 Aug 2023 14:37:45 -0700 Subject: [PATCH 211/806] Support unlimited object dimension size (#117) * Support unlimited object dimension szie * Add function description for PDC_SIZE_UNLIMITED --- src/api/pdc_obj/include/pdc_obj.h | 2 +- src/commons/include/pdc_public.h | 4 +++- src/tests/region_transfer.c | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/api/pdc_obj/include/pdc_obj.h b/src/api/pdc_obj/include/pdc_obj.h index 8ad7a285a..4ce3d6bb5 100644 --- a/src/api/pdc_obj/include/pdc_obj.h +++ b/src/api/pdc_obj/include/pdc_obj.h @@ -228,7 +228,7 @@ perr_t PDCprop_set_obj_tags(pdcid_t obj_prop, char *tags); * * \param obj_prop [IN] ID of object property, returned by PDCprop_create(PDC_OBJ_CREATE) * \param ndim [IN] Number of dimensions - * \param dims [IN] Size of each dimension + * \param dims [IN] Size of each dimension, positive value, can be PDC_SIZE_UNLIMITED * * \return Non-negative on success/Negative on failure */ diff --git a/src/commons/include/pdc_public.h b/src/commons/include/pdc_public.h index 6c135f657..cd7496723 100644 --- a/src/commons/include/pdc_public.h +++ b/src/commons/include/pdc_public.h @@ -48,7 +48,7 @@ typedef enum { PDC_PERSIST, PDC_TRANSIENT } pdc_lifetime_t; typedef enum { PDC_SERVER_DEFAULT = 0, PDC_SERVER_PER_CLIENT = 1 } pdc_server_selection_t; -typedef struct pdc_histogram_t { //????????? +typedef struct pdc_histogram_t { pdc_var_type_t dtype; int nbin; double incr; @@ -59,4 +59,6 @@ typedef struct pdc_histogram_t { //????????? 
#define SUCCEED 0 #define FAIL (-1) +#define PDC_SIZE_UNLIMITED UINT64_MAX + #endif /* PDC_PUBLIC_H */ diff --git a/src/tests/region_transfer.c b/src/tests/region_transfer.c index 67a2880f2..3a3b04547 100644 --- a/src/tests/region_transfer.c +++ b/src/tests/region_transfer.c @@ -57,7 +57,7 @@ main(int argc, char **argv) int *data = (int *)malloc(sizeof(int) * BUF_LEN); int *data_read = (int *)malloc(sizeof(int) * BUF_LEN); - dims[0] = BUF_LEN; + dims[0] = PDC_SIZE_UNLIMITED; #ifdef ENABLE_MPI MPI_Init(&argc, &argv); From 38a34fe9ba074ace1d279f9326338715396d20aa Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Wed, 2 Aug 2023 14:46:57 -0700 Subject: [PATCH 212/806] Fix obj_round_robin_io_all test code --- src/tests/obj_round_robin_io_all.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/tests/obj_round_robin_io_all.c b/src/tests/obj_round_robin_io_all.c index 05a5c0dcc..500779213 100644 --- a/src/tests/obj_round_robin_io_all.c +++ b/src/tests/obj_round_robin_io_all.c @@ -108,9 +108,9 @@ main(int argc, char **argv) my_data_size *= dims[i]; } - mydata = (char **)malloc(size * WRITE_REQ_SIZE); + mydata = (char **)malloc(2 * sizeof(char*)); mydata[0] = (char *)malloc(my_data_size * type_size); - mydata[1] = mydata[0] + my_data_size * type_size; + mydata[1] = (char *)malloc(my_data_size * type_size); offset = (uint64_t *)malloc(sizeof(uint64_t) * ndim); mysize = (uint64_t *)malloc(sizeof(uint64_t)); @@ -453,6 +453,8 @@ main(int argc, char **argv) free(obj1_list); free(obj2_list); free(data_read); + free(mydata[0]); + free(mydata[1]); free(mydata); free(offset); free(mysize); From 5e76eda840c0b36fe68b84d379428fea2ecefe7a Mon Sep 17 00:00:00 2001 From: github-actions Date: Wed, 2 Aug 2023 21:47:36 +0000 Subject: [PATCH 213/806] Committing clang-format changes --- src/tests/obj_round_robin_io_all.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/obj_round_robin_io_all.c b/src/tests/obj_round_robin_io_all.c index 500779213..4070107a7 100644 --- a/src/tests/obj_round_robin_io_all.c +++ b/src/tests/obj_round_robin_io_all.c @@ -108,7 +108,7 @@ main(int argc, char **argv) my_data_size *= dims[i]; } - mydata = (char **)malloc(2 * sizeof(char*)); + mydata = (char **)malloc(2 * sizeof(char *)); mydata[0] = (char *)malloc(my_data_size * type_size); mydata[1] = (char *)malloc(my_data_size * type_size); From cf21137a2fc6da7d11d3e6f96467cbbd88ccd87d Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Thu, 3 Aug 2023 11:28:58 -0700 Subject: [PATCH 214/806] More header files fix for PDCpy --- .../include/pdc_analysis_and_transforms_common.h | 3 ++- src/api/pdc_analysis/include/pdc_hist_pkg.h | 1 + src/api/pdc_analysis/pdc_hist_pkg.c | 1 - src/api/pdc_obj/include/pdc_obj_pkg.h | 3 +-- src/api/pdc_obj/include/pdc_prop_pkg.h | 5 +++-- src/api/pdc_transform/include/pdc_transforms_pkg.h | 2 ++ src/commons/CMakeLists.txt | 1 + src/commons/utils/include/pdc_id_pkg.h | 2 +- src/commons/utils/include/pdc_private.h | 4 ---- 9 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/api/pdc_analysis/include/pdc_analysis_and_transforms_common.h b/src/api/pdc_analysis/include/pdc_analysis_and_transforms_common.h index 082a3dd02..79ccbf3db 100644 --- a/src/api/pdc_analysis/include/pdc_analysis_and_transforms_common.h +++ b/src/api/pdc_analysis/include/pdc_analysis_and_transforms_common.h @@ -24,7 +24,8 @@ #ifndef PDC_OBJ_ANALYSIS_H #define PDC_OBJ_ANALYSIS_H -#include "pdc_private.h" +#include "pdc_prop_pkg.h" +#include "pdc_transforms_pkg.h" #include 
"mercury_proc_string.h" #include "mercury_atomic.h" #include diff --git a/src/api/pdc_analysis/include/pdc_hist_pkg.h b/src/api/pdc_analysis/include/pdc_hist_pkg.h index 3697743c3..6be68af7f 100644 --- a/src/api/pdc_analysis/include/pdc_hist_pkg.h +++ b/src/api/pdc_analysis/include/pdc_hist_pkg.h @@ -25,6 +25,7 @@ #ifndef PDC_HIST_H #define PDC_HIST_H +#include "pdc_private.h" #include "pdc_public.h" #include "math.h" #include diff --git a/src/api/pdc_analysis/pdc_hist_pkg.c b/src/api/pdc_analysis/pdc_hist_pkg.c index 47c93a94d..75343a144 100644 --- a/src/api/pdc_analysis/pdc_hist_pkg.c +++ b/src/api/pdc_analysis/pdc_hist_pkg.c @@ -1,5 +1,4 @@ #include "pdc_hist_pkg.h" -#include "pdc_private.h" #include #include diff --git a/src/api/pdc_obj/include/pdc_obj_pkg.h b/src/api/pdc_obj/include/pdc_obj_pkg.h index f08343a70..11effda8e 100644 --- a/src/api/pdc_obj/include/pdc_obj_pkg.h +++ b/src/api/pdc_obj/include/pdc_obj_pkg.h @@ -24,8 +24,7 @@ #ifndef PDC_OBJ_PKG_H #define PDC_OBJ_PKG_H - -#include "pdc_private.h" +#include "pdc_public.h" /****************************/ /* Library Private Typedefs */ diff --git a/src/api/pdc_obj/include/pdc_prop_pkg.h b/src/api/pdc_obj/include/pdc_prop_pkg.h index 52d80efa1..a97f1bfda 100644 --- a/src/api/pdc_obj/include/pdc_prop_pkg.h +++ b/src/api/pdc_obj/include/pdc_prop_pkg.h @@ -24,8 +24,7 @@ #ifndef PDC_PROP_PKG_H #define PDC_PROP_PKG_H - -#include "pdc_private.h" +#include "pdc_public.h" /*******************/ /* Private Typedefs */ @@ -43,6 +42,8 @@ typedef struct pdc_kvtag_t { void * value; } pdc_kvtag_t; +typedef enum { ROW_major, COL_major } _pdc_major_type_t; + struct _pdc_transform_state { _pdc_major_type_t storage_order; pdc_var_type_t dtype; diff --git a/src/api/pdc_transform/include/pdc_transforms_pkg.h b/src/api/pdc_transform/include/pdc_transforms_pkg.h index 3b4153152..4929f1f12 100644 --- a/src/api/pdc_transform/include/pdc_transforms_pkg.h +++ b/src/api/pdc_transform/include/pdc_transforms_pkg.h @@ -32,6 +32,8 @@ /***************************/ /* Library Private Structs */ /***************************/ +typedef enum { C_lang = 0, FORTRAN_lang, PYTHON_lang, JULIA_lang, N_LANGUAGES } _pdc_analysis_language_t; + struct _pdc_region_transform_ftn_info { pdcid_t object_id; pdcid_t region_id; diff --git a/src/commons/CMakeLists.txt b/src/commons/CMakeLists.txt index 04fcd7056..ab706bcbe 100644 --- a/src/commons/CMakeLists.txt +++ b/src/commons/CMakeLists.txt @@ -61,6 +61,7 @@ install( ${CMAKE_BINARY_DIR}/pdc_config.h ${PDC_SOURCE_DIR}/src/commons/utils/include/pdc_id_pkg.h ${PDC_SOURCE_DIR}/src/commons/utils/include/pdc_malloc.h + ${PDC_SOURCE_DIR}/src/commons/utils/include/pdc_linkedlist.h DESTINATION ${PDC_INSTALL_INCLUDE_DIR} COMPONENT diff --git a/src/commons/utils/include/pdc_id_pkg.h b/src/commons/utils/include/pdc_id_pkg.h index c0ac3afce..ba858c82d 100644 --- a/src/commons/utils/include/pdc_id_pkg.h +++ b/src/commons/utils/include/pdc_id_pkg.h @@ -25,7 +25,7 @@ #ifndef PDC_ID_PKG_H #define PDC_ID_PKG_H -#include "pdc_private.h" +#include "pdc_public.h" #include "pdc_linkedlist.h" #include "mercury_atomic.h" /* diff --git a/src/commons/utils/include/pdc_private.h b/src/commons/utils/include/pdc_private.h index 3fee1baaa..e47911fd9 100644 --- a/src/commons/utils/include/pdc_private.h +++ b/src/commons/utils/include/pdc_private.h @@ -60,10 +60,6 @@ typedef enum { PDC_Q_MATCH_GREATER_THAN /* greater than */ } _pdc_query_op_t; -typedef enum { ROW_major, COL_major } _pdc_major_type_t; - -typedef enum { C_lang = 0, FORTRAN_lang, PYTHON_lang, 
JULIA_lang, N_LANGUAGES } _pdc_analysis_language_t; - /***************************/ /* Library Private Structs */ /***************************/ From 2e8323a61cd892c4a4d5fcaff58af74e3e3468b0 Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Tue, 8 Aug 2023 15:51:41 -0700 Subject: [PATCH 215/806] Fix cmake path (#121) * Fix cmake path * Fix cmake path --- CMake/pdc-config.cmake.build.in | 2 +- CMake/pdc-config.cmake.install.in | 2 +- CMakeLists.txt | 3 +++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CMake/pdc-config.cmake.build.in b/CMake/pdc-config.cmake.build.in index e61dd4bac..920bad2a1 100644 --- a/CMake/pdc-config.cmake.build.in +++ b/CMake/pdc-config.cmake.build.in @@ -24,5 +24,5 @@ if(NOT TARGET "pdc" AND NOT PDC_INSTALL_SKIP_TARGETS) if(NOT TARGET "mercury") include(@MERCURY_DIR@/mercury-config.cmake) endif() - include(${SELF_DIR}/api/pdc-targets.cmake) + include(@PDC_INSTALL_SHARE_DIR@/pdc-targets.cmake) endif() diff --git a/CMake/pdc-config.cmake.install.in b/CMake/pdc-config.cmake.install.in index 2e8bba491..e3ead7e94 100644 --- a/CMake/pdc-config.cmake.install.in +++ b/CMake/pdc-config.cmake.install.in @@ -24,5 +24,5 @@ if(NOT TARGET "pdc" AND NOT PDC_INSTALL_SKIP_TARGETS) if(NOT TARGET "mercury") include(@MERCURY_DIR@/mercury-config.cmake) endif() - include(${SELF_DIR}/pdc-targets.cmake) + include(@PDC_INSTALL_SHARE_DIR@/pdc-targets.cmake) endif() diff --git a/CMakeLists.txt b/CMakeLists.txt index 715766747..315305b9b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -81,6 +81,9 @@ endif() if(NOT PDC_INSTALL_DATA_DIR) set(PDC_INSTALL_DATA_DIR ${CMAKE_INSTALL_PREFIX}/share) endif() +if(NOT PDC_INSTALL_SHARE_DIR) + set(PDC_INSTALL_SHARE_DIR ${CMAKE_INSTALL_PREFIX}/share/cmake/pdc) +endif() # Setting this ensures that "make install" will leave rpaths to external # libraries intact on "make install". 
This ensures that one can install a From c5c7b91556f54445da4e03bb636236bcf3533f60 Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Tue, 15 Aug 2023 10:59:16 -0700 Subject: [PATCH 216/806] Kvtag query (#122) * Add a new collective kvtag query api to return full/aggregated results to all clients * Committing clang-format changes * Add test code * Committing clang-format changes * Add an optimization when multiple clients issue different queries * Add test program * Fix free issue * Committing clang-format changes --------- Co-authored-by: github-actions --- src/api/include/pdc_client_connect.h | 23 +- src/api/pdc_client_connect.c | 139 ++++++++++-- .../pdc_server_region_cache.c | 7 + src/tests/CMakeLists.txt | 1 + src/tests/kvtag_query_mpi.c | 207 ++++++++++++++++++ 5 files changed, 355 insertions(+), 22 deletions(-) create mode 100644 src/tests/kvtag_query_mpi.c diff --git a/src/api/include/pdc_client_connect.h b/src/api/include/pdc_client_connect.h index 933f2a2e8..3eb109714 100644 --- a/src/api/include/pdc_client_connect.h +++ b/src/api/include/pdc_client_connect.h @@ -595,16 +595,31 @@ perr_t PDC_Client_create_cont_id_mpi(const char *cont_name, pdcid_t cont_create_ perr_t PDC_Client_query_kvtag(const pdc_kvtag_t *kvtag, int *n_res, uint64_t **pdc_ids); /** - * Client sends query requests to server (used by MPI mode) + * Client sends query requests to server (used by MPI mode), each client gets a subset of the + * queried results * - * \param kvtag [IN] ********* - * \param n_res [IN] ********** - * \param pdc_ids [OUT] ********* + * \param kvtag [IN] kvtag + * \param n_res [OUT] number of hits + * \param pdc_ids [OUT] object ids of hits, unordered * * \return Non-negative on success/Negative on failure */ perr_t PDC_Client_query_kvtag_col(const pdc_kvtag_t *kvtag, int *n_res, uint64_t **pdc_ids); +#ifdef ENABLE_MPI +/** + * Client sends query requests to server (used by MPI mode), all clients get the same aggregated + * query results, currently assumes MPI_COMM_WORLD + * + * \param kvtag [IN] kvtag + * \param n_res [OUT] number of hits + * \param pdc_ids [OUT] object ids of hits, unordered + * + * \return Non-negative on success/Negative on failure + */ +perr_t PDC_Client_query_kvtag_mpi(const pdc_kvtag_t *kvtag, int *n_res, uint64_t **pdc_ids, MPI_Comm comm); +#endif + /** * Client sends query requests to server (used by MPI mode) * diff --git a/src/api/pdc_client_connect.c b/src/api/pdc_client_connect.c index 34cef9ad8..34fc8415e 100644 --- a/src/api/pdc_client_connect.c +++ b/src/api/pdc_client_connect.c @@ -7264,8 +7264,12 @@ PDC_Client_query_kvtag_server(uint32_t server_id, const pdc_kvtag_t *kvtag, int FUNC_ENTER(NULL); - if (kvtag == NULL || n_res == NULL || out == NULL) - PGOTO_ERROR(FAIL, "==CLIENT[%d]: input is NULL!", pdc_client_mpi_rank_g); + if (kvtag == NULL) + PGOTO_ERROR(FAIL, "==CLIENT[%d]: %s - kvtag is NULL!", pdc_client_mpi_rank_g, __func__); + if (n_res == NULL) + PGOTO_ERROR(FAIL, "==CLIENT[%d]: %s - n_res is NULL!", pdc_client_mpi_rank_g, __func__); + if (out == NULL) + PGOTO_ERROR(FAIL, "==CLIENT[%d]: %s - out is NULL!", pdc_client_mpi_rank_g, __func__); if (kvtag->name == NULL) in.name = " "; @@ -7307,7 +7311,8 @@ PDC_Client_query_kvtag_server(uint32_t server_id, const pdc_kvtag_t *kvtag, int PDC_Client_check_bulk(send_context_g); *n_res = bulk_arg->n_meta; - *out = bulk_arg->obj_ids; + if (*n_res > 0) + *out = bulk_arg->obj_ids; free(bulk_arg); // TODO: need to be careful when freeing the lookup_args, as it include the results returned to user @@ -7320,18 
+7325,21 @@ PDC_Client_query_kvtag_server(uint32_t server_id, const pdc_kvtag_t *kvtag, int perr_t PDC_Client_query_kvtag(const pdc_kvtag_t *kvtag, int *n_res, uint64_t **pdc_ids) { - perr_t ret_value = SUCCEED; - int32_t i; - int nmeta = 0; + perr_t ret_value = SUCCEED; + int i, nmeta = 0; + uint32_t server_id; FUNC_ENTER(NULL); *n_res = 0; for (i = 0; i < pdc_server_num_g; i++) { - ret_value = PDC_Client_query_kvtag_server((uint32_t)i, kvtag, &nmeta, pdc_ids); + // when there are multiple clients issuing different queries concurrently, try to balance the + // server workload by having different clients sending queries with a different order + server_id = (pdc_client_mpi_rank_g + i) % pdc_server_num_g; + ret_value = PDC_Client_query_kvtag_server(server_id, kvtag, &nmeta, pdc_ids); if (ret_value != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: error with PDC_Client_query_kvtag_server to server %d", - pdc_client_mpi_rank_g, i); + pdc_client_mpi_rank_g, server_id); } *n_res = nmeta; @@ -7365,14 +7373,15 @@ PDC_assign_server(uint32_t *my_server_start, uint32_t *my_server_end, uint32_t * FUNC_LEAVE_VOID; } -// All clients collectively query all servers +// All clients collectively query all servers, each client gets partial results perr_t PDC_Client_query_kvtag_col(const pdc_kvtag_t *kvtag, int *n_res, uint64_t **pdc_ids) { - perr_t ret_value = SUCCEED; - int32_t my_server_start, my_server_end, my_server_count; - int32_t i; - int nmeta = 0; + perr_t ret_value = SUCCEED; + int32_t my_server_start, my_server_end, my_server_count; + int32_t i; + int nmeta = 0; + uint64_t *temp_ids = NULL; FUNC_ENTER(NULL); @@ -7392,23 +7401,117 @@ PDC_Client_query_kvtag_col(const pdc_kvtag_t *kvtag, int *n_res, uint64_t **pdc_ } } - *n_res = 0; + *n_res = 0; + *pdc_ids = NULL; for (i = my_server_start; i < my_server_end; i++) { - if (i >= pdc_server_num_g) { + if (i >= pdc_server_num_g) break; - } - ret_value = PDC_Client_query_kvtag_server((uint32_t)i, kvtag, &nmeta, pdc_ids); + + ret_value = PDC_Client_query_kvtag_server((uint32_t)i, kvtag, &nmeta, &temp_ids); if (ret_value != SUCCEED) PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: error with PDC_Client_query_kvtag_server to server %u", pdc_client_mpi_rank_g, i); + if (i == my_server_start) + *pdc_ids = temp_ids; + else { + *pdc_ids = (uint64_t *)realloc(*pdc_ids, sizeof(uint64_t) * (*n_res + nmeta)); + memcpy(*pdc_ids + (*n_res) * sizeof(uint64_t), temp_ids, nmeta * sizeof(uint64_t)); + if (temp_ids) + free(temp_ids); + } + *n_res = *n_res + nmeta; } - *n_res = nmeta; +done: + fflush(stdout); + FUNC_LEAVE(ret_value); +} + +#ifdef ENABLE_MPI +// All clients collectively query all servers, all clients get all results +perr_t +PDC_Client_query_kvtag_mpi(const pdc_kvtag_t *kvtag, int *n_res, uint64_t **pdc_ids, MPI_Comm comm) +{ + perr_t ret_value = SUCCEED; + int32_t my_server_start, my_server_end, my_server_count; + int32_t i; + int nmeta = 0, *all_nmeta = NULL, ntotal = 0, *disp = NULL; + uint64_t *temp_ids = NULL; + + FUNC_ENTER(NULL); + + if (pdc_server_num_g > pdc_client_mpi_size_g) { + my_server_count = pdc_server_num_g / pdc_client_mpi_size_g; + my_server_start = pdc_client_mpi_rank_g * my_server_count; + my_server_end = my_server_start + my_server_count; + if (pdc_client_mpi_rank_g == pdc_client_mpi_size_g - 1) { + my_server_end += pdc_server_num_g % pdc_client_mpi_size_g; + } + } + else { + my_server_start = pdc_client_mpi_rank_g; + my_server_end = my_server_start + 1; + if (pdc_client_mpi_rank_g >= pdc_server_num_g) { + my_server_end = 0; + } + } + + *n_res = 0; 
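+    /* Each rank queries only the servers assigned to it in the block above,
+     * accumulating the per-server hits into pdc_ids; the per-rank counts and
+     * IDs are then combined with MPI_Allgather / MPI_Allgatherv so that every
+     * rank ends up with the complete, unordered list of matching object IDs. */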
+ *pdc_ids = NULL; + for (i = my_server_start; i < my_server_end; i++) { + if (i >= pdc_server_num_g) + break; + + /* printf("==PDC_CLIENT[%d]: querying server %u\n", pdc_client_mpi_rank_g, i); */ + + ret_value = PDC_Client_query_kvtag_server((uint32_t)i, kvtag, &nmeta, &temp_ids); + if (ret_value != SUCCEED) + PGOTO_ERROR(FAIL, "==PDC_CLIENT[%d]: error in %s querying server %u", pdc_client_mpi_rank_g, + __func__, i); + if (i == my_server_start) + *pdc_ids = temp_ids; + else if (nmeta > 0) { + *pdc_ids = (uint64_t *)realloc(*pdc_ids, sizeof(uint64_t) * (*n_res + nmeta)); + memcpy(*pdc_ids + (*n_res) * sizeof(uint64_t), temp_ids, nmeta * sizeof(uint64_t)); + free(temp_ids); + } + *n_res = *n_res + nmeta; + /* printf("==PDC_CLIENT[%d]: server %u returned %d res \n", pdc_client_mpi_rank_g, i, *n_res); */ + } + + if (pdc_client_mpi_size_g == 1) + goto done; + + all_nmeta = (int *)malloc(pdc_client_mpi_size_g * sizeof(int)); + disp = (int *)malloc(pdc_client_mpi_size_g * sizeof(int)); + MPI_Allgather(n_res, 1, MPI_INT, all_nmeta, 1, MPI_INT, comm); + for (i = 0; i < pdc_client_mpi_size_g; i++) { + ntotal += all_nmeta[i]; + if (i == 0) + disp[i] = 0; + else + disp[i] = disp[i - 1] + all_nmeta[i]; + } + + /* printf("==PDC_CLIENT[%d]: after allgather \n", pdc_client_mpi_rank_g); */ + + temp_ids = (uint64_t *)malloc(ntotal * sizeof(uint64_t)); + MPI_Allgatherv(pdc_ids, *n_res, MPI_UINT64_T, temp_ids, all_nmeta, disp, MPI_UINT64_T, comm); + + /* printf("==PDC_CLIENT[%d]: after allgatherv\n", pdc_client_mpi_rank_g); */ + + free(all_nmeta); + free(disp); + if (*n_res > 0) + free(*pdc_ids); + *pdc_ids = temp_ids; + *n_res = ntotal; done: fflush(stdout); FUNC_LEAVE(ret_value); } +#endif // Delete a tag specified by a name, and whether it is from a container or an object static perr_t diff --git a/src/server/pdc_server_region/pdc_server_region_cache.c b/src/server/pdc_server_region/pdc_server_region_cache.c index d2f2abe56..0054f9485 100644 --- a/src/server/pdc_server_region/pdc_server_region_cache.c +++ b/src/server/pdc_server_region/pdc_server_region_cache.c @@ -679,6 +679,7 @@ PDC_region_cache_flush_by_pointer(uint64_t obj_id, pdc_obj_cache *obj_cache) char ** buf, **new_buf, *buf_ptr = NULL; uint64_t * start, *end, *new_start, *new_end; int merged_request_size = 0; + int server_rank = 0; uint64_t unit; struct pdc_region_info **obj_regions; #ifdef PDC_TIMING @@ -751,6 +752,9 @@ PDC_region_cache_flush_by_pointer(uint64_t obj_id, pdc_obj_cache *obj_cache) nflush += merged_request_size; } +#ifdef ENABLE_MPI + MPI_Comm_rank(MPI_COMM_WORLD, &server_rank); +#endif // Iterate through all cache regions and use POSIX I/O to write them back to file system. 
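    // server_rank (0 unless MPI is enabled above) is used only to label the
    // per-region flush diagnostics printed inside the loop below.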
region_cache_iter = obj_cache->region_cache; while (region_cache_iter != NULL) { @@ -764,6 +768,9 @@ PDC_region_cache_flush_by_pointer(uint64_t obj_id, pdc_obj_cache *obj_cache) if (obj_cache->ndim >= 3) write_size *= region_cache_info->size[2]; + printf("==PDC_SERVER[%d]: server flushed %.1f / %.1f MB to storage\n", server_rank, + write_size / 1048576.0, total_cache_size / 1048576.0); + total_cache_size -= write_size; free(region_cache_info->offset); if (obj_cache->ndim > 1) { diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index 2fd2d0839..ca1856b3f 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt @@ -67,6 +67,7 @@ set(PROGRAMS kvtag_add_get_scale # kvtag_query kvtag_query_scale + kvtag_query_mpi # obj_transformation region_transfer_query region_transfer diff --git a/src/tests/kvtag_query_mpi.c b/src/tests/kvtag_query_mpi.c new file mode 100644 index 000000000..22c619c97 --- /dev/null +++ b/src/tests/kvtag_query_mpi.c @@ -0,0 +1,207 @@ +/* + * Copyright Notice for + * Proactive Data Containers (PDC) Software Library and Utilities + * ----------------------------------------------------------------------------- + + *** Copyright Notice *** + + * Proactive Data Containers (PDC) Copyright (c) 2017, The Regents of the + * University of California, through Lawrence Berkeley National Laboratory, + * UChicago Argonne, LLC, operator of Argonne National Laboratory, and The HDF + * Group (subject to receipt of any required approvals from the U.S. Dept. of + * Energy). All rights reserved. + + * If you have questions about your rights to use or distribute this software, + * please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov. + + * NOTICE. This Software was developed under funding from the U.S. Department of + * Energy and the U.S. Government consequently retains certain rights. As such, the + * U.S. Government has been granted for itself and others acting on its behalf a + * paid-up, nonexclusive, irrevocable, worldwide license in the Software to + * reproduce, distribute copies to the public, prepare derivative works, and + * perform publicly and display publicly, and to permit other to do so. 
+ */ + +#include +#include +#include +#include +#include +#include "pdc.h" +#include "pdc_client_connect.h" + +int +assign_work_to_rank(int rank, int size, int nwork, int *my_count, int *my_start) +{ + if (rank > size || my_count == NULL || my_start == NULL) { + printf("assign_work_to_rank(): Error with input!\n"); + return -1; + } + if (nwork < size) { + if (rank < nwork) + *my_count = 1; + else + *my_count = 0; + (*my_start) = rank * (*my_count); + } + else { + (*my_count) = nwork / size; + (*my_start) = rank * (*my_count); + + // Last few ranks may have extra work + if (rank >= size - nwork % size) { + (*my_count)++; + (*my_start) += (rank - (size - nwork % size)); + } + } + + return 1; +} + +void +print_usage(char *name) +{ + printf("%s n_obj n_query\n", name); +} + +int +main(int argc, char *argv[]) +{ + pdcid_t pdc, cont_prop, cont, obj_prop; + pdcid_t * obj_ids; + int n_obj, n_add_tag, my_obj, my_obj_s, my_add_tag, my_add_tag_s; + int proc_num, my_rank, i, v, iter, round; + char obj_name[128]; + double stime, total_time; + pdc_kvtag_t kvtag; + uint64_t * pdc_ids; + int nres, ntotal; + +#ifdef ENABLE_MPI + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &proc_num); + MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); +#endif + + if (argc < 3) { + if (my_rank == 0) + print_usage(argv[0]); + goto done; + } + n_obj = atoi(argv[1]); + round = atoi(argv[2]); + n_add_tag = n_obj / 100; + + // create a pdc + pdc = PDCinit("pdc"); + + // create a container property + cont_prop = PDCprop_create(PDC_CONT_CREATE, pdc); + if (cont_prop <= 0) + printf("Fail to create container property @ line %d!\n", __LINE__); + + // create a container + cont = PDCcont_create("c1", cont_prop); + if (cont <= 0) + printf("Fail to create container @ line %d!\n", __LINE__); + + // create an object property + obj_prop = PDCprop_create(PDC_OBJ_CREATE, pdc); + if (obj_prop <= 0) + printf("Fail to create object property @ line %d!\n", __LINE__); + + // Create a number of objects, add at least one tag to that object + assign_work_to_rank(my_rank, proc_num, n_obj, &my_obj, &my_obj_s); + if (my_rank == 0) + printf("I will create %d obj\n", my_obj); + obj_ids = (pdcid_t *)calloc(my_obj, sizeof(pdcid_t)); + for (i = 0; i < my_obj; i++) { + sprintf(obj_name, "obj%d", my_obj_s + i); + obj_ids[i] = PDCobj_create(cont, obj_name, obj_prop); + if (obj_ids[i] <= 0) { + printf("Fail to create object @ line %d!\n", __LINE__); + goto done; + } + } + + if (my_rank == 0) + printf("Created %d objects\n", n_obj); + fflush(stdout); + + // Add tags + kvtag.name = "Group"; + kvtag.value = (void *)&v; + kvtag.type = PDC_INT; + kvtag.size = sizeof(int); + + for (iter = 0; iter < round; iter++) { + assign_work_to_rank(my_rank, proc_num, n_add_tag, &my_add_tag, &my_add_tag_s); + + v = iter; + for (i = 0; i < my_add_tag; i++) { + if (PDCobj_put_tag(obj_ids[i], kvtag.name, kvtag.value, kvtag.type, kvtag.size) < 0) { + printf("fail to add a kvtag to o%d\n", i + my_obj_s); + goto done; + } + } + + if (my_rank == 0) + printf("Rank %d: Added a kvtag to %d objects\n", my_rank, my_add_tag); + fflush(stdout); + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); +#endif + n_add_tag *= 2; + } + + n_add_tag = n_obj / 100; + + for (iter = 0; iter < round; iter++) { + v = iter; + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + stime = MPI_Wtime(); +#endif + + if (PDC_Client_query_kvtag_mpi(&kvtag, &nres, &pdc_ids, MPI_COMM_WORLD) < 0) { + printf("fail to query kvtag [%s] with rank %d\n", kvtag.name, my_rank); + break; + } + + if (nres != n_add_tag) + 
printf("Rank %d: query result %d doesn't match expected %d\n", my_rank, nres, n_add_tag); + +#ifdef ENABLE_MPI + MPI_Barrier(MPI_COMM_WORLD); + total_time = MPI_Wtime() - stime; +#endif + if (my_rank == 0) + printf("Total time to query %d objects with tag: %.5e\n", nres, total_time); + fflush(stdout); + n_add_tag *= 2; + } + + // close a container + if (PDCcont_close(cont) < 0) + printf("fail to close container c1\n"); + + // close an object property + if (PDCprop_close(obj_prop) < 0) + printf("Fail to close property @ line %d\n", __LINE__); + + // close a container property + if (PDCprop_close(cont_prop) < 0) + printf("Fail to close property @ line %d\n", __LINE__); + + // close pdc + if (PDCclose(pdc) < 0) + printf("fail to close PDC\n"); +done: +#ifdef ENABLE_MPI + MPI_Finalize(); +#endif + + return 0; +} From 006281d8d013b71207c25b45b6dd35435dfeb62c Mon Sep 17 00:00:00 2001 From: Houjun Tang Date: Wed, 23 Aug 2023 13:40:40 -0700 Subject: [PATCH 217/806] Rebase develop to stable (#125) * updates in documentation * update docs * trigger update * trigger update * remove extension * include docs build dependencies * update file * update metrics * Update .gitlab-ci.yml * Update .gitlab-ci.yml * update logo * Update .gitlab-ci.yml * adding recovered documentation * update text * fix typo * update CSS * increase content width * remove build docs * Refer documentation to readthedocs website (#95) * Update README.md * Update getting_started.rst * Update getting_started.rst * include updated text * fix links * fix typo * Update README.md * update output * Revert "update output" This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd. * 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. moving work assigning block downwards right before creating objects \n 4. 
everything is tested working * do while loop added, tested with 1m object and works * 1m objects test works, 10m object test fail as the original also fails * update code * add console arg for changing number of attributes per object * free allocated memory * fix query count issue * fix attr length definition * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * fix data type * fix data type * fix data type * add client side statistics * add client side statistics * fix format * clang formatter * update CMake * clang format * clang format * clang-format-10 * change file name * update llsm importer * update llsm importer * adding job scripts * adding one debugging msg * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update output for uint64_t * add scripts * update tag names * update tag names * update query startingpos * update job scripts * fix iteration count in final report * update job scripts and benckmark program * clang format * update job scripts * comment off object/container close procedure in benchmark to save node hours * change the max number of object to 1M * change the max length of attribute value * change the max length of attribute value * llsm tiff import test * llsm tiff import test * llsm tiff import test * llsm tiff import test * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update cmake and llsm_importer * update cmake and llsm_importer * close if in cmake * cmake fix tiff * cmake policy to suppress warning * add pdc include dir * update code * update code * update code * update code * update code * update code * update array generating method * update array generating method * update array generating method * update array generating method * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * fix return type * fix return type * add timing * add timing * fix output * llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader * fix vairable name * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * add scripts * add scripts * add scripts * debugging for nonMPI program * debugging for nonMPI program * debugging for nonMPI program * clang format, without PDC, everything works perfectly. 
program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created * enable MPI * enable MPI * enlarge BCase size * enlarge BCase size * enlarge BCase size * resolve bcast count * llsm data path in script * llsm data path in script * update csv reader * update csv reader * update csv reader * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * enlarge max write * update pdc * update pdc * update pdc * update pdc * update pdc_import.c * update pdc_import.c * update pdc_export.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update tools/cmake * clang format * clang format * added a tutorial for llsm_importer * added a tutorial for llsm_importer * make sure the line feed is included for string attribute * update timing for overall completion time * update formatting * Remove unnecessary fflush call Signed-off-by: Chen Wang * Fix Issue #85, server segfault when another client application with different number of ranks connects to it * Committing clang-format changes * update VPIC output timing precision (#88) * update VPIC output timing precision * update timing to make consistent * llsm_importer (#1) formatter on llsm_importer * add type for kvtag structure (#2) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * Feature/metadata type (#3) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * remove unnecessary header files from installation * resolve conflict * add important files * clang formatting * update cmake * add FindMERCURY.cmake * LLSM Importer update: new job script + new data type update on kvtags (#92) * remove unnecessary install block from CMakeLists.txt * update output * Revert "update output" This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd. * build kvtag_add_get_scale * comment off free * update code * 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. moving work assigning block downwards right before creating objects \n 4. 
everything is tested working * do while loop added, tested with 1m object and works * 1m objects test works, 10m object test fail as the original also fails * add new executable to test set * enlarge PDC_SERVER_ID_INTERVAL * update code * update console args * add p search test * add console arg for changing number of attributes per object * free allocated memory * fix query count issue * fix attr length definition * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * fix data type * fix data type * fix data type * add client side statistics * add client side statistics * fix format * clang formatter * update CMake * update CMake * update CMake * free allocated memory properly * clang format * clang format * clang-format-10 * change file name * address review comments * update llsm importer * update llsm importer * update server checkpoint intervals * update gitignore * adding job scripts * adding one debugging msg * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update output for uint64_t * add scripts * update output for uint64_t * update output for uint64_t * update output for uint64_t * update scripts * update scripts * delete debugging message * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * update tag names * update tag names * update query startingpos * update query startingpos * update job scripts * add progressive timing for kvtag_add_get_scale * fix iteration count in final report * update job scripts and benckmark program * update message format * update message format * update message format * update message format * clang format * update job scripts * comment off object/container close procedure in benchmark to save node hours * change the max number of object to 1M * change the max length of attribute value * change the max length of attribute value * llsm tiff import test * llsm tiff import test * llsm tiff import test * llsm tiff import test * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update cmake and llsm_importer * update cmake and llsm_importer * close if in cmake * cmake fix tiff * cmake policy to suppress warning * add pdc include dir * update code * update code * update code * update code * update code * update code * update array generating method * update array generating method * update array generating method * update array generating method * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * fix return type * fix return type * add timing * add timing * fix output * llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader * fix vairable name * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * add scripts * add scripts * add scripts * debugging for nonMPI program * debugging for nonMPI program * debugging for nonMPI program * clang format, without PDC, everything works perfectly. 
program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created * enable MPI * enable MPI * enlarge BCase size * enlarge BCase size * enlarge BCase size * resolve bcast count * llsm data path in script * llsm data path in script * update csv reader * update csv reader * update csv reader * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * enlarge max write * update pdc * update pdc * update pdc * update pdc * update pdc_import.c * update pdc_import.c * update pdc_export.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update tools/cmake * clang format * clang format * added a tutorial for llsm_importer * added a tutorial for llsm_importer * make sure the line feed is included for string attribute * update timing for overall completion time * update formatting * update metrics * forcibly enable openmp * adding C flags from the mex compiler * Update .gitlab-ci.yml * updated code * clang format * llsm_importer (#1) formatter on llsm_importer * add type for kvtag structure (#2) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * Feature/metadata type (#3) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * remove unnecessary header files from installation * resolve conflict * add important files * clang formatting * update cmake * update * print numWorkers * update scripts * update script * update script * formatting * update llsm_tools.c * remove unnecessary hash table init --------- Co-authored-by: Houjun Tang Co-authored-by: Jean Luca Bez * fix warnings, commenting off 'find_path' and 'find_library' for Mercury in src/commons/CMakeLists.txt (#93) * Revert "update output" This reverts commit fe1f8b44995bc0dabd3b957e1032c2da26f56fdd. * build kvtag_add_get_scale * comment off free * update code * 1. kvtag_scale_add_get added \n 2. uint64_t support for obj/tag/query count \n 3. moving work assigning block downwards right before creating objects \n 4. 
everything is tested working * do while loop added, tested with 1m object and works * 1m objects test works, 10m object test fail as the original also fails * add new executable to test set * enlarge PDC_SERVER_ID_INTERVAL * update code * update console args * add p search test * add console arg for changing number of attributes per object * free allocated memory * fix query count issue * fix attr length definition * code refactored * code refactored * code refactored * code refactored * code refactored * code refactored * fix data type * fix data type * fix data type * add client side statistics * add client side statistics * fix format * clang formatter * update CMake * update CMake * update CMake * free allocated memory properly * clang format * clang format * clang-format-10 * change file name * address review comments * update llsm importer * update llsm importer * update server checkpoint intervals * update gitignore * adding job scripts * adding one debugging msg * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update container creation to collective mode for debugging purpose * update output for uint64_t * add scripts * update output for uint64_t * update output for uint64_t * update output for uint64_t * update scripts * update scripts * delete debugging message * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * make Cmake to publish scripts directory * update tag names * update tag names * update query startingpos * update query startingpos * update job scripts * add progressive timing for kvtag_add_get_scale * fix iteration count in final report * update job scripts and benckmark program * update message format * update message format * update message format * update message format * clang format * update job scripts * comment off object/container close procedure in benchmark to save node hours * change the max number of object to 1M * change the max length of attribute value * change the max length of attribute value * llsm tiff import test * llsm tiff import test * llsm tiff import test * llsm tiff import test * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update code * update cmake and llsm_importer * update cmake and llsm_importer * close if in cmake * cmake fix tiff * cmake policy to suppress warning * add pdc include dir * update code * update code * update code * update code * update code * update code * update array generating method * update array generating method * update array generating method * update array generating method * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * update CMakeLists * fix return type * fix return type * add timing * add timing * fix output * llsm tiff importer 1st version: read csv and import tiff files to PDC, adding metadata available in CSV files and TIFF loader * fix vairable name * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * fix cmake * add scripts * add scripts * add scripts * debugging for nonMPI program * debugging for nonMPI program * debugging for nonMPI program * clang format, without PDC, everything works perfectly. 
program fails at PDC init stage where PDCprop_create(PDC_CONT_CREATE, pdc) is being created * enable MPI * enable MPI * enlarge BCase size * enlarge BCase size * enlarge BCase size * resolve bcast count * llsm data path in script * llsm data path in script * update csv reader * update csv reader * update csv reader * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * update pdc * enlarge max write * update pdc * update pdc * update pdc * update pdc * update pdc_import.c * update pdc_import.c * update pdc_export.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update pdc_import.c * update tools/cmake * clang format * clang format * added a tutorial for llsm_importer * added a tutorial for llsm_importer * make sure the line feed is included for string attribute * update timing for overall completion time * update formatting * update metrics * forcibly enable openmp * adding C flags from the mex compiler * Update .gitlab-ci.yml * updated code * clang format * llsm_importer (#1) formatter on llsm_importer * add type for kvtag structure (#2) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * Feature/metadata type (#3) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * remove unnecessary header files from installation * resolve conflict * add important files * clang formatting * update cmake * update * print numWorkers * update scripts * update script * update script * formatting * update llsm_tools.c * remove unnecessary hash table init * update script * fix some warnings * fix some warnings * update * update * fix warning * update * update * update * update * update * update * update * update * update * fix warnings * fix warnings * fix warnings * fix warnings * fix warnings * fix warnings * fix warnings * update * update * update * server address and file paths using 1024, TMP_DIR path using 1024/2, NAME_MAX for appname and objname takes 1024/2, HOSTNAME takes 1024/8, NA_INFO_STRING takes 1024/4 * update * update * update * update --------- Co-authored-by: Houjun Tang Co-authored-by: Jean Luca Bez * Update clang-format-fix.yml * Update clang-format-fix.yml * Increase the default server cache size to 32GB and flush frequency to 30s * Committing clang-format changes * update commons/CMakeLists.txt * Fix unnecessary memory allocation (#103) * Fix an issue with opening a deleted container, added test (#101) * Fix an issue with opening a deleted container, added test * Refactor the query aggregation process * Fix container tag delete error (#102) * Fix container tag delete error * Committing clang-format changes * Update tag delete function * Refactor metdata lookup process for tag deletion * Committing clang-format changes * Formatting and comment * Committing clang-format changes --------- Co-authored-by: github-actions * Fix the server cache issue when cache becomes full and needs flush (#113) * Fix the server cache issue when cache becomes full and need flush * Committing clang-format changes --------- Co-authored-by: github-actions * Fix a wrong option 
description. (#115) * upate metadata type system * update serde framework to coupe with the new data type system * replace unnecessary data types * adding type for pdc_kvtag_t, all occurances are fixed * update new commons CMake for publishing commons * commons compilation passed * compiled * remove unnecessary header files from installation * resolve conflict * add important files * clang formatting * update cmake * update option message --------- Co-authored-by: Houjun Tang * Install header files needed by PDCpy (#114) * Support unlimited object dimension size (#117) * Support unlimited object dimension szie * Add function description for PDC_SIZE_UNLIMITED * Fix obj_round_robin_io_all test code * Committing clang-format changes * More header files fix for PDCpy * Fix cmake path (#121) * Fix cmake path * Fix cmake path * Kvtag query (#122) * Add a new collective kvtag query api to return full/aggregated results to all clients * Committing clang-format changes * Add test code * Committing clang-format changes * Add an optimization when multiple clients issue different queries * Add test program * Fix free issue * Committing clang-format changes --------- Co-authored-by: github-actions * fix conflict issue * fix conflict issue --------- Signed-off-by: Chen Wang Co-authored-by: Jean Luca Bez Co-authored-by: Wei Zhang Co-authored-by: Chen Wang Co-authored-by: github-actions Co-authored-by: Wei Zhang --- .gitignore | 4 +- CMakeLists.txt | 3 + README.md | 275 +- docs/_static/css/pdc.css | 65 + docs/_static/image/pdc.png | Bin 0 -> 170557 bytes docs/build/doctrees/environment.pickle | Bin 10131 -> 0 bytes docs/build/doctrees/index.doctree | Bin 4786 -> 0 bytes docs/build/html/.buildinfo | 4 - docs/build/html/_sources/index.rst.txt | 20 - docs/build/html/_static/alabaster.css | 701 - docs/build/html/_static/basic.css | 764 -- docs/build/html/_static/custom.css | 1 - docs/build/html/_static/doctools.js | 314 - .../html/_static/documentation_options.js | 10 - docs/build/html/_static/file.png | Bin 286 -> 0 bytes docs/build/html/_static/jquery-3.4.1.js | 10598 ---------------- docs/build/html/_static/jquery.js | 2 - docs/build/html/_static/language_data.js | 297 - docs/build/html/_static/minus.png | Bin 90 -> 0 bytes docs/build/html/_static/plus.png | Bin 90 -> 0 bytes docs/build/html/_static/pygments.css | 77 - docs/build/html/_static/searchtools.js | 506 - docs/build/html/_static/underscore-1.3.1.js | 999 -- docs/build/html/_static/underscore.js | 31 - docs/build/html/genindex.html | 101 - docs/build/html/index.html | 110 - docs/build/html/objects.inv | Bin 236 -> 0 bytes docs/build/html/search.html | 111 - docs/build/html/searchindex.js | 1 - docs/requirements.txt | 2 + docs/source/Doxyfile.in | 2693 ++++ docs/source/conf.py | 61 +- docs/source/developer-notes.rst | 307 + docs/source/documentation/analysis.rst | 8 + docs/source/documentation/objects.rst | 18 + docs/source/documentation/query.rst | 5 + docs/source/documentation/regions.rst | 6 + docs/source/documentation/transformation.rst | 5 + docs/source/getting_started.rst | 247 +- docs/source/index.rst | 28 +- scripts/kvtag_add_get_benchmark/cori/clean.sh | 2 +- 41 files changed, 3372 insertions(+), 15004 deletions(-) create mode 100644 docs/_static/css/pdc.css create mode 100644 docs/_static/image/pdc.png delete mode 100644 docs/build/doctrees/environment.pickle delete mode 100644 docs/build/doctrees/index.doctree delete mode 100644 docs/build/html/.buildinfo delete mode 100644 docs/build/html/_sources/index.rst.txt delete mode 100644 
docs/build/html/_static/alabaster.css delete mode 100644 docs/build/html/_static/basic.css delete mode 100644 docs/build/html/_static/custom.css delete mode 100644 docs/build/html/_static/doctools.js delete mode 100644 docs/build/html/_static/documentation_options.js delete mode 100644 docs/build/html/_static/file.png delete mode 100644 docs/build/html/_static/jquery-3.4.1.js delete mode 100644 docs/build/html/_static/jquery.js delete mode 100644 docs/build/html/_static/language_data.js delete mode 100644 docs/build/html/_static/minus.png delete mode 100644 docs/build/html/_static/plus.png delete mode 100644 docs/build/html/_static/pygments.css delete mode 100644 docs/build/html/_static/searchtools.js delete mode 100644 docs/build/html/_static/underscore-1.3.1.js delete mode 100644 docs/build/html/_static/underscore.js delete mode 100644 docs/build/html/genindex.html delete mode 100644 docs/build/html/index.html delete mode 100644 docs/build/html/objects.inv delete mode 100644 docs/build/html/search.html delete mode 100644 docs/build/html/searchindex.js create mode 100644 docs/requirements.txt create mode 100644 docs/source/Doxyfile.in create mode 100644 docs/source/developer-notes.rst create mode 100644 docs/source/documentation/analysis.rst create mode 100644 docs/source/documentation/objects.rst create mode 100644 docs/source/documentation/query.rst create mode 100644 docs/source/documentation/regions.rst create mode 100644 docs/source/documentation/transformation.rst diff --git a/.gitignore b/.gitignore index 74675f7ef..f65ea896c 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,6 @@ src/install .vscode -build \ No newline at end of file +build + +docs/build diff --git a/CMakeLists.txt b/CMakeLists.txt index 315305b9b..7095ef7f3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -534,6 +534,9 @@ install( PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE ) +# set(ADD_EXE_PERMISSION_CMD "chmod +x ${}/test/*") +# add_custom_command(TARGET ${PROJECT_NAME} POST_INSTALL COMMAND ${add_permission_cmd}) + #install( # FILES diff --git a/README.md b/README.md index 5c5e6cbec..085893768 100644 --- a/README.md +++ b/README.md @@ -1,233 +1,48 @@ -[![linux](https://github.com/hpc-io/pdc/actions/workflows/linux.yml/badge.svg?branch=stable)](https://github.com/hpc-io/pdc/actions/workflows/linux.yml) -# Proactive Data Containers (PDC) -Proactive Data Containers (PDC) software provides an object-centric API and a runtime system with a set of data object management services. These services allow placing data in the memory and storage hierarchy, performing data movement asynchronously, and providing scalable metadata operations to find data objects. PDC revolutionizes how data is stored and accessed by using object-centric abstractions to represent data that moves in the high-performance computing (HPC) memory and storage subsystems. PDC manages extensive metadata to describe data objects to find desired data efficiently as well as to store information in the data objects. - -PDC API, data types, and developer notes are available in docs/readme.md. - -More information and publications of PDC is available at https://sdm.lbl.gov/pdc - -# Installation - -The following instructions are for installing PDC on Linux and Cray machines. -GCC version 7 or newer and a version of MPI are needed to install PDC. - -Current PDC tests have been verified with MPICH. 
To install MPICH, follow the documentation in https://www.mpich.org/static/downloads/3.4.1/mpich-3.4.1-installguide.pdf - -PDC also depends on libfabric and Mercury. We provide detailed instructions for installing libfabric, Mercury, and PDC below. -Make sure to record the environmental variables (lines that contains the "export" commands). They are needed for running PDC and to use the libraries again. - -## Preparing for Installation - -PDC relies on [`libfabric`](https://github.com/ofiwg/libfabric/) as well as [`mercury`](https://github.com/mercury-hpc/mercury). Therefore, let's **prepare the dependencies**. -### Preparing Work Space - -Before installing the dependencies and downloading the code repository, we assume there is a directory created for your installation already, e.g. `$WORK_SPACE` and now you are in `$WORK_SPACE`. - -```bash -export WORK_SPACE=/path/to/your/work/space -mkdir -p $WORK_SPACE/source -mkdir -p $WORK_SPACE/install -``` - -### Download Necessary Source Repository - -Now, let's download [`libfabric`](https://github.com/ofiwg/libfabric/), [`mercury`](https://github.com/mercury-hpc/mercury) and [`pdc`](https://github.com/hpc-io/pdc/tree/develop) into our `source` directory. - -```bash -cd $WORK_SPACE/source -git clone git@github.com:ofiwg/libfabric.git -git clone git@github.com:mercury-hpc/mercury.git -git clone git@github.com:hpc-io/pdc.git -``` - -### Prepare Directories for Artifact Installation -```bash -export LIBFABRIC_SRC_DIR=$WORK_SPACE/source/libfabric -export MERCURY_SRC_DIR=$WORK_SPACE/source/mercury -export PDC_SRC_DIR=$WORK_SPACE/source/pdc - -export LIBFABRIC_DIR=$WORK_SPACE/install/libfabric -export MERCURY_DIR=$WORK_SPACE/install/mercury -export PDC_DIR=$WORK_SPACE/install/pdc - -mkdir -p $LIBFABRIC_SRC_DIR -mkdir -p $MERCURY_SRC_DIR -mkdir -p $PDC_SRC_DIR - -mkdir -p $LIBFABRIC_DIR -mkdir -p $MERCURY_DIR -mkdir -p $PDC_DIR - -echo "export LIBFABRIC_SRC_DIR=$LIBFABRIC_SRC_DIR" > $WORK_SPACE/pdc_env.sh -echo "export MERCURY_SRC_DIR=$MERCURY_SRC_DIR" >> $WORK_SPACE/pdc_env.sh -echo "export PDC_SRC_DIR=$PDC_SRC_DIR" >> $WORK_SPACE/pdc_env.sh - -echo "export LIBFABRIC_DIR=$LIBFABRIC_DIR" >> $WORK_SPACE/pdc_env.sh -echo "export MERCURY_DIR=$MERCURY_DIR" >> $WORK_SPACE/pdc_env.sh -echo "export PDC_DIR=$PDC_DIR" >> $WORK_SPACE/pdc_env.sh -``` - -Remember, from now on, at any time, you can simply run the following to set the above environment variables so that you can run any of the following command for your installation. - -```bash -export WORK_SPACE=/path/to/your/work/space -source $WORK_SPACE/pdc_env.sh -``` - -### Compile and Install`libfabric` - -Check out tag `v1.11.2` for `libfabric`: - -```bash -cd $LIBFABRIC_SRC_DIR -git checkout tags/v1.11.2 -``` - -Configure, compile and install: - -```bash -./autogen.sh -./configure --prefix=$LIBFABRIC_DIR CC=cc CFLAG="-O2" - -make -j 32 -make install - -export LD_LIBRARY_PATH="$LIBFABRIC_DIR/lib:$LD_LIBRARY_PATH" -export PATH="$LIBFABRIC_DIR/include:$LIBFABRIC_DIR/lib:$PATH" - -echo 'export LD_LIBRARY_PATH=$LIBFABRIC_DIR/lib:$LD_LIBRARY_PATH' >> $WORK_SPACE/pdc_env.sh -echo 'export PATH=$LIBFABRIC_DIR/include:$LIBFABRIC_DIR/lib:$PATH' >> $WORK_SPACE/pdc_env.sh -``` - -Note: On NERSC supercomputers, e.g. Cori and Perlmutter, we should add `--disable-efa --disable-sockets` to the `./configure` command during the compilation on login nodes. 
- -### Compile and Install `mercury` - -Now, you may check out a specific tag version of `mercury`, for example, `v2.2.0`: - -```bash -cd $MERCURY_SRC_DIR -mkdir build -git checkout tags/v2.2.0 -git submodule update --init -``` - -Configure, compile, test and install: - -```bash -cd build -cmake ../ -DCMAKE_INSTALL_PREFIX=$MERCURY_DIR -DCMAKE_C_COMPILER=cc -DBUILD_SHARED_LIBS=ON -DBUILD_TESTING=ON -DNA_USE_OFI=ON -DNA_USE_SM=OFF -DNA_OFI_TESTING_PROTOCOL=tcp -make -j 32 && make install +

+ [centered PDC logo image block]

-ctest - -export LD_LIBRARY_PATH="$MERCURY_DIR/lib:$LD_LIBRARY_PATH" -export PATH="$MERCURY_DIR/include:$MERCURY_DIR/lib:$PATH" - -echo 'export LD_LIBRARY_PATH=$MERCURY_DIR/lib:$LD_LIBRARY_PATH' >> $WORK_SPACE/pdc_env.sh -echo 'export PATH=$MERCURY_DIR/include:$MERCURY_DIR/lib:$PATH' >> $WORK_SPACE/pdc_env.sh -``` - -## Compile and Install PDC -Now, it's time to compile and install PDC. - -* One can replace `mpicc` to other available MPI compilers. For example, on Cori, `cc` can be used to replace `mpicc`. -* `ctest` contains both sequential and MPI tests for the PDC settings. These can be used to perform regression tests. -* Make sure MERCURY_HOME is added to CMAKE_PREFIX_PATH or PATH. - - -```bash -cd $PDC_SRC_DIR -git checkout develop -mkdir build -cd build - -cmake ../ -DBUILD_MPI_TESTING=ON -DBUILD_SHARED_LIBS=ON -DBUILD_TESTING=ON -DCMAKE_INSTALL_PREFIX=$PDC_DIR -DPDC_ENABLE_MPI=ON -DMERCURY_DIR=$MERCURY_DIR -DCMAKE_PREFIX_PATH=$MERCURY_DIR -DCMAKE_C_COMPILER=cc -DMPI_RUN_CMD=srun -make -j 32 && make install -``` - -Let's run `ctest` now on a compute node: - -### On Cori -```bash -salloc --nodes 1 --qos interactive --time 01:00:00 --constraint haswell -``` -### On Perlmutter - -```bash -salloc --nodes 1 --qos interactive --time 01:00:00 --constraint cpu --account=mxxxx -``` - -Once you are on the compute node, you can run `ctest`. - -```bash -ctest -``` - -Note: On Cori, if you happen to see failures regarding `libibverb` validation, login to one of the compute nodes by running an interactive job and re-compile all PDC's dependencies and PDC itself. Then problem will be solved. - -If all the tests pass, you can now specify the environment variables. - -```bash -export LD_LIBRARY_PATH="$PDC_DIR/lib:$LD_LIBRARY_PATH" -export PATH="$PDC_DIR/include:$PDC_DIR/lib:$PATH" - -echo 'export LD_LIBRARY_PATH=$PDC_DIR/lib:$LD_LIBRARY_PATH' >> $WORK_SPACE/pdc_env.sh -echo 'export PATH=$PDC_DIR/include:$PDC_DIR/lib:$PATH' >> $WORK_SPACE/pdc_env.sh -``` - -## About Spack - -One can also install `PDC` with [`Spack`](https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/pdc/package.py), with which the dependencies of `PDC` can be easily managed and installed. - -```bash -git clone -c feature.manyFiles=true https://github.com/spack/spack.git -cd spack/bin -./spack install pdc -``` - -## Running PDC - -Essentially, PDC is a typical client-server application. -To run `PDC`, one needs to start the server processes first, and then the clients can be started to issue RPC requests handled by the `Mercury` RPC framework. - -We provide [`mpi_test.sh` utility](https://github.com/hpc-io/pdc/blob/develop/examples/mpi_test.sh) for running MPI tests. For example, on a regular Linux machine, you may run the following: +# Proactive Data Containers (PDC) -```bash -export JOB_RUNNER=mpiexec -cd $PDC_DIR/bin -./mpi_test.sh ./pdc_init $JOB_RUNNER 2 4 +[![linux](https://github.com/hpc-io/pdc/actions/workflows/linux.yml/badge.svg?branch=stable)](https://github.com/hpc-io/pdc/actions/workflows/linux.yml) +![GitHub release tag(latest by date)](https://img.shields.io/github/v/tag/hpc-io/pdc) +![Spack](https://img.shields.io/spack/v/pdc) +![Read the Docs](https://img.shields.io/readthedocs/pdc?logo=readthedocs&logoColor=white) + +Proactive Data Containers (PDC) software provides an object-focused data management API, a runtime system with a set of scalable data object management services, and tools for managing data objects stored in the PDC system. 
The PDC API allows efficient and transparent data movement in complex memory and storage hierarchy. The PDC runtime system performs data movement asynchronously and provides scalable metadata operations to find and manipulate data objects. PDC revolutionizes how data is managed and accessed by using object-centric abstractions to represent data that moves in the high-performance computing (HPC) memory and storage subsystems. PDC manages extensive metadata to describe data objects to find desired data efficiently as well as to store information in the data objects. + +Full documentation of PDC with installation instructions, code examples for using PDC API, and research publications are available at [pdc.readthedocs.io](https://pdc.readthedocs.io) + +More information and publications on PDC is available at https://sdm.lbl.gov/pdc + +If you use PDC in your research, please use the following citations: + +``` +@misc{byna:2017:pdc, + title = {Proactive Data Containers (PDC) v0.1}, + author = {Byna, Suren and Dong, Bin and Tang, Houjun and Koziol, Quincey and Mu, Jingqing and Soumagne, Jerome and Vishwanath, Venkat and Warren, Richard and Tessier, François}, + url = {https://www.osti.gov/servlets/purl/1772576}, + doi = {10.11578/dc.20210325.1}, + url = {https://www.osti.gov/biblio/1772576}, + year = {2017}, + month = {5}, +} + +@inproceedings{tang:2018:toward, + title = {Toward scalable and asynchronous object-centric data management for HPC}, + author = {Tang, Houjun and Byna, Suren and Tessier, Fran{\c{c}}ois and Wang, Teng and Dong, Bin and Mu, Jingqing and Koziol, Quincey and Soumagne, Jerome and Vishwanath, Venkatram and Liu, Jialin and others}, + booktitle = {2018 18th IEEE/ACM International Symposium on Cluster, Cloud and Grid Computing (CCGRID)}, + pages = {113--122}, + year = {2018}, + organization = {IEEE} +} + +@inproceedings{tang:2019:tuning, + title = {Tuning object-centric data management systems for large scale scientific applications}, + author = {Tang, Houjun and Byna, Suren and Bailey, Stephen and Lukic, Zarija and Liu, Jialin and Koziol, Quincey and Dong, Bin}, + booktitle = {2019 IEEE 26th International Conference on High Performance Computing, Data, and Analytics (HiPC)}, + pages = {103--112}, + year = {2019}, + organization = {IEEE} +} ``` - -This is test will start 2 processes for PDC servers. The client program ./pdc_init will start 4 processes. Similarly, one can run any of the client examples in `ctest`. - -Depending on the specific HPC environment where you run `PDC` , the value of `$JOB_RUNNER` variable can be changed to `srun` (for NERSC), `aprun` (for Theta), or `jsrun` for `Summit`, accordingly. - -These source code will provide some knowledge of how to use PDC. For more reference, one may check the documentation folder in this repository. - -# PDC on Cori - -If you are running `PDC` on Cori supercomputer, here are some tips you would need to follow: - -* On Cori, it is recommended to use `cc` as the default compiler when compiling PDC and its dependencies. -* When preparing compilation for `PDC` using `CMake`, it is suggested to append console argument `-DMPI_RUN_CMD=srun` so that `ctest` can be executed on Cori. -* Sometimes, it might be helpful to unload `darshan` module before the installation. 
- -* For opening an interactive job session on Cori, it is recommended to add `--gres=craynetwork:2` option to the `salloc` command: - ```bash - salloc -C haswell -N 4 -t 01:00:00 -q interactive --gres=craynetwork:2 - ``` -* To launch the PDC server and the client, add `--gres=craynetwork:1` before the executables, for example: - - * Run 4 server processes, each on one node in background: - ```bash - srun -N 4 -n 4 -c 2 --mem=25600 --cpu_bind=cores --gres=craynetwork:1 --overlap ./bin/pdc_server.exe & - ``` - - * Run 64 client processes that concurrently create 1000 objects in total: - ```bash - srun -N 4 -n 64 -c 2 --mem=25600 --cpu_bind=cores --gres=craynetwork:1 --overlap ./bin/create_obj_scale -r 1000 - ``` - - - diff --git a/docs/_static/css/pdc.css b/docs/_static/css/pdc.css new file mode 100644 index 000000000..63a7ed5d1 --- /dev/null +++ b/docs/_static/css/pdc.css @@ -0,0 +1,65 @@ +div.wy-nav-content { + max-width: 1024px; +} + +nav.wy-nav-side, div.wy-side-nav-search, +div.ethical-dark-theme .ethical-sidebar, +div.rst-versions .rst-current-version { + background: #efefef; +} + +div.wy-side-nav-search > a img.logo { + margin: 40px auto; +} + +div.wy-menu-vertical p.caption { + color: #503788; +} + +div.wy-menu-vertical a { + color: #404040; +} + +div.wy-menu-vertical a:hover { + background-color: #ffffff; +} + +.rst-content div[class^=highlight], .rst-content pre.literal-block { + border-radius: 5px; + background: #efefef; +} + +.admonition note { + border-radius: 5px; +} + +.admonition-title { + border-radius: 5px 5px 0 0; +} + +div.rst-versions span.rst-current-version { + color: #344f90; +} + +div.rst-versions span.fa { + color: #404040!important; +} + +div.ethical-dark-theme a, div.ethical-dark-theme a:visited { + color: #404040!important; +} + +div.rst-versions { + color: #404040; +} + +div.rst-content dl dt { + border-radius: 3px; + display: block; +} + +.cpp .function > dt.cpp { + background: #efe7fa; + color: #65419d; + border-left: 3px solid #65419d; +} \ No newline at end of file diff --git a/docs/_static/image/pdc.png b/docs/_static/image/pdc.png new file mode 100644 index 0000000000000000000000000000000000000000..9322a04e18c52e8b9ef564252003bf04b5017437 GIT binary patch literal 170557 zcma%hV{j!*&~CV~ZErRk+uCewTbpEK+fFvNZQHhSq7&P8a!!2Tuj>A}RrkkNHCh5P|B9s&)Q4k3cAs`@7q@{kVKtMpo{|~```>!XUk8AqB3es6c@)zXV1cU#7 z4Fm@%ZD$AwT$=wONNE-7TL`F(ed*t#Y985_TW}t_z%7>Bzash3zbNLQ7$2cKf7rui zA){S+wxrQawc1QIGFWRhGV0!M)4O$pw!Lq9%(>>8=Tx?#m}!&{VYiXLgx7qNFfk*O zJWR899>CHrH}f4QGS447&VbihcXSxh|4p%@=wtNp-k1bvq(T3$Cj4&}{eQ=-v1vW- zk?87w`~P#)u_u`0>hz|O2Kj&Yx3kkS{a^xxFfnPaWQ;~!L?3B|RfOD{@H1a>Kl$Vl zEMkqs5E9{rgM=FzE14t#qS`z>)t-@2tKRRuZ z6wbm1SmFu@(SpczyI1sdakJ9g?>@yf4wM&-+4=RsZUSVBO0B zkr<}57KFUDVBq!11M_jp<>v9`Tnic8BJfpvYAt9eF>xnPc60Y@^Oy`v6x~!mp_(Ml z`Pb<5=B~k5%1X%Lr#DZaD~;A11k1Y@;c{kN5@1~M$NG8`Oy9GaM4{c_>sLkKhQW(@r}T4uAl9%4#ad@V0U8LZ{dZD zzp`TN?hNphqYELU+W^)pa;4x45c;${TBB_5mqK!RxeM@V* zXDnoWfL(8Aor_j%XS*d?pIET%l+%YxJ%8 z>+_wfwBZWia{-}*X?+$Qa4liw07rsE0w&_FCXxy~07%7b;jqc0CDqs)9RCJEm)147 zD`dZtj`bZ7)0V?JWR93?-WPndp2Au(kb2uWU&XwgKND*9E4{riJ74zRIQr6#mC*3XvC@ZH{3?;3{(Y=seL-?O7l@|v$mrj);ubjU z=B^03%A~SJdS(mi5;D$wH7H7l3*6%a?@;@WWimB4ufDJ}`a{*we?tyTawT3p*Yg-vf zXh#`N@U6MbXZp5iA2K>0D=#HK)35A$Jl_LnXYRpGx({xJ9$`zDRB5v}v5=W!xk8tc ztMNphDtPmJP5rCV&jG)+5qPHgC9I{i7fkON#Zk8-zw+$p87699Xc$T*zv2ks^TmM= zBIibx6Ue88i|rFvLYvnxz(PsFSKhg!8C(C%=%1Va$->n;b$bRGr0Cl!weyKSwcwFi zQAf0X1B8t=C@Sk9WmjAXF71B7KmoAN=uc5Q@0hn{4jowYxeX?!Se`Wi^>mK^6U2)v z8kS2>Ke8o2&y%sm?FRP+M&qH_|_*}DGA0`hNLDtY+$L88#Ce7LkJF4ec 
zLO|HIg5uOb@e%7zNH5WPEw&_DY}OhsKkD=Fv+`q>fz!*rmqeJ7)0=pq|4m#twu%>C zyt{Vz;GG9rJa^&L>fwuL@_23FO^lLMUg=-P&C|<&aPjzC54_6sLX91lYMdC6c%UI3 z-{M6vs31j1ycn;Y#W*hL zU9%BWO=gTsP0+KBSxo2ccH(vy-?P3>?uS(wqVPm9-r=Q?m@y@HsUGy$KPkm_(EH6; zUl87oENq=2%ylPVqj&4;%D{FoSX!^)Rieth9SbGZd>1GwvVoRXpmx`Y6h>#IrA5=cln1w{!fsFLz@_K9A*QiVB6a z&|tuVf(Dl~mf>}<-e@nyrwU4~h*bxWkdw~ZtUMQ9%*$<9t~8<`BX!jKu|h#vk^~TG zrYK27TXv*L1O@2?)@u#?UPEI)gS2cK3pDUa3PhS@P!K{;uc5ObNMZ{4S{=n^tduJ~ zC!NA-xf7{s73=Z1nv4`M1Cp{^SZf|avR=*4Nf|Nj?7>_Dq@=GfD_M(dt`eE6ZmY#ck0 z|M^p=mj15`r?+rvU=0^f=J3M#6}))z&cA!{%*ro3NLy$6x2tZQF1~y1#5yjXE#lJI zG%gQp{LWLSHy(Hn|D=@0zDkUJSfP^4!caB`t<(uag$hG88Von-FwP5VQmTi!br!c9 zTo|tjLPu)R*1Mplr#8e3OVv1sVO~s{CO-)z099)g)AAUm>%>&uAdD0W6NSb>57@#B z(%2>z5nB0JfT7ic$wr&$L67~DQv6-!-?LH}=9DokwRuVrc0+w)93+Fe{gF~MUT;U9 zG~q>{cb0T94^IclY2TjyZ0#z;?wr z2gRy&kp?e>EWPOrg(t;xmtlWryBr8Mh`b*Gjdt%dL(Of zT8i~f5<eq*xf+pweTw2Tg+)G+jpN8S4sts! z67^G9sGcFE43)yEWVk<(y)a1;THdEYrb3O?3KB@w)YV6Q9)4=dp*=Sn4nH~byNAow zfBM`LH*uhP3`cfMqJQ7ew@-gr?Ku1;&jTCko99BGId|&*H_o5Ck27aCaq`3}UOsjQ zmrri|&hr<3`v+!Yjh&4D!fXBO*RS_yvzJd6-Z*_a^aBEO^Kjua*I!z*yxyPxvnvDJ zc=dGlTSG^``hg+eZ=JZmf9O>4AKy5gjgP&u{e>U?{&fn-tvWqME3KHMCw7}?t|o=d zv^X%;OaiH~l1_BEUF*R07Aqef7!@Im&=VW0^kPQN?OP1c(UY6xV}?ScZjM1+@54-^ z72~ZjOty%%BR1$;gD^GvV61n*AnxarKveZn*k~MeO1tYp`}C7ieDcVD2(*(jx&sJ4 zHIGP(nEm93|6ml619muCA_yr(@~8;rJ2G&P7(D#W*CDbCMlsjsLadEOUv5W`#NdC* z4KFFe_H+{Q-Z<;Kkr0WX+9LeU!!}u$LXu{`N4mYpwdhe$+L0hxCJ#_xw7Udyi+DT<{HMU6_V&(vKK#IRq zvUWxcG*e_v?3a;J2EkR5bf#S_BGh7$#$2rP5aCGXnnmI$8;v+grOr-+?;?H7Vv$CD zphn5&qgScte*vnV#I zu}&JfL%~VasF4+U)5SQ`6F8g|8kuR$G3m+rT(pN zUOu{pv&U9&zdCY0_x#95qdDx5QeI0 zo+4u_jhjqYYA{8i(bl_Rl38GH2~s?w&@@Cb+Z4fcvy&p?hOXTYbF&TF#t;m$D6~yc zSVTN6Qi`!59DSHR{Uj89Z}5-2G+q%Jdb>OTFU2z~7WcI}O5-9XF*B=KWWVzu)8T|` z|005l3_|p5qupTyI>T`F*%0pvAl&UktV2ZCO(UsH!_$}G=d(y0iXqq;gT2oQkI4Pq z=_t`mX?@63&Qc!qRrY_1`>^-W;I+jN>I@;=M+)ik(x}=n(VFgi)P40)l+{*a&x*o| zC8Zxpi5IKAM&y)c(t#BV3Ly|Cq$|}}Rt!^QZllmRg}W_U6y@~xWrL-7Q*%`YEY*$E z@Kc0aMv&%tT9Ip5PeUQOMY5sC-!z1Ls}?bCmuVbBynebgrjlyVV!fIqqnt&)Wd?;V z0~REvk{I1BS7C+UZ`4{T07kr7J%TrDrm-fWfK^+NuhHTz$z-F(LbBk#s*DtfDT>bs z31kYJduOmI#G=ATa?;~2$zzFRlifFge60qH)e}69F%jw4@TkwjPenQPkE}ySc51PI z$Ia~Vva4S?`^0OXf2K_L3#WGv{oQlYLB-LX8XT{g{udA1{(3wAv|c>1 z^1hHLmj(*G=Z>dv@p%ddDdok}S=>0D!O)2}4}MU4e^r@2Q`~vuOaYfptzh!}7Dfk_ zH-5OzNVx%1H2Rtb2X52{G1_3n?Ir^T>s=VE^+45Z!X)2MO6?etsc^l`glqJa^dbYH zoZHF6n58#6(&WHQtrHH36Bc@VdND9g+R(LzG2UW=rNN2WmU-x#0x-+0u#t3(@(928 zBiI&)uW8|Z$M$I`F{S78-ahNQuFiS*6*0JJu)T^f%xzv0hZnXEF@T+doBilhc;ITc zmDY!Zd(BAg_aoFDMEIZ|ZVJZ&ML2RG3Gcyqcu6q99@0lIU-x>DElm*?BIHC$=D;&4E>lTB6I$eYuhE@*)0|^8=$5eA3Hi+AKCBo!6E*Haiq zC`c3{lEPZ!B<@t`DK0erRTkVK>D<{liKXgE5(rpph%%8R?py0nTBLD ziS^2HY*py6Nh(>ZnZi1^Pghxy->t1*OM@2G27$7YZ=EmA)Y2#%-wbp(9(Wen z$G!M#!j1kuF_TB6w~fNvu>k*5F}O$();23#@+f?L?ow!XQ*#LR4nKVRBgiUA5M3c; zDGGBgD}Dr z29XVr&#Cqa6llEDvJq^y+(Je%MWGp^P*||sIz!RWl1>cBH;-dYp{6KPMCx?NH;`Uh z$5E`CK)PWH_Zn1KuGeFg)FGzp1}Huv3hqXO1smLUyA{OoS| zG`4pb5ZXJA&2l3)r51{i7MnF}pTx3NdHd^IH8=l{M}00n_2uYZgZ%KmiSIx4*w}ZT zeN44=yj<@(^VpE<+=;eII(lO_@lPJa==FirKf7>p`(M5K%KGIOPQLwf zKiuA}Q-#l6JDvQ(trxR@|AY27RTscSy&bRBTQMxv;5xmb5>4>WKU#0VxGaFzNF}57 z0nC(HF+`6_CHG)fLNSndFf9#Yw%I`XutF!Bg{{#}y09TgLh(r`MzwZ~QbhC$H)b2H ztV=_xOvBRc!fc0#gB5|H*$Jn@^B;_zGhh6mWBeo+U)$tM=B@~w$^?A9A@)lE{$>~K zT}~K8UJz1(Q>+v}V1uWFV$l~O9eA-wad!7akfE@+`Xca=)E4_@;Vrc_kJGqSJENSC76zBdMv<>yNZ zic2enp<9KdoHWp+$BNv5JIZNf>xQve>p-!Yf+5pkRUyVf#z~0WzSWCNr5&qs4RW$6 ztW|4h%=M%lii$+ddJ`1!8Kk5$Sg)PHM(qfSH7cxd-^=?9SS4w^Npbjkod!keZETg# zU{x}O)k-yTd$ri61DmUv#>TE`3d01}C8VC(F=T2+P}s-cRZFs=1GvQ3nZ5jt@1K;ml*> 
zIK9L4?)h@#ub-^4{>Th}A4VB`e(m7xfy{p#J)Zu~g;N{<_I&^9`^e#qGqKL=XHwt4 ze0T{rj;`TmfAP;=AIJ?o=)Nux%ZGRYU6ZMCP0X;Wa*#s!kh)>0p@7tCFe5QzM5=|l z(uQ%lh@BKDP0!WWd!VVaVw~QsjTE77iNVw0r+^6Z2*IG3C1pgQBAKXL9Hn(1Mv{)c z&HxKZ#Vq4K6z_$SSIKREI$iF3>ci>5k5BO*^oIOROIYlT!Ax4Pbp~J)sbgCm2)9LG z?WNK05Ghd;h$+pOS33FHhNa#xf}IgW*x!pSvnaOOQ6NEt+sw#xTM;-Q!nylOF|*bv zJTY#URtAxw7#H@tF{iK~)FakBcuU`9+TuucP*5mD8G7Og8p=dZ9E)uv71B$p#Z3b7 z{ii}_>Qfcaa-~iG*+R?A{}ACvmYQa;(r8AMp8oS97UxHn+-GArYK~a zCa~N{>S%ExLFZ{pPSL2;;U10urU-(lv?4>NO9)6#Zo+EC5VBHZDVfwplM$IlijcyH zM4O&{szH$Z%81qL^~0o>N#rWE6bv(R6s`Q8VQh(8SycuslknH;L;|QO63HO$kchVG zCb2=mc$4ktDI~cn9kLR(C8dZ|Pash>iWCWDMM9CPQnAi7R!K68jbea(3i*91tXCMZ zEYTvh=l1?beLj9FO5@I}pFOf`O7hG;&17HAMErPVZ0XoewdLiq@u$yx#s9be`WF8B za`SZN@7z3n{{y4wiZXd@V5lm5U9dg}Si_{8xduAfL^;%pM5XY;r{kl%dJ z;~18Ru@4g_8mt&3c}$4~giRu)tpl2xSxi;BFibq3qKK$uCa7wBBnK0;Bn@-@4Af0x zj6@Apg9$TYje>lR0uq6q7p9ZcVXpOHl0q_}6jOLDn5Fn=n(ZW!ARI~$j5KiL3LnhG zc%4i$_@KxCaW3Zmsm}!4qQBwqO^^Z>;5kTo*q^{`cNq5WIP4uU*jv05f_eBlj1-3u z#lnF!J=$!SAM-ToX^Ki*Oxab2krFYQND|QwJ3?(HczZ?m!Z3<$MkISpNRvbs`PqWf zfdV~YF;gf-W4F{gi!?Fvgwjqy31fkxo)mP`9mJx-^}UcHwEI!_+lN_l zmG&>MHi`(kDr7olu%x_3k(fk5j6*bNu^=0zC-1>hiwZuz&xi$k?Rt{P1XlS!-{7`O z3KdpK7mG?g;?<;x=5a*ma1~oL38ZRf-dj^rMH;E1 zxKyifx1ORw$`FaJ()&m)B&)1cO@SLHZHVBBBdpKgP$dS^omi=$s8!62JnHlDQ&$ds zdFbbl?XZ0Q#VY+TA1ik}epovFJrn>hg~1DpA_7}e2v(U3voa5~6pdLTyh`STp*9Gu zg73RRu(bv$C{`FLSXQNxbav23$b1glI=#sr1My6v|^)82!;>}Qb~^<`7R4R zTG9mjJz4KZu7e&k+f2535or(ewOIS1N3=_%aP?rm$4(>a#**BPWVau&dXdxHigcZe zf)hr%jn7Kw>yNtMKFpGoO$=<(5N9NlG`uwMiW#gmPa|EaDlNK8HIqadu2DFKk*6o0 zqUT;{HlfJ+`IZS}DKHrt^PFO`w9-2#*JH7L5=CAwg%-9$I$3Vgm2#i2Q*81r+*jK$ zl1eQJg(4tQ&5}Sufqznp0jj2Q^>3u_3=J9TojpH7PCbNeWw0jb7kxDXBJyz>gSl&I1jolL@ z71n2+v{e80ohQ{l(rNu!`4GynFUZEhuNP)J5Uw3Dc8yU|!)xtR;iR4nOAV z-B{>}V_E4#sxwL>8AG}&N}-x3aRjhL+DVAyN$nnNkP?@8uF~8`s+pA18~NI!?za!C zc$&?h$xFt!^9`ejH;DyypZ;QJ~lqWNd>%lJDesYM4O2UQ7!nebmol zk^A1HXst@Eq=sp%@1yvW7FPCXuwBXRYR2$pIbT<)@MeP!TO^_4J_FXu$8ncLQK)A7 z6p^(C6;{fpu|h&g@waU6yMauV8B0|>#_AEo>n5-z8%B}Dvdn#?%127C^AwLfx6M~i z{NZYaR{E&V%cr{ZKR)!tnI}|#d~AmX{g2(m;T=;rT`_^vUmnHLT~jz&75tNz%U$0- zz0>ncAJsA3JemF*7y5IF*G^|Kcq)l2C)RM|c+z#|T}y#P7KzX zq2gn*PGlVvyz&voZ*s(x*E0Kc`nm&q|0zjddnjX5Y6p?qEMt^}qke3^;YEO&^svlNEv>C)oC9Jejj82A}QO04IgXx&#B@m4dB zt(@XfIm%Nlrxy%)OcsY*W-$D=7>W+)tH`b*FKMVzkLNR;zf-(m||~M%H&u zk!oh}R)vbs3F@6_D<*>v8Is`UTED}UpeFSs4gJz>K0JBI(q znZ3qKXZFnwoPF$e&&zewed}6(0h%+*7&}q8`R1;`+EU`o2c=OAX~Hj0OnLKCJp?&NDP{E8sl-F7(ItgC8?`4j&Kk90(S z#nWQ>lTfpl^f5=zpW@g~f#`L?Msf)BL=Y5tBw9sY?nNXzMFJ@Y0-b5Z`MtZxgC)5J z>m97q?LqoL2w{>#x|M<F_EMVf|4 z9i!N)*HNq~AQB1*=_B5vVSj6|*{;Pp>0w!7<9Q&>G>sr%H;fHZ%e`8*Q9Fg@dXWc& z;v|Hq%7T@8BSl5rLQ|L4YZU9)j${h=s&3;x+b`DGQEaeawOWf`BT3w?pa|4hu(5{% zv3CYvt$9yG-mFmL>$`M#yIhNVWrJ*c1}nU#*C`@vdrjEdXT;VXBkodc*2{e5%XgU%vA5C%$BT=g^l`I9xsR>`!|CR}UqR+&&Y>EB$HQI=Y0{2a>qazl5>l ztMUhJ>%!@`|JL}q4fl=nYu|t4eD2#LXO_H4;txymWB!yzL264qL($@qQNhd`ybE$#iDx&f>YDgm*iaU*NvmU7y zJ=SF-B#3FORlk=LWTlOsf5#Zxm}0+{oEOUCiFolYA032fC& zp;$GJjjCyERFWJ>J0hR@Drvs3kIn)~s2~;L+sBY5%`B6e?o?UvR@Erpt{NgqjAN4y zoe+et@0iBd_Goc;$0W&Q9NV>nxLd2ndX)yhzSn@SOJ?x(-IMr6`3&;>Y+a0*?7hym zuj8&{{MMsBKcD*Y(pScQ<>~72s{RVolZT!#>^QN@syO+j@rsVeZ~mSCo$cGs`hNbe zul2#@#z3m<`at&229G9i<-`(h9LwX4Bgy}M?cnnJavEPfmHzzYmlj{U@%)l~^yK=} zAJpEZmvcWidSYwOrGf4Dc^*R|rWMhBs!oS#8k%XT87d)g$z;rbln47#Xl0Hn$QD|%CpqEf6>P#@pLeTR&Gd*KdlN$yaND+{apPCT{g@sgGjP| zyfKObk0sCJh)c7G^*AU-P86DL6q8xx<+gWMq|yFI-H$&``MOefC@C}G9{<~S+b6Ls zpTOPbF=QI_DAF*C6trm?{H4YjB=`_PC$sWlq)66T`6%Y=waB*_u_+^2G-Nd-eDRf7>@|Rk&Yiz}L#A*w$_2YHv`u^tik4HtyBkz|x+< z@BB`q{;@}Wjvlo9#**?se!jq;oCEbbk8C% 
z^~$OFKfLvP60h|q{+s{#)mMMG&e-8QpS?MdPhUHc#bAF9w+EJS^a2tQJCSScQht5!z6KIR!ZaE*xQcRciVCjGxbFDtadu@ny zy0OwuLZE<%SXZ0!IOcj>r6|0at{_&G6beBN`@L9^M^U6$%ym#`+9)X94iwux2({0m z&}qj!No9pba=G2d_ZAXHl=R{(Eil|}6y!mpN3qVeQ7C$yNO#AO?yzG?;X+Z7M4>~h zHwj}|VfnxPO|uW}1U<@Jb4^o7S5Zt!y9*-aD-C}^tiZ3IL5%d3Qy7q~ zp1`J95l-3`QE+n#6;k!H$Wb)XmE5+@h@kya{fSlc7WM^_rq&Pw^2R4fWqQ?&Icl#4JUMx;m`8#VgUOscQ) zyUlt7-j)sXyIXjxdIVpqBNa(WGJ;?#xxamOij59;c8=nHg%Q6d#ya*4;Wx$YtvZTW z6{&-tZ&8reC4)%Sj-#+wO)46~UFjh1>>b2gwW94Cj(^>H^Aj`Uab5a`Y<2hL-`%Xb zisg!%DDJt+KDdJ4dU66Qk4;iQZ((c46)f#kV|(Wa{%QF%1#=n;yF^?t3yM2Qb+to? zS4<+ldlX61S#B@e+B3q>#*ujPCZap^Sa@<2Ire>a59{yLB3W@2ney9k*9>A|=kWKu zUzvC=^s9f(E%rG((4 zVHaM$^O={=B>&OKsr24~XVd@igX&y8w)Et!(WZkdIJ4-G}cq%=@eEKoJi!rH8YrPhR5nGR-N$T|v@mEvP(U3-fj zwg!GD_iU>BJfE09_@9?lgL~()-2Ghye{)i!kn+%@KZP!W|(%O&%0#qPWvW zfuewmx(jj=(Nn2Umwhp}93MOG$~EzOi}VP%@fafe1aO2eL}5Nz`Q zou+6kw`fX(+=-59%*$wCzfCHPXm-g%$rqszQ+h^VP;F zyhY=lp}n}bkA}N`n#NsQT4656D2jV&(5vh;=vu5+OkuM^kF_UB8hd#>jki&d@>r#U zjEp6LS*;kQkWAz2(qU{@QIu-A-!cvE){t)YPL$|lwSEk5QsBN;ZN^)BH28Xz7I!Li zq#zPkjftW%h;Nj=!R;wR4MSM1xq&-XH*mLVkm7QcWHRx+RL#`UM}4mTcgpsjH~Ln- zH1eI*^2_YYE7&SKkLBI3rlimFiQePfMZpS3ne^rOfo=ME_ zy3Wt9p-ACK@1mgY8%4fs6sbMe;oUbw@>3yGIf$iQLllUx!1ET{J;TpN5#Red zf}}Smg*sIA#dL?JfRBICee?KNCUIns`d{=v?mXJQ!}=Guj{2W;p1!a{{ryur zbSpnjr@VAL|2JPbF~9KI(YWqm+c^7d{_)pNrN95$$z|N^&*0X{ZQMGM`jOKxZycHX z{O}83{nGIHwZFK(JxsbV^A9~HwZcSCY>a>Uk!l;pt3`Ns5(B*>HN7bj@?@&PNKx^@ zL`2q!>AN)^K65+1*4BG5BJq+$Y|zou6?vsiyzsnoQcL3qtgRN<>cuT92Q=L0Y~u{< zjXF4_4%j4isOZV*8YWrK`rfQ8x#|11w%Hq3yBr_cWQ7mA%qg>fZ@$g`_I$Gy;dUcD zB!`$XKw9v_(d#Z{whNHJ!Y!l_8tn)Lz*`qYv|#}eib9;mDbf`}tjCXB=R8u~P9%CA z6a_Qfoiyl58)?Lfc;_q)ogE3W*s#+EkKBvoK?gFO4!$=c-^cfTVG0dNqAiT9(nD(D z_N^X@7(M2OIeObcBxxXXO%$kJe#ib_X|^C;A3?Ukja433QK9BF@oQ$QkO{NE=lKW^%R)uQM|op9B=O%#wzJ(vu2#) z_A1i*u2JA_N9xP4okH~xf)ppWh5$~?CzdB~z z%$G_mj$=$m5DfGY-+NS6BXufyo%l6;-aXMytApJKlosKVFJGN;1tLhD%UHkd# zXL8N2o?4UkpD4WVw%N_&G3#rmQt#e8u}Tri|F0L0t$biOkg?};b=Obj|M}>P#cz+D zO8w5z$@x;;soOPH%t)r7<;9?_vtgoEgGq@Gla(51t6cny;;_#RRk?$&?a)_EkTe_= z7ca~t2qTTXwZQ=skz6lkTG2o`NE1%dh=oXPm;0dSMW}1BU|OcfBuPXsGnKaUh}&LG zd@Xgsq|iXk3rh%zi5Ibf535}FZKu?s{BipECs_*hE?cO{^zK5d3jsx#o~#3@YCB>w zkw(^q#YPVT9YHJ|h#=K7k8Hga5rrM0))-O>0~T7m@Uf1VYP-;##6njP847Aq46>6v z<{APBw1kmqOQJ}+2~(&89Znj?0CFUpWrY)k1`0!O1j*I_+w@|&+lM@bJ1UQ1t~J57 z-6WYMEVeqZA@g9lIf^x90-FjS(mdXEo|jaU9XYv}*6YMvpX+z;JgNUg+{(INIrR5Z zt=GP}-AZp#K81Dp4AP}Qhr(dfP}4!l)=eQMr-4^mP>|7}lQiNaWRc`)sa}Va%!#~Y zhQ^$v)}X$pRDSP@f*NiVC_x3Ez;iA}b9pMoIvu~Md` zn26hB{9H`k<@L5khiThaT};uHTv7XDSQ26IV)?W=H}0D?Y{QwEBoF+VfSS$ zSGl3v+0Z0#7uo8=?i&mdw|w~*L-8#$hjwTjme zD%T@c^E%&8!dr1;^{+he=YKVGK1k zvO;&u?P;mIt1E}D4i%Ym1VIu2iNFAo014(CkTVJ9VpX-&md2Jn_U;)uo!#-+J9F0K zS<9MT$rU2viDW)Sta+^D}43X0F#bF@%r(P3nE z695C~$Q{C{aspj>L(nD}(L={EtP~ER7a9tGKQD|9P3n|ty^ zijzSM6^_thjG?#CfT1!Y9f*OB!i1p;3v@*z1donj8OIp6)fEiG#Qg^NcYh&Y38@io zGgd~oQZa$PlF>itES~sh9VKbfOwU-04ObUV>wn~_7-7Uafw}Sl?9p}2(P@knPGP2e zUYdsyrmI`4Fv80AnLS5f6o?_X%a|@AObGL()1$BvAhvVkSf+beIWq%S_6#-&5PSI| zT)#1n*%uZt|GZ6F)5Cja9)3b8Qapz(x)guOqBIORaMp~2GkmS!=f#T%5a6?Avsj~J z2^0}9#RQddmX2r=-qJ-ZmyNLeF-GHK*f=wZZPv$AxDJM`(} z)}MD4)h#)dD){-EbMUu1PZwa%*3eOyv5-F^rEC{#YIyS*RT3skQqHR$%rdA6#VFM**W94PKNI%$GpFrx>@8C}wl+^wuJI1@EbGwY`4 zv71ED5l;JQy*MHej?&u+3Gr&cyQxMz%Iw2dq6$Z81Yr&%a{@^8mF&_DZRd%Oc1Gvv z^#o@fwsNWot!8{Zx0Q9M#}=JRsGt!$u`PI*+VssxZrcY2==@VYcH-~kz`poNFv@pHe`v^qF4nlVWS=ODEFiEUV=dt9Nz zTa>A>kXjFGO8xA|R{q+vgTHVi+Wg0_CTj6|oF2CljkuQ3fgAkc+=}WYJi6Yi{_^BM zXlmp)ukSolcVRPB^NJU>mmR3N?m+d;P1Ifserj;Fy%YI`<{Lq%u6ogV%ZJwMThQFx z|9$aUXPOo2Ts4~HLr~{;(^(kNk=KuIUQoT67WC3d3=%Sc<*8j7NlElM)ZvN_e;qZ4?%EquJIADubGLC7A`uvNN 
zVpcdsd;&)~({K~k_H(0HIlY3(vldL}o3MFyoDO4wka(Jr*O5Pi-69*7=wddCrt!F# z5Fm`5+;_8JmJpdBOf1+E3s;p|uu2HIi-!;>Ge~94&^c|CnWc5uon=-m<;}rI;H?&n z{%GMhZ1exr?fg-Xg+leu?&hc`_6kQS+{2g^%em%vU^#t&E>DMf`3TmsO<2wD!%kKY zoTn+?{LKflRM;$0VJe?dc`1dzkitzj@JK$02(JMj%MvrrgA{6a`Y>E#r9irwt%7!b zu9X&|@@E*a$KyH0d<;5*hv_136(PhjoboliJM<58*x&a?qK4Z0Pvgl={_ z>(he66b&I_!ov(Y!W07z61(Ue4A>GNks!fsk{aoTw9?ukt}G3FX~Qf-gZ)ebL(xHZ zBQgXx;5bD^M^lH0qM64fEK*ysm)d|Q$t`%qi2W$D4V&pIX@1HMA>xzu6Lj_1P3d^g zo;DEuPyJnr$4v6IXIxQLM(?SbA3Eah!54d*(5{9*hED2aBLUljy_7oar`Hl7y>Q3S zLCC})gJ!ykYHnKzUtAsA{0?E;h(lQ$_F`)AB%xL69Cs7=+jJv=7`9cs3IW1sHnoG0 zP{AVWz#JW#JC*I6(u1{B(O0F#8X+|wQ;U_PIxNNCf9)rp#Py81FJ664XSkN2*}ogF z`n|8696UOo-nDcqMf>W@$=07Z0;~2V*UvUTzaKyF^89C-6jyiA^Dpnc;`o!Qn@-%h zwThZ+E_7TAK>g-=@UxWHe$BVrbe%4S?n5MARNZk)GtxV<$6(@*-5~lk=rmdwQ5h9O zFz^Sh%NwKP7=k{11jB{XbPR-m!iv5Wx`k35!DEGmk!(K&PG8suL#_z}`D1h{%Pqxj=}d+S2c%;g(McT9mN7cIA1sTb;8n7VZ*veEmCg85gMk18?CFd|CYnX7@{p6GIfw^)fMvN{osU zv&iWX3dIoHF3Cyw^ZN*EBcuHx9MUm(QioWU4!b!9X?{gWVWQ|a!yz|t+kT3-j_!bt zhM*FixngLpxCbvXUun+;Cpy|9q|7Z!X%uh#-Ui}gN`L6*V845#ilx> zVZvgr$Rn1qM}Zf&&qBF$Ibux_M!<*31{}+uraO6(tV1MuKyn*<$*tH*t|I&z;E3r! zm=FoacVZ_+jqQw1Lav&}>c$=gHWFKjU{)2jWzE=1>x7T6*~{+6V+!(4YSW*3)85WG zQa1jR^*bIU-TkHG_{!D8*jhNE9$+i38he?w*pIsdPh1n6vDFCDW%%hNR_G9RV&1|* zR1MyhzsdU3S!Fb_-5X&~BQ(;R2-+sP4LX$AK6quF*gnyQojA60VlN(_P$QDmj%CsR zlz11*@q`3jh>P1#N4LO31L005aAOILOf}|`8t6XS{x4ms?z6eIH&4v`>{sJg;;w#K zUvVX||3USs`q+=!_U((?rB$!^zJKSdk<8C>tov62&pbHq58b^$_i@>S)~i8OUi0Go zTg$o++y34ycUspq|6<)W&rb7A_XkFZbzENmQv1yfTmP-d?`q!)T3T;l8CxKXsG zh!IsI=t#H1#E7Yf(N&v#0_`~zc#2V%VhX(?ZAmtr2qU2G)G_pAj>Al_h_ocSJS$X; zhz$i(Fl0_cMX~HEAcQKoZIMyBm1W4apie#nQ;`X3It~M&p{GOXQ;4Om^2&R^OZ#e&zkw3m7kj#bx%-C^jIsM#v<$Ietr1u*o^jT=(%WP+q-;140Ber?n?B;f0 zpD+;lwH%CcL-H_{zpyKvMsefZPLyvIkF!p5xgiM2n491R92N*&3GHMnb`Fb3t zPjDMTE{&h3=~HZam|$1%5$DB7@3Aq^zA(`j__9BXi#+ek;#%;VMJ zIFZoFY?US_9%WYIVQL$X-y}NK8L3zO-@a4zP0r}XfAo1+`>D6n4ic(=I~3J`y{HyC zvHJ)TAfj%;*mrOwAQxAIt*BOP$B7}h^;rK>HDOr`ADw}ithtePAA$7y*vKS2QmVLb zy>x48kDypfqZ^4~d(e4Ap5^zL*vbDH5G1rC6yJu)#AevJ-6kE&X7YWwJ_(!IEP`~s$U#NR!`PGkWu=}q({JF}jOE151VMSH*y5qv_uP*<+ z*}p9pJux>gI)1n1a`=Z0SN-2_yXZWqy|VLx_0O9xxZ+waFZ^}g8*Ave>_+c3H#)Cu zexiwBU8xpyXAlA@6VT_F&|0A7s|Nj<6mZ&Yqi_leBexkrFF)6$8_-V+ZeT<@kZXV@ z&qQ$_=ld3JYeGN8sU0(Zypb^n4w{aGoB#rEZ? 
zq8ZytA%$DGFhbN;X2j;X5jqJ1qHq!$IdldD$TDHERI!9$q%vL$$O*0Gx><~b~5nQ+;l%LoxB zK1T44iYaUt4ZuP1TxZ>#=LQKsD^{}xu~9n9sGG+vW%*?b-*gt6Kak+_AM0_Hw(!hq zS#OqC-u9;$8+w6yI6yLM*QzF zKsihko#lhr&L&{^J-Tv-=?(@dx?*h(Zbx_iMC8rNYKJF-AZD3IsRWeFfQXFS(0zF2 zMuZu~A7$y_%Vs@fVg@-QU|v5vVqq%&?-#dPQakY|z47~d>Ed?N01gR=Z=?_s3X!5+ zMK?gOiG5NDuoTgG---S7ZftYk{j?q&E4ri(@Q8vUnBBtH4jfYajwyVR6tT>0J-md( zURoU<6AH&kEp#JIILhe4lbi;GWHqc;6Mjo%@s)`@Uvx-}#E-Juagbh1C)S5OM&5gj z)Q{pTkB?Ft&-uS#`pDHze_D@Sh3bFtB{$W$k~_Y;71f6A=r(whYq5LsK3pkpA`tt4 zuHYU#bWf3l8n{#FsG{oV8u>S&xtmnQ^X$fc3fqC_wJwc0Q!el+U4J+wl_--3ElIvks zs6Kva@oQIhenvbm#y9`=m6PMRm0-Zt*j`+T*W!)XQJjyq;zp+S`_~gq-?{Uv+M3VW zXm8$Jdgj4J=ir?SOSpT*j{2(`KdQXBw*2GUwSLw8g_g^X^tub~_}gzeK6GA0)0NHc z&KnzOy1a_2%XW0WvB_uP;FHutC7VGfZM8bH9}RN8G7=r3v*=U|qDwvu4Wqb`oDn(& zois|TJKKgHvCM73INgR=qIML01q2W;N<;o2#&V}%Bs>fheXXJ&2HI^CBQ$m107i5B zF1~Eyf zEap#Oj4os;%LWTwg|m2!=Q4)b!a1zGFbE6Xgqv-#c6x}yZG^kTO4!(>zLC`zhpl@TFkjJ& z&75Ag2hX*j8+LKahb}Of+s-!cCJ0*@S@V60?%vrBx&VrK?f~`*`lVI>4$`!A9Tx26 zY6*t{>}Kl_=I>DmygSnj2d{%|(dk~)iQOVI_W9fPW_8iMSS4_|={SP?U2o;E?Ff*7 zoQ_A{1G{WcN{G0f*9(uV2ay6EU)DfJpx)n2>-^z(E{5yURVdnVM1cGjArZ=GfIp?4Fsj3m ztQ7~MPb9S#J6TNxL^mA~w=bzdfYbJGVvmX3PII*SCm)`atmy&7;!x}oXUOJ2c98k=EC%zrq z@s)5rOZY|Ig^yrdrpq`^yh|rojX+!_RugLxiFpg|lQr-q*AcvR1dV80x~ddHF`e5H zmJhkz%1O50mvmT3B$S_RCZM#~jA5V_Cq{J_--sGG+`sA z9!s%x-&;(+|H%T^%c*^h7f+1*;7YvyM;D*f@1IYa(OijMyIPZDN`5(c;D6ZO-FeOZ zrN;B_zpQ(G74;XFaQnhK>aK30Ahb06#BD934#Fv!-T=)35?Lah@raON(&v! zpyVtj=p@GZx0n}V${U7}Vm!bo&{#l-FajP|>S166FD4{vd74ZDh3cZLqO0~%$H2VTx!FFVhrvgZp+VS6#}Ru zSRja|Me6m^d1>;nuh_8pwJB+C$+GB-FBbDm=sFZrSX3^+ zU$jU7%{}d?e_;kbkQ{K8o=nDFp>vgc0x$8fIks_V@3$ESJtA9x(as-JnJ zGlj&U;LJB-g-{Ta^nBU9+=fwn zJ|o*SJs$FRcO)F5oRKPp+N(kQm+ z6j((A-12(Z<&50(YNd|kEh!HdBXUOFkFu&*em#6Vc7VWm7~h2L+#Y;`QMjLiy%WRF zqw5f%1KDDk6k2Oqze zcK&n6@iouA7uS6HAhCAR09rt$zb|X}UL>&r-cwE3jcLU$U6Z(lv_&9#;##m7)k&dm zM=+YMlJIpUwqifJi|(O{?x7OC=qA>&2A#)bN9!0gl{#+Xa4AGfRR~X%cwWwMwCA7jpx3d&ojYT?;B>^H? zEj%grmOk;xRwk=IcRhNz>#MB%#c18PYGXV9o^V&b{P5@Ry|VX-@?w41qatj+UESXt*~2fpx)a-&yZ==n_TcXNA3b<=Q`36h^}ZW< z`-0=kb(aHwUU}7lrt2ZJT=9Ot_D%2eKdBDJi|>7|GqoR`ya@U-^yrfhK}GAX%^HI? zu^&A$Bf8RtVBil}lQBdn=+Im)hT9IKKaJqX9ii|WrJ=ZHMrTZszj7)~7~Xol%p z^yuf)kY&b*a3DoGZZnM0tZ|r%MFK^_K%_`vIm3j|0LvMq;F&06om!MvNDmzBgSk92qV%eNCUQ|E16Ro&5Ntuf!H-|JrDi zTRHJ3bQwRi(MhbnAeQZ>dHxf;ua8RByg0+=Ch8bXF_56PU|cpa>6Rp@7 z|J8gwLiv*jWDZkcb!-E}U)qZ_6AQ&N%C(m@Kb36C3x&qd`wPbNXNr4fwn|lh=`8Al zy?{c>@7|>tW(2OmL0-F*K0T1%4_|o;cFy!;BTtJ!SufU$2=Fu=oY^8NqJdGn4&Gb? 
zqf;9~31&q2oA&Ux?@jNc*wfL`T?7jl0T=cnLg@HdrdYyuFP&{ghpLFckf!0hB{o+ez(?ZkdoGoIvh!I9Dpmr{FWH@&qdM6oOX$MUvnqH`w8H&tKQ^ChARHQ``5A#|x~HVK@bDDD8CQo#iMQdT6Y;T( zKw=#nd10{Xd&gji0_h`>P3-*2^+g<+V6{{2O1iCf`jS zEW4i2Q}Rys%$avy*hu)eeILB$_~rW-mtVZ`wdISqU$Q6PzP$5(1K&RH`rQ53*O%{K zT|m<{7am+%Mb9ht4@^+0z3lj6RZ96 z%1dE==QaOi?}f#RPkMg+`Bu6B6Lh?g)LD9%;>{Q&Nc0q)9!C9^Gy}AmM#*KgXIapb zNkQjDV#*h{u}tVFG+~JQx94`FC10!~qJ<$-O^^up&<7naVhu%dP|Wqn8bLoVDnqdu zMvCPCK_s&B4&|F*CQ$T+dZ?9RPKc-vA(Sa56cRpXhcQ$l`cm{5rSmc5jnSnHVm99b zTOK2LiuDqOd79-dD#pKO&7Yi_PPe={R@9X>m}~v=WXZz6R4Ye6Qq-n0xd;;IcGf_&6;?Qh0G(ujb#NCGLcg&{xM-wxD!k?Tr|Xc=aRkMpRbtq#NZd&GA=VrT za(lnzg2V(x9UNKhJpKS8rEKqF4Q6bVVxI+5U^AVu2-*hFybjUN?@4jM7rDM>+k7$QjvY zc1uHP1BxN3?;@aJ8HB+)|Bfj7q;){tDI?g+93mj;_88rUMc)dy3rfP=jh%vSmSccN zrhy|(jeY(Ow;B0+>4v~rqO6_FUij%= zc38&<9Z)2NK%v_?ARHfOinT=4*vYEJkpM)9V)ROMaL7@23JA@9?}gxj%mSuOeY@3)#5O| z9+8t(2**_8-6$<0&vhe2FdQb(QLtPwp>Q*)ims2_CRO9`WS5k~%Fg3>6Iec-jX2Lx zdDtj1dj`oF15(9c$W$@HQ3 z25(%nJ^R+hwQt|P;FfMr-Mg^--TUV^Zv5SCD$jfV#k~vDH652W0##SmRBf*;e4vB8 z@x1p&+od3?uiA0*njKYF>}b3Zz`d)H@AjVeed_f=e*B@&(nwI~Pc@^PBGQv$!5A&P zR^AVTY*b2F(wnP;{+yUpC?*V=`5xUzkrlm+emg`bcea@hMJzmJggV~XY&b*eIAIbO=+7oF^y>%eiA1Egt=$MLF%OoRN9?B-RvGtTP&)C|Til!`Ps(ihUO37Dl)> zgbU`emO24{{uG@DMYnVn^Jj)INBD}lApX2w*eS~MY-=Z-!d98czAGRz4lf1URX7Vf zox{f2Nd#Fhk$%L(x~)}+8SbM9iBVN-vjCl(yL3fzK9MsvxW6(<2Q-dVF^j!|QFAGQ z#{ESZq7&X%EXtW71U4{RVZ!EV7uL?%5P5!-a2duyu9fiVM&N7@1)2g|(u;Ye7WRrB z%;qt2r%PNDGvCX_LRT|V-$@|X3}4nL7U{-9`6F1!9r^xZ;rM^rI&J#*u0s7T#oq47 zGcFuun@qzW^X5@`#X-jk^kgVc7J~|rw5KQ4LpU6Lg@f}bUthO6nQ$0 z9R?Xeg2Kh1WT9w;ZQ4ahw(&b73<{g2M2UVnjTO4MZMxGf{)XIy$VRT1c`n@85FN}2 z-pd(8K&HY00VU>gY%##`h>r1eH8y#U(f|gQhFb1G~u-Tt>LwBsz+efy>{Cz5PrmT{mz@cs$9` zO6z!pd0jnD?Z=*iP9%lSO29!%{mw`^xr@J}I+ojp{j^3Yb-S3)vBmeC86uTOt2D1e zq}~WpBp>F8&s(uar}Bt`8kTjiEbY5xI ze6I~bI-Wy{=nkX&KxQ>O1WcHs9LlUDI2qw*upXIoQ1JxIsYWGK#-PsPO&d zN=Eu(7;`rqvDFmy285IOn%D~Ob9583W;}Yfi-6*}#nU~VRKXWjLkJSiu>?R=BaTz< z;84sWKiLi+!5APod?(qqCu(?%8iGyq=kUDgB-RtF;1V6>rz#=lj`%Yl@cl;Y(_Od| z2@pjs7I}PoD%&x>k3b`cPZ6qdgy|`hG#6xv^_Py5T8-)SYD~rmCsczCel{AfDg4yp z`est!uii*D{Mlt5@MdBkt|oQ;&8rzx|NE_I%OBl~?)~1KFKN-3Wd6|9A$QNOYVLk* z6%Q`2;NIm$+&#a7+SeT4ZMn4esqP=vU0hRDUD`m+O&6*ztl{>hB{W@JM}rvJ`{u@n z&OE>IO8E1=S0Z1~T-o`F#O#wB(W2C&ElZ8Qj8Sxn+f}sV+Vp8O6pTr?r?iYd`*Mb% zD$o%Ggp0xmO^yjY$paEphA5!=tYPS~tHex9K{>3 zroxEfY$I%nUM#R2Yvwqn810V}L>3-rM)@>7`eMNx9l{*{AHkGDbovid9Jx)Q1xtnF zbU|Zu9X8D5iriO|(gISO8FWPj3s_Sc;GlDw70!a&uIJD>oSl1;6`)%e}U@zx;;+7PT72#(=I#IEvx|eO{r+5bF1Ot5C zB-nh#tOvpBrHHQaJM&WH?EKy%S>ufM38WWPJYPDdvT4}QF2Qw<$1Nvp*=~D;*{+-b zE$WrhrSFy+@vwq*$fMv=%pFBy1wcmqbVdG>emY1(NkJjx`EkMQ5=>1DCuPhjf17Ts4jffL%J~ ztvo#r@AS~;oQ=qshS}0z%JZ>j;WmY_)fbG!5A0>_=BGX|jec+0??)Fl;V~?}@ z|8zHrZO(HR3qM6<1JbPZy=)cS+2X!`4R*7du%AuQ&+Vh=*0Ib6$%z~=Vm?S?1f5ld zP@1^C)kzpp@blW}K3I=fH9|=}2vUsqQ|QdH>UoTNI7k$o&jijX0)*lmVMP8gz6Fmn z=yK8-u_w0BF|=Ygz8XhyH3R|0n4kIL2%@A~DeLXK8O_*=?I1YXu@!xn+wk}l;%z#N zCy8QIR1+fce8$$p&d>cZby$zChL50mH@1OJqXrM@VD=Og_-6?wF;63jWyW;D@#~e? 
zPEcVdN(`;#d9toPIt}4woB~XzYNW}8{!_Q%I4MSF-NABVJpstJh^m4wwh8M=49RJom6uP!d!Bp3G77WDlZ4K8x<`Nji*ax(+%4Ms*rqxLvfxI{6T~2$#OBWej9kU=-_x&`}r) zOc*aTV@xb-s}K+}^0P^33oPhhrFNMvM_M3v>q4rIU0Y{G2XiJddtK zIe@u*Gq<0>GC#BC21 z_;iTtG9Az~MbCI<3}dHP`L~{*O=E@8{36R2x2P6H1St{gKwgvV;p6x^rvQWoBIg4Ds z+$8{a>9SmGC$ZR6P&tU5B0`ee?ww_{Uq;7L)Gy6fam%}~LV)HQwURZI20LpGc<_E z)%Zrn#FhX5+S`_!I((^wbG%smsSC$q4fKRM9Ew@hbRT{NMKGfYkJDQTx+WYD9&QT$ zR$3K>uz`SN)SoIwTyfj1M#4r6!KE9Duf-ngvL8oB5!Z;vN!>W4BMGGu8nO4KITQgN z=QvKd5xNwLe=wy*nuND6tAsD+J_08mP;d!@M3zNx9EfN>g^nfV=|nq!Oaq0U+r%|+ z`!+&=fJlD8@>}Q>s^N~U<^D}Lrjzj}br3jB@W*xHke`PMmc!_7JW3J?4I2@NX~#F| z2p+}=V9@=Xq$4^>7j=qYdA5_~FxVh~+%h$GqTga!cj#R1U|ZBP`ZfX9gz&Ss2@bj? z(N{!BY^Al*NeSrD)g;%$&F{pQb|3CEx*dY^U4rX?ufFGM;1iHI*-a9AJ^jBXD*)7k8gYD>eeqcUtRk1rW-Caz2(IHD-N{19zJTg9{I?jXDUW%oiZaGh#sA^%e~48 z^zq^m^FTUi`5UrE(Vjj5eX0R1ycpC)X7r^F5DKH{Wuzx28|rE0yV5B1!Y$;AIUmD> zKtD$1Bd`_>@zo41g?)%ZXHX7Hqp=1`XXqk^F`YetaXK1{Ty%;XFf6waPzJ(e5=M%g zg+ex%WkolI(2&pa3h5RotfLeWF;Z)m(X@fl{zSe-N>x5rMrfq36GT>+DW(&7BbcN} zPb)Q;${)hKQtV3ylV|4N!okoXDd;*<369)ljAp7ZlRw7j*(}X0-{kquvP>r<=XpAn zxqKrbG|O|Ez!K|YJ7DF$s639`?dDM)~su$qH84RtvfFa8j`R zA~7G^!O1q*6k;eQ3Gz5ggweuR=pJ}nhgh4UTqIf~2na6snPmjdj!0k#mJQJnOiAl} z%)B@XFWYH@&PU7_St_$&o#!iVaUGl<=4U3X6j10(ma!{3<>@l~r51|*Fd}CNsFEGKfv?yo*rPdPKajo(-oK~*h2&#VOuB`vl@eouK_}5KX+QH z#}1um=rl#UjF96wP18a6pEnaEBCQI=m#$Qt&t^#<+ou!D6klIyHv-veX}*Ss|8Exc zVU6b+sL;|e_3)fD+?P;Nh*azRj#-9}zgI`0N?NAYnZve9V>xs*(o>}M&R0wQ7f!m2 z9WfG%j@FYo!l+zJa1dVdK0M0p#ZGZAKO-D64FsG9&h&nKGrb$fbm2kCwTRXJgd?ES zDAZzy<+<4w9s(gucd$*E`f?hiM22GVrbl_KN4j20nD{Oo+jd5;)VCqlE*E*R_T!u0 z2Z$$G|LX^G_5b>afQiVvv726v?aXS}Gb;&?dTIUhP(dq&tb>0yVJ}xDb;w63&O52K z2qv~jo#`R&vqQi*DVoRWU3emLJ&BxHvL>lR*)MJ-5f~oACX!amx;J85@qi**18Pa0a9QlXu|Z-}acd=&)+w ziJ{}6Ti8sdxTkf%lO+0CS`eWN*rQ81NPIwdq((phHb#y8Qz|?rbRH+XL*Z`2K4ErD zxICiUI6Be6HY8B!XhJ7?2*Z1YfQXpL|EY$H4`p6tJKIkpdXVl{_v#hK1t{Id4t>nBwMm*NM%bvwD|2XCJ0!|hYO z?|j(4_g>$Wy?JTruPQIld0eM+xZ?cLolBcTpQRoTUSIpw=8KN$mMgCBR9<)gPUWrD zI`!S1pa1dg?q2kty?@DxuA45p9XA>;1yKLG>%Ugt*i?R0*&^Z>eG9`fJyf|GXwuct zD$M9eAC`vm8hHM?WIei-;=aEYreY0-XvwVwW6<*RZVG%)ff4+nC&MZaG^t3}+45uv^hwnFru~t5eP@%Y8HHdk- zn@u{GK*0b4qBHocm2eR2lP|&k(j=XPf&UL-qrwa)w>_Yv@l=e%^};k}&xoP6V+a#2 z8)q58^1E5frPC;yr28N&=?0vs^H?Knw$4oxE+*`pHDZtTjL>zge?@>~6uuHG!u)?p z(F@mU;i5Ef=ZW<%)Y#_V>q?Pikdb+jfe@s-AP`)I7C6q$Q~WK`C@}|L7b)l=am$M^ zbQEc@#r=;;`zgqTj93imj0q8?Sn*J-32q_)2sLSFts0vIhrNi7B)=Q>`~hqfw82@X zhwnwgrBsDY{ykaRNuY}1m;^O{clMGVicr6l7fRet3YTlKo~ed6lWjz&>JsVB35+m* z7f+OJ*b<8?@wfP1x`Co1+@D5TX;8rKm=4h=*Gu~^(FqEX4leFm6ajc2r zH-6>sUVl%LI+yCEfF%w)~hoD#wUCN+)kCC$IEN{S$tQPjfI_xr%7yT3NR5gOE z=T>Yh4(081F0E4eM~PL~Pp^|Q%05o2qaf2YFmm5YdVn3emOTPxH-TlRcVZ*8lFp(R zj^qZqA4d7{-PogWyJU0#an0CzwgE@6;?@?y!pPkh{}#bWv3|A^;S+>VTor<8x3Ev} z_@ioYlpxa2>+wWZhk(2ZM`FUG08erwyuU`#r(=u6cM=4`8MP2f?T8Rkp6D(*wN~tk z+j0cN2G7qIPX`fO32#gfvE1!KG!Tyq5JZM6FvX=%5z%WIHAGSFKG#ke%y;5$IZB=@4bHVpmOgSM(A~GzV&A^7{s(n$x;}JS+E4OmzP^xM zcX<^1U$szEpjG@~ z)@OCYkY+)z+=l+NVHg>$YRZjJr(4m-i*$e&SWns*269BSZ-Bm_3tbeR{=81~QjmrS znNdb=trV((Y!j@CA=nCRbRQI61%;g#+ziE8Kwy^9^GG41d6qkyF^UoHJ6z64n+{~| zv<|j>Jr+fufEW(Tr&TE?>`h4XF6J3&&R19nj%m!(2}~D{V1Qz4$r!?Pj)B6>vI|6- zkqJzdiTqT0So0?-fCK`gV{7&-#wqY46yABdo3#Q0B2V1r(qfv>u$7vyQQVIi3j0#d zIKf~bM8q5pkwMo$Aei7QpMryt`%1|mBY%n&9Y^HsEF%I>+G>L?Wc$TAI)_o|7Mhdw3zf`b;|zhN6mvvaF9Iygwh@UF{S<$vSj)Rm)Sr%} zXjDqTxF#Q@8_-D|%Ig(6tTV`Pi6vT#=dhC3j}^9sU)=5ztK;R3O3or6@_X@k;bI-! 
zVu2=ppX+oi&H@UsVo+Ky+{bpdE5z_vM($#jKK^!=^IG99V&u-h-GxKgD<6cP=OK={ zsMNup(FZSG#ulM=SfIjoo<>@0!^7X{7J(7rIUN!{0fm;)t_gecF&w6wrL{Y@3w3xT zmWj>MVT-}cQNC~>ZSacqI24SAQArr<5N3II0tmpXFp z6S zG!r6?SUvSNq1Mb`gHDN#B%HzEgsw6`koXfD=vvsmVn}qdSSO?v+ew5g&og+c5g{IH zHM+z0S+)56$1BgZ%dY>LVdZ9`_J4aT*7T=$PP9F2iPM?xM0F?M|3%GDZFc6p8`ik$ zSC+n0f6Z~@gG=8)#Y#3-}LpGE-t-4Kc(^&+vnPDY`Xd{Eu!W6CYmmI z(Da7CzxCCR9>Q9mp`znZQQS3X$QxtCPtoRu-^~j}^n<8*f%azVprP1z5FUnd;DiwS;?ax2`q^VnwO>&>yiFJMc^IK_yp(>5%>K*2AXpu?HK0v+38u@!6Q z7U?pk;Zd#-Mube+sFXh|aLzz@YNSz84hplcOmt?euu2GSmr&G8bOco&R_GX{HPTt{ z2pyqUY|rypecnn43{iLo;3%a7DC>i#M9jbF!zKaiAvl)`DKPwPEYl4|`2G^z)`rLx zMMoLV8N)^)|EE~H^VFE({$c{ApOL*ij}Q=(11Y$E1`!MSopgX=6d0prIWKOx3Okv4 zX<;B&IzcN&Q^|)U7ZMT)0vY)qG9cPl&~1o?nh35Zbh$w~j^h#|UBLi_xe38swy|8K zEHTnC^-H6!T$zlbb2K=}P~%COSf@jUM-=E?)-BAy$}Qu$$#w9v&d2$^QlH6gUJnk^ zdhtl6#Y4K2Lzx*z8N)bY;B}a*r907JRW7pej?fwOAslaz0OGsse98UF(hqj8+OhwF zRlT30`Y(|bM&x4VIK_TPn)lI$gOoODbdy+~)}JmW2-2;I1d1ZHdt9dkk%#GR2#AjD z)K0oEkon+w>Dy5X?ej$vCX4bP)ivBm_xvxlto>+}gMkOA~8t@*W5=g3+(xh+2 z=y+U_8l(;T2|8}4;@|bsWV{`D>(eNmZbrrrA z%zYtDA5rMHqN)*2BQ#Ikr^_N#W4f>&)r8fUyND#-!d83@9MMe(#NUBkCMI1r;4tF~ z0&%s3LmSrO2p0bBh;E@1d4Pw{cEW$MTN;(+ep8e zI<7e1znxY2vi;{9E-YT}x)B;}y5#uCd{~W_e@lW&Z|Vp%jNJON2B1kWqMH{^Z-xOq zvH_SV_Nr_nL85^s(*||+5V{Ih(aH$AFLM^AOukY`^o-;u3#TxgFVdZK!EmMz29b7z z5!VpK+$7dOXN1?6O}J!^vV1eld14sr80Pb)pGN-_;;G_cSj&Z|o22ycVjU7&;Q;1} zG?=8|S$NS;5)d|m%3Lsk@$4B`^2}JQ7{@%$A;}JD8S6;%q3V;G~1{DQ7A6gbUq_H-8YWLL2%1Fuy-(@ucEjil2e-ARJ11_}%E>$m{0$QT*vNTxyb$DlG}$pMF*Y;As4Fzis(`?i4ehdKz9=4=N<+s zj|k@=Uwuk3thb-StcO!Z2&MPY1rbhZ6luDKNQPM7qle!O#gT&i9)xzxZ(Ip~apmHgD|~%vYvXv=1Fu*Y@6MfCT`cpNWWPE!%-&NiLT-xrU5$y+0F@W8`B9-lnQ$UNGPfvfm31> zn3zz=HjHbN98)OnJ~m_M!U)Ktc+q!5h@R@eE?vuU0>AsHcDyG#?N1V_$u-z~POJ@b z7xvh@@JCl-KIT5*^MKI2314iTv~X51{ysdhcd(f#>eh$_Ld>1`HhiZZz|ML-I@u*T z9d{JrNQbl&)%mxMlO3OUF5ORjycMJW!sRb(OK!y)+G=C;kKc}I|BE-G`+j&OR*mal z(&4Mo!+-cm%4vMn_30;~{P#!e8-bsxdu{dWDgvbXbq88+*wOpC2lrq1{cZc@jl54= zUj0Si=R00qd!{Mdg6{06eHT5M!%(HoLPde^Pw$477l;^A+mmHNhkO`q8FUuB7`jAK zLou&|uA_@0+mknl0nv|=PCziS)H8A%W3)LYAHht91>*%H807Y5mOolVcf*L#$Vha6 zj$n|kq@Pi6sUcx@K_rqT5J*m3v?l&QWYF@3QJ%YdjyXY3YXNKWBJp-p0)m0=$Pno3^_SOj*XTESc772JE0?{qJzI{Xh@%`fOzq_N-5$xsnBTQEiD5R*S8sOz`bzj+uKpNc*9gH`-0~?tg(hy&_ zNc$nyGnY3JCQXcVDWX{v#-wfvVGR!RTChhLK2|WAPivv@w&0*dto*0O0bTYk%R9~% z6AZhsOX!4nZXyHkP6{0k1#*WVu*=&eQT*++nkyf6J?=m{sOq|_#!j4rr^ z6H4eMI0y_e!<^taqQLJZGkO<^1L;NrneEu6z&}oD#~~d?B>Cx@=X-LIk++2c-zaq& z?_%1L;A9P9RfTURi%EJs*Mv$0qx*3f-7Fnvm!f{0K>?>=AFwSV z=~ax*#ZX^5nFO9Ik9EYz{atZOilFj|;lx>@!}>P%Q~TkFr+X2T4e27b2pE5?nEBqq z`rgBSa--Cb;insL(Iq@g6QkDZ5G1r7Q^fs=cL|z0>=QO_I*RafEjSi)K=hwu5J8s!zJ%aO*ib9tKM-Z3|tKVy<1stzPj>EXZkdHV|D0H z?86W*xQ-k>v~(Wo{4R9xqBA88Kua;#q?*x2a7eeQ7&Q+h8KITWKu0L38JTM-`lEz_ zzMuzPj7|-_Fl`j9(IPRzN{`uW8^#Eejy#G!#dt=^$X?`E%D3>dWlT_zLua1dEKsMaQ_0AX+ONp}eN1tym=(oOxn}K#_`lTpAYaqu6_MS$6r1)K-DQ zd4$i7BTQh0SzqVbIRwhYsHSDCpP8f^;CpAr5iFUa(2gQRkS#4*hk0W7uE+`13$ILtLq_>NvEBv)l!tla1dEXn6%+pW zJ_8gl1Dz1J^)Zs)%F|(6!7|hP;S@{9@^8PaU&=dmz~ehI#Ql0v79;sg0y2#tVZgL2 zh6XdDe<){Unn1Ua)eJ`tMKFtC;PIcxI&nysut!I7sA!Rf^6n@*;8TcOBc1rSxt(}V zQH4i}Ryq|i#I_at3_t?0O&6MdQOe{?KHEyZ;rNLcM>i3x)$1Y$JsrO??B zP`j+dURpP{6T9d3GC`48Md#5<0j?23DBX5)r;15@4e&FfKOlVe#1P<9 zb$D`u`^3^IM0H?0TDS?i7y?NwUgb&^DdGveI1P@Yy6~=;&=*U%vE29K#i%N=#zr0X zQYpsI)k!(14$^t%ptHGywb&}QMI-i7?!fcxb-IzaVJA!j(IWK;T~927m3VQ>s|wB&O<0lD z|MqzP@W1{n8vUV<8?w%rx8p|s>g`|CPx4m1kqaef2M7u=}6?)+};>z)s+OH%u~``N~8 z&UeKsggtMqqxFr=zt&xJ-udzETC>K`OMzD>=`fI`L37p!Iy1zrEWXDJNtZ2Bn@m8L zF@@F~E4MYnkZB-PhA^C?BT%f+@U@4LbvvW#zT*C;Yllz}Mi_}sGoqc&Hei}j;|N8i 
zn^CAS*M^DoVNCJjpC}uK>Hj0|J)j%AuXEq!#Ln}5PF|c9JC-zsy%U810d{hVMsgfG zwx(Cr(G+rqz4rq44zTy$3%~*bAlS(%k{vs7l;kEi$#Y(ucb(?gdAUw(N#FbSnQJe{ z8Wr10Zr;mTi;E<1a2kN`+rRVw@4YE(=6ExPWBSl-XhkIrp^aiJ1KJ(>4$^^)U)0k$ znqZ+>nYE-F8eNCf(xRxhMM!=~7y6%WC54#KqiRR1rV5SW+^3Es|*PxNCo ziu-?d28$=#`1}Ay!)ZJ;UnwhZGtkgZ)KIXcDzFelD``S*w-v)qT3U^7#1hS5M#*xQ z40W1@l&6>IoV2(^qp?TjW0}Ud7*)#O4NC6G)YIikGT`h9u6N2zv*LM27h*2FOj&O& zlJvk}Vmwp^nTs&ReGN)NVN3zfS+-R{+RMXif(gqBH5ffniIHQBDs`k5QsQn{6J86G z!EqWSgBdpo!yZ}(cT^c1G@U(C)q$RL!|T4q_h?g7$EdUdv(YBlNW*Jk)mS6JID<%4 zYEqbFWQSB^K_j=Zq^VOec62hxOHorx$@Ls%x~emPLK&8Y?WhcFN9JQGw1C3P2%YA( z6<(n9;Mmnw;el@zHjF0xo{mv*1fy>fij|SSO-(~m=ivdxe?wb^UH+`8>hU|;d>r_e zzpH`BvL9S^{Ju}l-``i4NR!CFH zQF01`vN8zm^tlZu1>fPB1^W?h zlSZ%D*2t){Z1&*CUN)n2kjm&#HeW7PZ-Mdt~vL zGnBq1>!Il+mqNHnN47&b6#p`;29vywl)w>^hb5lxJomfq_cE49R%;$fq$-+%It9yo z+t3zbUQlKIt6NdY{`sGL#jEP@du^lq@{q0I($u$7E-beFtjCe{`Kc%FTpapZ=EaG( z3(rg=<)ZC-MVF>Z9=C46*`e~{vm+?FFp8`f=27y(8cMEC|4r@lQ~#q01%W0M z5wlB35GDN5nrI}ZI19?ci&3GkL4F9SL)VYy$ObfMXdY@a%EPq0A||7Hq%n`#hUk!`T>_?j5uwotr!qMFbhT}LXA zAMTHWGY}@Xz#7qO?4?jOV2Xy;8C8w$*e;R;%_OXu#L>=kBUwZ>~Hpb$%)s&-h94KR{!qCqrJOq;N7Y{OWL zPqbQS5W6pO}3Go+Sds`$D}UU%toC}S?gH0Vs4VHsv($`$h(4=>?4 z(lkhzW07+FBo|WZeHE`AKZjcsdPf9Bg;cv7{-`;Wn}YpN%V?K0y2prqgLJ$U(FQxO z@4dK3x6f<_9+$qH24JFSG*cWaXd-+rv_ctEv1to&pW8mr6e=~b_O#N7iDDa;g9kF+ zB9XtkNXcSe50Ty*(&Cm8JdMyTIRn}vEX!0?Z6(cz|A*&cH-#7JKmD0m#TE*Tc`Lg`QITCWl~J@X|CzYZ6migkI63wuM2_r%Yb?r zoW5BKHMo7HU}Oz;f=ltblrJHzY>>k2ycYJ5a@c8J^Q4!}kTkwe6ki{i3QOVkFQllK zz#72EDC+xxlHr|=-J{pBNCR@{Y0RXNRX#p{^bU5t)3HOLKk#H=BDo%i#e9;n<;`Q~ z_LSZp9?4h>kOopT#o$!#hv&%s*!@Y=L9#9lPukD&H8V8Lxq!Q{du7o)lQ9w~MHg>i z$y25s--h)lDU8o=23*Hkg@1Y5zvy?%ex>qV6Hdxc&wecZ!uY3dUS4?L zN~JG+Zv7L97pId9r3u5S};nZ7^ASyF`Dg*qqv8)VNlnJVU0z(m1Uza&pvI&xZDzv!Q_+D zZnllsjV~hemM{;rV{8r*g2~Uq#lawF z+t7dqVh()owrD>4k)#D0(SeMa1lQB7DWVkYb$*U^wRNPKLW*P^4XFa_5k;_4&>zT{ zNeGQ1JRgqm3heQ7v>a51Eo~llXc`AJkOM}(`!vV}zTQQ1S)_n&lT21384(+3EHcom z&c~igNF)!N`kM?G7$Hj;b{TXhjjid*@VbE}6Ic$Xv|RN~y7J)4_%D9_`n!*ZKJ>Wx z(H-CX$~9H>n~uN+l2JaRb!la5!bV^4zRsiAHHjOMxqA}&u(eFgRZUo~73p(|^=-~gIG^Sog(>)}K0bMt`<+hk{2t#5Z6raWzvr^Ma+S`uN zm?mPHl|lGHnw!~JkAA+kgOPp@siD=_uC&zcj*=!#Gz*Gn5BEJD)Q)KyOa}#QJhW3W zf_AOcYwExNUnd;|o1-b1#%@eU@m7jtw=ℑ#duq zPjpb2`ml7o4a=u`;ChB;5jjEul>&xlQV8imW@X9oJv%@_?@CnG5!G^tGz!$b_Dv*>E9ORcU`?bx7cEs*@CNfgc#Gz?OfgYV~9Y&|1> znwyN1#P*YZx-h`^*8Xe(kBwqVN}G>qRdO+wPu9a8UxJYY8l+=A z6!>P$kus+=`Ha?!X{wSaS%HNZ6Kv6pY$@`CG`b~e*d$YdqevuCQYp8V(S9{ty#BAz z?6*h=(#6ltpk=JH$MIDLTrNrW(@9zNQf#R!l%`G&T`sIzN$ktPfsR6|E5Sy1hEm9I z&A*OjR{(c-0k22im9JkP|H%H0_pXUIAC&jez2N*IyRPuoB}{7SeUf0JgUs;^WP z-r#fhy-YaprJ?wg@!SfqL@IDTk%yH~igsWIO{-9;@at5k;B{3R$wVsSrYN_O)=3L? zZxVwS35W!;q0UrhS-GCzb)iu#a{E#rOA0q01m3_nO=d+*LI(1^`P(~@25=KDzhu}y zO;Vv@tUV!yAbrgXX&N{1W$MvG5|a$rZ+hJ#4blvQ?_lp}x>EAp%57Oa)36|!=e|jp z^WZl8voYY6!sDVr_+7`2*Q=NbkQTRzyl%JQ@JpAe#cwV6W%mE{O8rwTH$P?aeDP?_ zKfU^t3C|yGM6!Pk?)p|D#jEPu$-cGAuX>i1-*v2{?8T0c=07)?oO;Ff=PB1`zM1p9wLk6h>ZgDBe8Kq%&+PMyzn}Kv9P%%W zBmITxf62MBnEjL6$y1l3D6kf#G{ka!2a1esC?b7SYFd!TFJ6OG*VR{}l8;wJwNc3H zP$8w*BcyGulw5B`lh%TIl7l6x4rLUMCW?HMURuz0p-<_^M>CO5hhb8YB4||lG{tIQtCa- ztYo0eRNYPk%|Y9&-2Ui@A3&FOoU}q3QTOoqdeTcPMN-PElSpWY-EnmoV$?gW8zRlr zV@NBx3XG0phA^%sDX7~?C@mD(2AUO*Gg6GBk)+X%g>Xjf@m-jeL1G=nn&!5^NPaB7 zNhu{V&Y#gJ3Y{2CtQ1eQP{b+9l3*y~BvGR@hYqes(|NiVju`$g6>>=mQa5Tc!Hlu! zE-b|NkR}>nBOy)4^nZUTvg=DfeDB5+TD_KHTK;mLhHcX{^R=y*q%q2%yCef{(rA}? 
z+)`40gk-Xsz-tg!MGnpIMDkH_vBtwdE5R|P{>2)!>c1YGa4bdr-q>*0XJU5?Yn#3)*_?(NzY!8$%gF>25 zwlcle5u5_MWQ&t1_Pz7*x>pGWzL=4Dkuq(0O|~h$#Azx!!K6LET;*qj=HZf3D4Ik# z17+VNG@mMpGT$Q#_d!55?)zjZbg)O0+c}!2%1?^wW8qQXDY{ zR;ku|C>4&wH{t$`EUZ7l_lw(-$2L5(G4D-^P{|Cf9PETj@Auny?MMO5E`|FjP^S0F zt+w^R+nDjoB7J2k1tw*@YKElbkl9();DdS%9D8cHh(g|7>hJ|C56(>0@>B2h^Jsg7$$MR1oI9iwrK#!!fib!btQqno1IN$O~is6YqBrA1#u8fm6EwW1}V zT_KQGicx!b1BRpO(V`>Sgteh9j*&V|WrBn=pqATDy%neFv6g@I6ij$2&*@Qek(JG{IvscD^|hJ(~5C@89pnn2#;SHh+8WR1+ipI;=;_ zjIee%!dkI#tc}rqEfzElnACO9IO^b}`E5nlV_9RuJdb&vhA*|DoDtI9uNd7ij8J); zvwWRRiZPyQPzJo0b@g1gp4ZGmbE6STP6tifWt2f{k_^RM5)EB475JD5v!_ZG8e0f! z$2^H(PDm!UnAf_T6j6h98p{KXlmnsIM^<4wyb}AQ;VtRn$H!diVp2jC?(%<%JZ5KB@D%TtVMNBVn{oL5xM+Qx!kz6(X~yVv6t;eJ+Wn4ErG^*kPo<#)#N%pqXf8 zJSSbLue2Ij28X(U#8a-+;ra@R#Vu8WLn@UL9E$!0P#=EYnzOs2~>?8MH8va1R z=DJVPuR4NimUn{c;P9^^>7^=FYMpZbKOh$lACi$|TWeF5f#8{tBrK?sc|L{M3Xp4) ztCT?5lNnrr6*%xNz&Zuq;g?2{%~xtcZTORJ{KS~@lv_`)`=w&jH<^EDW6noPhe#FP zA`;qNo>MyaSBM?yUGC37fpoI$U8K~&vPrpizYGd^3KmI6E5QYDdD65zDA?TIyhkF* zBOOaeZz(-SUL)-s$jmD4XN6QHy+#~Q(onpLuugJuc@&UrxX;6hq@)5R5pj{n?egI{ z_+?{LlZss)*N$f%?s=vtm4i2EGMoN)VLeKNlv`PTcd^2Kt!weRTN@$b20p0(~~o?eH{?#j44 zg#!L$zcTuR&NC|?tY`24T*_6ID55x2s~b_ImpG9->d>vBuo@-Lq5PP(u}R->JQIv}&J!n)9@@25G)RNfAZD4FPHnE6~6qu!yYL5jBp zz0p-9m{yVp1wTQ`qsZ;0CS_oJBuWNM`>>>`LT_vfsihNxjI?D?z9X)g6wyTUsK-Qn zGb7#x(g3#+-ic8fL9b4iZThT=AVs>GKIO0su#)3rkrzGjmf|aNH-m=BCW$z?BzM{ZVROPYB*xK*T##EAIIxzd$a!gS`hiSZ0 zRB=VdJmOnulC3l`iaO1ElE%EmAYoFBBZBl2DYt&=c^ssHW0f?&VjgD&O`$?5GqTEK zwkDXc`b<6vuY~4Zi&0+h^@w7u#8)c9y=-h!N`^?r`HHa=GsmhhFYgHrW18Ds(>G#= zL?h!J3#5h((!?6Cu`{Gj={>&1eYr_;+ZxgipWBL%0eG`A81GW^Ixuk9)>JFAt{h<| zY(y5rt*>AdU4s?BT5QlXU78AfH8fATU9>?$STh#ko;nrx!!xiSlZ$)$0yt?_TRf&6 zErrOCgZY=1B&hgRW5&1cy(3pJBa?1i`Bc}*BzlItDV$g zrD(ds_!E?j9se{eNjZ+-49o>feh0})awsUa4=BQWNAj@iCDn9kqF%Y!JyL>Qzluk- zr-GzoUp9%0=IdXBJ?}!i<|_pdrC4GymMQ+LLK`Hq-wBY8gy|RyBE3nHU|=qmLqzy* z)1b0s#!%5ijp}`!bnfj6TWV-TEn(RnrU^HZj?i2)sP&9}T{jg|;Gt z6q3P+v_fBq8dU>|g(mb;uDuF1!F9@%-pc3}l!rB-lmb;iVJ{_hG-)eP86-)8wP4c))8&${WX$lE7G?hm5QEWzIsxTZRx0bkW6wQTS z_{orV^k@n(Mx*H7nio?<^s<61ag3&%4}%2eKA={k697}k^hn5Nmx zKGTRfiuo+fM8++qrM_3ZbW-fch`s~!BrM78n4&q)($tm>J=`xT>9=~{n*Qyk+V$g; z?@7Ds#+`rRgq`2NCBzk3zj5i|lu*=up7q!2O0B8;nR zXv`(B#>!-aDw>9*x7mey(kJ5gPBE@G+U}9MYJnbW!Ll)-E+G%FK6JRZ1OGXogGS1393+P zLb=rxP@2VJ=%dbhJGwjA4W$@GrQyCN341XVJPNa&5&8xrb*Ju8u7yjJj(wWRc5pq# zR;v3lq7IR~4hp6&MVYC!M1i{>TB@|7bxA`fLn*8>JB#b?lA<f_4D%(0d zNg{q#c<7k}8;^IHgzy@pdnajOQ_d$KPnrI^Nb=YUN~bwV?g)uS$vn@*L0B5!53WN) zaR=pKiP!0?QgVdE;66%o=61IN6sNU7T~h+qUx0j(o1{ z%EDjezi37IHQV16UUvMJJf0EKfL#A3Mk^EO3=WF%M?~e(x^s%s2LW$8H3RSN)AY$rUe}o zuEywUv@#0rQHl^YD|sAajPCn%t>{-Z!W`U;e(tAV-9jU1K##hK>o=oUZg=S|=r?qs zjoa#GVsttf(FhC0NAg$tD2l^z zofwL3$5gZhBhNNrmP9hBCzVjlCuty)S~KRNn=q$q!;JLpi0Z{md>^JyNOIl)tUUHf z8sL0*8wrTw93eRzT#qKR#P!7#CK!QBf}K?LUDwf|qPsA8Yy=C&PArh<#%LJhkrcxS z8JFp%cn>JGshlyAl-KsAJ7K2g>tFa_<)n9`TF)=VRewir%}tON786KTQh@Rp_aD`) z^xxPxX~My?4Ol)^skDGyFqXp=R}Aa%8rWn&nj~SPAzP1`v1F*CQPz^=qURQ;ekMSpqT6U9q%^6YCDVKP8C@lXEU8O* zP0P9eO1>rw+cM3S#vlXvo6&h#m%(mn{~Df)H45&Uj@Lw=NrRxN@VVtMNeW~%OmTF} zEweyLxFf~MbX-+E4T7StDTafDzQ&0BYhgJwg>3AH7gAKKvCZx5Fanq6L^5-0B}ft! 
zYiT4E%6Ny0Bkm)h2BUXEa$yUs#VU_!*FWRY46eWe>`>r0f+a76 z<{?DmUj&FKc@a0hGJEZn1+NUx1m?qkI#4>pp-8jZu{TMQ=+a@99iDTU&c zo+Uo7!098?YPo-J$p=YMYI8Y!)3I=r$IESOAIikGrx+Yfos_cS#p5`17dubUoDS1W z#6Udd??miN0Y}di?0n`nspdMjoe3LFMk@7gd)y?MNVyUkN8oiD7Wa4f2AoGH<(mbb^?#i0m*4-YNB!L* z&x*nf|EljLdFB1-_28;cyw`TpE|31=&Chnf=dGrfK0kXj@wxdwN;^M`%&XJLyfXI9 z+$+7BA05lN^^fIUvsPrE8~fAhbCcglIX^y8^f~K08ch9gDY!QGCxzGMkbTXH;;W0u zcxCl>Dle~mG@nsjxxX1@p|z+AH=&5)kxdaU3vNTPx)aqz{Z@4=iYTU~6znpJWOH~A zDjE5AP+ZJ`7SyWimBH#61IZ%HLh`7kd5|E&DI{?g3`TXrtgFH(w>_wDL%Ea$;XH?W1QO@mn6TK zW=xPW=AY*A8oMw|x|j@WCJhWIKTPgrP7Qnaud1D~x?O0cgV zCk=(yeQzdiAgBNMaIV9A=eu82fQ;!uI*IH~1wsf6v@OtmW z6<{r@0Cry6IT?88bsYK48X8=k(tBeup&m0lwpAL=$|)(+-bll%#?(m)&~X!-$G8rO z)uy4@#8i`5DwWQGQzvS%Z>%R-Na9~Jg}Div{M@){I$x%-3C&29?xJ})`1yIjpk+0> zf!nUZfv%Rv*Qyj%Tr;#N1snIHX?z+Qh=E4IAY~!cf=$x$7Rf`p_PrK%msC;!8wGBk zbm1}-@LUWgro&&87N&ReV60-H0U^levEF}93_*z!%oP85YvRZ0V(Sn9Hss=W7% zjEc3TcqpbK1I|Vn3&~RYaY&hv%}6O$Sd0T7nIfBy{lGdrWbn8dRICgXZ-{r1UtoeIfRCIxB=T- z=b^R`RyB#so2H;8Iglu1{J<4*N2zA(^pdjbiMY?m-zm4@yz;T_TSDU@#qhZeZ)ww; zgbfvin?~Sfq;EZ(qvUc(lH$hEB-o^2V_+fnNh#YwDY)lfg0GQ&c7-NLK8~X$%5ARc z&t&6)KdI1HTI{|=Gok7G-5_n;#saBjm*%qOaaSqxKIhN#I&upuG_Re*NhFqZxV`U^ zhVH=rl;nQAiXG2Xo>vBJo_tQmas1Q2yKuPV>QD1s`p}oFhfPOc3~IzJ&&L08-K#P& z`O{@TN_o%LCH-%AJxu>C-LL4oS$+jae%^g0p0?G#`lUH!NfW6H(@4C$fV}e)n?Jc; z+v({~);#BUY|hA!FWILT|2pTi6-8HPkaKAq`7f{iLFr}te<@Yhp^#tHGDdYp6o^LY zkFKjmb(k6Dq?5YfCNzdfM?f=b7**yPTTrTLMt!6iZCtlW)r=+zf;md2&(_1Nr>W44 znn^V6jLJG0NtNQ zb*L=p<^D$*HBIw4yZHrdiRwl_qv>`E`G9d0y~Yk$bbLL@VIWdw1=XWp--&Kb9eN`q zXQU0oDw$2ys$|4B(*(NJGz}@!eyRqpC`Pp;gE6jaHORm^Nk_@SFh44>A-C2@5uHjd zNvksBXqg1Ebi9ugV!;p%%`W9W3T}rw!6CoQ4onh}mcPL;nxhgSSPU@g!0(bCb&pY8zE99 zI8V8?WDlV+^Rw@g+z+nj(v~P~WbIN5C8jXX5a$m+bLQ)>(bx^>iE$x;%QJK|b@>i3g2w=`^TnGTx+ z8>vHPb}jS#W&#s2>vMxdk;Laxl#0SqxN%+D`g$c}$2SGrhtu%;o)mh?`FJV^Rw0bf z+{Mb1cd$+h*^=>$fLm~rO70y^!3s%a&gYI&#(jktJ2rpBklimgl2zn*(2-UCeP!8F4sO)7I^z`Ig$fS_kQ%4 zZ}rEo26lzq@h`VzcvZiBTT}7P>ppGo89{uICGFz$cH-F)WSyDB&C_#u_2L}Gf8ty3 z^_t`>3%{0fX(1r{{Ng*x5Eq=Yr4(MA{$BN^1(aN!`hLyX#Y}lT$IpxuRV9jp8&OAL zF9NYenI!j}uU(9H&Qq!)8yotur9$reZZiXeY8I97d zkb>JFGq+?06(icd*k1HWLLP;ron&C9Kv?v>7zu4dH$}IPf@(H2R><)T1hjvlUETIK zy%hRk5>S6g6Na=VSOTliL~-fTThPfU)S~I*-)(5uwqa1+gQ>7qOhlHWC%O}Dx&{nz z-$N1Y=r>Z-41?&V@l11@(xGlBp$T2j^eWSB7bB%?M-xq{4&%q92RMo6X(=q&k8wk< zBK|XR?aBc8#IY`nN$(J2Cr0B&D9C*nkOA@d9<0Q7khIK7_rE?FJ2~E|ly9FQ0W5GI zGd$0ccy5OVF+@sQpmEH`_Fyr-Td6QSd90PkOVWyO`h$VkZsX7U`I&#V_A`#y%8kVn z4R1N0X;2Ce?#9$%UB*JYS`{76^$9!x(hDkzFyK5y4lz;S>#-Sb#=5>-xn;CQBeALKX-I8w7{pNeu|}iVQgPd%b&PUlu((ng<8YHA zAA~SkR+TWq&s7GN5A<2sqftsfakrYHq%L~vzOKm9e|1tbVK4>TRpoE*%k3vI7gdJR z;OIW1@-2S8528!4A0g%BbCo;}YiKc+cwAO>8WuG)kWiVOMS}^KM92!IW|lKJi@^u~ z=jY2ERDsRlYNd~cn9(}PWW$g2!t-AAsZ<14`j>A8=V30u^bP}YKHpcmd}RQxOEXw} zc?z~m!`2V-9{-Up%lDp7s(n8=_xl@?Y3^00G?`jbS7D#$urBqh{EM*bS4;9Bp#;%1 ze9Evz!*&Wa`4`e)^0CQl;Rumzb&|$W5}o%Q?8+@Il7(zbUR0n*DOjpUUKD-CF$ zbTdPWS@$bYYF0S{5@}L2Nv~TtpomMt-nxkT(MJualrDyR-04NCkkTc5e1qn*?r{gs zr;->Cr2FH^(X{g+mck&PHzUgyz@5}F-PxtS#+r(WU-{3?TT(Ask#}(%8E0padVT>Z*QOu5+xi)o zhL7c)o4r?Ze&KuN&rg3j(T$(+Ta@}56`r`1_v>T(!AW|_S{7k9B zOJOeH7q%+60VbJgMM17qx1ci6g65Dxw30MBLu$~VX+xQmCZUOR$JC?x5I8zmGZ!t^DqaEWse@W+0NLz-{-1^q^bYjXr}#=~FJ5-9sVuih(rxb$+BT zy#EUwVLg8_EOo3%3(XX&E`HJ5DT3|BTE13(ni;WopLixw*ly)k)|5O)qmXj$ zRtnvev6~TkBPJ<^LKwqR$|GV3GsZsUwNvrjc4WI!r)pNHgak60Fn~D{$c&UJiR{PZ z2^lYGRr-R=Qf%j<`~-bNd@~9cFFT_ zYV)xZU4eZu8%f?{FtlMP`@Su_s{ZSS7nHFhdvJd6nkwh*9bF+D5hMc|$tn#&au{r+ z1xIK(c0(z8x@4?sW%_9vmUPnUH5qnN$Fx5q?@$_uQo5vqpAQ*35+flQtIBDFHA?5g zT|EU+`gZd;W%{fmq*&>?w-F+(VY5g+#c;$Hyu(0NUmX5T*R_%V=icRMI4@1Ub6R0ejuGcoI%h{YpWnDffVycY>4L 
z;r5YaNy#K98F=uz`EZ?(8<-(QIJ{G^OGDUtG6$=C?KBBthlH{AWGU{GIJRXLl}9pZ z<1W6Ux{U|kk7iB17I*{q4$}m@Uxmvn1#W3=E9Ku0--gvYo7+gl0SU?BdyB`G2&=!$ z%DRPpzeMZ=-+{v;7b~8xe*1OLl$0I6yZ=7ZbU)~F{mGh~7oIHsmup8GkbI;SS?VSv z`Ih{k$gBFRsX+}Ncz`VH%KCT|H2K1!=5gD~c+UFEnHMI%oq2W~sn5?M`|=D*pP&D)`R8o!>%3L^ z24rdLQL3&$wxJFU{A~&?MNJfGf?wcDRTG*FUFaZz^aV7d*4T`akVcH@y3j*0vc$G1 zT>{%E?#)3B=nj?+fo3$50L)4`kUBKS)GPTI-A2*}#dlE0h+p4G^XNuD#iNJfJrMke zKz_Kig*K=rwOzkIM8fF`lL7QbbQ*+Sy3nj^#2^iBO!}$QU}UCOkJLYk>A_HBH#+qs z7ro>|bnw{PlrDhF;f=~v-09=pjI5h5qBbcdR0e4-W2A_=;~g00`ePb1jiMPNky3)C z3GC;Vfmx1;zLo=2ms^Q?Z+0&L_M^i%bX~K$5iZa%~t!u$9pWlysl=QbO z8RL9zljb7*!xy#H6zF=`5@=M0D!BB`*whu{HAa~-&DIf?2Ui@aMsE30(0@lvfKl~5 z=}B%(`-ich%$I-s{5PXAgWZO*w|6P>LMRUcv#}x3co@k_UxaOnq1``KsmZjgE5$sc zai=;N54CsU&}1kHdSYJd!4=qM;Br99*wz%%FzR7fWnw$19PWrbZ1K2V0eMO?q(t-E zQmR8+g(cr|rP0y~4P+xY{qkeqqiYk9TQ82(99)_G!cIcZyUq~Wy0rAsl@|s-y?fF1 zuY_@1|B~-&aN6>cU&#+Pd<)?Wq*>B5Tz;8Y@h*bXrw~j2kBDR=s8q?h*imP~P5N;1 z{1>DcqdpH-pL{IS^yZ}JhU9DpmwI6%cul&OGC(8y3eQPE&gQ`rG{-tNm_@w>HPqUpn=VqQsIy?C< zMQ0|Fd}bEu7Z(2|@zT_VA73Z!;_RnV&d$B`;wAg7q|e#jQE~T&OYWJK5E&~;JvW1_ zOS8zQfs|jK{cg^eW@Fy%H9uU+RgEYKmRyfYRBLKc5>|uS;7XcGE1DP;Hd371bQaVE zlO}=(NFdc{2x*|9Fv5>+p;5Fj;+GQZT-Q(rv$_MVdZ}$yLmFv?g+ejNNVm&aigpTY zGfk<7g4;#2YL9C`FAc0GtRAyo%@sd+Yzx0R6w@8v`xpJBo^~3SjAwLG)VpLx6@`6B zTIiCT+9~EeG6PGJ6T{k-ejXhO?aH*cWXLY~7CNop*P!XHJWkt91B#li_mB5c5T>;Oi&|0R7F zHc1~g>8U}(v+B97-pok99m{b&SUz5dRhou0tQrY7V?gp*PIm5%o~(J__vz$xV zoQ!OmST;0%-z9_QG>2hz1MEB(DS$W^OCq81?doVG;iRLuYMLH}{bT{=g;0(aEB!oX zq`wBuV>PA}dr>5UFez#%wV_(!2&d^7q}nYFNt?;vCLHkdv>L&PnD4!eU92&9knxZ; z=}oPd8d0@a)zo0uAPu1!VbjnsqHC1r9EK7SN~w}Wwy7yndQ-e+$W^BF?l8iC7%XK+ zGOaqNn45s z+6)>@9*wg=F`wNK8c2W%OF?;WPX-j6`?q?J_m33+@@t2)Qg;GM{%p-hW`Gr8nL@sO zq!`YlIm)=muJl_EE5rurV_lc7^h#f&iEIYk;PGdWT1-k7`c7~PjV~LvBSkdV0zCAr zz!r^qD=V1=xbcY0#!`BLRkPi9!9TLrEKr;3M zlX(u;Ng>yj(k8pUH*w$N);I14W~A>Oy8XZZTV3P-Fw3nA_UVk%b4a>8g^VwaBkQsi z>8B^}z1#X3=Vm`udS>oY(W^7LX;&7{4Zbw_qXGQNGemNmWPnTK=%Y|~M3Nk2 z;G17M5&E%{P46%eSz0d-{YGb0clm&-`~PmG@HfR*V<1-QJ5|yg8Zn@g>AqDojv5k9 zCq`u!7>#dSR}KqBeMkoAX%f>EiH(F7ihVbnu{0Z1Go})HNEsHT?$SWCZm7|MQl}sb{OcGJo9s=2Aip zR%qffOKUcwR+(5J35-LZt;Aw@6{h(fNvZagNEs+69mF)iX=sAWD4FvW*r%B+M^|Dy zo)LLW4UMRbqR$AGChg$+?&NjdeKC%r6)ELK3Y1k}fdp#B;VBPaJX(VPN3FW0a}4UMaXIMG?LE|KXSL!1rY=AIau^ldyd#6}w(pSUZ%# z;Pd)leZ?!$WdGIAz31%L|6eWX&&|#xT^L8kd78-C1>Ama5?QB*4<5H}^`$kByt7je z3(n3V|MUowFO4Ak+Q>%U`H3HWVKUB6y^wKc^1s)dTSUp(rN2$PFmdsN97k(V1sX#e z&>3h(PCz@#!dpJH1*FaT%ZJp_z2*_&c}}tszoiunPlnD@(fGXbKeekq9#^8j3m1Ly9g=XzMW) zTZ5^vO3WW`!lL{gAyaz?(Hm2#lvNp3wJC%#p|55{-iDQ!M(l)jV2T7WOQVrwJ150< z@mMdG4N{=7Ps!n!*LTBe7=%kQ*m>=w^6o@@6JOJW(S%+aRV!@90W3xIW8+l&cb8(@ zo7ayGdi->sm=AB6I$8az)A7~GOUDYoIUQStbsFG&bOo#lWw4zfK}5DBssm5AF8D<%MFKI>WNh0C(O1{QdrBk1jF^Q~F z%=I-A#w-Qe5!gsmvCxoec#WG#8Yaw&vB)+fYiO`08d4@!RVJ*57GqYOtIUepW3;@d z&%`oGVP7akll_f-P1ehkerfNTE3;%Q{3W+0Wno*BMpG(+O+;10Xq-a28ur$m}T4he(Nc38PWQ}-AcZOFhmBl;y8&MF_<4z7S(%pte{2mVs?DxXADifw5* z8_Me>gW43~#lRBW4=z@=xfE>rgT0WZ9~C~G{ru1;C%?4$o^_Z;ztG{i`o)pjy^G`H zdlzTUt)H{}!sFK4d9LsD{j-DL+&$0F=H-FEb(|i!xe`zmdfzLfY0IbNTU$QGum+~W z8k&yP5DKr?9i{n{9RE^~OxI1tqE9KwpcqRO=54}8%+&X8U9=(n!Xz@zPXBYk`I&d%?ai}OA5A|yx{-2b z1{tSkQGIR_g%_uhe{mLfFHDa7__}E?SwBt^399_k><4z@GGcvgpk$IaqKcSarDUMD zpfb1!wWJV}j#Q$mPzKOD_$6uztVW02rXroRshZFbTu%cq!J?9OucQyX^a80voeZ$c zZ6_MgCN(nJ3ZES}kX*|Y0r7=^7x|%dm`<@2!!=*Z^^>d@@+J{zM{SRhz zlDtM?k1EHQt{J1zRZ1n;ZkpXFsX^*jP0&0hBcz`>iNR<>k7fWfIui}15|et8iX=4Z z>lh8UVl1wX)IdXskh)lPn4$>I@wGw|eKg8hV+Bcr$Igg(z|etZ8jR#kOvgxBlMX&t z4=eXOVQeSOv@6-`vxYVrR}<{9oqxR=Q`z+g;hpbWv3dE`xnEkoyz*F~_%N59GnF6T zk1fugjmi7-fzO)WmI}L4wq!-$M3XI3dYIctRbOF%vBhiVin1tO{#GgOTcpetLoL3h 
zw2Ey~GTmn)OGq>nOI=5gA&PP}cBNDbUo)d_z&5YNoREpO27BD5o1cwEZ4=im zBQ=)58eW2BT@i_q5pyubTbGMveFe5+DOxm;H9qe!rt|&E{nm^zuj!#z+K+BmI9?ul z*U$aF&+TU&s)9GS^f_1vyF*GySL!;gG6EN&-_hp5?VEumT`6g!7#pDlO273jem|@v zC7HFfrq6{-T}1J$!FsSv@y)>s*L@gLiQQn+qaNkzY!XKyoW3&KsSN8hrw!><9+vZ# zGo&<0j%DlO>_>MlOy#X#8vMTdLhm0he$gEKxMSWt)AK9t=O^4Q2AYm5qgXsY_&w*v z@h|=4dNb#yp4zxD_Gi22`>=6-7>)~La9$ezuJb}$h&*q5BI8qAKB=W!L4|*4_0M~2 zKOj$OlI7xe{J^&WQ_`X~Fr9|NYwnwg^`rSX@JNOIh!8`nlE<=5v#=lGb@NJt)mO}d zMi3}n3bV0wGy_f&%9_8FYL{d}QjOQExEFXGcF!bynZ~h8!xCfIJbD#2Z!wG<{2@)o z?UO{}xW#QHgZxBhJVNJYb( zXYPND*;W55mTRBy|HZ@$16OXHonN~3rRlX>m&dQ)ees<=0?IE;Kbd!7;_tG4dj@yT z%p&(R&F1PDvMx;^_4M#x{k+Ff#%QWu#yUc4Q5#-?7JkVq_*k8$9)oh5NmB<)urzk6 zggL4T4gA8?g;b+Cv>ukQHl^KbM<|Vg5m~RX8ZDt+=+!m=3B$ ze?%)9jBQHm+aU_ixZX^mZ$-DZ3Km`42Qm;@9)xFqay-VIvY@N`-9>%HTSJUqr!FrT0cPoZ&Pvz1;qy z2u2NJHO}(a^Wm+QvAALN2R*;d=WV__&u_eIt+nA7tsj-2cYFR}=I$>{CpkZGW$3mr z)8PqYb>3WD*`Ex?X2Tj^3R`$3TqL?pUbhGOGB|aT*U`$ys<0AWj&(x`+@yyGp-tE_ z@-_TfCs}MLwW3TUiU!;x$$U9n%AU{&Xgo_KRhL$BIvPm;G!r99g$8U3so-@ZWrRuY zMHX!Qj9GPzid7V-@Eq(!$PA}4rC0firsP}OK{>5EhRl!;dVaF(gk^oism-i%h8O;` z-H?YJT?V#-lX)B&Skq?WfI`0&Q};<7atmx#Q^uca8dKSulj^*YdA~B{`@MQ?>N881 z`~S#&p$jV)da(FhU(Vx>dH>3!@BY=1@2s31$Ikg#IL{7a?fh`rPp)_G>`>71`O!DG zPmjQPc>+skMzMBo5StfAj{W$0R*$?VwmeLUZtq;zqALFftJ1r}Hw%lN`PlcRA^PTF z?NAoBJZKnCWGJ(z-2O#MS@^GnrqLAdC}q=kz4EauRd2ne{uGVqP&Tib+~&%{0*%B) z8d>5xZW_TV4MfQV=VLP4%6TLM>mHe~`=%<7IgXG_K6Qgs@**}4XOeJkW8L#rrAy(e zlu9}D3YMQDf%qi9HGL@k{%lZU%8okciJ#`Z|4+SSzc_m+_v-YwQ!b99_`(8G&J7{& zIUBNH8b`rZ+h4xh>q<}C427o`rV7u^e6{HOYRTQ(kFSwc$B3|jUkGzh6DgzyZ6SrI z;r~*?y^~*xUKzij8H|KWF+`dMzeo*{6{w{MO9}VpU^6U1wJ6tcz3>*aQ+PVG#pq$w z*`P9`k7UwMobOVVp_fM0M4{-`l%P|ZF)`8|*R^6UxDvx*B_G&8ethYRt3EYkY_Ro* zmHgd|phgllRxKi7y5jJfl#wlRKaa|bJwv#$6=!xk; zuVh=tbYe}@1ve?mYBXa*W>`hmVv(jXcie)hh+4SfTCgnr*QL;+Bhr)$5p6HQ&1 z*Q(BpO@5|!`Tt6EHyj2jThb1fu?nk(Vv>s__A%Nw=3_?+Bx)(Z#&X#8S)`FX+z(6t z=k18n*S>5_y7TpfSAOZ|IY+mq$l&nJ9d#RvcXrizzqhO|e`7PS@{Rp)^Beay`ER&_ z^50n3m%p(bR{zFUaM2rXZN?i9HPvrC(B{423M+bJGc@mwxsdWVUJGe@V<)KWja5z2 z8{3+)H(aW`H{2nmZ!88E{-IM>{$K5Zbdbf_l03>Tpogp1hRc`@cd7%*MGX_S8dPr?byyu z(ljPXA>$;F@jrH+nRr*){qk?z@E`L|t(o=7{>rA;o$u~@(uLPuo{XFmcf7&JM-1x_`&rfU@ot=V|ZohMS3^`w1K*5zM6kS>we%x!SPcD2W z_u|YqOQg^uNu}b_JgQz?EPmX&Rm9geRRgLuHK_HgB8`;MNV?IiZ9_*yi_&krT3e20 zNdOF~M~A)>BWg1`)nXtGG@LrLgfyU0-AIZlLsxW-(r2SDte*5yOQWbpzh6CiNFo-0 z{;12)8BmJRU>PT=RHpL|Q#=Nvi+?5qk!3lg|09b*<%WUq=8`c($;N=*{JmZt@2E5& zN@&JpLZecBcEr$%>F8P|*FY+~PH7u3L5dnks7AN386za9*@zMVWI&t0*bQc^NFydA z>4io(!RU9GG%|jIWI-_G3Ay{3r>OXPIS@PDlN z=Rd__F(iFtFMcSlxEH$~s zx5#ViR#o7Dw7J4yLL&EVoeA5d7P~Zz;&tED){$t6;FiH_jZCG@#s>d)M5Qz0&wx9O zQLP~zc4Oi1uLWjW9_Vv_dpkDc-@a$Vhp?<)wtj5=@_UyllI8xD(a#*5?|xzJw|h_Q zJU{xbwD{$-bB2Snqt^ZN{a;?b)SI<)rsJ`ntJhDDM66vI*>;@m`={ND!(UywF!)$g z`pIP{VD3Lib>H=nEbMrv!cGHOIGRcWktR;bSd!w60hy$h zEUft5#j?*$>?wWHb4VT;Sn*7Qi?5UMk!{ZuYkT!L4ZV?LtFnI~qwE)nWZ;RMo>o18CKX zA#}o`Z$oWpBU)%Sy_#0^sESc<$b!XCiY7*w&0(cz))&I8FGo{E9Xi75(5$M(aBvl- z4GpjaO6yrtjn;&AeKX9udUUI3D!MY5LK=Q#Ahj>fd}jLE%y0Evvqld;zx7iKN5^xf zPYy-YTw4oonzm~jes|u`_&4JT&2LUeRbw@(kr8hN#tjvi)io(~ucZI^0LAhDW$#VE z+Pu!R;m?U(r{gx0G)bM6Wz9Hf^WVPWN`YrAa1<{rpGFA2*J{2nWYmxUcKIkfn3ZyGTCw zxzDp;p$UX5@)hq%7iDYrDbsCG|o%T*s~<@II!8@qg@l+xw{EcaGh|FWDdM zvwOeT%X2@iT=>2MhbKOEKls_~_D_!&_UxN^vUxc0c;}0=+nawinCE>C8cg-40r6wb7=>qPF!td zlyK^Yp^THi8_PcUb|C2}n*5JJ5D~{j+9~kI<1&zH2|kQeMS3;N5~^V``6P^|o`K12 z$034X9b;h7SmJ<_ajz_fz31JZ*H8kR&oEra&ycL` zB8si**t50=YuOHhl8HUuT83S>UoN0|_cky*AfS#xYf%8?G%s}0$G%qjWycLGovz2! 
z3qgOK`8`NY41zSL10-3U=)Df;%<6!y95lhBm3w!A5_-Of6vY>mX!7rcKD06)+|iAQ zqYtg#QM+6@XbCdV!~+j=#h@za0Bv3?j3DwbWw)aVS_K_=L@If5=tZk{8#?C@Q3h%> ziFa;CeGB@<@$2c~Ia5d2zUMKn2ZlJ#H}pLJqWi;%Lc5#)Dd%JD`Tp(7;-Nx$o&Blq z9_tg0KHW~q1@EJMuQ+yUe&a0Ydeii%6sZ&$S_SLCv{UiF*v|GpTfU(BmH$cg4}y>N zKJWOVDq{OWcfWN<4?py1C)x72B0u(M!*ygaP!Zf=Jrs<3k4IN6n*76t&_4CuFXv0L zXTK=gbNi4yG9>V2d5rYJvY8f@xA$7J--O=RQ@r%UH19}vHt2PQYT|iw8K=M z@``t-@DG!ZpZdwf_Ldi)%NK+Lg)O@1c{!iyYi^U=TmcHYs#=Q7I4k(A~dUs{c3BIAT>HvRC~Ag=lw^GS6- zj-;Rb^-M<1pXPE;zaGIKy*iCM_2&52gMXY$I`FG->e2t4A_%@4%dEIKmvkZ$d+>lR zgllPxr5~ZaoWj{Pm$`q@MervUwMoqzsvkLBcrv9yb~LB{F9(xapd2swPF>o`oBI2o z`f})3Ra8VCB{n#XCjTmy>uXE36_$9CL}dG9eW@S#j4?P!$6h^6^H%Po{GSx&7lrRR z!|=}4lw((f90$l4;a7)wjz!&iyO5HQBckZRwSyu<1WH6G@|;d+&eDJ)C+@C0u#m7Y zZV&0oRig<%UeQktgJ@-`ibs?062V}6TM8!yC7}hBg<|L;qJD)6Kod6f<6EGg&;~?TV5iare?5{? z14mLtz~9pR*Znx5=Wxc!n;KJV_L;3mC=NJ8b;4ni8xByfS0LBg(Y*H@X4ohBtT@?( z;Alwzj+Z*1f#Q8tP#)U*PP^0G@PiFZ>~I^~+1AYqe`Trjs)@ab7s5ww-necNw%c{tEdZciQYU#k}ktMT4#Nj=pzYE)sQKEF4 zeVnde^8aESo?^?BQ}mG^MK~jGPBQJ?mrHz$+DCrMaNj#mwvC5rMi{5*UmquRRhyo% zT}J}nqAEWfr0K@SDI+jTHo#f*_Z(WFj+a>AdnDtR-f0(N>(|KRPJw-2#9)Y}!wwcoA9H;GTCH6`S7R!#QZfBb2;uHnnpOVxW_oeJLKhP5gt zR=}tD-mYWXKtS=pDT*Ch;;B%0Uf|QczQy{y8$HF*>*5Ox1T7+vUXbOtLH8~>4DC=u zH&L_TRi)h31!AHSRCzKmj?%hJ@-`NRTq)>X}<#6NUA|f&7HzHN@e_mKZu48j6GZ?k7 z!hg|my}#6wBMGN9H}j$uVP@oiP369~T@~R|iS~O3meRoe7~Ao1uwvr27{EPM(Y`>{ z*j2-!kq@mKVu+?o*!_1Uh*U&wdtA%=g_j$+JHpW`r#X%nUM#oXloaB)OttsUay&4~ zvWjokZj57nD8}}DXN-x#B+LBu|KM1cRMei*VSSisfpHcFQxxpiXGyL_V?-i*-2XaG zv%NM&H$#Lx1QU$@i!Q3^I^}eM-uv(+n)a2+q9KUw)59#u1XD*=4?;+mjpOEXk%lRsCT+FbFKl%ol4yQ~y-WbZ#7V+5d%;RPYZ3w0D9 z2#Y)*C~<>;?YyC0$U6}4b!};8*d=X@kWa<&%Jk1f7gqHm!XWhJbRbgDK<921bY#dt zo!bYB90_zFMWW6@Y6Gc+!FYMvyjGB+$zNF@hW_1B=-n=ZUV<9>@)R(V+XTIZa->RH zL4$~)o7e%vgf8gIS0cjbT_B8sLMhmY@v4C`z=)mT+TOQNcSDz}2JO4zKmv!yKNH>` zebTkxOVH9S2~Rvd-0-F`Zlu)1^c?U~LMz@zGF3-61}lQM<$H_6c4|5|RE1TU4lRC9 zSfy`c@VIx`iR=A9f5?4X2;yeI%EIdqfqdZSy%ngLz6~#MVhOoOf1P2>n_*f+7)5^5 z`ak1kLmTHfm=^sq-<+o#+CvoEk_(i}3}eG6)plhHEedCfj4xj(_APoXt|AvMOgH^( zf;0?~k^wl+95mTEqt`v-`LF6f6DuB>nJKcte32Dqsdk7W^)a!}h}4K}Pa^lYh7SNo zl2ngTQ(Ip=g%n8H*5>aY&XMenzt6}xx&C+|Kl4DU@E?w*)a*KvRQtY@d>vy3x79Md zf2<`t;dDHCBE=0gG#9kd>>yzJIu`5G#GBb5-Rt{SJJSt)OdqJ3UeJ_!Cx-WqZd7C_ z(WKuGirg-c?Nop!ryEphogm9<1zDzgL7m78HE4qG01YAyab5@X6?TGVhX~qt_Cim- z3RHz1pu@ExA`+o-q-czK&}>(L6seewf;KdU3I>Q$7~I(o=6nU%b~OXGqZ!PIl;UYd z;x#+8J6f0Ba`N(~?>Dd`Phf0ka;!S~fy!4{jG?_=jF_Hls;5!l2trjut$gF}9be$njP z?_h#${kJGXADAPXx-OSF7X4j3|BP|Zg<{k1Vu&=pxmO2Y+h@HxP8*Dcc=|h26MPMe z!FdFH$0X zd1mM==mBL;FZ5?2bwU&_R2|e7)T4I)w?(DQ;e)y`Zfr*fQG^tW7={Z~FqqK}+H48v z@GW3KeGEhe3~!f!p->Fg$CNO#yA6zwci)sibX9?eEWGHXi9LSB&K~_ykUq9+CEt&S z8~T!=GSF$>AN=Y{`dvGJS$N!35d>SQkAExu6M?B}W?kP2(~^^Wpw_qEdeqNY>hlqX zd%e5Djk#EsWs_)r6yW8B-*;utHT=K-cX5yY>B}WX<1Aeth%tK_!sN~+7a?o7!#J3! 
[... remainder of a GIT binary patch payload (base85 data) omitted ...]
literal 0
HcmV?d00001

diff --git a/docs/build/doctrees/environment.pickle b/docs/build/doctrees/environment.pickle
deleted file mode 100644
index 6bcb79100dee686b230ba3aeca07112508f0e4e8..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 10131
[... base85 payload of the deleted 10131-byte Sphinx environment.pickle omitted ...]
diff --git a/docs/build/doctrees/index.doctree b/docs/build/doctrees/index.doctree
deleted file mode 100644
index c8cc22f27e2a48d6163b35d8c916b9ed9f2816d9..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 4786
[... base85 payload of the deleted 4786-byte index.doctree omitted ...]

[... deletion of a generated Sphinx HTML theme stylesheet under docs/build/html/_static/ (page layout, sidebar, admonition, table, code, and print rules) omitted ...]

diff --git a/docs/build/html/_static/basic.css b/docs/build/html/_static/basic.css
deleted file mode 100644
index ea6972d55..000000000
--- a/docs/build/html/_static/basic.css
+++ /dev/null
@@ -1,764 +0,0 @@
[... 764 lines of the generated "Sphinx stylesheet -- basic theme" removed ...]

diff --git a/docs/build/html/_static/custom.css b/docs/build/html/_static/custom.css
deleted file mode 100644
index 2a924f1d6..000000000
--- a/docs/build/html/_static/custom.css
+++ /dev/null
@@ -1 +0,0 @@
-/* This file intentionally left blank. */
diff --git a/docs/build/html/_static/doctools.js b/docs/build/html/_static/doctools.js
deleted file mode 100644
index b33f87fcb..000000000
--- a/docs/build/html/_static/doctools.js
+++ /dev/null
@@ -1,314 +0,0 @@
[... 314 lines of the generated Sphinx doctools.js utilities (URL helpers, search-term highlighting, index-table toggles, i18n, keyboard navigation) removed ...]
a/docs/build/html/_static/documentation_options.js b/docs/build/html/_static/documentation_options.js deleted file mode 100644 index 6d8651025..000000000 --- a/docs/build/html/_static/documentation_options.js +++ /dev/null @@ -1,10 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'None', - COLLAPSE_INDEX: false, - FILE_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false -}; \ No newline at end of file diff --git a/docs/build/html/_static/file.png b/docs/build/html/_static/file.png deleted file mode 100644 index a858a410e4faa62ce324d814e4b816fff83a6fb3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 286 zcmV+(0pb3MP)s`hMrGg#P~ix$^RISR_I47Y|r1 z_CyJOe}D1){SET-^Amu_i71Lt6eYfZjRyw@I6OQAIXXHDfiX^GbOlHe=Ae4>0m)d(f|Me07*qoM6N<$f}vM^LjV8( diff --git a/docs/build/html/_static/jquery-3.4.1.js b/docs/build/html/_static/jquery-3.4.1.js deleted file mode 100644 index 773ad95c5..000000000 --- a/docs/build/html/_static/jquery-3.4.1.js +++ /dev/null @@ -1,10598 +0,0 @@ -/*! - * jQuery JavaScript Library v3.4.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2019-05-01T21:04Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. 
- return typeof obj === "function" && typeof obj.nodeType !== "number"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. - // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. - val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.4.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. 
- each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script 
in a global context - globalEval: function( code, options ) { - DOMEval( code, { nonce: options && options.nonce } ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.4 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2019-04-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) && - - // Support: IE 8 only - // Exclude object elements - (nodeType !== 1 || context.nodeName.toLowerCase() !== "object") ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // Thanks to Andrew Dupont for this technique. 
- if ( nodeType === 1 && rdescend.test( selector ) ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem.namespaceURI, - docElem = (elem.ownerDocument || elem).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( el ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll("[msallowcapture^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push("~="); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push(".#.+[+~]"); - } - }); - - assert(function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement("input"); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - 
// Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - - // Compile and execute a filtering function if one is not provided - // Provide `match` to avoid retokenization if we modified the selector above - ( compiled || compile( selector, match ) )( - seed, - context, - !documentIsHTML, - results, - !context || rsibling.test( selector ) && testContext( context.parentNode ) || context - ); - return results; -}; - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome 14-35+ -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = !!hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( el ) { - // Should return 1, but returns 4 (following) - return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; -}); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( el ) { - el.innerHTML = ""; - return el.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( el ) { - el.innerHTML = ""; - el.firstChild.setAttribute( "value", "" ); - return el.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( el ) { - return el.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return elem[ name ] === true ? name.toLowerCase() : - (val = elem.getAttributeNode( name )) && val.specified ? 
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
- jQuery( selector ) : - selector || [], - false - ).length; - } -} ); - - -// Initialize a jQuery object - - -// A central reference to the root jQuery(document) -var rootjQuery, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - // Shortcut simple #id case for speed - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, - - init = jQuery.fn.init = function( selector, context, root ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Method init() accepts an alternate rootjQuery - // so migrate can support jQuery.sub (gh-2101) - root = root || rootjQuery; - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector[ 0 ] === "<" && - selector[ selector.length - 1 ] === ">" && - selector.length >= 3 ) { - - // Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && ( match[ 1 ] || !context ) ) { - - // HANDLE: $(html) -> $(array) - if ( match[ 1 ] ) { - context = context instanceof jQuery ? context[ 0 ] : context; - - // Option to run scripts is true for back-compat - // Intentionally let the error be thrown if parseHTML is not present - jQuery.merge( this, jQuery.parseHTML( - match[ 1 ], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - - // Properties of context are called as methods if possible - if ( isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[ 2 ] ); - - if ( elem ) { - - // Inject the element directly into the jQuery object - this[ 0 ] = elem; - this.length = 1; - } - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || root ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this[ 0 ] = selector; - this.length = 1; - return this; - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( isFunction( selector ) ) { - return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( typeof elem.contentDocument !== "undefined" ) { - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - -var swap = function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); - -var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); - - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // Support: IE <=9 only - option: [ 1, "<select multiple='multiple'>", "</select>" ], - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting <tbody> or other required elements. - thead: [ 1, "<table>", "</table>" ], - col: [ 2, "<table><colgroup>", "</colgroup></table>" ], - tr: [ 2, "<table><tbody>", "</tbody></table>" ], - td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>
" ], - - _default: [ 0, "", "" ] -}; - -// Support: IE <=9 only -wrapMap.optgroup = wrapMap.option; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, attached, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( toType( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - attached = isAttached( elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( attached ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps (WWA) - // `name` and `type` must 
use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = ""; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; -} )(); - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 - 11+ -// focus() and blur() are asynchronous, except when they are no-op. -// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = {}; - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? - jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." 
) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - // Make a writable jQuery.Event from the native event object - var event = jQuery.event.fix( nativeEvent ); - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. 
- saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - return result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. 
-// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - /* eslint-disable max-len */ - - // See https://github.com/eslint/eslint/issues/3229 - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, - - /* eslint-enable */ - - // Support: IE <=10 - 11, Edge 12 - 13 only - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( elem ).children( "tbody" )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { - elem.type = elem.type.slice( 5 ); - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc. 
- if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.access( src ); - pdataCur = dataPriv.set( dest, pdataOld ); - events = pdataOld.events; - - if ( events ) { - delete pdataCur.handle; - pdataCur.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. - if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = concat.apply( [], args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). 
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - } ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html.replace( rxhtmlTag, "<$1>" ); - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using 
delete, see Data#remove - elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? 
dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. 
- function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. 
- // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. - return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - // Support: IE 9-11 only - // Also use offsetWidth/offsetHeight for when box sizing is unreliable - // We use getClientRects() to check for hidden/disconnected. - // In those cases, the computed value can be trusted to be border-box - if ( ( !support.boxSizingReliable() && isBorderBox || - val === "auto" || - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. 
- // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "gridArea": true, - "gridColumn": true, - "gridColumnEnd": true, - "gridColumnStart": true, - "gridRow": true, - "gridRowEnd": true, - "gridRowStart": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? 
"" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. - ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? 
- boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = Date.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } 
- } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If 
prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url, options ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " - - - - - - - - - - - - - - - -
- [remaining deleted lines: the rest of the bundled jQuery script and the generated "Index" page; markup lost in extraction]
\ No newline at end of file
diff --git a/docs/build/html/index.html b/docs/build/html/index.html
deleted file mode 100644
index 23b667b69..000000000
--- a/docs/build/html/index.html
+++ /dev/null
@@ -1,110 +0,0 @@
- [deleted generated page "Welcome to PDC’s documentation!" with its "Indices and tables" section; markup lost in extraction]
\ No newline at end of file
diff --git a/docs/build/html/objects.inv b/docs/build/html/objects.inv
deleted file mode 100644
index a10697ef70b0d4bb13b0fa9de6f17c275d0fd3c1..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 236
zcmY#Z2rkIT%&Sny%qvUHE6FdaR47X=D$dN$Q!wIERtPA{&q_@$u~GT4qkF0+3G5&n+lQEiO(?Q7A3W%u83O%E?U9
zf4l-t{VV%9Cdr6-_f@I8rr#o>Xx4&^i=c
zJn3_Bu&CE3QKpzV@}_5W!Wgu)&ibC!_S89f=6vvzQ)m4TCiy!q)$?IWnvh@TmFcH-
g<=CSuM;1Nd36T{r?0A&WQR9$Qr6tbbdfz=10Im{Uh5!Hn

diff --git a/docs/build/html/search.html b/docs/build/html/search.html
deleted file mode 100644
index cb115f05e..000000000
--- a/docs/build/html/search.html
+++ /dev/null
@@ -1,111 +0,0 @@
- [deleted generated "Search" page, including its "Please activate JavaScript to enable the search functionality." notice and search-box help text; markup lost in extraction]
- - - - - - - \ No newline at end of file diff --git a/docs/build/html/searchindex.js b/docs/build/html/searchindex.js deleted file mode 100644 index b10a399cf..000000000 --- a/docs/build/html/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["index"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,sphinx:56},filenames:["index.rst"],objects:{},objnames:{},objtypes:{},terms:{index:0,modul:0,page:0,search:0},titles:["Welcome to PDC\u2019s documentation!"],titleterms:{document:0,indic:0,pdc:0,tabl:0,welcom:0}}) \ No newline at end of file diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 000000000..ffc6fc227 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,2 @@ +sphinxemoji +breathe diff --git a/docs/source/Doxyfile.in b/docs/source/Doxyfile.in new file mode 100644 index 000000000..f86c67d4a --- /dev/null +++ b/docs/source/Doxyfile.in @@ -0,0 +1,2693 @@ +# Doxyfile 1.9.6 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). +# +# Note: +# +# Use doxygen to compare the used configuration file with the template +# configuration file: +# doxygen -x [configFile] +# Use doxygen to compare the used configuration file with the template +# configuration file without replacing the environment variables or CMake type +# replacement variables: +# doxygen -x_noenv [configFile] + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "PDC" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. 
+ +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = "_build" + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096 +# sub-directories (in 2 levels) under the output directory of each output format +# and will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to +# control the number of sub-directories. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# Controls the number of sub-directories that will be created when +# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every +# level increment doubles the number of directories, resulting in 4096 +# directories at level 8 which is the default and also the maximum value. The +# sub-directories are organized in 2 levels, the first level always has a fixed +# number of 16 directories. +# Minimum value: 0, maximum value: 8, default value: 8. +# This tag requires that the tag CREATE_SUBDIRS is set to YES. + +CREATE_SUBDIRS_LEVEL = 8 + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian, +# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English +# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek, +# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with +# English messages), Korean, Korean-en (Korean with English messages), Latvian, +# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, +# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, +# Swedish, Turkish, Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. 
+ +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = NO + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = YES + +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". 
If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:^^" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". Note that you cannot put \n's in the value part of an alias +# to insert newlines (in the resulting output). You can put ^^ in the value part +# of an alias to insert a newline as if a physical newline was in the original +# file. When you need a literal { or } or , in the value part of an alias you +# have to escape them by means of a backslash (\), this can lead to conflicts +# with the commands \{ and \} for these it is advised to use the version @{ and +# @} or use a double escape (\\{ and \\}) + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. 
For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice, +# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See https://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 5. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. + +TOC_INCLUDE_HEADINGS = 5 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. 
+ +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use +# during processing. When set to 0 doxygen will based this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. Since this is still an experimental feature the default is set to 1, +# which effectively disables parallel processing. Please report any issues you +# encounter. Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. 
+ +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# will also hide undocumented C++ concepts if enabled. This option has no effect +# if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# declarations. If set to NO, these declarations will be included in the +# documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# With the correct setting of option CASE_SENSE_NAMES doxygen will better be +# able to match the capabilities of the underlying filesystem. In case the +# filesystem is case sensitive (i.e. it supports files in the same directory +# whose names only differ in casing), the option must be set to YES to properly +# deal with such files in case they appear in the input. 
For filesystems that +# are not case sensitive the option should be set to NO to properly deal with +# output files written for symbols that only differ in casing, such as for two +# classes, one named CLASS and the other named Class, and to also support +# references to files without having to specify the exact matching casing. On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. +# Possible values are: SYSTEM, NO and YES. +# The default value is: SYSTEM. + +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = YES + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class +# will show which file needs to be included to use the class. +# The default value is: YES. + +SHOW_HEADERFILE = YES + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. 
If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. 
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output is used as
+# the file version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file. See also section "Changing the
+# layout of pages" for information.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as documenting some parameters in
+# a documented function twice, or documenting parameters that don't exist or
+# using markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete
+# function parameter documentation. If set to NO, doxygen will accept that some
+# parameters have no documentation without warning.
+# The default value is: YES.
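+# As a hedged illustration (the function below is hypothetical and not part of
+# this repository), a declaration whose comment documents only one of its two
+# parameters would be reported when this option is set to YES:
+#
+#   /**
+#    * \brief Attach a key-value tag to an object.
+#    * \param name Tag name (the second parameter has no \param entry).
+#    */
+#   int attach_tag(const char *name, const char *value);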
+ +WARN_IF_INCOMPLETE_DOC = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong parameter +# documentation, but not about the absence of documentation. If EXTRACT_ALL is +# set to YES then this flag will automatically be disabled. See also +# WARN_IF_INCOMPLETE_DOC +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If WARN_IF_UNDOC_ENUM_VAL option is set to YES, doxygen will warn about +# undocumented enumeration values. If set to NO, doxygen will accept +# undocumented enumeration values. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: NO. + +WARN_IF_UNDOC_ENUM_VAL = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# See also: WARN_LINE_FORMAT +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# In the $text part of the WARN_FORMAT command it is possible that a reference +# to a more specific place is given. To make it easier to jump to this place +# (outside of doxygen) the user can define a custom "cut" / "paste" string. +# Example: +# WARN_LINE_FORMAT = "'vi $file +$line'" +# See also: WARN_FORMAT +# The default value is: at line $line of file $file. + +WARN_LINE_FORMAT = "at line $line of file $file" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). In case the file specified cannot be opened for writing the +# warning and error messages are written to standard error. When as file - is +# specified the warning and error messages are written to standard output +# (stdout). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = "../../src/" + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. 
See the libiconv +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. +# See also: INPUT_FILE_ENCODING +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses The INPUT_FILE_ENCODING tag can be used to specify +# character encoding on a per file pattern basis. Doxygen will compare the file +# name with each pattern and apply the encoding instead of the default +# INPUT_ENCODING) if there is a match. The character encodings are a list of the +# form: pattern=encoding (like *.php=ISO-8859-1). See cfg_input_encoding +# "INPUT_ENCODING" for further information on supported encodings. + +INPUT_FILE_ENCODING = + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, +# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C +# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, +# *.vhdl, *.ucf, *.qsf and *.ice. + +FILE_PATTERNS = *.c \ + *.h \ + *.py + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# ANamespace::AClass, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). 
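+# As a hedged illustration only (the directory name below is hypothetical and
+# not part of this repository's layout), a setting such as
+#   EXAMPLE_PATH = ../../examples
+# would let \include or \dontinclude commands in source comments pull code
+# fragments from files located under that directory.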
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that doxygen will use the data processed and written to standard output
+# for further processing, therefore nothing else, like debug statements or used
+# commands (so in case of a Windows batch file always use @echo OFF), should be
+# written to standard output.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html).
This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +# The Fortran standard specifies that for fixed formatted Fortran code all +# characters from position 72 are to be considered as comment. A common +# extension is to allow longer lines before the automatic comment starts. The +# setting FORTRAN_COMMENT_AFTER will also make it possible that longer lines can +# be processed before the automatic comment starts. +# Minimum value: 7, maximum value: 10000, default value: 72. + +FORTRAN_COMMENT_AFTER = 72 + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = YES + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. 
in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The IGNORE_PREFIX tag can be used to specify a prefix (or a list of prefixes) +# that should be ignored while generating the index headers. The IGNORE_PREFIX +# tag works for classes, function and member names. The entity will be placed in +# the alphabetical list under the first letter of the entity name that remains +# after removing the prefix. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. 
See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). +# Note: Since the styling of scrollbars can currently not be overruled in +# Webkit/Chromium, the styling will be left out of the default doxygen.css if +# one or more extra stylesheets have been specified. So if scrollbar +# customization is desired it has to be added explicitly. For an example see the +# documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE tag can be used to specify if the generated HTML output +# should be rendered with a dark or light theme. +# Possible values are: LIGHT always generate light mode output, DARK always +# generate dark mode output, AUTO_LIGHT automatically set the mode according to +# the user preference, use light mode if no preference is set (the default), +# AUTO_DARK automatically set the mode according to the user preference, use +# dark mode if no preference is set and TOGGLE allow to user to switch between +# light and dark mode via a button. +# The default value is: AUTO_LIGHT. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE = AUTO_LIGHT + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a color-wheel, see +# https://en.wikipedia.org/wiki/Hue for more information. 
For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use gray-scales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. 
Running make will produce the docset in that directory and +# running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag determines the URL of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDURL = + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# on Windows. In the beginning of 2021 Microsoft took the original page, with +# a.o. the download links, offline the HTML help workshop was already many years +# in maintenance mode). You can download the HTML help workshop from the web +# archives at Installation executable (see: +# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo +# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe). +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. 
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the main .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). 
+# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine tune the look of the index (see "Fine-tuning the output"). As an +# example, the default style sheet generated by doxygen has an example that +# shows how to put an image at the root of the tree instead of the PROJECT_NAME. +# Since the tree basically has the same information as the tab index, you could +# consider setting DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
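+# For example (values shown for illustration only), a layout that relies on the
+# side tree instead of the top tabs, similar to the readthedocs style mentioned
+# under FULL_SIDEBAR below, could combine:
+#   GENERATE_TREEVIEW = YES
+#   DISABLE_INDEX     = YES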
+ +GENERATE_TREEVIEW = YES + +# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the +# FULL_SIDEBAR option determines if the side bar is limited to only the treeview +# area (value NO) or if it should extend to the full height of the window (value +# YES). Setting this to YES gives a layout similar to +# https://docs.readthedocs.io with more room for contents, but less room for the +# project logo, title, and description. If either GENERATE_TREEVIEW or +# DISABLE_INDEX is set to NO, this option has no effect. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FULL_SIDEBAR = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email +# addresses. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +OBFUSCATE_EMAILS = YES + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. + +FORMULA_MACROFILE = + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# https://www.mathjax.org) which uses client side JavaScript for the rendering +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +USE_MATHJAX = NO + +# With MATHJAX_VERSION it is possible to specify the MathJax version to be used. +# Note that the different versions of MathJax have different requirements with +# regards to the different settings, so it is possible that also other MathJax +# settings have to be changed when switching between the different MathJax +# versions. +# Possible values are: MathJax_2 and MathJax_3. +# The default value is: MathJax_2. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_VERSION = MathJax_2 + +# When MathJax is enabled you can set the default output format to be used for +# the MathJax output. For more details about the output format see MathJax +# version 2 (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3 +# (see: +# http://docs.mathjax.org/en/latest/web/components/output.html). +# Possible values are: HTML-CSS (which is slower, but has the best +# compatibility. This is the name for Mathjax version 2, for MathJax version 3 +# this will be translated into chtml), NativeMML (i.e. MathML. Only supported +# for NathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This +# is the name for Mathjax version 3, for MathJax version 2 this will be +# translated into HTML-CSS) and SVG. +# The default value is: HTML-CSS. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_FORMAT = HTML-CSS + +# When MathJax is enabled you need to specify the location relative to the HTML +# output directory using the MATHJAX_RELPATH option. The destination directory +# should contain the MathJax.js script. For instance, if the mathjax directory +# is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax +# Content Delivery Network so you can quickly see the result without installing +# MathJax. However, it is strongly recommended to install a local copy of +# MathJax from https://www.mathjax.org before deployment. The default value is: +# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2 +# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3 +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_RELPATH = + +# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax +# extension names that should be enabled during MathJax rendering. For example +# for MathJax version 2 (see +# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions): +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# For example for MathJax version 3 (see +# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html): +# MATHJAX_EXTENSIONS = ams +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. 
Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/