From 5cadf056b887922f49a8ad2afb2596941d343c5b Mon Sep 17 00:00:00 2001 From: Pavlo Smahin Date: Mon, 2 Dec 2024 11:34:33 +0200 Subject: [PATCH] feat(cn-browse): Implement Database Structure and Logic for Managing Call Numbers (#702) Closes: MSEARCH-862 --- NEWS.md | 1 + README.md | 125 +++++------ doc/development.md => development.md | 189 +++++++++------- docker/.env | 16 +- docker/dashboards/Dockerfile | 5 - docker/dashboards/opensearch_dashboards.yml | 3 - docker/docker-compose.yml | 109 ++++------ docker/opensearch/Dockerfile | 2 + pom.xml | 3 +- .../PopulateInstanceBatchInterceptor.java | 7 +- .../search/model/entity/CallNumberEntity.java | 135 ++++++++++++ .../entity/ChildResourceEntityBatch.java | 9 + .../entity/InstanceCallNumberEntity.java | 18 ++ .../search/model/types/ReindexEntityType.java | 1 + .../InstanceChildrenResourceService.java | 51 +++-- .../extractor/ChildResourceExtractor.java | 9 +- .../impl/CallNumberResourceExtractor.java | 130 +++++++++++ .../impl/ClassificationResourceExtractor.java | 16 +- .../impl/ContributorResourceExtractor.java | 5 + .../impl/SubjectResourceExtractor.java | 7 +- .../service/reindex/ReindexConstants.java | 4 + .../ReindexMergeRangeIndexService.java | 7 +- .../reindex/jdbc/CallNumberRepository.java | 202 ++++++++++++++++++ .../jdbc/ClassificationRepository.java | 12 +- .../reindex/jdbc/ContributorRepository.java | 12 +- .../jdbc/InstanceChildResourceRepository.java | 5 +- .../service/reindex/jdbc/ItemRepository.java | 10 +- .../reindex/jdbc/SubjectRepository.java | 15 +- src/main/resources/application.yml | 13 +- .../resources/changelog/changelog-master.xml | 7 +- .../v4.1/create_call_number_tables.xml | 63 ++++++ .../swagger.api/parameters/feature-id.yaml | 1 + .../entity/tenantConfiguredFeature.yaml | 1 + .../InstanceChildrenResourceServiceTest.java | 28 ++- .../CallNumberResourceExtractorTest.java | 71 ++++++ .../ChildResourceExtractorTestBase.java | 7 +- .../ClassificationResourceExtractorTest.java | 3 + 
.../ReindexMergeRangeIndexServiceTest.java | 11 +- .../jdbc/ClassificationRepositoryIT.java | 3 +- .../reindex/jdbc/ContributorRepositoryIT.java | 3 +- .../reindex/jdbc/SubjectRepositoryIT.java | 3 +- src/test/resources/application.yml | 1 + 42 files changed, 1013 insertions(+), 310 deletions(-) rename doc/development.md => development.md (82%) delete mode 100644 docker/dashboards/Dockerfile delete mode 100644 docker/dashboards/opensearch_dashboards.yml create mode 100644 src/main/java/org/folio/search/model/entity/CallNumberEntity.java create mode 100644 src/main/java/org/folio/search/model/entity/ChildResourceEntityBatch.java create mode 100644 src/main/java/org/folio/search/model/entity/InstanceCallNumberEntity.java create mode 100644 src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/CallNumberResourceExtractor.java create mode 100644 src/main/java/org/folio/search/service/reindex/jdbc/CallNumberRepository.java create mode 100644 src/main/resources/changelog/changes/v4.1/create_call_number_tables.xml create mode 100644 src/test/java/org/folio/search/service/converter/preprocessor/extractor/CallNumberResourceExtractorTest.java diff --git a/NEWS.md b/NEWS.md index c77f3df34..05b70d433 100644 --- a/NEWS.md +++ b/NEWS.md @@ -8,6 +8,7 @@ ### Features * Move Instance sub-entities population from database trigger to code ([MSEARCH-887](https://folio-org.atlassian.net/browse/MSEARCH-887)) +* Call Numbers Browse: Implement Database Structure and Logic for Managing Call Numbers ([MSEARCH-862](https://folio-org.atlassian.net/browse/MSEARCH-862)) ### Bug fixes * Remove shelving order calculation for local call-number types diff --git a/README.md b/README.md index 6422f2eba..4b43af31c 100644 --- a/README.md +++ b/README.md @@ -213,67 +213,68 @@ and [Cross-cluster replication](https://docs.aws.amazon.com/opensearch-service/l ### Environment variables -| Name | Default value | Description | 
-|:----------------------------------------------------|:-----------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| DB_HOST | postgres | Postgres hostname | -| DB_PORT | 5432 | Postgres port | -| DB_USERNAME | postgres | Postgres username | -| DB_PASSWORD | postgres | Postgres username password | -| DB_DATABASE | okapi_modules | Postgres database name | -| ELASTICSEARCH_URL | http://elasticsearch:9200 | Elasticsearch URL | -| ELASTICSEARCH_USERNAME | - | Elasticsearch username (not required for dev envs) | -| ELASTICSEARCH_PASSWORD | - | Elasticsearch password (not required for dev envs) | -| ELASTICSEARCH_COMPRESSION_ENABLED | true | Specify if Elasticsearch request/response compression enabled | -| KAFKA_HOST | kafka | Kafka broker hostname | -| KAFKA_PORT | 9092 | Kafka broker port | -| KAFKA_SECURITY_PROTOCOL | PLAINTEXT | Kafka security protocol used to communicate with brokers (SSL or PLAINTEXT) | -| KAFKA_SSL_KEYSTORE_LOCATION | - | The location of the Kafka key store file. This is optional for client and can be used for two-way authentication for client. | -| KAFKA_SSL_KEYSTORE_PASSWORD | - | The store password for the Kafka key store file. This is optional for client and only needed if 'ssl.keystore.location' is configured. | -| KAFKA_SSL_TRUSTSTORE_LOCATION | - | The location of the Kafka trust store file. | -| KAFKA_SSL_TRUSTSTORE_PASSWORD | - | The password for the Kafka trust store file. If a password is not set, trust store file configured will still be used, but integrity checking is disabled. | -| KAFKA_EVENTS_CONSUMER_PATTERN | (${folio.environment}\.\)(.*\.)inventory\.(instance\ | holdings-record\ |item\|bound-with) | Custom subscription pattern for Kafka consumers. | -| KAFKA_EVENTS_CONCURRENCY | 2 | Custom number of kafka concurrent threads for message consuming. 
| -| KAFKA_AUTHORITIES_CONSUMER_PATTERN | (${folio.environment}\.)(.*\.)authorities\.authority | Custom subscription pattern for Kafka authority message consumers. | -| KAFKA_AUTHORITIES_CONCURRENCY | 1 | Custom number of kafka concurrent threads for authority message consuming. | -| KAFKA_LOCATION_CONCURRENCY | 1 | Custom number of kafka concurrent threads for inventory.location, inventory.campus, inventory.institution and inventory.library message consuming. | -| KAFKA_LINKED_DATA_CONCURRENCY | 1 | Custom number of kafka concurrent threads for linked data message consuming. | -| KAFKA_REINDEX_RANGE_INDEX_CONCURRENCY | 1 | Custom number of kafka concurrent threads for `search.reindex.range-index` message consuming. | -| KAFKA_REINDEX_RANGE_INDEX_TOPIC_PARTITIONS | 16 | Amount of partitions for `search.reindex.range-index` topic. | -| KAFKA_REINDEX_RANGE_INDEX_TOPIC_REPLICATION_FACTOR | - | Replication factor for `search.reindex.range-index` topic. | -| KAFKA_REINDEX_RECORDS_CONCURRENCY | 2 | Custom number of kafka concurrent threads for `inventory.reindex-records` message consuming. | -| KAFKA_CONSUMER_MAX_POLL_RECORDS | 200 | Maximum number of records returned in a single call to poll(). | -| KAFKA_RETRY_INTERVAL_MS | 2000 | Specifies time to wait before reattempting query. | -| KAFKA_RETRY_DELIVERY_ATTEMPTS | 6 | Specifies how many queries attempt to perform after the first one failed. 
| -| INDEXING_DATA_FORMAT | smile | Format for passing data to elasticsearch (json/smile) | -| INITIAL_LANGUAGES | eng | Comma separated list of languages for multilang fields see [Multi-lang search support](#multi-language-search-support) | -| MAX_SUPPORTED_LANGUAGES | 5 | Provides the maximum number of supported languages | -| SYSTEM_USER_USERNAME | mod-search | Username for `mod-search` system user | -| SYSTEM_USER_PASSWORD | - | Password for `mod-search` system user (not required for dev envs) | -| OKAPI_URL | - | OKAPI URL used to login system user, required | -| ENV | folio | The logical name of the deployment, must be unique across all environments using the same shared Kafka/Elasticsearch clusters, `a-z (any case)`, `0-9`, `-`, `_` symbols only allowed | -| SEARCH_BY_ALL_FIELDS_ENABLED | false | Specifies if globally search by all field values must be enabled or not (tenant can override this setting) | -| BROWSE_CN_INTERMEDIATE_VALUES_ENABLED | true | Specifies if globally intermediate values (nested instance items) must be populated or not (tenant can override this setting) | -| BROWSE_CN_INTERMEDIATE_REMOVE_DUPLICATES | true | Specifies if globally intermediate duplicate values (fullCallNumber) should be removed or not (Active only with BROWSE_CN_INTERMEDIATE_VALUES_ENABLED) | -| BROWSE_CLASSIFICATIONS_ENABLED | true | Specifies if globally instance classification indexing will be performed | -| SCROLL_QUERY_SIZE | 1000 | The number of records to be loaded by each scroll query. 10_000 is a max value | -| STREAM_ID_RETRY_INTERVAL_MS | 1000 | Specifies time to wait before reattempting query. | -| STREAM_ID_RETRY_ATTEMPTS | 3 | Specifies how many queries attempt to perform after the first one failed. | -| STREAM_ID_CORE_POOL_SIZE | 2 | The number of threads to keep in the pool, even if they are idle. | -| STREAM_ID_MAX_POOL_SIZE | 2 | The maximum number of threads to allow in the pool. | -| STREAM_ID_QUEUE_CAPACITY | 500 | The capacity of the queue. 
| -| CN_BROWSE_OPTIMIZATION_ENABLED | true | Defines if call-number browse optimization is enabled or not | -| SEARCH_QUERY_TIMEOUT | 25s | The maximum time to wait for search query response | -| MAX_BROWSE_REQUEST_OFFSET | 500 | The maximum elasticsearch query offset for additional requests on browse around | -| SYSTEM_USER_ENABLED | true | Defines if system user must be created at service tenant initialization or used for egress service requests | -| REINDEX_LOCATION_BATCH_SIZE | 1_000 | Defines number of locations to retrieve per inventory http request on locations reindex process | -| REINDEX_MERGE_RANGE_SIZE | 500 | The range size that represents the number of merge entities to process during the Merge process of reindex | -| REINDEX_UPLOAD_RANGE_SIZE | 1_000 | The range size that represents the number of upload entities to process during the Upload process of reindex | -| REINDEX_UPLOAD_RANGE_LEVEL | 3 | The level of deepness of upload range generator affecting the number of ranges to be generated | -| REINDEX_MERGE_RANGE_PUBLISHER_CORE_POOL_SIZE | 3 | The number of threads for publishing the merge ranges to keep in the pool, even if they are idle. | -| REINDEX_MERGE_RANGE_PUBLISHER_MAX_POOL_SIZE | 6 | The maximum number of threads for publishing the merge ranges to allow in the pool. | -| REINDEX_MERGE_RANGE_PUBLISHER_RETRY_INTERVAL_MS | 1000 | The retry interval in ms for reindex merge range request publishing. | -| REINDEX_MERGE_RANGE_PUBLISHER_RETRY_ATTEMPTS | 5 | The maximum number of retries for reindex merge range request publishing. 
| -| MAX_SEARCH_BATCH_REQUEST_IDS_COUNT | 20_000 | Defines maximum batch request IDs count for searching consolidated items/holdings in consortium | +| Name | Default value | Description | +|:---------------------------------------------------|:-----------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| DB_HOST | postgres | Postgres hostname | +| DB_PORT | 5432 | Postgres port | +| DB_USERNAME | postgres | Postgres username | +| DB_PASSWORD | postgres | Postgres username password | +| DB_DATABASE | okapi_modules | Postgres database name | +| ELASTICSEARCH_URL | http://elasticsearch:9200 | Elasticsearch URL | +| ELASTICSEARCH_USERNAME | - | Elasticsearch username (not required for dev envs) | +| ELASTICSEARCH_PASSWORD | - | Elasticsearch password (not required for dev envs) | +| ELASTICSEARCH_COMPRESSION_ENABLED | true | Specify if Elasticsearch request/response compression enabled | +| KAFKA_HOST | kafka | Kafka broker hostname | +| KAFKA_PORT | 9092 | Kafka broker port | +| KAFKA_SECURITY_PROTOCOL | PLAINTEXT | Kafka security protocol used to communicate with brokers (SSL or PLAINTEXT) | +| KAFKA_SSL_KEYSTORE_LOCATION | - | The location of the Kafka key store file. This is optional for client and can be used for two-way authentication for client. | +| KAFKA_SSL_KEYSTORE_PASSWORD | - | The store password for the Kafka key store file. This is optional for client and only needed if 'ssl.keystore.location' is configured. | +| KAFKA_SSL_TRUSTSTORE_LOCATION | - | The location of the Kafka trust store file. | +| KAFKA_SSL_TRUSTSTORE_PASSWORD | - | The password for the Kafka trust store file. If a password is not set, trust store file configured will still be used, but integrity checking is disabled. 
| +| KAFKA_EVENTS_CONSUMER_PATTERN | (${folio.environment}\.\)(.*\.)inventory\.(instance\ | holdings-record\ |item\|bound-with) | Custom subscription pattern for Kafka consumers. | +| KAFKA_EVENTS_CONCURRENCY | 2 | Custom number of kafka concurrent threads for message consuming. | +| KAFKA_AUTHORITIES_CONSUMER_PATTERN | (${folio.environment}\.)(.*\.)authorities\.authority | Custom subscription pattern for Kafka authority message consumers. | +| KAFKA_AUTHORITIES_CONCURRENCY | 1 | Custom number of kafka concurrent threads for authority message consuming. | +| KAFKA_LOCATION_CONCURRENCY | 1 | Custom number of kafka concurrent threads for inventory.location, inventory.campus, inventory.institution and inventory.library message consuming. | +| KAFKA_LINKED_DATA_CONCURRENCY | 1 | Custom number of kafka concurrent threads for linked data message consuming. | +| KAFKA_REINDEX_RANGE_INDEX_CONCURRENCY | 1 | Custom number of kafka concurrent threads for `search.reindex.range-index` message consuming. | +| KAFKA_REINDEX_RANGE_INDEX_TOPIC_PARTITIONS | 16 | Amount of partitions for `search.reindex.range-index` topic. | +| KAFKA_REINDEX_RANGE_INDEX_TOPIC_REPLICATION_FACTOR | - | Replication factor for `search.reindex.range-index` topic. | +| KAFKA_REINDEX_RECORDS_CONCURRENCY | 2 | Custom number of kafka concurrent threads for `inventory.reindex-records` message consuming. | +| KAFKA_CONSUMER_MAX_POLL_RECORDS | 200 | Maximum number of records returned in a single call to poll(). | +| KAFKA_RETRY_INTERVAL_MS | 2000 | Specifies time to wait before reattempting query. | +| KAFKA_RETRY_DELIVERY_ATTEMPTS | 6 | Specifies how many queries attempt to perform after the first one failed. 
| +| INDEXING_DATA_FORMAT | smile | Format for passing data to elasticsearch (json/smile) | +| INITIAL_LANGUAGES | eng | Comma separated list of languages for multilang fields see [Multi-lang search support](#multi-language-search-support) | +| MAX_SUPPORTED_LANGUAGES | 5 | Provides the maximum number of supported languages | +| SYSTEM_USER_USERNAME | mod-search | Username for `mod-search` system user | +| SYSTEM_USER_PASSWORD | - | Password for `mod-search` system user (not required for dev envs) | +| OKAPI_URL | - | OKAPI URL used to login system user, required | +| ENV | folio | The logical name of the deployment, must be unique across all environments using the same shared Kafka/Elasticsearch clusters, `a-z (any case)`, `0-9`, `-`, `_` symbols only allowed | +| SEARCH_BY_ALL_FIELDS_ENABLED | false | Specifies if globally search by all field values must be enabled or not (tenant can override this setting) | +| BROWSE_CN_INTERMEDIATE_VALUES_ENABLED | true | Specifies if globally intermediate values (nested instance items) must be populated or not (tenant can override this setting) | +| BROWSE_CN_INTERMEDIATE_REMOVE_DUPLICATES | true | Specifies if globally intermediate duplicate values (fullCallNumber) should be removed or not (Active only with BROWSE_CN_INTERMEDIATE_VALUES_ENABLED) | +| BROWSE_CLASSIFICATIONS_ENABLED | true | Specifies if globally instance classification feature is enabled | +| BROWSE_CALL_NUMBERS_ENABLED | true | Specifies if globally instance call-number feature is enabled | +| SCROLL_QUERY_SIZE | 1000 | The number of records to be loaded by each scroll query. 10_000 is a max value | +| STREAM_ID_RETRY_INTERVAL_MS | 1000 | Specifies time to wait before reattempting query. | +| STREAM_ID_RETRY_ATTEMPTS | 3 | Specifies how many queries attempt to perform after the first one failed. | +| STREAM_ID_CORE_POOL_SIZE | 2 | The number of threads to keep in the pool, even if they are idle. 
| +| STREAM_ID_MAX_POOL_SIZE | 2 | The maximum number of threads to allow in the pool. | +| STREAM_ID_QUEUE_CAPACITY | 500 | The capacity of the queue. | +| CN_BROWSE_OPTIMIZATION_ENABLED | true | Defines if call-number browse optimization is enabled or not | +| SEARCH_QUERY_TIMEOUT | 25s | The maximum time to wait for search query response | +| MAX_BROWSE_REQUEST_OFFSET | 500 | The maximum elasticsearch query offset for additional requests on browse around | +| SYSTEM_USER_ENABLED | true | Defines if system user must be created at service tenant initialization or used for egress service requests | +| REINDEX_LOCATION_BATCH_SIZE | 1_000 | Defines number of locations to retrieve per inventory http request on locations reindex process | +| REINDEX_MERGE_RANGE_SIZE | 500 | The range size that represents the number of merge entities to process during the Merge process of reindex | +| REINDEX_UPLOAD_RANGE_SIZE | 1_000 | The range size that represents the number of upload entities to process during the Upload process of reindex | +| REINDEX_UPLOAD_RANGE_LEVEL | 3 | The level of deepness of upload range generator affecting the number of ranges to be generated | +| REINDEX_MERGE_RANGE_PUBLISHER_CORE_POOL_SIZE | 3 | The number of threads for publishing the merge ranges to keep in the pool, even if they are idle. | +| REINDEX_MERGE_RANGE_PUBLISHER_MAX_POOL_SIZE | 6 | The maximum number of threads for publishing the merge ranges to allow in the pool. | +| REINDEX_MERGE_RANGE_PUBLISHER_RETRY_INTERVAL_MS | 1000 | The retry interval in ms for reindex merge range request publishing. | +| REINDEX_MERGE_RANGE_PUBLISHER_RETRY_ATTEMPTS | 5 | The maximum number of retries for reindex merge range request publishing. | +| MAX_SEARCH_BATCH_REQUEST_IDS_COUNT | 20_000 | Defines maximum batch request IDs count for searching consolidated items/holdings in consortium | The module uses system user to communicate with other modules from Kafka consumers. 
For production deployments you MUST specify the password for this system user via env variable: @@ -1011,4 +1012,4 @@ and the [Docker image](https://hub.docker.com/r/folioorg/mod-search/) ### Development tips -The development tips are described on the following page: [Development tips](doc/development.md) +The development tips are described on the following page: [Development tips](development.md) diff --git a/doc/development.md b/development.md similarity index 82% rename from doc/development.md rename to development.md index 4e8c07521..9d36a5572 100644 --- a/doc/development.md +++ b/development.md @@ -1,3 +1,115 @@ +## Local Development Setup Using Docker Compose for mod-search Module + +This guide will walk you through setting up your local development environment for the `mod-search` module using Docker Compose. +It includes setting up various services like API mock servers, OpenSearch, Kafka, PostgreSQL, and their respective UIs to aid during development. + +### Prerequisites + +Before you begin, ensure you have the following installed: +- [Docker](https://docs.docker.com/get-docker/) +- [Docker Compose](https://docs.docker.com/compose/install/) + +Make sure your [.env file](docker/.env) includes the necessary variables: `DB_USERNAME`, `DB_PASSWORD`, `DB_DATABASE`, `PGADMIN_PORT`, `PGADMIN_DEFAULT_EMAIL`, and `PGADMIN_DEFAULT_PASSWORD`. + +### Setup Environment + +1. **Start Services** + + Navigate to the docker folder in the project and execute: + ```shell + docker compose -f docker/docker-compose.yml up -d + ``` + +2. **Start the mod-search Application** + + First of all the application should be packaged: + ```shell + mvn clean package + ``` + + To run the `mod-search` application, you have two options: + - **Build and Run the Docker Image:** + ```shell + docker build -t dev.folio/mod-search . 
+ docker run -p 8081:8081 -e "DB_HOST=postgres" -e "KAFKA_HOST=kafka" -e "ELASTICSEARCH_URL=http://elasticsearch:9200" dev.folio/mod-search + ``` + - **Run the Application Directly:** You can also run the application directly if your development environment is set up with the necessary Java runtime. Execute the following command from the root of your project: + ```shell + java -jar target/mod-search-fat.jar + ``` + +3. **Initialize Environment** + + After starting the services and the mod-search application, invoke the following CURL command to post a tenant which will help in bringing up Kafka listeners and get indices created: + ```shell + curl --location --request POST 'http://localhost:8081/_/tenant' \ + --header 'Content-Type: application/json' \ + --header 'x-okapi-tenant: test_tenant' \ + --header 'x-okapi-url: http://localhost:9130' \ + --data-raw '{ + "module_to": "mod-search" + }' + ``` + You can check which tenants are enabled by wiremock in the file located at `src/test/resources/mappings/user-tenants.json`. + +4. 
**Consortium Support for Local Environment Testing** + + Consortium feature is defined automatically by calling the `/user-tenants` endpoint as outlined in the following CURL requests: + - **To enable the consortium feature:** + ```shell + curl --location --request POST 'http://localhost:8081/_/tenant' \ + --header 'Content-Type: application/json' \ + --header 'x-okapi-tenant: consortium' \ + --header 'x-okapi-url: http://localhost:9130' \ + --data-raw '{ + "module_to": "mod-search", + "parameters": [ + { + "key": "centralTenantId", + "value": "consortium" + } + ] + }' + ``` + + - **Enable member tenant:** + ```shell + curl --location --request POST 'http://localhost:8081/_/tenant' \ + --header 'Content-Type: application/json' \ + --header 'x-okapi-tenant: member_tenant' \ + --header 'x-okapi-url: http://localhost:9130' \ + --data-raw '{ + "module_to": "mod-search", + "parameters": [ + { + "key": "centralTenantId", + "value": "consortium" + } + ] + }' + ``` + +### Access Services + +- **API Mock Server**: http://localhost:9130 +- **OpenSearch Dashboard**: http://localhost:5601 +- **Kafka UI**: http://localhost:8080 +- **PgAdmin**: http://localhost:5050 + +### Monitoring and Logs + +To monitor the logs for any of the services: +``` +docker-compose logs [service_name] +``` + +### Stopping Services + +To stop and remove all containers associated with the compose file: +```shell +docker compose -f docker/docker-compose.yml down +``` + ## Overview `mod-search` is based on metadata-driven approach. 
It means that resource description is specified using JSON file and @@ -44,7 +156,7 @@ the [full-text queries](https://www.elastic.co/guide/en/elasticsearch/reference/ |:--------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | searchTypes | List of search types that are supported for the current field. Allowed values: `facet`, `filter`, `sort` | | searchAliases | List of aliases that can be used as a field name in the CQL search query. It can be used to combine several fields together during the search. For example, a query `keyword all title` combines for instance record following fields - `title`, `alternativeTitles.alternativeTitle`, `indexTitle`, `identifiers.value`, `contributors.name`
Other way of using it - is to rename field keeping the backward compatibility without required reindex. | -| index | Reference to the Elasticsearch mappings that are specified in [index-field-types](../src/main/resources/elasticsearch/index-field-types.json) | +| index | Reference to the Elasticsearch mappings that are specified in [index-field-types](src/main/resources/elasticsearch/index-field-types.json) | | showInResponse | Marks field to be returned during the search operation. `mod-search` adds to the Elasticsearch query all marked field paths. See also: [Source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/master/search-fields.html#source-filtering) | | searchTermProcessor | Search term processor, which pre-processes incoming value from CQL query for the search request. | | mappings | Elasticsearch fields mappings. It can contain new field mapping or can enrich referenced mappings, that comes from `index-field-types` | @@ -251,77 +363,4 @@ assertThatThrownBy(() -> service.doExceptionalOperation()) ### Integration testing The module uses [Testcontainers](https://www.testcontainers.org/) to run Elasticsearch, Apache Kafka and PostgreSQL -in embedded mode. It is required to have Docker installed and available on the host where the tests are executed. - -### Local environment testing -Navigate to the docker folder in the project and run `docker-compose up`. 
-This will build local mod-search image and bring it up along with all necessary infrastructure: - - elasticsearch along with dashboards (kibana analogue from opensearch) - - kafka along with zookeeper - - postgres - - wiremock server for mocking external api calls (for example authorization) - -Then, you should invoke -```shell -curl --location --request POST 'http://localhost:8081/_/tenant' \ ---header 'Content-Type: application/json' \ ---header 'x-okapi-tenant: test_tenant' \ ---header 'x-okapi-url: http://api-mock:8080' \ ---data-raw '{ - "module_to": "mod-search-$version$", - "purge": "false" -} -``` -to post some tenant in order to bring up kafka listeners and get indices created. -You can check which tenants enabled by wiremock in the `src/test/resources/mappings/user-tenants.json` - -To rebuild mod-search image you should: - - bring down existing containers by running `docker-compose down` - - run `docker-compose build mod-search` to build new mod-search image - - run `docker-compose up` to bring up infrastructure - -Hosts/ports of containers to access functionality: - - `http://localhost:5601/` - dashboards UI for elastic monitoring, data modification through dev console - - `localhost` - host, `5010` - port for remote JVM debug - - `http://localhost:8081` - for calling mod-search REST api. Note that header `x-okapi-url: http://api-mock:8080` should be added to request for apis that take okapi url from headers - - `localhost:29092` - for kafka interaction. If you are sending messages to kafka from java application with `spring-kafka` then this host shoulb be added to `spring.kafka.bootstrap-servers` property of `application.yml` - -### Consortium support for Local environment testing -Consortium feature is defined automatically at runtime by calling /user-tenants endpoint. -Consortium feature on module enable is defined by 'centralTenantId' tenant parameter. 
- -Invoke the following -```shell -curl --location --request POST 'http://localhost:8081/_/tenant' \ ---header 'Content-Type: application/json' \ ---header 'x-okapi-tenant: consortium' \ ---header 'x-okapi-url: http://api-mock:8080' \ ---data-raw '{ - "module_to": "mod-search-$version$", - "parameters": [ - { - "key": "centralTenantId", - "value": "consortium" - } - ] -} -``` - -Then execute the following to enable `member tenant` -```shell -curl --location --request POST 'http://localhost:8081/_/tenant' \ ---header 'Content-Type: application/json' \ ---header 'x-okapi-tenant: member_tenant' \ ---header 'x-okapi-url: http://api-mock:8080' \ ---data-raw '{ - "module_to": "mod-search-$version$", - "parameters": [ - { - "key": "centralTenantId", - "value": "consortium" - } - ] -} -``` -Consider that `tenantParameters` like `loadReference` and `loadSample` won't work because `loadReferenceData` -method is not implemented in the `SearchTenantService` yet. +in embedded mode. It is required to have Docker installed and available on the host where the tests are executed. 
\ No newline at end of file diff --git a/docker/.env b/docker/.env index 1308ac0c6..92f093752 100644 --- a/docker/.env +++ b/docker/.env @@ -1,18 +1,10 @@ COMPOSE_PROJECT_NAME=folio-mod-search -DB_HOST=postgres -DB_PORT=5432 + +# Postgres variables DB_DATABASE=okapi_modules DB_USERNAME=folio_admin DB_PASSWORD=folio_admin + +# PgAdmin variables PGADMIN_DEFAULT_EMAIL=user@domain.com PGADMIN_DEFAULT_PASSWORD=admin -PGADMIN_PORT=5050 -KAFKA_HOST=kafka -KAFKA_PORT=9092 -REPLICATION_FACTOR=1 -ENV=folio -DEBUG_PORT=5005 -OKAPI_URL=http://api-mock:8080 -PGADMIN_DEFAULT_EMAIL=user@domain.com -PGADMIN_DEFAULT_PASSWORD=admin -PGADMIN_PORT=5050 \ No newline at end of file diff --git a/docker/dashboards/Dockerfile b/docker/dashboards/Dockerfile deleted file mode 100644 index 661288336..000000000 --- a/docker/dashboards/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM opensearchproject/opensearch-dashboards:1.3.2 - -RUN /usr/share/opensearch-dashboards/bin/opensearch-dashboards-plugin remove securityDashboards - -COPY --chown=opensearch-dashboards:opensearch-dashboards opensearch_dashboards.yml /usr/share/opensearch-dashboards/config/ diff --git a/docker/dashboards/opensearch_dashboards.yml b/docker/dashboards/opensearch_dashboards.yml deleted file mode 100644 index c6f9ae50d..000000000 --- a/docker/dashboards/opensearch_dashboards.yml +++ /dev/null @@ -1,3 +0,0 @@ -server.name: opensearch-dashboards -server.host: "0" -opensearch.hosts: http://elasticsearch:9200 \ No newline at end of file diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index bfb11c250..0cd0a3c17 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,35 +1,4 @@ -version: "3.8" - services: - mod-search: - container_name: mod-search - image: dev.folio/mod-search - build: - context: ../ - dockerfile: Dockerfile - networks: - - mod-search-local - ports: - - "8081:8081" - - "${DEBUG_PORT}:${DEBUG_PORT}" - depends_on: - - api-mock - - opensearch - - kafka - - postgres - 
environment: - ELASTICSEARCH_URL: http://opensearch:9200 - ENV: ${ENV} - KAFKA_HOST: ${KAFKA_HOST} - KAFKA_PORT: ${KAFKA_PORT} - REPLICATION_FACTOR: ${REPLICATION_FACTOR} - DB_USERNAME: ${DB_USERNAME} - DB_PORT: ${DB_PORT} - DB_HOST: ${DB_HOST} - DB_DATABASE: ${DB_DATABASE} - DB_PASSWORD: ${DB_PASSWORD} - OKAPI_URL: http://api-mock:8080 - JAVA_OPTIONS: "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:${DEBUG_PORT}" api-mock: container_name: api-mock_mod-search @@ -45,86 +14,79 @@ services: opensearch: container_name: opensearch_mod-search - image: dev.folio/opensearch:1.3.2 + image: dev.folio/opensearch:latest build: context: opensearch dockerfile: Dockerfile networks: + - opensearch-net - mod-search-local ports: - "9200:9200" - - "9300:9300" volumes: - es-data:/usr/share/elasticsearch/data environment: - discovery.type=single-node - - discovery.zen.minimum_master_nodes=1 - - "DISABLE_SECURITY_PLUGIN=true" - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - "DISABLE_INSTALL_DEMO_CONFIG=true" + - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m" opensearch-dashboards: container_name: opensearch-dashboards_mod-search - image: dev.folio/opensearch-dashboards:1.3.2 - build: - context: dashboards - dockerfile: Dockerfile + image: opensearchproject/opensearch-dashboards:2 ports: - "5601:5601" + expose: + - "5601" environment: OPENSEARCH_HOSTS: '["http://opensearch:9200"]' + DISABLE_SECURITY_DASHBOARDS_PLUGIN: true networks: + - opensearch-net - mod-search-local depends_on: - opensearch - zookeeper: - container_name: zookeeper_mod-search - image: wurstmeister/zookeeper:3.4.6 - networks: - - mod-search-local - ports: - - "2181:2181" - kafka: container_name: kafka_mod-search - image: wurstmeister/kafka:2.13-2.8.1 + image: apache/kafka-native networks: - mod-search-local - depends_on: - - zookeeper ports: - "9092:9092" - - "29092:29092" + - "9093:9093" environment: - KAFKA_BROKER_ID: 1 - KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://:29092 - KAFKA_ADVERTISED_LISTENERS: 
INSIDE://:9092,OUTSIDE://localhost:29092 - KAFKA_ADVERTISED_HOST_NAME: kafka - KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT - KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE - KAFKA_MESSAGE_MAX_BYTES: 1000000 - KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" + # Configure listeners for both docker and host communication + KAFKA_LISTENERS: CONTROLLER://localhost:9091,HOST://0.0.0.0:9092,DOCKER://0.0.0.0:9093 + KAFKA_ADVERTISED_LISTENERS: HOST://localhost:9092,DOCKER://kafka:9093 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,DOCKER:PLAINTEXT,HOST:PLAINTEXT + # Settings required for KRaft mode + KAFKA_NODE_ID: 1 + KAFKA_PROCESS_ROLES: broker,controller + KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER + KAFKA_CONTROLLER_QUORUM_VOTERS: 1@localhost:9091 + # Listener to use for broker-to-broker communication + KAFKA_INTER_BROKER_LISTENER_NAME: DOCKER + # Required for a single node cluster + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + kafka-ui: container_name: kafka-ui_mod-search - image: provectuslabs/kafka-ui:latest + image: ghcr.io/kafbat/kafka-ui:latest + networks: + - mod-search-local ports: - "8080:8080" - depends_on: - - zookeeper - - kafka environment: + DYNAMIC_CONFIG_ENABLED: 'true' KAFKA_CLUSTERS_0_NAME: local - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092 - KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper:2181 - KAFKA_CLUSTERS_0_JMXPORT: 9997 - networks: - - mod-search-local + KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9093 + depends_on: + - kafka postgres: container_name: postgres_mod-search - image: postgres:12-alpine + image: postgres:16-alpine networks: - mod-search-local ports: @@ -136,11 +98,11 @@ services: pgadmin: container_name: pgadmin_mod-search - image: dpage/pgadmin4:6.7 + image: dpage/pgadmin4:8.13 networks: - mod-search-local ports: - - ${PGADMIN_PORT}:80 + - "5050:80" volumes: - "pgadmin-data:/var/lib/pgadmin" environment: @@ -151,6 +113,7 @@ services: networks: mod-search-local: driver: 
bridge + opensearch-net: volumes: es-data: { } diff --git a/docker/opensearch/Dockerfile b/docker/opensearch/Dockerfile index 1290863f9..2374849f9 100644 --- a/docker/opensearch/Dockerfile +++ b/docker/opensearch/Dockerfile @@ -13,3 +13,5 @@ RUN opensearch-plugin install --batch \ analysis-smartcn \ analysis-nori \ analysis-phonetic + +RUN opensearch-plugin remove opensearch-security \ No newline at end of file diff --git a/pom.xml b/pom.xml index ff3bf8efe..e569507a8 100644 --- a/pom.xml +++ b/pom.xml @@ -290,8 +290,9 @@ - + mod-search-fat + org.apache.maven.plugins maven-clean-plugin diff --git a/src/main/java/org/folio/search/integration/message/interceptor/PopulateInstanceBatchInterceptor.java b/src/main/java/org/folio/search/integration/message/interceptor/PopulateInstanceBatchInterceptor.java index e369da8bb..a0454f427 100644 --- a/src/main/java/org/folio/search/integration/message/interceptor/PopulateInstanceBatchInterceptor.java +++ b/src/main/java/org/folio/search/integration/message/interceptor/PopulateInstanceBatchInterceptor.java @@ -119,10 +119,9 @@ private void process(String tenant, List batch) { repository.deleteEntities(idsToDrop); } - if (ResourceType.INSTANCE.getName().equals(recordCollection.getKey())) { - var noShadowCopiesInstanceEvents = recordByOperation.values().stream().flatMap(Collection::stream).toList(); - instanceChildrenResourceService.persistChildren(tenant, noShadowCopiesInstanceEvents); - } + var noShadowCopiesInstanceEvents = recordByOperation.values().stream().flatMap(Collection::stream).toList(); + instanceChildrenResourceService.persistChildren(tenant, ResourceType.byName(recordCollection.getKey()), + noShadowCopiesInstanceEvents); } } diff --git a/src/main/java/org/folio/search/model/entity/CallNumberEntity.java b/src/main/java/org/folio/search/model/entity/CallNumberEntity.java new file mode 100644 index 000000000..6ca195070 --- /dev/null +++ b/src/main/java/org/folio/search/model/entity/CallNumberEntity.java @@ -0,0 +1,135 
// --- src/main/java/org/folio/search/model/entity/CallNumberEntity.java ---
package org.folio.search.model.entity;

import static org.apache.commons.lang3.StringUtils.truncate;

import java.util.Objects;
import lombok.Getter;
import org.folio.search.utils.ShaUtils;
import org.jetbrains.annotations.NotNull;

/**
 * Immutable representation of a call number for call-number browsing.
 *
 * <p>Instances are created through {@link #builder()}, which truncates every component to its
 * database column limit. When no explicit id is supplied, {@link CallNumberEntityBuilder#build()}
 * derives the id as a SHA hash of all components, so identical component sets map to the same
 * row. The id alone is the identity used by {@code equals}, {@code hashCode} and
 * {@code compareTo}.</p>
 */
@Getter
public class CallNumberEntity implements Comparable<CallNumberEntity> {

  // Maximum lengths mirror the column sizes of the call_number table
  // (see changelog v4.1/create_call_number_tables.xml).
  private static final int CALL_NUMBER_MAX_LENGTH = 50;
  private static final int CALL_NUMBER_PREFIX_MAX_LENGTH = 20;
  private static final int CALL_NUMBER_SUFFIX_MAX_LENGTH = 25;
  private static final int CALL_NUMBER_TYPE_MAX_LENGTH = 40;
  private static final int VOLUME_MAX_LENGTH = 50;
  private static final int ENUMERATION_MAX_LENGTH = 50;
  private static final int CHRONOLOGY_MAX_LENGTH = 50;
  private static final int COPY_NUMBER_MAX_LENGTH = 10;

  private String id;
  private String callNumber;
  private String callNumberPrefix;
  private String callNumberSuffix;
  private String callNumberTypeId;
  private String volume;
  private String enumeration;
  private String chronology;
  private String copyNumber;

  /**
   * Package-private: use {@link #builder()} so that component truncation and id derivation
   * are always applied.
   */
  CallNumberEntity(String id, String callNumber, String callNumberPrefix, String callNumberSuffix,
                   String callNumberTypeId, String volume, String enumeration, String chronology,
                   String copyNumber) {
    this.id = id;
    this.callNumber = callNumber;
    this.callNumberPrefix = callNumberPrefix;
    this.callNumberSuffix = callNumberSuffix;
    this.callNumberTypeId = callNumberTypeId;
    this.volume = volume;
    this.enumeration = enumeration;
    this.chronology = chronology;
    this.copyNumber = copyNumber;
  }

  /** Creates a new builder; the only supported way to construct an entity. */
  public static CallNumberEntityBuilder builder() {
    return new CallNumberEntityBuilder();
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(id);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof CallNumberEntity that)) {
      return false;
    }
    return Objects.equals(id, that.id);
  }

  @Override
  public int compareTo(@NotNull CallNumberEntity o) {
    // id is expected to be non-null after build(); see CallNumberEntityBuilder#build.
    return id.compareTo(o.id);
  }

  /** Builder that truncates each component to its column limit and derives the id when absent. */
  public static class CallNumberEntityBuilder {
    private String id;
    private String callNumber;
    private String callNumberPrefix;
    private String callNumberSuffix;
    private String callNumberTypeId;
    private String volume;
    private String enumeration;
    private String chronology;
    private String copyNumber;

    CallNumberEntityBuilder() { }

    public CallNumberEntityBuilder id(String id) {
      this.id = id;
      return this;
    }

    public CallNumberEntityBuilder callNumber(String callNumber) {
      this.callNumber = truncate(callNumber, CALL_NUMBER_MAX_LENGTH);
      return this;
    }

    public CallNumberEntityBuilder callNumberPrefix(String callNumberPrefix) {
      this.callNumberPrefix = truncate(callNumberPrefix, CALL_NUMBER_PREFIX_MAX_LENGTH);
      return this;
    }

    public CallNumberEntityBuilder callNumberSuffix(String callNumberSuffix) {
      this.callNumberSuffix = truncate(callNumberSuffix, CALL_NUMBER_SUFFIX_MAX_LENGTH);
      return this;
    }

    public CallNumberEntityBuilder callNumberTypeId(String callNumberTypeId) {
      this.callNumberTypeId = truncate(callNumberTypeId, CALL_NUMBER_TYPE_MAX_LENGTH);
      return this;
    }

    public CallNumberEntityBuilder volume(String volume) {
      this.volume = truncate(volume, VOLUME_MAX_LENGTH);
      return this;
    }

    public CallNumberEntityBuilder enumeration(String enumeration) {
      this.enumeration = truncate(enumeration, ENUMERATION_MAX_LENGTH);
      return this;
    }

    public CallNumberEntityBuilder chronology(String chronology) {
      this.chronology = truncate(chronology, CHRONOLOGY_MAX_LENGTH);
      return this;
    }

    public CallNumberEntityBuilder copyNumber(String copyNumber) {
      this.copyNumber = truncate(copyNumber, COPY_NUMBER_MAX_LENGTH);
      return this;
    }

    /**
     * Builds the entity. If no id was supplied, it is computed as a SHA hash over all
     * (already truncated) components, making the id deterministic for a component set.
     */
    public CallNumberEntity build() {
      if (id == null) {
        this.id = ShaUtils.sha(callNumber, callNumberPrefix, callNumberSuffix, callNumberTypeId,
          volume, enumeration, chronology, copyNumber);
      }
      return new CallNumberEntity(this.id, this.callNumber, this.callNumberPrefix, this.callNumberSuffix,
        this.callNumberTypeId, this.volume, this.enumeration, this.chronology, this.copyNumber);
    }
  }
}

// --- src/main/java/org/folio/search/model/entity/ChildResourceEntityBatch.java ---
package org.folio.search.model.entity;

import java.util.Collection;
import java.util.Map;

/**
 * Batch of child-resource rows to persist in one repository call:
 * {@code resourceEntities} are the child entities themselves (e.g. call numbers) and
 * {@code relationshipEntities} the instance-to-child relationship rows.
 */
public record ChildResourceEntityBatch(Collection<Map<String, Object>> resourceEntities,
                                       Collection<Map<String, Object>> relationshipEntities) {

}

// --- src/main/java/org/folio/search/model/entity/InstanceCallNumberEntity.java ---
package org.folio.search.model.entity;

import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;

/**
 * Relationship row linking a call number to the item, instance, location and tenant
 * it was observed on (maps to the instance_call_number table).
 */
@Getter
@Builder
@EqualsAndHashCode
public class InstanceCallNumberEntity {

  private String callNumberId;
  private String itemId;
  private String instanceId;
  private String locationId;
  private String tenantId;

}
a/src/main/java/org/folio/search/service/InstanceChildrenResourceService.java b/src/main/java/org/folio/search/service/InstanceChildrenResourceService.java index a5c960602..56b43f5d5 100644 --- a/src/main/java/org/folio/search/service/InstanceChildrenResourceService.java +++ b/src/main/java/org/folio/search/service/InstanceChildrenResourceService.java @@ -1,5 +1,6 @@ package org.folio.search.service; +import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.apache.commons.lang3.StringUtils.startsWith; import static org.folio.search.utils.SearchConverterUtils.getResourceSource; @@ -9,11 +10,12 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; -import lombok.RequiredArgsConstructor; +import java.util.stream.Collectors; import lombok.extern.log4j.Log4j2; import org.folio.search.domain.dto.ResourceEvent; import org.folio.search.domain.dto.ResourceEventType; import org.folio.search.model.event.SubResourceEvent; +import org.folio.search.model.types.ResourceType; import org.folio.search.service.consortium.ConsortiumTenantProvider; import org.folio.search.service.converter.preprocessor.extractor.ChildResourceExtractor; import org.folio.spring.tools.kafka.FolioMessageProducer; @@ -22,23 +24,36 @@ /** * Class is responsible for handling inner instance resource which are to be indexed into separate indices. * For example: subject, contributor, etc. 
- * */ + */ @Log4j2 @Component -@RequiredArgsConstructor public class InstanceChildrenResourceService { private final FolioMessageProducer messageProducer; - private final List resourceExtractors; + private final Map> resourceExtractors; private final ConsortiumTenantProvider consortiumTenantProvider; + public InstanceChildrenResourceService(FolioMessageProducer messageProducer, + List resourceExtractors, + ConsortiumTenantProvider consortiumTenantProvider) { + this.messageProducer = messageProducer; + this.resourceExtractors = resourceExtractors.stream() + .collect(Collectors.groupingBy(ChildResourceExtractor::resourceType)); + this.consortiumTenantProvider = consortiumTenantProvider; + } + public void sendChildrenEvent(ResourceEvent event) { + var resourceType = ResourceType.byName(event.getResourceName()); + var extractors = resourceExtractors.get(resourceType); + if (extractors == null) { + return; + } var needChildrenEvent = false; if (isUpdateEventForResourceSharing(event)) { - needChildrenEvent = resourceExtractors.stream() + needChildrenEvent = extractors.stream() .anyMatch(extractor -> !extractor.hasChildResourceChanges(event)); } else if (!startsWith(getResourceSource(event), SOURCE_CONSORTIUM_PREFIX)) { - needChildrenEvent = resourceExtractors.stream() + needChildrenEvent = extractors.stream() .anyMatch(extractor -> extractor.hasChildResourceChanges(event)); } @@ -63,10 +78,16 @@ public List extractChildren(ResourceEvent event) { log.debug("processChildren::Starting instance children event processing [{}]", event); } + var resourceType = ResourceType.byName(event.getResourceName()); + var extractors = resourceExtractors.get(resourceType); + if (extractors == null) { + return emptyList(); + } + var events = new LinkedList(); if (isUpdateEventForResourceSharing(event)) { - for (var resourceExtractor : resourceExtractors) { + for (var resourceExtractor : extractors) { events.addAll(resourceExtractor.prepareEventsOnSharing(event)); } } else if 
(startsWith(getResourceSource(event), SOURCE_CONSORTIUM_PREFIX)) { @@ -74,7 +95,7 @@ public List extractChildren(ResourceEvent event) { "processChildren::Finished instance children event processing. No additional action for shadow instance."); return events; } else { - for (var resourceExtractor : resourceExtractors) { + for (var resourceExtractor : extractors) { events.addAll(resourceExtractor.prepareEvents(event)); } } @@ -85,20 +106,26 @@ public List extractChildren(ResourceEvent event) { return events; } - public void persistChildren(String tenantId, List events) { + public void persistChildren(String tenantId, ResourceType resourceType, List events) { + var extractors = resourceExtractors.get(resourceType); + if (extractors == null) { + return; + } var shared = consortiumTenantProvider.isCentralTenant(tenantId); - resourceExtractors.forEach(resourceExtractor -> resourceExtractor.persistChildren(shared, events)); + extractors.forEach(resourceExtractor -> resourceExtractor.persistChildren(shared, events)); } - public void persistChildrenOnReindex(String tenantId, List> instances) { + public void persistChildrenOnReindex(String tenantId, ResourceType resourceType, + List> instances) { var events = instances.stream() .map(instance -> new ResourceEvent() .id(instance.get("id").toString()) .type(ResourceEventType.REINDEX) + .resourceName(resourceType.getName()) .tenant(tenantId) ._new(instance)) .toList(); - persistChildren(tenantId, events); + persistChildren(tenantId, resourceType, events); } } diff --git a/src/main/java/org/folio/search/service/converter/preprocessor/extractor/ChildResourceExtractor.java b/src/main/java/org/folio/search/service/converter/preprocessor/extractor/ChildResourceExtractor.java index ebde8786f..86d0dda14 100644 --- a/src/main/java/org/folio/search/service/converter/preprocessor/extractor/ChildResourceExtractor.java +++ b/src/main/java/org/folio/search/service/converter/preprocessor/extractor/ChildResourceExtractor.java @@ -5,6 +5,7 @@ 
import static org.apache.commons.collections4.MapUtils.getObject; import static org.folio.search.utils.SearchConverterUtils.getNewAsMap; +import java.util.ArrayList; import java.util.HashSet; import java.util.LinkedList; import java.util.List; @@ -14,6 +15,8 @@ import lombok.RequiredArgsConstructor; import org.folio.search.domain.dto.ResourceEvent; import org.folio.search.domain.dto.ResourceEventType; +import org.folio.search.model.entity.ChildResourceEntityBatch; +import org.folio.search.model.types.ResourceType; import org.folio.search.service.reindex.jdbc.InstanceChildResourceRepository; import org.springframework.transaction.annotation.Transactional; @@ -28,6 +31,8 @@ public abstract class ChildResourceExtractor { public abstract boolean hasChildResourceChanges(ResourceEvent event); + public abstract ResourceType resourceType(); + protected abstract List> constructRelations(boolean shared, ResourceEvent event, List> entities); @@ -59,14 +64,14 @@ public void persistChildren(boolean shared, List events) { relations.addAll(constructRelations(shared, event, entitiesFromEvent)); entities.addAll(entitiesFromEvent); }); - repository.saveAll(entities, relations); + repository.saveAll(new ChildResourceEntityBatch(new ArrayList<>(entities), relations)); } private List> extractEntities(ResourceEvent event) { var entities = getChildResources(getNewAsMap(event)); return entities.stream() .map(this::constructEntity) - .filter(Objects::nonNull) + .filter(obj -> Objects.nonNull(obj) && !obj.isEmpty()) .toList(); } diff --git a/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/CallNumberResourceExtractor.java b/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/CallNumberResourceExtractor.java new file mode 100644 index 000000000..3441d85af --- /dev/null +++ b/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/CallNumberResourceExtractor.java @@ -0,0 +1,130 @@ +package 
package org.folio.search.service.converter.preprocessor.extractor.impl;

import static org.apache.commons.collections4.MapUtils.getMap;
import static org.apache.commons.collections4.MapUtils.getString;
import static org.folio.search.utils.SearchConverterUtils.getNewAsMap;
import static org.folio.search.utils.SearchConverterUtils.getOldAsMap;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import lombok.extern.log4j.Log4j2;
import org.folio.search.domain.dto.ResourceEvent;
import org.folio.search.domain.dto.TenantConfiguredFeature;
import org.folio.search.model.entity.CallNumberEntity;
import org.folio.search.model.entity.InstanceCallNumberEntity;
import org.folio.search.model.types.ResourceType;
import org.folio.search.service.FeatureConfigService;
import org.folio.search.service.converter.preprocessor.extractor.ChildResourceExtractor;
import org.folio.search.service.reindex.jdbc.CallNumberRepository;
import org.folio.search.utils.JsonConverter;
import org.springframework.stereotype.Component;

/**
 * Extracts call-number child resources from ITEM resource events so they can be
 * persisted into the call_number / instance_call_number tables for call-number browsing.
 *
 * <p>All extraction is gated on the {@link TenantConfiguredFeature#BROWSE_CALL_NUMBERS}
 * feature flag: when disabled, no changes are detected and no entities are produced.</p>
 */
@Log4j2
@Component
public class CallNumberResourceExtractor extends ChildResourceExtractor {

  public static final String EFFECTIVE_CALL_NUMBER_COMPONENTS_FIELD = "effectiveCallNumberComponents";
  public static final String CALL_NUMBER_FIELD = "callNumber";
  public static final String PREFIX_FIELD = "prefix";
  public static final String SUFFIX_FIELD = "suffix";
  public static final String TYPE_ID_FIELD = "typeId";
  public static final String VOLUME_FIELD = "volume";
  public static final String CHRONOLOGY_FIELD = "chronology";
  public static final String ENUMERATION_FIELD = "enumeration";
  public static final String COPY_NUMBER_FIELD = "copyNumber";

  private final JsonConverter jsonConverter;
  private final FeatureConfigService featureConfigService;

  public CallNumberResourceExtractor(CallNumberRepository repository, JsonConverter jsonConverter,
                                     FeatureConfigService featureConfigService) {
    super(repository);
    this.jsonConverter = jsonConverter;
    this.featureConfigService = featureConfigService;
  }

  /** Call numbers are not indexed through separate sub-resource events, so no events are produced. */
  @Override
  public List<ResourceEvent> prepareEvents(ResourceEvent resource) {
    return List.of();
  }

  /** No sub-resource events are produced on consortium resource sharing either. */
  @Override
  public List<ResourceEvent> prepareEventsOnSharing(ResourceEvent resource) {
    return List.of();
  }

  /**
   * Detects whether the item's effective call number changed between the old and new
   * event bodies. Always {@code false} while the browse-call-numbers feature is disabled.
   */
  @Override
  public boolean hasChildResourceChanges(ResourceEvent event) {
    if (!featureConfigService.isEnabled(TenantConfiguredFeature.BROWSE_CALL_NUMBERS)) {
      return false;
    }
    var oldAsMap = getOldAsMap(event);
    var newAsMap = getNewAsMap(event);
    var oldCallNumber = constructEntity(oldAsMap);
    var newCallNumber = constructEntity(newAsMap);
    return !oldCallNumber.equals(newCallNumber);
  }

  /** Call numbers are extracted from ITEM events (not INSTANCE like most other extractors). */
  @Override
  public ResourceType resourceType() {
    return ResourceType.ITEM;
  }

  /**
   * Builds instance_call_number relationship rows: one per extracted call-number entity,
   * carrying the item id, its instance id, effective location and tenant from the event.
   */
  @Override
  protected List<Map<String, Object>> constructRelations(boolean shared, ResourceEvent event,
                                                         List<Map<String, Object>> entities) {
    var resourceMap = getNewAsMap(event);
    return entities.stream()
      .map(entity -> InstanceCallNumberEntity.builder()
        .callNumberId(getString(entity, "id"))
        .itemId(getString(resourceMap, "id"))
        .instanceId(getString(resourceMap, "instanceId"))
        .locationId(getString(resourceMap, "effectiveLocationId"))
        .tenantId(event.getTenant())
        .build())
      .map(jsonConverter::convertToMap)
      .toList();
  }

  /**
   * Converts an item's effective call number components into a call-number entity map.
   * Returns an empty map when the feature is disabled or the item has no call number,
   * which the base extractor filters out.
   */
  @Override
  protected Map<String, Object> constructEntity(Map<String, Object> entityProperties) {
    if (!featureConfigService.isEnabled(TenantConfiguredFeature.BROWSE_CALL_NUMBERS)) {
      return Collections.emptyMap();
    }
    var callNumberComponents = getCallNumberComponents(entityProperties);
    var callNumber = getString(callNumberComponents, CALL_NUMBER_FIELD);
    if (callNumber != null) {
      var callNumberEntity = CallNumberEntity.builder()
        .callNumber(callNumber)
        .callNumberPrefix(getString(callNumberComponents, PREFIX_FIELD))
        .callNumberSuffix(getString(callNumberComponents, SUFFIX_FIELD))
        .callNumberTypeId(getString(callNumberComponents, TYPE_ID_FIELD))
        .volume(getString(entityProperties, VOLUME_FIELD))
        .chronology(getString(entityProperties, CHRONOLOGY_FIELD))
        .enumeration(getString(entityProperties, ENUMERATION_FIELD))
        .copyNumber(getString(entityProperties, COPY_NUMBER_FIELD))
        .build();
      return jsonConverter.convertToMap(callNumberEntity);
    }
    return Collections.emptyMap();
  }

  @SuppressWarnings("unchecked")
  private Map<String, Object> getCallNumberComponents(Map<String, Object> entityProperties) {
    return (Map<String, Object>) getMap(entityProperties, EFFECTIVE_CALL_NUMBER_COMPONENTS_FIELD,
      Collections.emptyMap());
  }

  /** The call number is derived from the whole item body, so there is no named child field. */
  @Override
  protected String childrenFieldName() {
    return "";
  }

  /** The single "child resource" is the item event body itself. */
  @Override
  protected Set<Map<String, Object>> getChildResources(Map<String, Object> event) {
    return Set.of(event);
  }

}
constructRelations(boolean shared, ResourceEvent event, - List> entities) { + List> entities) { return entities.stream() .map(entity -> Map.of("instanceId", event.getId(), "classificationId", entity.get("id"), @@ -128,9 +137,12 @@ protected List> constructRelations(boolean shared, ResourceE @Override protected Map constructEntity(Map entityProperties) { + if (!featureConfigService.isEnabled(TenantConfiguredFeature.BROWSE_CLASSIFICATIONS)) { + return Collections.emptyMap(); + } var classificationNumber = prepareForExpectedFormat(entityProperties.get(CLASSIFICATION_NUMBER_FIELD), 50); if (classificationNumber.isEmpty()) { - return null; + return Collections.emptyMap(); } var classificationTypeId = entityProperties.get(CLASSIFICATION_TYPE_FIELD); diff --git a/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/ContributorResourceExtractor.java b/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/ContributorResourceExtractor.java index 5c0b201f4..7926af5e0 100644 --- a/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/ContributorResourceExtractor.java +++ b/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/ContributorResourceExtractor.java @@ -103,6 +103,11 @@ public boolean hasChildResourceChanges(ResourceEvent event) { return !oldContributors.equals(newContributors); } + @Override + public ResourceType resourceType() { + return ResourceType.INSTANCE; + } + @Override protected List> constructRelations(boolean shared, ResourceEvent event, List> entities) { diff --git a/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/SubjectResourceExtractor.java b/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/SubjectResourceExtractor.java index e239b6a41..fcac9ed04 100644 --- a/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/SubjectResourceExtractor.java +++ 
b/src/main/java/org/folio/search/service/converter/preprocessor/extractor/impl/SubjectResourceExtractor.java @@ -105,9 +105,14 @@ public boolean hasChildResourceChanges(ResourceEvent event) { return !oldSubjects.equals(newSubjects); } + @Override + public ResourceType resourceType() { + return ResourceType.INSTANCE; + } + @Override protected List> constructRelations(boolean shared, ResourceEvent event, - List> entities) { + List> entities) { return entities.stream() .map(entity -> Map.of("instanceId", event.getId(), "subjectId", entity.get("id"), diff --git a/src/main/java/org/folio/search/service/reindex/ReindexConstants.java b/src/main/java/org/folio/search/service/reindex/ReindexConstants.java index fb16b878b..8311c190b 100644 --- a/src/main/java/org/folio/search/service/reindex/ReindexConstants.java +++ b/src/main/java/org/folio/search/service/reindex/ReindexConstants.java @@ -10,15 +10,19 @@ public final class ReindexConstants { public static final Map RESOURCE_NAME_MAP = Map.of( ReindexEntityType.INSTANCE, ResourceType.INSTANCE, + ReindexEntityType.ITEM, ResourceType.ITEM, + ReindexEntityType.HOLDINGS, ResourceType.HOLDINGS, ReindexEntityType.SUBJECT, ResourceType.INSTANCE_SUBJECT, ReindexEntityType.CLASSIFICATION, ResourceType.INSTANCE_CLASSIFICATION, ReindexEntityType.CONTRIBUTOR, ResourceType.INSTANCE_CONTRIBUTOR ); + public static final String CALL_NUMBER_TABLE = "call_number"; public static final String CLASSIFICATION_TABLE = "classification"; public static final String CONTRIBUTOR_TABLE = "contributor"; public static final String HOLDING_TABLE = "holding"; public static final String INSTANCE_TABLE = "instance"; + public static final String INSTANCE_CALL_NUMBER_TABLE = "instance_call_number"; public static final String INSTANCE_CLASSIFICATION_TABLE = "instance_classification"; public static final String INSTANCE_CONTRIBUTOR_TABLE = "instance_contributor"; public static final String INSTANCE_SUBJECT_TABLE = "instance_subject"; diff --git 
a/src/main/java/org/folio/search/service/reindex/ReindexMergeRangeIndexService.java b/src/main/java/org/folio/search/service/reindex/ReindexMergeRangeIndexService.java index 5d239f473..a9958e1d8 100644 --- a/src/main/java/org/folio/search/service/reindex/ReindexMergeRangeIndexService.java +++ b/src/main/java/org/folio/search/service/reindex/ReindexMergeRangeIndexService.java @@ -1,5 +1,7 @@ package org.folio.search.service.reindex; +import static org.folio.search.service.reindex.ReindexConstants.RESOURCE_NAME_MAP; + import java.sql.Timestamp; import java.time.Instant; import java.util.ArrayList; @@ -79,9 +81,8 @@ public void saveEntities(ReindexRecordsEvent event) { .toList(); repositories.get(event.getRecordType().getEntityType()).saveEntities(event.getTenant(), entities); - if (event.getRecordType() == ReindexRecordsEvent.ReindexRecordType.INSTANCE) { - instanceChildrenResourceService.persistChildrenOnReindex(event.getTenant(), entities); - } + instanceChildrenResourceService.persistChildrenOnReindex(event.getTenant(), + RESOURCE_NAME_MAP.get(event.getRecordType().getEntityType()), entities); } private List constructMergeRangeRecords(int recordsCount, diff --git a/src/main/java/org/folio/search/service/reindex/jdbc/CallNumberRepository.java b/src/main/java/org/folio/search/service/reindex/jdbc/CallNumberRepository.java new file mode 100644 index 000000000..1f24e315a --- /dev/null +++ b/src/main/java/org/folio/search/service/reindex/jdbc/CallNumberRepository.java @@ -0,0 +1,202 @@ +package org.folio.search.service.reindex.jdbc; + +import static org.apache.commons.collections4.MapUtils.getString; +import static org.folio.search.service.reindex.ReindexConstants.CALL_NUMBER_TABLE; +import static org.folio.search.service.reindex.ReindexConstants.INSTANCE_CALL_NUMBER_TABLE; +import static org.folio.search.utils.JdbcUtils.getFullTableName; +import static org.folio.search.utils.JdbcUtils.getParamPlaceholderForUuid; + +import java.util.List; +import java.util.Map; +import 
package org.folio.search.service.reindex.jdbc;

import static org.apache.commons.collections4.MapUtils.getString;
import static org.folio.search.service.reindex.ReindexConstants.CALL_NUMBER_TABLE;
import static org.folio.search.service.reindex.ReindexConstants.INSTANCE_CALL_NUMBER_TABLE;
import static org.folio.search.utils.JdbcUtils.getFullTableName;
import static org.folio.search.utils.JdbcUtils.getParamPlaceholderForUuid;

import java.util.List;
import java.util.Map;
import java.util.Optional;
import lombok.extern.log4j.Log4j2;
import org.folio.search.configuration.properties.ReindexConfigurationProperties;
import org.folio.search.model.entity.ChildResourceEntityBatch;
import org.folio.search.model.types.ReindexEntityType;
import org.folio.search.utils.JsonConverter;
import org.folio.spring.FolioExecutionContext;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;
import org.springframework.stereotype.Repository;

/**
 * JDBC repository for the call_number and instance_call_number tables.
 *
 * <p>Batch inserts use ON CONFLICT DO NOTHING so repeated extraction of the same call number
 * is idempotent. When a whole batch fails, rows are retried one-by-one so a single bad row
 * does not discard the rest of the batch (mirrors the sibling child-resource repositories).</p>
 */
@Log4j2
@Repository
public class CallNumberRepository extends UploadRangeRepository implements InstanceChildResourceRepository {

  private static final String DELETE_QUERY = """
    DELETE
    FROM %s
    WHERE instance_id IN (%s);
    """;

  private static final String INSERT_ENTITIES_SQL = """
    INSERT INTO %s (
        id,
        call_number,
        call_number_prefix,
        call_number_suffix,
        call_number_type_id,
        volume,
        enumeration,
        chronology,
        copy_number
    ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
    ON CONFLICT DO NOTHING;
    """;

  private static final String INSERT_RELATIONS_SQL = """
    INSERT INTO %s (
        call_number_id,
        item_id,
        instance_id,
        tenant_id,
        location_id
    ) VALUES (?, ?::uuid, ?::uuid, ?, ?::uuid);
    """;

  protected CallNumberRepository(JdbcTemplate jdbcTemplate,
                                 JsonConverter jsonConverter,
                                 FolioExecutionContext context,
                                 ReindexConfigurationProperties reindexConfig) {
    super(jdbcTemplate, jsonConverter, context, reindexConfig);
  }

  /** Deletes instance_call_number relations for the given instance ids (call numbers stay). */
  @Override
  public void deleteByInstanceIds(List<String> instanceIds) {
    var sql = DELETE_QUERY.formatted(getFullTableName(context, INSTANCE_CALL_NUMBER_TABLE),
      getParamPlaceholderForUuid(instanceIds.size()));
    jdbcTemplate.update(sql, instanceIds.toArray());
  }

  /** Persists the call-number entities first, then the instance relations referencing them. */
  @Override
  public void saveAll(ChildResourceEntityBatch entityBatch) {
    saveResourceEntities(entityBatch);
    saveRelationshipEntities(entityBatch);
  }

  @Override
  public ReindexEntityType entityType() {
    return ReindexEntityType.CALL_NUMBER;
  }

  @Override
  protected String entityTable() {
    return CALL_NUMBER_TABLE;
  }

  @Override
  protected Optional<String> subEntityTable() {
    return Optional.of(INSTANCE_CALL_NUMBER_TABLE);
  }

  // NOTE(review): returning null means upload-range row mapping is not supported by this
  // repository yet — presumably added in a follow-up; confirm callers never fetch ranges here.
  @Override
  protected RowMapper<Map<String, Object>> rowToMapMapper() {
    return null;
  }

  /** Batch-inserts call_number rows, falling back to row-by-row inserts on batch failure. */
  private void saveResourceEntities(ChildResourceEntityBatch entityBatch) {
    var callNumberTable = getFullTableName(context, entityTable());
    var callNumberSql = INSERT_ENTITIES_SQL.formatted(callNumberTable);

    try {
      jdbcTemplate.batchUpdate(callNumberSql, entityBatch.resourceEntities(), BATCH_OPERATION_SIZE,
        (statement, entity) -> {
          statement.setString(1, getId(entity));
          statement.setString(2, getCallNumber(entity));
          statement.setString(3, getPrefix(entity));
          statement.setString(4, getSuffix(entity));
          statement.setString(5, getTypeId(entity));
          statement.setString(6, getVolume(entity));
          statement.setString(7, getEnumeration(entity));
          statement.setString(8, getChronology(entity));
          statement.setString(9, getCopyNumber(entity));
        });
    } catch (DataAccessException e) {
      log.warn("saveAll::Failed to save entities batch. Starting processing one-by-one", e);
      for (var entity : entityBatch.resourceEntities()) {
        jdbcTemplate.update(callNumberSql,
          getId(entity), getCallNumber(entity), getPrefix(entity), getSuffix(entity), getTypeId(entity),
          getVolume(entity), getEnumeration(entity), getChronology(entity), getCopyNumber(entity));
      }
    }
  }

  /** Batch-inserts instance_call_number rows, falling back to row-by-row inserts on batch failure. */
  private void saveRelationshipEntities(ChildResourceEntityBatch entityBatch) {
    var instanceCallNumberTable = getFullTableName(context, INSTANCE_CALL_NUMBER_TABLE);
    var instanceCallNumberSql = INSERT_RELATIONS_SQL.formatted(instanceCallNumberTable);

    try {
      jdbcTemplate.batchUpdate(instanceCallNumberSql, entityBatch.relationshipEntities(), BATCH_OPERATION_SIZE,
        (statement, entity) -> {
          statement.setString(1, getCallNumberId(entity));
          statement.setString(2, getItemId(entity));
          statement.setString(3, getInstanceId(entity));
          statement.setString(4, getTenantId(entity));
          statement.setString(5, getLocationId(entity));
        });
    } catch (DataAccessException e) {
      log.warn("saveAll::Failed to save relations batch. Starting processing one-by-one", e);
      for (var entityRelation : entityBatch.relationshipEntities()) {
        jdbcTemplate.update(instanceCallNumberSql, getCallNumberId(entityRelation), getItemId(entityRelation),
          getInstanceId(entityRelation), getTenantId(entityRelation), getLocationId(entityRelation));
      }
    }
  }

  // Typed accessors for the loosely-typed entity maps produced by the extractors.

  private String getCallNumber(Map<String, Object> callNumberComponents) {
    return getString(callNumberComponents, "callNumber");
  }

  private String getCallNumberId(Map<String, Object> callNumberComponents) {
    return getString(callNumberComponents, "callNumberId");
  }

  private String getLocationId(Map<String, Object> item) {
    return getString(item, "locationId");
  }

  private String getTenantId(Map<String, Object> item) {
    return getString(item, "tenantId");
  }

  private String getInstanceId(Map<String, Object> item) {
    return getString(item, "instanceId");
  }

  private String getItemId(Map<String, Object> item) {
    return getString(item, "itemId");
  }

  private String getId(Map<String, Object> item) {
    return getString(item, "id");
  }

  private String getCopyNumber(Map<String, Object> item) {
    return getString(item, "copyNumber");
  }

  private String getEnumeration(Map<String, Object> item) {
    return getString(item, "enumeration");
  }

  private String getChronology(Map<String, Object> item) {
    return getString(item, "chronology");
  }

  private String getVolume(Map<String, Object> item) {
    return getString(item, "volume");
  }

  private String getTypeId(Map<String, Object> callNumberComponents) {
    return getString(callNumberComponents, "callNumberTypeId");
  }

  private String getSuffix(Map<String, Object> callNumberComponents) {
    return getString(callNumberComponents, "callNumberSuffix");
  }

  private String getPrefix(Map<String, Object> callNumberComponents) {
    return getString(callNumberComponents, "callNumberPrefix");
  }
}
+++ b/src/main/java/org/folio/search/service/reindex/jdbc/ClassificationRepository.java @@ -12,11 +12,11 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import lombok.extern.log4j.Log4j2; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.ListUtils; import org.folio.search.configuration.properties.ReindexConfigurationProperties; +import org.folio.search.model.entity.ChildResourceEntityBatch; import org.folio.search.model.entity.InstanceClassificationEntityAgg; import org.folio.search.model.types.ReindexEntityType; import org.folio.search.service.reindex.ReindexConstants; @@ -156,10 +156,10 @@ public void deleteByInstanceIds(List instanceIds) { } @Override - public void saveAll(Set> entities, List> entityRelations) { + public void saveAll(ChildResourceEntityBatch entityBatch) { var entitiesSql = INSERT_ENTITIES_SQL.formatted(JdbcUtils.getSchemaName(context)); try { - jdbcTemplate.batchUpdate(entitiesSql, entities, BATCH_OPERATION_SIZE, + jdbcTemplate.batchUpdate(entitiesSql, entityBatch.resourceEntities(), BATCH_OPERATION_SIZE, (statement, entity) -> { statement.setString(1, (String) entity.get("id")); statement.setString(2, (String) entity.get(CLASSIFICATION_NUMBER_FIELD)); @@ -167,7 +167,7 @@ public void saveAll(Set> entities, List> }); } catch (DataAccessException e) { log.warn("saveAll::Failed to save entities batch. 
Starting processing one-by-one", e); - for (var entity : entities) { + for (var entity : entityBatch.resourceEntities()) { jdbcTemplate.update(entitiesSql, entity.get("id"), entity.get(CLASSIFICATION_NUMBER_FIELD), entity.get(CLASSIFICATION_TYPE_FIELD)); } @@ -175,7 +175,7 @@ public void saveAll(Set> entities, List> var relationsSql = INSERT_RELATIONS_SQL.formatted(JdbcUtils.getSchemaName(context)); try { - jdbcTemplate.batchUpdate(relationsSql, entityRelations, BATCH_OPERATION_SIZE, + jdbcTemplate.batchUpdate(relationsSql, entityBatch.relationshipEntities(), BATCH_OPERATION_SIZE, (statement, entityRelation) -> { statement.setObject(1, entityRelation.get("instanceId")); statement.setString(2, (String) entityRelation.get("classificationId")); @@ -184,7 +184,7 @@ public void saveAll(Set> entities, List> }); } catch (DataAccessException e) { log.warn("saveAll::Failed to save relations batch. Starting processing one-by-one", e); - for (var entityRelation : entityRelations) { + for (var entityRelation : entityBatch.relationshipEntities()) { jdbcTemplate.update(relationsSql, entityRelation.get("instanceId"), entityRelation.get("classificationId"), entityRelation.get("tenantId"), entityRelation.get("shared")); } diff --git a/src/main/java/org/folio/search/service/reindex/jdbc/ContributorRepository.java b/src/main/java/org/folio/search/service/reindex/jdbc/ContributorRepository.java index a014e3860..8b108ca64 100644 --- a/src/main/java/org/folio/search/service/reindex/jdbc/ContributorRepository.java +++ b/src/main/java/org/folio/search/service/reindex/jdbc/ContributorRepository.java @@ -12,11 +12,11 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import lombok.extern.log4j.Log4j2; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.ListUtils; import org.folio.search.configuration.properties.ReindexConfigurationProperties; +import 
org.folio.search.model.entity.ChildResourceEntityBatch; import org.folio.search.model.entity.InstanceContributorEntityAgg; import org.folio.search.model.types.ReindexEntityType; import org.folio.search.service.reindex.ReindexConstants; @@ -159,10 +159,10 @@ public void deleteByInstanceIds(List instanceIds) { } @Override - public void saveAll(Set> entities, List> entityRelations) { + public void saveAll(ChildResourceEntityBatch entityBatch) { var entitiesSql = INSERT_ENTITIES_SQL.formatted(JdbcUtils.getSchemaName(context)); try { - jdbcTemplate.batchUpdate(entitiesSql, entities, BATCH_OPERATION_SIZE, + jdbcTemplate.batchUpdate(entitiesSql, entityBatch.resourceEntities(), BATCH_OPERATION_SIZE, (statement, entity) -> { statement.setString(1, (String) entity.get("id")); statement.setString(2, (String) entity.get("name")); @@ -171,7 +171,7 @@ public void saveAll(Set> entities, List> }); } catch (DataAccessException e) { log.warn("saveAll::Failed to save entities batch. Starting processing one-by-one", e); - for (var entity : entities) { + for (var entity : entityBatch.resourceEntities()) { jdbcTemplate.update(entitiesSql, entity.get("id"), entity.get("name"), entity.get("nameTypeId"), entity.get(AUTHORITY_ID_FIELD)); } @@ -179,7 +179,7 @@ public void saveAll(Set> entities, List> var relationsSql = INSERT_RELATIONS_SQL.formatted(JdbcUtils.getSchemaName(context)); try { - jdbcTemplate.batchUpdate(relationsSql, entityRelations, BATCH_OPERATION_SIZE, + jdbcTemplate.batchUpdate(relationsSql, entityBatch.relationshipEntities(), BATCH_OPERATION_SIZE, (statement, entityRelation) -> { statement.setObject(1, entityRelation.get("instanceId")); statement.setString(2, (String) entityRelation.get("contributorId")); @@ -189,7 +189,7 @@ public void saveAll(Set> entities, List> }); } catch (DataAccessException e) { log.warn("saveAll::Failed to save relations batch. 
Starting processing one-by-one", e); - for (var entityRelation : entityRelations) { + for (var entityRelation : entityBatch.relationshipEntities()) { jdbcTemplate.update(relationsSql, entityRelation.get("instanceId"), entityRelation.get("contributorId"), entityRelation.get(CONTRIBUTOR_TYPE_FIELD), entityRelation.get("tenantId"), entityRelation.get("shared")); } diff --git a/src/main/java/org/folio/search/service/reindex/jdbc/InstanceChildResourceRepository.java b/src/main/java/org/folio/search/service/reindex/jdbc/InstanceChildResourceRepository.java index b550551d2..4dc06a4ae 100644 --- a/src/main/java/org/folio/search/service/reindex/jdbc/InstanceChildResourceRepository.java +++ b/src/main/java/org/folio/search/service/reindex/jdbc/InstanceChildResourceRepository.java @@ -1,12 +1,11 @@ package org.folio.search.service.reindex.jdbc; import java.util.List; -import java.util.Map; -import java.util.Set; +import org.folio.search.model.entity.ChildResourceEntityBatch; public interface InstanceChildResourceRepository { void deleteByInstanceIds(List instanceIds); - void saveAll(Set> entities, List> entityRelations); + void saveAll(ChildResourceEntityBatch childResourceEntityBatch); } diff --git a/src/main/java/org/folio/search/service/reindex/jdbc/ItemRepository.java b/src/main/java/org/folio/search/service/reindex/jdbc/ItemRepository.java index 1d2183f2d..37314be4f 100644 --- a/src/main/java/org/folio/search/service/reindex/jdbc/ItemRepository.java +++ b/src/main/java/org/folio/search/service/reindex/jdbc/ItemRepository.java @@ -35,6 +35,11 @@ public ReindexEntityType entityType() { return ReindexEntityType.ITEM; } + @Override + protected String entityTable() { + return ReindexConstants.ITEM_TABLE; + } + @Override public void saveEntities(String tenantId, List> entities) { var fullTableName = getFullTableName(context, entityTable()); @@ -48,10 +53,7 @@ public void saveEntities(String tenantId, List> entities) { statement.setObject(4, entity.get("holdingsRecordId")); 
statement.setString(5, jsonConverter.toJson(entity)); }); - } - @Override - protected String entityTable() { - return ReindexConstants.ITEM_TABLE; } + } diff --git a/src/main/java/org/folio/search/service/reindex/jdbc/SubjectRepository.java b/src/main/java/org/folio/search/service/reindex/jdbc/SubjectRepository.java index 94ce56560..78adda49b 100644 --- a/src/main/java/org/folio/search/service/reindex/jdbc/SubjectRepository.java +++ b/src/main/java/org/folio/search/service/reindex/jdbc/SubjectRepository.java @@ -14,11 +14,11 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import lombok.extern.log4j.Log4j2; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.collections4.ListUtils; import org.folio.search.configuration.properties.ReindexConfigurationProperties; +import org.folio.search.model.entity.ChildResourceEntityBatch; import org.folio.search.model.entity.InstanceSubjectEntityAgg; import org.folio.search.model.types.ReindexEntityType; import org.folio.search.service.reindex.ReindexConstants; @@ -93,7 +93,6 @@ public class SubjectRepository extends UploadRangeRepository implements Instance private static final String IDS_INS_WHERE_CLAUSE = "ins.subject_id IN (%1$s)"; private static final String IDS_SUB_WHERE_CLAUSE = "s.id IN (%1$s)"; - protected SubjectRepository(JdbcTemplate jdbcTemplate, JsonConverter jsonConverter, FolioExecutionContext context, @@ -126,14 +125,12 @@ public List fetchByIds(List ids) { return jdbcTemplate.query(sql, instanceAggRowMapper(), ListUtils.union(ids, ids).toArray()); } - @Override public List> fetchByIdRange(String lower, String upper) { var sql = getFetchBySql(); return jdbcTemplate.query(sql, rowToMapMapper(), lower, upper, lower, upper); } - @Override protected String getFetchBySql() { return SELECT_QUERY.formatted(JdbcUtils.getSchemaName(context), @@ -165,10 +162,10 @@ public void deleteByInstanceIds(List instanceIds) { } @Override - public void 
saveAll(Set> entities, List> entityRelations) { + public void saveAll(ChildResourceEntityBatch entityBatch) { var entitiesSql = INSERT_ENTITIES_SQL.formatted(JdbcUtils.getSchemaName(context)); try { - jdbcTemplate.batchUpdate(entitiesSql, entities, BATCH_OPERATION_SIZE, + jdbcTemplate.batchUpdate(entitiesSql, entityBatch.resourceEntities(), BATCH_OPERATION_SIZE, (statement, entity) -> { statement.setString(1, (String) entity.get("id")); statement.setString(2, (String) entity.get(SUBJECT_VALUE_FIELD)); @@ -178,7 +175,7 @@ public void saveAll(Set> entities, List> }); } catch (DataAccessException e) { log.warn("saveAll::Failed to save entities batch. Starting processing one-by-one", e); - for (var entity : entities) { + for (var entity : entityBatch.resourceEntities()) { jdbcTemplate.update(entitiesSql, entity.get("id"), entity.get(SUBJECT_VALUE_FIELD), entity.get(AUTHORITY_ID_FIELD), entity.get(SUBJECT_SOURCE_ID_FIELD), entity.get(SUBJECT_TYPE_ID_FIELD)); } @@ -186,7 +183,7 @@ public void saveAll(Set> entities, List> var relationsSql = INSERT_RELATIONS_SQL.formatted(JdbcUtils.getSchemaName(context)); try { - jdbcTemplate.batchUpdate(relationsSql, entityRelations, BATCH_OPERATION_SIZE, + jdbcTemplate.batchUpdate(relationsSql, entityBatch.relationshipEntities(), BATCH_OPERATION_SIZE, (statement, entityRelation) -> { statement.setObject(1, entityRelation.get("instanceId")); statement.setString(2, (String) entityRelation.get("subjectId")); @@ -195,7 +192,7 @@ public void saveAll(Set> entities, List> }); } catch (DataAccessException e) { log.warn("saveAll::Failed to save relations batch. 
Starting processing one-by-one", e); - for (var entityRelation : entityRelations) { + for (var entityRelation : entityBatch.relationshipEntities()) { jdbcTemplate.update(relationsSql, entityRelation.get("instanceId"), entityRelation.get("subjectId"), entityRelation.get("tenantId"), entityRelation.get("shared")); } diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml index 0f7d2962a..01c27d403 100644 --- a/src/main/resources/application.yml +++ b/src/main/resources/application.yml @@ -13,10 +13,10 @@ spring: opensearch: username: ${ELASTICSEARCH_USERNAME:} password: ${ELASTICSEARCH_PASSWORD:} - uris: ${ELASTICSEARCH_URL:http://elasticsearch:9200} + uris: ${ELASTICSEARCH_URL:http://localhost:9200} compression-enabled: ${ELASTICSEARCH_COMPRESSION_ENABLED:true} kafka: - bootstrap-servers: ${KAFKA_HOST:kafka}:${KAFKA_PORT:9092} + bootstrap-servers: ${KAFKA_HOST:localhost}:${KAFKA_PORT:9092} consumer: max-poll-records: ${KAFKA_CONSUMER_MAX_POLL_RECORDS:200} properties: @@ -35,9 +35,9 @@ spring: max.in.flight.requests.per.connection: 5 retries: 5 datasource: - username: ${DB_USERNAME:postgres} - password: ${DB_PASSWORD:postgres} - url: jdbc:postgresql://${DB_HOST:postgres}:${DB_PORT:5432}/${DB_DATABASE:okapi_modules} + username: ${DB_USERNAME:folio_admin} + password: ${DB_PASSWORD:folio_admin} + url: jdbc:postgresql://${DB_HOST:localhost}:${DB_PORT:5432}/${DB_DATABASE:okapi_modules} hikari: data-source-properties: reWriteBatchedInserts: true @@ -71,6 +71,7 @@ folio: browse-cn-intermediate-values: ${BROWSE_CN_INTERMEDIATE_VALUES_ENABLED:true} browse-cn-intermediate-remove-duplicates: ${BROWSE_CN_INTERMEDIATE_REMOVE_DUPLICATES:true} browse-classifications: ${BROWSE_CLASSIFICATIONS_ENABLED:true} + browse-call-numbers: ${BROWSE_CALL_NUMBERS_ENABLED:true} indexing: data-format: ${INDEXING_DATA_FORMAT:smile} reindex: @@ -152,7 +153,7 @@ folio: group-id: ${folio.environment}-mod-search-index-sub-resource-group max-poll-records: 
${KAFKA_CONSUMER_INDEX_SUB_RESOURCE_MAX_POLL_RECORDS:200} max-poll-interval-ms: ${KAFKA_CONSUMER_INDEX_SUB_RESOURCE_MAX_POLL_INTERVAL_MS:600000} - okapiUrl: ${okapi.url} + okapiUrl: ${okapi.url:http://localhost:9130} logging: request: enabled: true diff --git a/src/main/resources/changelog/changelog-master.xml b/src/main/resources/changelog/changelog-master.xml index 4e3511bdd..8a1b08173 100644 --- a/src/main/resources/changelog/changelog-master.xml +++ b/src/main/resources/changelog/changelog-master.xml @@ -1,8 +1,8 @@ - + https://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-latest.xsd"> @@ -11,5 +11,6 @@ + diff --git a/src/main/resources/changelog/changes/v4.1/create_call_number_tables.xml b/src/main/resources/changelog/changes/v4.1/create_call_number_tables.xml new file mode 100644 index 000000000..6b72359f4 --- /dev/null +++ b/src/main/resources/changelog/changes/v4.1/create_call_number_tables.xml @@ -0,0 +1,63 @@ + + + + + + + + + + + Create call_number table + + + + + + + + + + + + + + + + + + + + + + + + + + Create instance_call_number table + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/main/resources/swagger.api/parameters/feature-id.yaml b/src/main/resources/swagger.api/parameters/feature-id.yaml index 241ff5713..8f5ec76d8 100644 --- a/src/main/resources/swagger.api/parameters/feature-id.yaml +++ b/src/main/resources/swagger.api/parameters/feature-id.yaml @@ -9,3 +9,4 @@ schema: - browse.cn.intermediate.values - browse.cn.intermediate.remove.duplicates - browse.classifications + - browse.call-numbers diff --git a/src/main/resources/swagger.api/schemas/entity/tenantConfiguredFeature.yaml b/src/main/resources/swagger.api/schemas/entity/tenantConfiguredFeature.yaml index eb1a4f86e..73d6ae2ab 100644 --- a/src/main/resources/swagger.api/schemas/entity/tenantConfiguredFeature.yaml +++ b/src/main/resources/swagger.api/schemas/entity/tenantConfiguredFeature.yaml @@ -5,3 +5,4 @@ enum: - browse.cn.intermediate.values - 
browse.cn.intermediate.remove.duplicates - browse.classifications + - browse.call-numbers diff --git a/src/test/java/org/folio/search/service/InstanceChildrenResourceServiceTest.java b/src/test/java/org/folio/search/service/InstanceChildrenResourceServiceTest.java index 972fbc0c2..80a6f2759 100644 --- a/src/test/java/org/folio/search/service/InstanceChildrenResourceServiceTest.java +++ b/src/test/java/org/folio/search/service/InstanceChildrenResourceServiceTest.java @@ -17,6 +17,7 @@ import org.folio.search.domain.dto.ResourceEvent; import org.folio.search.domain.dto.ResourceEventType; import org.folio.search.model.event.SubResourceEvent; +import org.folio.search.model.types.ResourceType; import org.folio.search.service.consortium.ConsortiumTenantProvider; import org.folio.search.service.converter.preprocessor.extractor.ChildResourceExtractor; import org.folio.search.service.converter.preprocessor.extractor.impl.ClassificationResourceExtractor; @@ -55,6 +56,9 @@ class InstanceChildrenResourceServiceTest { void setUp() { this.resourceExtractors = List.of(classificationResourceExtractor, contributorResourceExtractor, subjectResourceExtractor); + for (var resourceExtractor : resourceExtractors) { + lenient().when(resourceExtractor.resourceType()).thenReturn(ResourceType.INSTANCE); + } service = new InstanceChildrenResourceService(messageProducer, resourceExtractors, consortiumTenantProvider); } @@ -62,7 +66,8 @@ void setUp() { @ValueSource(ints = {0, 1, 2}) void sendChildrenEvent(int extractorIndex) { var event = new ResourceEvent() - ._new(Map.of(SOURCE_FIELD, "MARC")); + ._new(Map.of(SOURCE_FIELD, "MARC")) + .resourceName(ResourceType.INSTANCE.getName()); var expectedEvent = SubResourceEvent.fromResourceEvent(event); when(resourceExtractors.get(extractorIndex).hasChildResourceChanges(event)).thenReturn(true); @@ -91,6 +96,7 @@ void sendChildrenEvent_resourceSharing(int extractorIndex) { @ValueSource(strings = {"MARC", "CONSORTIUM_MARC"}) void 
sendChildrenEvent_noEvent(String source) { var event = new ResourceEvent() + .resourceName(ResourceType.INSTANCE.getName()) ._new(Map.of(SOURCE_FIELD, source)); resourceExtractors.forEach(resourceExtractor -> when(resourceExtractor.hasChildResourceChanges(event)).thenReturn(false)); @@ -113,7 +119,8 @@ void sendChildrenEvent_resourceSharing_noEvent() { @Test void extractChildren() { - var event = new ResourceEvent(); + var event = new ResourceEvent() + .resourceName(ResourceType.INSTANCE.getName()); resourceExtractors.forEach(resourceExtractor -> when(resourceExtractor.prepareEvents(event)).thenReturn(List.of(new ResourceEvent(), new ResourceEvent()))); @@ -142,7 +149,8 @@ void extractChildren_shadowInstance() { var result = service.extractChildren(event); assertThat(result).isEmpty(); - resourceExtractors.forEach(Mockito::verifyNoInteractions); + resourceExtractors.forEach(resourceExtractor -> Mockito.verify(resourceExtractor).resourceType()); + resourceExtractors.forEach(Mockito::verifyNoMoreInteractions); } @ParameterizedTest @@ -151,7 +159,7 @@ void persistChildren(boolean shared) { var events = List.of(new ResourceEvent(), new ResourceEvent()); when(consortiumTenantProvider.isCentralTenant(TENANT_ID)).thenReturn(shared); - service.persistChildren(TENANT_ID, events); + service.persistChildren(TENANT_ID, ResourceType.INSTANCE, events); resourceExtractors.forEach(resourceExtractor -> verify(resourceExtractor).persistChildren(shared, events)); @@ -163,20 +171,24 @@ void persistChildrenOnReindex(boolean shared) { var id1 = UUID.randomUUID(); var id2 = UUID.randomUUID(); var instances = List.of(Map.of("id", id1), Map.of("id", id2)); - var expectedEvents = List.of( - new ResourceEvent().id(id1.toString()).type(ResourceEventType.REINDEX).tenant(TENANT_ID)._new(instances.get(0)), - new ResourceEvent().id(id2.toString()).type(ResourceEventType.REINDEX).tenant(TENANT_ID)._new(instances.get(1))); + var expectedEvents = List.of(getResourceEvent(id1, instances.get(0)), 
getResourceEvent(id2, instances.get(1))); when(consortiumTenantProvider.isCentralTenant(TENANT_ID)).thenReturn(shared); - service.persistChildrenOnReindex(TENANT_ID, instances); + service.persistChildrenOnReindex(TENANT_ID, ResourceType.INSTANCE, instances); resourceExtractors.forEach(resourceExtractor -> verify(resourceExtractor).persistChildren(shared, expectedEvents)); } + private ResourceEvent getResourceEvent(UUID id1, Map payload) { + return new ResourceEvent().id(id1.toString()).type(ResourceEventType.REINDEX) + .resourceName(ResourceType.INSTANCE.getName()).tenant(TENANT_ID)._new(payload); + } + private ResourceEvent resourceSharingEvent() { return new ResourceEvent() .type(ResourceEventType.UPDATE) + .resourceName(ResourceType.INSTANCE.getName()) ._new(Map.of(SOURCE_FIELD, SOURCE_CONSORTIUM_PREFIX + "MARC")) .old(Map.of(SOURCE_FIELD, "MARC")); } diff --git a/src/test/java/org/folio/search/service/converter/preprocessor/extractor/CallNumberResourceExtractorTest.java b/src/test/java/org/folio/search/service/converter/preprocessor/extractor/CallNumberResourceExtractorTest.java new file mode 100644 index 000000000..1fb6a9ac3 --- /dev/null +++ b/src/test/java/org/folio/search/service/converter/preprocessor/extractor/CallNumberResourceExtractorTest.java @@ -0,0 +1,71 @@ +package org.folio.search.service.converter.preprocessor.extractor; + +import static org.folio.search.service.converter.preprocessor.extractor.impl.CallNumberResourceExtractor.CALL_NUMBER_FIELD; +import static org.folio.search.service.converter.preprocessor.extractor.impl.CallNumberResourceExtractor.CHRONOLOGY_FIELD; +import static org.folio.search.service.converter.preprocessor.extractor.impl.CallNumberResourceExtractor.COPY_NUMBER_FIELD; +import static org.folio.search.service.converter.preprocessor.extractor.impl.CallNumberResourceExtractor.EFFECTIVE_CALL_NUMBER_COMPONENTS_FIELD; +import static 
org.folio.search.service.converter.preprocessor.extractor.impl.CallNumberResourceExtractor.ENUMERATION_FIELD; +import static org.folio.search.service.converter.preprocessor.extractor.impl.CallNumberResourceExtractor.PREFIX_FIELD; +import static org.folio.search.service.converter.preprocessor.extractor.impl.CallNumberResourceExtractor.SUFFIX_FIELD; +import static org.folio.search.service.converter.preprocessor.extractor.impl.CallNumberResourceExtractor.TYPE_ID_FIELD; +import static org.folio.search.service.converter.preprocessor.extractor.impl.CallNumberResourceExtractor.VOLUME_FIELD; +import static org.folio.search.utils.TestUtils.mapOf; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.databind.ObjectMapper; +import java.util.Map; +import java.util.function.Supplier; +import org.folio.search.domain.dto.TenantConfiguredFeature; +import org.folio.search.service.FeatureConfigService; +import org.folio.search.service.converter.preprocessor.extractor.impl.CallNumberResourceExtractor; +import org.folio.search.service.reindex.jdbc.CallNumberRepository; +import org.folio.search.utils.JsonConverter; +import org.folio.spring.testing.type.UnitTest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@UnitTest +@ExtendWith(MockitoExtension.class) +class CallNumberResourceExtractorTest extends ChildResourceExtractorTestBase { + + @Mock + private CallNumberRepository repository; + @Mock + private FeatureConfigService featureConfigService; + + private CallNumberResourceExtractor extractor; + + @Override + protected int getExpectedEntitiesSize() { + return 1; + } + + @BeforeEach + void setUp() { + extractor = new CallNumberResourceExtractor(repository, + new JsonConverter(new ObjectMapper()), + featureConfigService); + } + + @Test + void persistChildren() { + 
when(featureConfigService.isEnabled(TenantConfiguredFeature.BROWSE_CALL_NUMBERS)).thenReturn(true); + persistChildrenTest(extractor, repository, callNumberBodySupplier()); + } + + private static Supplier> callNumberBodySupplier() { + return () -> mapOf(EFFECTIVE_CALL_NUMBER_COMPONENTS_FIELD, mapOf( + CALL_NUMBER_FIELD, "call-number", + SUFFIX_FIELD, "suffix", + PREFIX_FIELD, "prefix", + TYPE_ID_FIELD, "type-id" + ), VOLUME_FIELD, "volume", + CHRONOLOGY_FIELD, "chronology", + ENUMERATION_FIELD, "enumeration", + COPY_NUMBER_FIELD, "copy-number" + ); + } +} diff --git a/src/test/java/org/folio/search/service/converter/preprocessor/extractor/ChildResourceExtractorTestBase.java b/src/test/java/org/folio/search/service/converter/preprocessor/extractor/ChildResourceExtractorTestBase.java index f569f3203..bb7d359cc 100644 --- a/src/test/java/org/folio/search/service/converter/preprocessor/extractor/ChildResourceExtractorTestBase.java +++ b/src/test/java/org/folio/search/service/converter/preprocessor/extractor/ChildResourceExtractorTestBase.java @@ -36,7 +36,12 @@ void persistChildrenTest(ChildResourceExtractor extractor, InstanceChildResource extractor.persistChildren(false, events); verify(repository).deleteByInstanceIds(instanceIdsForDeletion); - verify(repository).saveAll(argThat(set -> set.size() == 2), argThat(list -> list.size() == 3)); + verify(repository).saveAll(argThat(set -> set.resourceEntities().size() == getExpectedEntitiesSize() + && set.relationshipEntities().size() == 3)); + } + + protected int getExpectedEntitiesSize() { + return 2; } private Map noMainValuesBody() { diff --git a/src/test/java/org/folio/search/service/converter/preprocessor/extractor/ClassificationResourceExtractorTest.java b/src/test/java/org/folio/search/service/converter/preprocessor/extractor/ClassificationResourceExtractorTest.java index c1b1955c7..ae11102d7 100644 --- a/src/test/java/org/folio/search/service/converter/preprocessor/extractor/ClassificationResourceExtractorTest.java 
+++ b/src/test/java/org/folio/search/service/converter/preprocessor/extractor/ClassificationResourceExtractorTest.java @@ -3,12 +3,14 @@ import static org.folio.search.utils.SearchUtils.CLASSIFICATIONS_FIELD; import static org.folio.search.utils.SearchUtils.CLASSIFICATION_NUMBER_FIELD; import static org.folio.search.utils.SearchUtils.CLASSIFICATION_TYPE_FIELD; +import static org.mockito.Mockito.when; import java.util.List; import java.util.Map; import java.util.UUID; import java.util.function.Supplier; import org.apache.commons.lang3.RandomStringUtils; +import org.folio.search.domain.dto.TenantConfiguredFeature; import org.folio.search.service.FeatureConfigService; import org.folio.search.service.converter.preprocessor.extractor.impl.ClassificationResourceExtractor; import org.folio.search.service.reindex.jdbc.ClassificationRepository; @@ -36,6 +38,7 @@ class ClassificationResourceExtractorTest extends ChildResourceExtractorTestBase @Test void persistChildren() { + when(featureConfigService.isEnabled(TenantConfiguredFeature.BROWSE_CLASSIFICATIONS)).thenReturn(true); persistChildrenTest(extractor, repository, classificationsBodySupplier()); } diff --git a/src/test/java/org/folio/search/service/reindex/ReindexMergeRangeIndexServiceTest.java b/src/test/java/org/folio/search/service/reindex/ReindexMergeRangeIndexServiceTest.java index 8023d2b64..653ea83ff 100644 --- a/src/test/java/org/folio/search/service/reindex/ReindexMergeRangeIndexServiceTest.java +++ b/src/test/java/org/folio/search/service/reindex/ReindexMergeRangeIndexServiceTest.java @@ -5,12 +5,12 @@ import static org.folio.search.model.types.ReindexEntityType.HOLDINGS; import static org.folio.search.model.types.ReindexEntityType.INSTANCE; import static org.folio.search.model.types.ReindexEntityType.ITEM; +import static org.folio.search.service.reindex.ReindexConstants.RESOURCE_NAME_MAP; import static org.folio.search.utils.TestConstants.TENANT_ID; import static org.mockito.ArgumentMatchers.any; import static 
org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; import java.sql.Timestamp; @@ -124,10 +124,9 @@ void saveEntities(ReindexRecordsEvent.ReindexRecordType recordType) { service.saveEntities(event); verify(repositoryMap.get(recordType.getEntityType())).saveEntities(TENANT_ID, List.of(entities)); - if (recordType == ReindexRecordsEvent.ReindexRecordType.INSTANCE) { - verify(instanceChildrenResourceService).persistChildrenOnReindex(TENANT_ID, List.of(entities)); - } else { - verifyNoInteractions(instanceChildrenResourceService); - } + verify(instanceChildrenResourceService).persistChildrenOnReindex(TENANT_ID, + RESOURCE_NAME_MAP.get(recordType.getEntityType()), + List.of(entities)); } } + diff --git a/src/test/java/org/folio/search/service/reindex/jdbc/ClassificationRepositoryIT.java b/src/test/java/org/folio/search/service/reindex/jdbc/ClassificationRepositoryIT.java index 2c0ab55d4..a391f7253 100644 --- a/src/test/java/org/folio/search/service/reindex/jdbc/ClassificationRepositoryIT.java +++ b/src/test/java/org/folio/search/service/reindex/jdbc/ClassificationRepositoryIT.java @@ -18,6 +18,7 @@ import java.util.Map; import java.util.Set; import org.folio.search.configuration.properties.ReindexConfigurationProperties; +import org.folio.search.model.entity.ChildResourceEntityBatch; import org.folio.search.utils.JsonConverter; import org.folio.spring.FolioExecutionContext; import org.folio.spring.FolioModuleMetadata; @@ -92,7 +93,7 @@ void saveAll() { classificationRelation("b3bae8a9-cfb1-4afe-83d5-2cdae4580e07", "2"), classificationRelation("9ec55e4f-6a76-427c-b47b-197046f44a54", "2")); - repository.saveAll(entities, entityRelations); + repository.saveAll(new ChildResourceEntityBatch(entities, entityRelations)); // assert var ranges = repository.fetchByIdRange("0", "50"); diff --git 
a/src/test/java/org/folio/search/service/reindex/jdbc/ContributorRepositoryIT.java b/src/test/java/org/folio/search/service/reindex/jdbc/ContributorRepositoryIT.java index 66e4059a9..bdbf5ff89 100644 --- a/src/test/java/org/folio/search/service/reindex/jdbc/ContributorRepositoryIT.java +++ b/src/test/java/org/folio/search/service/reindex/jdbc/ContributorRepositoryIT.java @@ -18,6 +18,7 @@ import java.util.Map; import java.util.Set; import org.folio.search.configuration.properties.ReindexConfigurationProperties; +import org.folio.search.model.entity.ChildResourceEntityBatch; import org.folio.search.utils.JsonConverter; import org.folio.spring.FolioExecutionContext; import org.folio.spring.FolioModuleMetadata; @@ -94,7 +95,7 @@ void saveAll() { contributorRelation("b3bae8a9-cfb1-4afe-83d5-2cdae4580e07", "2"), contributorRelation("9ec55e4f-6a76-427c-b47b-197046f44a54", "2")); - repository.saveAll(entities, entityRelations); + repository.saveAll(new ChildResourceEntityBatch(entities, entityRelations)); // assert var ranges = repository.fetchByIdRange("0", "50"); diff --git a/src/test/java/org/folio/search/service/reindex/jdbc/SubjectRepositoryIT.java b/src/test/java/org/folio/search/service/reindex/jdbc/SubjectRepositoryIT.java index bb95d210a..dd91a3c74 100644 --- a/src/test/java/org/folio/search/service/reindex/jdbc/SubjectRepositoryIT.java +++ b/src/test/java/org/folio/search/service/reindex/jdbc/SubjectRepositoryIT.java @@ -21,6 +21,7 @@ import java.util.Set; import org.assertj.core.api.Condition; import org.folio.search.configuration.properties.ReindexConfigurationProperties; +import org.folio.search.model.entity.ChildResourceEntityBatch; import org.folio.search.model.reindex.UploadRangeEntity; import org.folio.search.model.types.ReindexEntityType; import org.folio.search.utils.JsonConverter; @@ -150,7 +151,7 @@ void saveAll() { subjectRelation("b3bae8a9-cfb1-4afe-83d5-2cdae4580e07", "2"), subjectRelation("9ec55e4f-6a76-427c-b47b-197046f44a54", "2")); - 
repository.saveAll(entities, entityRelations); + repository.saveAll(new ChildResourceEntityBatch(entities, entityRelations)); // assert var ranges = repository.fetchByIdRange("0", "50"); diff --git a/src/test/resources/application.yml b/src/test/resources/application.yml index f793294aa..dee990fdc 100644 --- a/src/test/resources/application.yml +++ b/src/test/resources/application.yml @@ -53,6 +53,7 @@ folio: browse-cn-intermediate-values: true browse-cn-intermediate-remove-duplicates: true browse-classifications: true + browse-call-numbers: true indexing: data-format: ${INDEXING_DATA_FORMAT:json} reindex: