From 1c9bbae3939a161566d47ab810a09b9347aeeb25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 14 Aug 2023 12:46:56 +0200 Subject: [PATCH 01/79] First approach to use the elasticsearch service --- .DS_Store | Bin 0 -> 6148 bytes .env | 9 ++ docker-compose.yaml | 73 ++++++++++++ elasticsearch/.DS_Store | Bin 0 -> 6148 bytes elasticsearch/elasticsearch.yml | 13 +++ elasticsearch/elasticsearch_query.py | 27 +++++ elasticsearch/publication.json | 106 ++++++++++++++++++ logstash/.DS_Store | Bin 0 -> 6148 bytes logstash/Dockerfile | 13 +++ logstash/config/logstash.yml | 12 ++ logstash/pipeline/.DS_Store | Bin 0 -> 6148 bytes logstash/pipeline/conf/init_temp_table.conf | 56 +++++++++ logstash/pipeline/conf/sync_temp_table.conf | 61 ++++++++++ logstash/pipeline/pipelines.yml | 5 + logstash/pipeline/sql/.DS_Store | Bin 0 -> 6148 bytes logstash/pipeline/sql/init_temp_dataset.sql | 2 + .../pipeline/sql/init_temp_publication.sql | 2 + logstash/pipeline/sql/sync_temp_dataset.sql | 3 + .../pipeline/sql/sync_temp_publication.sql | 3 + 19 files changed, 385 insertions(+) create mode 100644 .DS_Store create mode 100644 elasticsearch/.DS_Store create mode 100644 elasticsearch/elasticsearch.yml create mode 100755 elasticsearch/elasticsearch_query.py create mode 100644 elasticsearch/publication.json create mode 100644 logstash/.DS_Store create mode 100644 logstash/Dockerfile create mode 100644 logstash/config/logstash.yml create mode 100644 logstash/pipeline/.DS_Store create mode 100644 logstash/pipeline/conf/init_temp_table.conf create mode 100644 logstash/pipeline/conf/sync_temp_table.conf create mode 100644 logstash/pipeline/pipelines.yml create mode 100644 logstash/pipeline/sql/.DS_Store create mode 100644 logstash/pipeline/sql/init_temp_dataset.sql create mode 100644 logstash/pipeline/sql/init_temp_publication.sql create mode 100644 logstash/pipeline/sql/sync_temp_dataset.sql create mode 100644 logstash/pipeline/sql/sync_temp_publication.sql diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..ca7594b03a98ca69c91734ab3e1eb25028677bfd GIT binary patch literal 6148 zcmeHKu};G<5IvU)1rSn4Mh{Gdr2_*4QmE1|=ns^(C`C%@ss-lk4158f1K-2U#5JU)`Wo+%C*&_U&wI;1#*#{aw#x32@gjRG- zS2b@n{6z+M?+&qF8Fp+%<^5Y?CudpO8%|ulkfnEbo4s?FB;(AD;T0V}oxPo3JYRkl zG5u!5Vpd+!2*Wq6|9-ffXKtRt3tuQcbIWk4DDIR^MdghU5|+A0IefHJUVfX@dDWsCzBg8t}0<4XWw z8)h%)b1lI!HeejE5X2jZkx-z7nowdG35VSVzc^qaDB)y6@nOQuCR8ZK&yMk}cPA4I zYO4$=162lgH1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0*qwy9}ZcK z3~HLv0=T4U`z)PYZSq;q=lt&d?fFdeN#-i6ubh7JYv=VOdAT?P&VVzpYYd=fi)2TJ z-Z}%$fHSaSK)w$FDws!X4E582My~+CF3c+E>%SZ@hyj>KYz$$6u!RCGl;ahHEgbd` z{ql&7p@kFc%9tmv%<+c8x;pGZ-HCHUZ=C^Wpv}OZJ@%yjUv9tuw}br28E^)6iUIBx z!(xC}inTR)IjOY)dJPp3zs7JCf{vDA_(~~0hN{3Gqyx+&Hioc3{6|1(@WvVVQwBZ( D%KT8h literal 0 HcmV?d00001 diff --git a/logstash/Dockerfile b/logstash/Dockerfile new file mode 100644 index 00000000..8e805e17 --- /dev/null +++ b/logstash/Dockerfile @@ -0,0 +1,13 @@ +# https://www.docker.elastic.co/ +FROM docker.elastic.co/logstash/logstash:7.13.0 + +# Download MySQL JDBC driver to connect Logstash to MySQL +RUN curl -Lo "mysql-connector-java-8.0.22.tar.gz" "https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.22.tar.gz" \ + && tar -xf "mysql-connector-java-8.0.22.tar.gz" "mysql-connector-java-8.0.22/mysql-connector-java-8.0.22.jar" \ + && mv "mysql-connector-java-8.0.22/mysql-connector-java-8.0.22.jar" "mysql-connector-java-8.0.22.jar" \ + && rm -r "mysql-connector-java-8.0.22" "mysql-connector-java-8.0.22.tar.gz" + +ENTRYPOINT ["/usr/local/bin/docker-entrypoint"] + +# Add your logstash plugins setup here +# Example: RUN logstash-plugin install logstash-filter-json diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml new file mode 100644 index 00000000..a48c35ff --- /dev/null +++ b/logstash/config/logstash.yml @@ -0,0 +1,12 @@ +--- +## Default Logstash configuration from Logstash base image. +## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml +# +http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] + +## X-Pack security credentials +# +xpack.monitoring.enabled: true +xpack.monitoring.elasticsearch.username: elastic +xpack.monitoring.elasticsearch.password: changeme diff --git a/logstash/pipeline/.DS_Store b/logstash/pipeline/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..fd63aa2abca915353d409f63dc0c624887439896 GIT binary patch literal 6148 zcmeHK%}T>S5T0pshzL@|gU1D*py0t{Ea?;E1xj12hcsI)dd?$x(!1b0dGwpz!Dfl} zD8x5fSB3!NCcJA4L43JsGG)@>t$cug2BFuQ$C&GaM=d z^6fHOkf(<3((iY+?fTZfEa$bK!*7Mp#=bDFbBb0dI`yKiiKhu5grJ~RA5YHZ812e!|pV$P;4W{bYg8j zSSz!3C>*Ne{Vs$P7ZGhV28@A}fnj_c$^Ae5z5h>>?8z7~2L2TToRw8M!IJcD?JZ93 tS_hp$MI^3`_??1DxQY=gSMen@2<%QKz(TQ&2n)o11Og2<7z2OGz!$NgMmhih literal 0 HcmV?d00001 diff --git a/logstash/pipeline/conf/init_temp_table.conf b/logstash/pipeline/conf/init_temp_table.conf new file mode 100644 index 00000000..7dbb4bdb --- /dev/null +++ b/logstash/pipeline/conf/init_temp_table.conf @@ -0,0 +1,56 @@ +input { + # https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_temp_publication.sql" + type => "publication" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_temp_dataset.sql" + type => "dataset" + } +} +# https://www.elastic.co/guide/en/logstash/current/filter-plugins.html+ +#filter { +# mutate { +# remove_field => ["@version", "@timestamp"] +# } +#} +output { + # https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html + if [type] == "publication" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "publication" + document_id => "publication_%{identifier}" + } + } + if [type] == "dataset" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "dataset" + document_id => "document_%{identifier}" + } + } +} diff --git a/logstash/pipeline/conf/sync_temp_table.conf b/logstash/pipeline/conf/sync_temp_table.conf new file mode 100644 index 00000000..13b90737 --- /dev/null +++ b/logstash/pipeline/conf/sync_temp_table.conf @@ -0,0 +1,61 @@ +input { + # https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_created" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_temp_publication.sql" + type => "publication" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_temp_dataset.sql" + type => "dataset" + } + +} +# https://www.elastic.co/guide/en/logstash/current/filter-plugins.html+ +#filter { +# mutate { +# remove_field => ["@version", "@timestamp", "ts"] +# } +#} +output { + # https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html + if [type] == "publication" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "publication" + document_id => "publication_%{identifier}" + } + } + if [type] == "dataset" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "dataset" + document_id => "document_%{identifier}" + } + } +} diff --git a/logstash/pipeline/pipelines.yml b/logstash/pipeline/pipelines.yml new file mode 100644 index 00000000..844d2b0c --- /dev/null +++ b/logstash/pipeline/pipelines.yml @@ -0,0 +1,5 @@ +- pipeline.id: init-temp-table-pipeline + path.config: "/usr/share/logstash/pipeline/init_temp_table.conf" + +- pipeline.id: sync-temp-table-pipeline + path.config: "/usr/share/logstash/pipeline/sync_temp_table.conf" diff --git a/logstash/pipeline/sql/.DS_Store b/logstash/pipeline/sql/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 :sql_last_value +ORDER BY identifier diff --git a/logstash/pipeline/sql/sync_temp_publication.sql b/logstash/pipeline/sql/sync_temp_publication.sql new file mode 100644 index 00000000..63d26fc3 --- /dev/null +++ b/logstash/pipeline/sql/sync_temp_publication.sql @@ -0,0 +1,3 @@ +SELECT * FROM aiod.publication +WHERE date_created > :sql_last_value +ORDER BY identifier From eb73469a20a7f3f7b51574e7682cd82f95c186b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 14 Aug 2023 14:05:43 +0200 Subject: [PATCH 02/79] elasticsearch query example --- elasticsearch/elasticsearch_query.py | 61 ++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 4 deletions(-) diff --git a/elasticsearch/elasticsearch_query.py b/elasticsearch/elasticsearch_query.py index 2bc696dd..2bccd082 100755 --- a/elasticsearch/elasticsearch_query.py +++ b/elasticsearch/elasticsearch_query.py @@ -1,17 +1,26 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +import json from elasticsearch import Elasticsearch ELASTIC_USER = "elastic" -QUERY = {'match': {'title': "exotic"}} + +SIZE = 2 +INDEX = "publication" +QUERY = {'match': {'platform': "example"}} +QUERY = {'match': {'title': "in"}} +SEARCH_AFTER = None +SORT = {'identifier': 'asc'} def main(): + global SEARCH_AFTER + # Get elasticsearch password with open('../.env', 'r') as f: for line in f: - if "ELASTIC_PASSWORD" in line: + if "ES_PASSWORD" in line: ELASTIC_PASSWORD = line.split('=')[1][:-1] break @@ -20,8 +29,52 @@ def main(): basic_auth=(ELASTIC_USER, ELASTIC_PASSWORD)) # Search - result = es_client.search(index="publication", query=QUERY) - print(result['hits']['hits']) + result = es_client.search(index=INDEX, query=QUERY, size=SIZE, + search_after=SEARCH_AFTER, sort=SORT) + + # Print total number of results + print(f"TOTAL RESULTS: {result['hits']['total']['value']}") + + query_result = 1 + while result['hits']['hits']: + + # Print current results + print(f"QUERY RESULT: {query_result}") + print(json.dumps(dict(result)['hits']['hits'], indent=4)) + + # Actualise search_after for the next search + SEARCH_AFTER = result['hits']['hits'][-1]['sort'] + query_result += 1 + + # Search + result = es_client.search(index=INDEX, query=QUERY, size=SIZE, + search_after=SEARCH_AFTER, sort=SORT) if __name__ == "__main__": main() + + + +#ELASTIC_USER = "elastic" +#QUERY = {'bool': {'must': [{'match': {'title': "Advances"}}, +# {'match': {'platform_identifier': "4"}}]}} +# +#def main(): +# +# # Get elasticsearch password +# with open('../.env', 'r') as f: +# for line in f: +# if "ES_PASSWORD" in line: +# ELASTIC_PASSWORD = line.split('=')[1][:-1] +# break +# +# # Generate client +# es_client = Elasticsearch("http://localhost:9200", +# basic_auth=(ELASTIC_USER, ELASTIC_PASSWORD)) +# +# # Search +# result = es_client.search(index="publication", query=QUERY) +# print(result['hits']['hits']) +# +#if __name__ == "__main__": +# main() From 8bd8115c5a106f7c0746f7ea96d5c92caa3d7e5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 14 Aug 2023 14:39:49 +0200 Subject: [PATCH 03/79] elasticsearch query complete example --- .DS_Store | Bin 6148 -> 6148 bytes elasticsearch/elasticsearch_query.py | 66 +++++++++------------------ 2 files changed, 21 insertions(+), 45 deletions(-) diff --git a/.DS_Store b/.DS_Store index ca7594b03a98ca69c91734ab3e1eb25028677bfd..268a09f1763afb6c748211774189d3c0782a31f4 100644 GIT binary patch delta 269 zcmZoMXfc=|#>B!ku~2NHo+2an#(>?7iw`g}F>+6y$0S#uR9;+=l#`#tz`(F0sURn_ zxWvHV8Y2@k3o9Et2L}f?M{ICLetB?7Vo7PSQ({pxh!>KdpOXY*Cnkkurk2MGh&box zl_X~7r51rTWTvD7mBfT+=B4D9JLQ+=r4)np216t`I5|1v1th9nt1ZoqbQH`CO>1=& zsx6HSbQDaC&1!2oIYgE9t%KsTb8_?YyMT@Z0!Bs%&ANZD+tYHQKUI9n# delta 67 zcmZoMXfc=|#>B)qu~2NHo+2aH#(>?7j9il&SmieJu>NM+*zkyHGdl-A2T;joL5}at Vlles)IT(O|k%56_bA-qmW&mck5ODwi diff --git a/elasticsearch/elasticsearch_query.py b/elasticsearch/elasticsearch_query.py index 2bccd082..3c01917e 100755 --- a/elasticsearch/elasticsearch_query.py +++ b/elasticsearch/elasticsearch_query.py @@ -4,33 +4,32 @@ import json from elasticsearch import Elasticsearch +# Global parameters ELASTIC_USER = "elastic" - SIZE = 2 -INDEX = "publication" -QUERY = {'match': {'platform': "example"}} -QUERY = {'match': {'title': "in"}} -SEARCH_AFTER = None SORT = {'identifier': 'asc'} -def main(): - - global SEARCH_AFTER +def main(index, search_concept, platforms): # Get elasticsearch password with open('../.env', 'r') as f: for line in f: if "ES_PASSWORD" in line: - ELASTIC_PASSWORD = line.split('=')[1][:-1] + elastic_password = line.split('=')[1][:-1] break # Generate client es_client = Elasticsearch("http://localhost:9200", - basic_auth=(ELASTIC_USER, ELASTIC_PASSWORD)) + basic_auth=(ELASTIC_USER, elastic_password)) + + #Prepare query + platform_identifiers = [{'match': {'platform_identifier': p}} + for p in platforms] + query = {'bool': {'must': {'match': {'title': search_concept}}, + 'must': {'bool': {'should': platform_identifiers}}}} - # Search - result = es_client.search(index=INDEX, query=QUERY, size=SIZE, - search_after=SEARCH_AFTER, sort=SORT) + # Perform first search + result = es_client.search(index=index, query=query, size=SIZE, sort=SORT) # Print total number of results print(f"TOTAL RESULTS: {result['hits']['total']['value']}") @@ -42,39 +41,16 @@ def main(): print(f"QUERY RESULT: {query_result}") print(json.dumps(dict(result)['hits']['hits'], indent=4)) - # Actualise search_after for the next search - SEARCH_AFTER = result['hits']['hits'][-1]['sort'] + # Actualise search_after and query_result for the next search + search_after = result['hits']['hits'][-1]['sort'] query_result += 1 - # Search - result = es_client.search(index=INDEX, query=QUERY, size=SIZE, - search_after=SEARCH_AFTER, sort=SORT) + # Perform next search + result = es_client.search(index=index, query=query, size=SIZE, + search_after=search_after, sort=SORT) if __name__ == "__main__": - main() - - - -#ELASTIC_USER = "elastic" -#QUERY = {'bool': {'must': [{'match': {'title': "Advances"}}, -# {'match': {'platform_identifier': "4"}}]}} -# -#def main(): -# -# # Get elasticsearch password -# with open('../.env', 'r') as f: -# for line in f: -# if "ES_PASSWORD" in line: -# ELASTIC_PASSWORD = line.split('=')[1][:-1] -# break -# -# # Generate client -# es_client = Elasticsearch("http://localhost:9200", -# basic_auth=(ELASTIC_USER, ELASTIC_PASSWORD)) -# -# # Search -# result = es_client.search(index="publication", query=QUERY) -# print(result['hits']['hits']) -# -#if __name__ == "__main__": -# main() + index = ["publication"] # List of assets + search_concept = "in" # Search concept + platforms = ['2', '4', '9'] # List of platforms + main(index, search_concept, platforms) From 2e7934b5154462b9a4ef5a065b0731c219ec7dcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 16 Aug 2023 14:14:28 +0200 Subject: [PATCH 04/79] Elasticsearch setup configured --- .env | 1 - docker-compose.yaml | 12 ++- elasticsearch/.DS_Store | Bin 6148 -> 6148 bytes elasticsearch/setup/curl.sh | 2 + elasticsearch/setup/curl_dockerfile | 10 +++ elasticsearch/setup/dataset.json | 86 +++++++++++++++++++++ elasticsearch/{ => setup}/publication.json | 28 +++++-- 7 files changed, 123 insertions(+), 16 deletions(-) create mode 100755 elasticsearch/setup/curl.sh create mode 100644 elasticsearch/setup/curl_dockerfile create mode 100644 elasticsearch/setup/dataset.json rename elasticsearch/{ => setup}/publication.json (82%) diff --git a/.env b/.env index 2ad0a122..497c328b 100644 --- a/.env +++ b/.env @@ -13,7 +13,6 @@ POST_LOGOUT_REDIRECT_URIS=http://${HOSTNAME}/aiod-auth/realms/aiod/protocol/open ES_JAVA_OPTS="-Xmx256m -Xms256m" ES_PASSWORD=changeme ES_DISCOVERY_TYPE=single-node -ES_CONTENT_TYPE='Content-Type: application/json' #LOGSTASH LS_JAVA_OPTS="-Xmx256m -Xms256m" diff --git a/docker-compose.yaml b/docker-compose.yaml index 1fde7945..3c2b6bf8 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -98,15 +98,13 @@ services: retries: 30 elasticsearch_setup: - image: curlimages/curl + build: + context: elasticsearch/setup/ + dockerfile: curl_dockerfile env_file: .env - volumes: - - type: bind - source: ./elasticsearch/publication.json - target: /publication.json - read_only: true + environment: + - ES_PASSWORD=$ES_PASSWORD restart: "no" - entrypoint: ["curl", "-u", "elastic:${ES_PASSWORD}", "-X", "PUT", "elasticsearch:9200/publication?pretty", "-H", "${ES_CONTENT_TYPE}", "-d", "@/publication.json"] depends_on: elasticsearch: condition: service_healthy diff --git a/elasticsearch/.DS_Store b/elasticsearch/.DS_Store index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..6ea5e8aa2eb3605e15e2a298d0b18b4d2c9274b7 100644 GIT binary patch delta 205 zcmZoMXfc=|#>B`mF;Q%yo}w@d0|Nsi1A_nqLqSfuVQ_MOZo$OFmGvNDR)%7RRE83U zQiK#x6b6uFbMswXl5+BsfKnWt&wkvga69SJkcY?BQ{ST=KW faB~11yD{-Q^JIPzMOG%DyuxHd5#h}qB72wtxi2yZ delta 70 zcmZoMXfc=|#>AjHu~2NHo+1YW5HK<@2yC9nSjM(_0izY;W_AvK4xj>{$am(+{342+ UKzW7)kiy9(Jj$D6L{=~Z03gE-+W-In diff --git a/elasticsearch/setup/curl.sh b/elasticsearch/setup/curl.sh new file mode 100755 index 00000000..c7510b3c --- /dev/null +++ b/elasticsearch/setup/curl.sh @@ -0,0 +1,2 @@ +curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/publication?pretty -H 'Content-Type: application/json' -d @/publication.json +curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/dataset?pretty -H 'Content-Type: application/json' -d @/dataset.json diff --git a/elasticsearch/setup/curl_dockerfile b/elasticsearch/setup/curl_dockerfile new file mode 100644 index 00000000..cda3901b --- /dev/null +++ b/elasticsearch/setup/curl_dockerfile @@ -0,0 +1,10 @@ +FROM ubuntu:22.04 + +RUN apt-get update && apt-get install -y curl + +COPY publication.json /publication.json +COPY dataset.json /dataset.json +COPY curl.sh /curl.sh + +ENTRYPOINT ["/bin/bash", "/curl.sh"] + diff --git a/elasticsearch/setup/dataset.json b/elasticsearch/setup/dataset.json new file mode 100644 index 00000000..8b497f89 --- /dev/null +++ b/elasticsearch/setup/dataset.json @@ -0,0 +1,86 @@ +{ + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date", + "index" : false + }, + "@version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "description" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "identifier" : { + "type" : "long", + "index" : false + }, + "is_accessible_for_free" : { + "type" : "boolean", + "index" : false + }, + "name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform_identifier" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "same_as" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "type" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + } + } + } +} diff --git a/elasticsearch/publication.json b/elasticsearch/setup/publication.json similarity index 82% rename from elasticsearch/publication.json rename to elasticsearch/setup/publication.json index 1c4a43ee..151ba894 100644 --- a/elasticsearch/publication.json +++ b/elasticsearch/setup/publication.json @@ -2,10 +2,12 @@ "mappings" : { "properties" : { "@timestamp" : { - "type" : "date" + "type" : "date", + "index" : false }, "@version" : { "type" : "text", + "index" : false, "fields" : { "keyword" : { "type" : "keyword", @@ -15,6 +17,7 @@ }, "access_right" : { "type" : "text", + "index" : false, "fields" : { "keyword" : { "type" : "keyword", @@ -24,6 +27,7 @@ }, "creators" : { "type" : "text", + "index" : false, "fields" : { "keyword" : { "type" : "keyword", @@ -32,29 +36,34 @@ } }, "date_created" : { - "type" : "date" + "type" : "date", + "index" : false }, "date_published" : { - "type" : "date" + "type" : "date", + "index" : false }, "doi" : { "type" : "text", + "index" : false, "fields" : { "keyword" : { "type" : "keyword", "ignore_above" : 256 } - }, - "index" : false + } }, "identifier" : { - "type" : "long" + "type" : "long", + "index" : false }, "license_identifier" : { - "type" : "long" + "type" : "long", + "index" : false }, "platform" : { "type" : "text", + "index" : false, "fields" : { "keyword" : { "type" : "keyword", @@ -72,7 +81,8 @@ } }, "resource_type_identifier" : { - "type" : "long" + "type" : "long", + "index" : false }, "title" : { "type" : "text", @@ -85,6 +95,7 @@ }, "type" : { "type" : "text", + "index" : false, "fields" : { "keyword" : { "type" : "keyword", @@ -94,6 +105,7 @@ }, "url" : { "type" : "text", + "index" : false, "fields" : { "keyword" : { "type" : "keyword", From 5ba4a7cec514fdec9a7d3b2c9ebf57bb5caf7439 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 16 Aug 2023 14:20:18 +0200 Subject: [PATCH 05/79] Remove .DS_Store and add it to dockerignore --- .DS_Store | Bin 6148 -> 0 bytes .dockerignore | 1 + elasticsearch/.DS_Store | Bin 6148 -> 0 bytes logstash/.DS_Store | Bin 6148 -> 0 bytes logstash/pipeline/.DS_Store | Bin 6148 -> 0 bytes logstash/pipeline/sql/.DS_Store | Bin 6148 -> 0 bytes 6 files changed, 1 insertion(+) delete mode 100644 .DS_Store delete mode 100644 elasticsearch/.DS_Store delete mode 100644 logstash/.DS_Store delete mode 100644 logstash/pipeline/.DS_Store delete mode 100644 logstash/pipeline/sql/.DS_Store diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index 268a09f1763afb6c748211774189d3c0782a31f4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK!EO^V5FNLnI8>E#0I5eyaDqrAg|>xBRiPQleqMXV_B?Cs9RLvRNz??W0f59pSb2ceHA3T5 zO0wp9HW7uHBZnA{AcGzRC2j6t2AF}n#sJ0c7RGCW5u3qc{Ejh_2jMgug^3PP>ogoC zah5k47h-L_a&P0llyXyk@dtY9=RrQn+QIk}eZAK@39s5g_#y61yS0aJb(#lp+8Y~# zIO?Iw=Z|q3>1kU}(kL^wz8#R&YPDP2p3PeOJ9V|!+@06ethLjqtCuhK=JTq2^!WL! zcipeaFxB5Jr2=mtBU=uy;20Y#JWTpc|40uL_ZX?*H zT!L(rL(gGh5l2vjHAS?h!lf9(nq$9mdY;3=qBRHMk`Lil7A`{(W_6rj8FdhzMXs3v zW?-3tb^BSP_y6GU`Tw#Je=!5hz<%qK*YtP`)_fnYUv?~p7B1?^hx*L#$&1$6F@CDhQ~s#gL=0zlH diff --git a/logstash/.DS_Store b/logstash/.DS_Store deleted file mode 100644 index 4ba1f514d476e5ea0be026522d81faf76ae8b65d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKOHRW;47E!IL2SBYIa`)pfy5?MfeZ8ks9%tpCPHE#iJNc&R@{IkH{b$X0`Y8* zO2f3UL#VPP&zpGc8RyL?&JYm~?k9bs9uZ|w!OkI?4I<;BiFBex23hC0rTJpsAC9ZC zPQ2al9~qFn+ov&Aw4@m&`^S&p$IGfHMvH0$d-nS9aWFf5xk!B`zxvcS>*qwy9}ZcK z3~HLv0=T4U`z)PYZSq;q=lt&d?fFdeN#-i6ubh7JYv=VOdAT?P&VVzpYYd=fi)2TJ z-Z}%$fHSaSK)w$FDws!X4E582My~+CF3c+E>%SZ@hyj>KYz$$6u!RCGl;ahHEgbd` z{ql&7p@kFc%9tmv%<+c8x;pGZ-HCHUZ=C^Wpv}OZJ@%yjUv9tuw}br28E^)6iUIBx z!(xC}inTR)IjOY)dJPp3zs7JCf{vDA_(~~0hN{3Gqyx+&Hioc3{6|1(@WvVVQwBZ( D%KT8h diff --git a/logstash/pipeline/.DS_Store b/logstash/pipeline/.DS_Store deleted file mode 100644 index fd63aa2abca915353d409f63dc0c624887439896..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5T0pshzL@|gU1D*py0t{Ea?;E1xj12hcsI)dd?$x(!1b0dGwpz!Dfl} zD8x5fSB3!NCcJA4L43JsGG)@>t$cug2BFuQ$C&GaM=d z^6fHOkf(<3((iY+?fTZfEa$bK!*7Mp#=bDFbBb0dI`yKiiKhu5grJ~RA5YHZ812e!|pV$P;4W{bYg8j zSSz!3C>*Ne{Vs$P7ZGhV28@A}fnj_c$^Ae5z5h>>?8z7~2L2TToRw8M!IJcD?JZ93 tS_hp$MI^3`_??1DxQY=gSMen@2<%QKz(TQ&2n)o11Og2<7z2OGz!$NgMmhih diff --git a/logstash/pipeline/sql/.DS_Store b/logstash/pipeline/sql/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 Date: Wed, 23 Aug 2023 12:18:48 +0200 Subject: [PATCH 06/79] Created an elastic search endpoint. It isnt working yet --- .env | 2 + docker-compose.yaml | 10 +-- elasticsearch/elasticsearch_query.py | 56 -------------- {elasticsearch => es}/elasticsearch.yml | 0 es/elasticsearch_query.py | 63 +++++++++++++++ {elasticsearch => es}/setup/curl.sh | 0 {elasticsearch => es}/setup/curl_dockerfile | 0 {elasticsearch => es}/setup/dataset.json | 0 {elasticsearch => es}/setup/publication.json | 0 pyproject.toml | 19 ++--- src/routers/__init__.py | 24 +++--- src/routers/resource_router.py | 9 ++- src/routers/resources/__init__.py | 0 src/routers/{ => resources}/dataset_router.py | 0 .../{ => resources}/experiment_router.py | 0 .../{ => resources}/ml_model_router.py | 0 .../{ => resources}/organisation_router.py | 0 src/routers/{ => resources}/person_router.py | 0 .../{ => resources}/platform_router.py | 0 .../{ => resources}/publication_router.py | 0 src/routers/{ => resources}/service_router.py | 0 src/routers/router.py | 10 +++ src/routers/search_router.py | 77 +++++++++++++++++++ src/routers/upload_router_huggingface.py | 3 +- src/tests/.env | 3 + src/tests/routers/resources/__init__.py | 0 .../{ => resources}/test_router_dataset.py | 0 .../test_router_dataset_generic_fields.py | 0 .../{ => resources}/test_router_experiment.py | 0 .../{ => resources}/test_router_ml_model.py | 0 .../test_router_organisation.py | 0 .../{ => resources}/test_router_person.py | 0 .../{ => resources}/test_router_platform.py | 0 .../test_router_publication.py | 0 .../{ => resources}/test_router_service.py | 0 src/tests/routers/test_search_router.py | 23 ++++++ 36 files changed, 212 insertions(+), 87 deletions(-) delete mode 100755 elasticsearch/elasticsearch_query.py rename {elasticsearch => es}/elasticsearch.yml (100%) create mode 100755 es/elasticsearch_query.py rename {elasticsearch => es}/setup/curl.sh (100%) rename {elasticsearch => es}/setup/curl_dockerfile (100%) rename {elasticsearch => es}/setup/dataset.json (100%) rename {elasticsearch => es}/setup/publication.json (100%) create mode 100644 src/routers/resources/__init__.py rename src/routers/{ => resources}/dataset_router.py (100%) rename src/routers/{ => resources}/experiment_router.py (100%) rename src/routers/{ => resources}/ml_model_router.py (100%) rename src/routers/{ => resources}/organisation_router.py (100%) rename src/routers/{ => resources}/person_router.py (100%) rename src/routers/{ => resources}/platform_router.py (100%) rename src/routers/{ => resources}/publication_router.py (100%) rename src/routers/{ => resources}/service_router.py (100%) create mode 100644 src/routers/router.py create mode 100644 src/routers/search_router.py create mode 100644 src/tests/routers/resources/__init__.py rename src/tests/routers/{ => resources}/test_router_dataset.py (100%) rename src/tests/routers/{ => resources}/test_router_dataset_generic_fields.py (100%) rename src/tests/routers/{ => resources}/test_router_experiment.py (100%) rename src/tests/routers/{ => resources}/test_router_ml_model.py (100%) rename src/tests/routers/{ => resources}/test_router_organisation.py (100%) rename src/tests/routers/{ => resources}/test_router_person.py (100%) rename src/tests/routers/{ => resources}/test_router_platform.py (100%) rename src/tests/routers/{ => resources}/test_router_publication.py (100%) rename src/tests/routers/{ => resources}/test_router_service.py (100%) create mode 100644 src/tests/routers/test_search_router.py diff --git a/.env b/.env index 497c328b..352f3968 100644 --- a/.env +++ b/.env @@ -10,9 +10,11 @@ REDIRECT_URIS=http://${HOSTNAME}/docs/oauth2-redirect POST_LOGOUT_REDIRECT_URIS=http://${HOSTNAME}/aiod-auth/realms/aiod/protocol/openid-connect/logout #ELASTICSEARCH +ES_USER=elastic ES_JAVA_OPTS="-Xmx256m -Xms256m" ES_PASSWORD=changeme ES_DISCOVERY_TYPE=single-node +ES_ROLE="edit_aiod_resources" #LOGSTASH LS_JAVA_OPTS="-Xmx256m -Xms256m" diff --git a/docker-compose.yaml b/docker-compose.yaml index ddd2d93e..83bb6fe3 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -20,7 +20,7 @@ services: --limit 10 healthcheck: test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8000')"] - interval: 30s + interval: 5s timeout: 30s retries: 30 depends_on: @@ -37,7 +37,7 @@ services: - ./data/mysql:/var/lib/mysql healthcheck: test: ["CMD", "mysqladmin", "-uroot", "-p$MYSQL_ROOT_PASSWORD", "ping", "-h", "localhost", "--protocol","tcp"] - interval: 30s + interval: 5s timeout: 30s retries: 30 @@ -86,19 +86,19 @@ services: - 9300:9300 volumes: - type: bind - source: ./elasticsearch/elasticsearch.yml + source: ./es/elasticsearch.yml target: /usr/share/elasticsearch/config/elasticsearch.yml read_only: true - ./data/elasticsearch:/usr/share/elasticsearch/data healthcheck: test: ["CMD-SHELL", "curl -u elastic:changeme http://localhost:9200/_cat/health | grep -q -E 'green|yellow'"] - interval: 30s + interval: 5s timeout: 30s retries: 30 elasticsearch_setup: build: - context: elasticsearch/setup/ + context: es/setup/ dockerfile: curl_dockerfile env_file: .env environment: diff --git a/elasticsearch/elasticsearch_query.py b/elasticsearch/elasticsearch_query.py deleted file mode 100755 index 3c01917e..00000000 --- a/elasticsearch/elasticsearch_query.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import json -from elasticsearch import Elasticsearch - -# Global parameters -ELASTIC_USER = "elastic" -SIZE = 2 -SORT = {'identifier': 'asc'} - -def main(index, search_concept, platforms): - - # Get elasticsearch password - with open('../.env', 'r') as f: - for line in f: - if "ES_PASSWORD" in line: - elastic_password = line.split('=')[1][:-1] - break - - # Generate client - es_client = Elasticsearch("http://localhost:9200", - basic_auth=(ELASTIC_USER, elastic_password)) - - #Prepare query - platform_identifiers = [{'match': {'platform_identifier': p}} - for p in platforms] - query = {'bool': {'must': {'match': {'title': search_concept}}, - 'must': {'bool': {'should': platform_identifiers}}}} - - # Perform first search - result = es_client.search(index=index, query=query, size=SIZE, sort=SORT) - - # Print total number of results - print(f"TOTAL RESULTS: {result['hits']['total']['value']}") - - query_result = 1 - while result['hits']['hits']: - - # Print current results - print(f"QUERY RESULT: {query_result}") - print(json.dumps(dict(result)['hits']['hits'], indent=4)) - - # Actualise search_after and query_result for the next search - search_after = result['hits']['hits'][-1]['sort'] - query_result += 1 - - # Perform next search - result = es_client.search(index=index, query=query, size=SIZE, - search_after=search_after, sort=SORT) - -if __name__ == "__main__": - index = ["publication"] # List of assets - search_concept = "in" # Search concept - platforms = ['2', '4', '9'] # List of platforms - main(index, search_concept, platforms) diff --git a/elasticsearch/elasticsearch.yml b/es/elasticsearch.yml similarity index 100% rename from elasticsearch/elasticsearch.yml rename to es/elasticsearch.yml diff --git a/es/elasticsearch_query.py b/es/elasticsearch_query.py new file mode 100755 index 00000000..2514637c --- /dev/null +++ b/es/elasticsearch_query.py @@ -0,0 +1,63 @@ +# TODO: remove this file once the search router works + +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import json +from elasticsearch import Elasticsearch + +# Global parameters +ELASTIC_USER = "elastic" +SIZE = 2 +SORT = {"identifier": "asc"} + + +def main(index, search_concept, platforms): + + # Get elasticsearch password + with open("../.env", "r") as f: + for line in f: + if "ES_PASSWORD" in line: + elastic_password = line.split("=")[1][:-1] + break + + # Generate client + es_client = Elasticsearch("http://localhost:9200", basic_auth=(ELASTIC_USER, elastic_password)) + + # Prepare query + platform_identifiers = [{"match": {"platform_identifier": p}} for p in platforms] + query = { + "bool": { + "must": {"match": {"title": search_concept}}, + "must": {"bool": {"should": platform_identifiers}}, + } + } + + # Perform first search + result = es_client.search(index=index, query=query, size=SIZE, sort=SORT) + + # Print total number of results + print(f"TOTAL RESULTS: {result['hits']['total']['value']}") + + query_result = 1 + while result["hits"]["hits"]: + + # Print current results + print(f"QUERY RESULT: {query_result}") + print(json.dumps(dict(result)["hits"]["hits"], indent=4)) + + # Actualise search_after and query_result for the next search + search_after = result["hits"]["hits"][-1]["sort"] + query_result += 1 + + # Perform next search + result = es_client.search( + index=index, query=query, size=SIZE, search_after=search_after, sort=SORT + ) + + +if __name__ == "__main__": + index = ["publication"] # List of assets + search_concept = "in" # Search concept + platforms = ["2", "4", "9"] # List of platforms + main(index, search_concept, platforms) diff --git a/elasticsearch/setup/curl.sh b/es/setup/curl.sh similarity index 100% rename from elasticsearch/setup/curl.sh rename to es/setup/curl.sh diff --git a/elasticsearch/setup/curl_dockerfile b/es/setup/curl_dockerfile similarity index 100% rename from elasticsearch/setup/curl_dockerfile rename to es/setup/curl_dockerfile diff --git a/elasticsearch/setup/dataset.json b/es/setup/dataset.json similarity index 100% rename from elasticsearch/setup/dataset.json rename to es/setup/dataset.json diff --git a/elasticsearch/setup/publication.json b/es/setup/publication.json similarity index 100% rename from elasticsearch/setup/publication.json rename to es/setup/publication.json diff --git a/pyproject.toml b/pyproject.toml index ee217c39..e101b693 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,24 +13,25 @@ authors = [ {name = "Taniya Das", email = "t.das@tue.nl"} ] dependencies = [ - "urllib3== 2.0.4", "bibtexparser==1.4.0", "datasets==2.14.4", + "elasticsearch==8.9.0", "fastapi==0.101.1", - "uvicorn==0.23.2", - "requests==2.31.0", + "httpx==0.24.1", + "mysql-connector-python==8.1.0", "mysqlclient==2.2.0", "oic==1.6.0", - "python-keycloak==3.3.0", - "python-dotenv==1.0.0", "pydantic_schemaorg==1.0.6", "python-dateutil==2.8.2", - "sqlmodel==0.0.8", - "httpx==0.24.1", + "python-dotenv==1.0.0", + "python-keycloak==3.3.0", + "python-multipart==0.0.6", + "requests==2.31.0", "sickle==0.7.0", + "sqlmodel==0.0.8", + "urllib3== 1.26.16", + "uvicorn==0.23.2", "xmltodict==0.13.0", - "python-multipart==0.0.6", - "mysql-connector-python==8.1.0", ] readme = "README.md" diff --git a/src/routers/__init__.py b/src/routers/__init__.py index 647f251d..62ce96d5 100644 --- a/src/routers/__init__.py +++ b/src/routers/__init__.py @@ -1,14 +1,14 @@ -import typing # noqa:F401 - -from .dataset_router import DatasetRouter -from .experiment_router import ExperimentRouter -from .ml_model_router import MLModelRouter -from .organisation_router import OrganisationRouter -from .person_router import PersonRouter -from .platform_router import PlatformRouter -from .publication_router import PublicationRouter +from routers.resources.dataset_router import DatasetRouter +from routers.resources.experiment_router import ExperimentRouter +from routers.resources.ml_model_router import MLModelRouter +from routers.resources.organisation_router import OrganisationRouter +from routers.resources.person_router import PersonRouter +from routers.resources.platform_router import PlatformRouter +from routers.resources.publication_router import PublicationRouter from .resource_router import ResourceRouter # noqa:F401 -from .service_router import ServiceRouter +from routers.resources.service_router import ServiceRouter +from .router import AIoDRouter # noqa:F401 +from .search_router import SearchRouter from .upload_router_huggingface import UploadRouterHuggingface resource_routers = [ @@ -27,6 +27,6 @@ # ProjectRouter(), # PresentationRouter(), ServiceRouter(), -] # type: typing.List[ResourceRouter] +] # type: list[ResourceRouter] -other_routers = [UploadRouterHuggingface()] +other_routers = [UploadRouterHuggingface(), SearchRouter()] # type: list[AIoDRouter] diff --git a/src/routers/resource_router.py b/src/routers/resource_router.py index 58688ea5..fb5feaf5 100644 --- a/src/routers/resource_router.py +++ b/src/routers/resource_router.py @@ -23,6 +23,7 @@ resource_read, ) from database.model.serializers import deserialize_resource_relationships +from routers.router import AIoDRouter class Pagination(BaseModel): @@ -35,7 +36,7 @@ class Pagination(BaseModel): RESOURCE_READ = TypeVar("RESOURCE_READ", bound=SQLModel) -class ResourceRouter(abc.ABC): +class ResourceRouter(AIoDRouter, abc.ABC): """ Abstract class for FastAPI resource router. @@ -329,7 +330,7 @@ def register_resource( user: dict = Depends(get_current_user), ): f"""Register a {self.resource_name} with AIoD.""" - if "groups" in user and KEYCLOAK_CONFIG.get("role") not in user["groups"]: + if "groups" not in user or KEYCLOAK_CONFIG.get("role") not in user["groups"]: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to edit Aiod resources.", @@ -371,7 +372,7 @@ def put_resource( user: dict = Depends(get_current_user), ): f"""Update an existing {self.resource_name}.""" - if "groups" in user and KEYCLOAK_CONFIG.get("role") not in user["groups"]: + if "groups" not in user or KEYCLOAK_CONFIG.get("role") not in user["groups"]: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to edit Aiod resources.", @@ -410,7 +411,7 @@ def delete_resource_func(self, engine: Engine): """ def delete_resource(identifier: str, user: dict = Depends(get_current_user)): - if "groups" in user and KEYCLOAK_CONFIG.get("role") not in user["groups"]: + if "groups" not in user or KEYCLOAK_CONFIG.get("role") not in user["groups"]: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to edit Aiod resources.", diff --git a/src/routers/resources/__init__.py b/src/routers/resources/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/routers/dataset_router.py b/src/routers/resources/dataset_router.py similarity index 100% rename from src/routers/dataset_router.py rename to src/routers/resources/dataset_router.py diff --git a/src/routers/experiment_router.py b/src/routers/resources/experiment_router.py similarity index 100% rename from src/routers/experiment_router.py rename to src/routers/resources/experiment_router.py diff --git a/src/routers/ml_model_router.py b/src/routers/resources/ml_model_router.py similarity index 100% rename from src/routers/ml_model_router.py rename to src/routers/resources/ml_model_router.py diff --git a/src/routers/organisation_router.py b/src/routers/resources/organisation_router.py similarity index 100% rename from src/routers/organisation_router.py rename to src/routers/resources/organisation_router.py diff --git a/src/routers/person_router.py b/src/routers/resources/person_router.py similarity index 100% rename from src/routers/person_router.py rename to src/routers/resources/person_router.py diff --git a/src/routers/platform_router.py b/src/routers/resources/platform_router.py similarity index 100% rename from src/routers/platform_router.py rename to src/routers/resources/platform_router.py diff --git a/src/routers/publication_router.py b/src/routers/resources/publication_router.py similarity index 100% rename from src/routers/publication_router.py rename to src/routers/resources/publication_router.py diff --git a/src/routers/service_router.py b/src/routers/resources/service_router.py similarity index 100% rename from src/routers/service_router.py rename to src/routers/resources/service_router.py diff --git a/src/routers/router.py b/src/routers/router.py new file mode 100644 index 00000000..b2de2fc0 --- /dev/null +++ b/src/routers/router.py @@ -0,0 +1,10 @@ +import abc + +from fastapi import APIRouter +from sqlalchemy.engine import Engine + + +class AIoDRouter(abc.ABC): + @abc.abstractmethod + def create(self, engine: Engine, url_prefix: str) -> APIRouter: + pass diff --git a/src/routers/search_router.py b/src/routers/search_router.py new file mode 100644 index 00000000..e46a5869 --- /dev/null +++ b/src/routers/search_router.py @@ -0,0 +1,77 @@ +import os +from typing import TypeVar, Generic + +from elasticsearch import Elasticsearch +from fastapi import APIRouter, Depends, HTTPException +from pydantic import BaseModel +from sqlalchemy.engine import Engine +from starlette import status + +from authentication import get_current_user +from database.model.knowledge_asset.publication import Publication +from database.model.resource_read_and_create import resource_read +from routers.router import AIoDRouter + +SORT = {"identifier": "asc"} +LIMIT_MAX = 1000 + +RESOURCE = TypeVar("RESOURCE") + + +class SearchResult(BaseModel, Generic[RESOURCE]): + total_hits: int + resources: list[RESOURCE] + next_offset: str | None + + +class SearchRouter(AIoDRouter): + def __init__(self): + self.client: Elasticsearch | None = None + + def create(self, engine: Engine, url_prefix: str) -> APIRouter: + router = APIRouter() + user = os.getenv("ES_USER") + pw = os.getenv("ES_PASSWORD") + self.client = Elasticsearch("http://localhost:9200", basic_auth=(user, pw)) + + publication_class = resource_read(Publication) + + @router.get(url_prefix + "/search/publications/v1", tags=["search"]) + def search_publication( + title: str = "", + limit: int = 10, + offset: str | None = None, # TODO: this should not be a string + user: dict = Depends(get_current_user), + ) -> SearchResult[publication_class]: # type: ignore + if limit > LIMIT_MAX: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"The limit should be maximum {LIMIT_MAX}. If you want more results, " + f"use pagination.", + ) + if "groups" not in user or os.getenv("ES_ROLE") not in user["groups"]: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="You do not have permission to search Aiod resources.", + ) + if self.client is None: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Client not initialized", + ) + query = {"bool": {"must": {"match": {"title": title}}}} + result = self.client.search( + index="publication", query=query, size=limit, sort=SORT, search_after=offset + ) + # TODO: how to get Publications? + resources: list[publication_class] = [] # type: ignore + next_offset = ( + result["hits"]["hits"][-1]["sort"] if len(result["hits"]["hits"]) > 0 else None + ) + return SearchResult[publication_class]( # type: ignore + total_hits=result["hits"]["total"]["value"], + next_offset=next_offset, + resources=resources, + ) + + return router diff --git a/src/routers/upload_router_huggingface.py b/src/routers/upload_router_huggingface.py index 0c92445e..d0b01ebe 100644 --- a/src/routers/upload_router_huggingface.py +++ b/src/routers/upload_router_huggingface.py @@ -2,10 +2,11 @@ from fastapi import File, Query, UploadFile from sqlalchemy.engine import Engine +from routers.router import AIoDRouter from uploader.hugging_face_uploader import HuggingfaceUploader -class UploadRouterHuggingface: +class UploadRouterHuggingface(AIoDRouter): def create(self, engine: Engine, url_prefix: str) -> APIRouter: router = APIRouter() uploader = HuggingfaceUploader(engine) diff --git a/src/tests/.env b/src/tests/.env index 803f6c1e..aebf011b 100644 --- a/src/tests/.env +++ b/src/tests/.env @@ -1 +1,4 @@ KEYCLOAK_CLIENT_SECRET="mocked_secret" +ES_USER=elastic +ES_PASSWORD=changeme +ES_ROLE="edit_aiod_resources" \ No newline at end of file diff --git a/src/tests/routers/resources/__init__.py b/src/tests/routers/resources/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/tests/routers/test_router_dataset.py b/src/tests/routers/resources/test_router_dataset.py similarity index 100% rename from src/tests/routers/test_router_dataset.py rename to src/tests/routers/resources/test_router_dataset.py diff --git a/src/tests/routers/test_router_dataset_generic_fields.py b/src/tests/routers/resources/test_router_dataset_generic_fields.py similarity index 100% rename from src/tests/routers/test_router_dataset_generic_fields.py rename to src/tests/routers/resources/test_router_dataset_generic_fields.py diff --git a/src/tests/routers/test_router_experiment.py b/src/tests/routers/resources/test_router_experiment.py similarity index 100% rename from src/tests/routers/test_router_experiment.py rename to src/tests/routers/resources/test_router_experiment.py diff --git a/src/tests/routers/test_router_ml_model.py b/src/tests/routers/resources/test_router_ml_model.py similarity index 100% rename from src/tests/routers/test_router_ml_model.py rename to src/tests/routers/resources/test_router_ml_model.py diff --git a/src/tests/routers/test_router_organisation.py b/src/tests/routers/resources/test_router_organisation.py similarity index 100% rename from src/tests/routers/test_router_organisation.py rename to src/tests/routers/resources/test_router_organisation.py diff --git a/src/tests/routers/test_router_person.py b/src/tests/routers/resources/test_router_person.py similarity index 100% rename from src/tests/routers/test_router_person.py rename to src/tests/routers/resources/test_router_person.py diff --git a/src/tests/routers/test_router_platform.py b/src/tests/routers/resources/test_router_platform.py similarity index 100% rename from src/tests/routers/test_router_platform.py rename to src/tests/routers/resources/test_router_platform.py diff --git a/src/tests/routers/test_router_publication.py b/src/tests/routers/resources/test_router_publication.py similarity index 100% rename from src/tests/routers/test_router_publication.py rename to src/tests/routers/resources/test_router_publication.py diff --git a/src/tests/routers/test_router_service.py b/src/tests/routers/resources/test_router_service.py similarity index 100% rename from src/tests/routers/test_router_service.py rename to src/tests/routers/resources/test_router_service.py diff --git a/src/tests/routers/test_search_router.py b/src/tests/routers/test_search_router.py new file mode 100644 index 00000000..77d15ea2 --- /dev/null +++ b/src/tests/routers/test_search_router.py @@ -0,0 +1,23 @@ +from unittest.mock import Mock + +import pytest +from sqlalchemy.engine import Engine +from starlette.testclient import TestClient + +from authentication import keycloak_openid + + +@pytest.mark.skip(reason="This test isn't finished yet, we need to mock ES") +def test_happy_path( + client: TestClient, + engine: Engine, + mocked_privileged_token: Mock, + body_resource: dict, +): + keycloak_openid.userinfo = mocked_privileged_token + + response = client.get( + "/search/publications/v1", params={"title": "in"}, headers={"Authorization": "Fake token"} + ) + # TODO(jos): mock the ES results. But first we need some results + assert response.status_code == 200, response.json() From 11233350d2c5c87db1ab22c988572a5b511eeea3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 23 Aug 2023 18:02:30 +0200 Subject: [PATCH 07/79] Just to test the new database --- .DS_Store | Bin 0 -> 6148 bytes docker-compose.yaml | 2 +- elasticsearch/.DS_Store | Bin 0 -> 6148 bytes es/elasticsearch_query.py | 2 -- 4 files changed, 1 insertion(+), 3 deletions(-) create mode 100644 .DS_Store create mode 100644 elasticsearch/.DS_Store diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..a5ceb7214e5992c025777938c07d4a9259eb1d5d GIT binary patch literal 6148 zcmeHK%}T>S5T3QwrWTTtm@P?a52cW^zK~Di^Z3o~ z7SyVE644!){bqM(Hp~aJn;|068um7b>O|x~8Ow_pJ`mn#osl`SQU(fjMotlRDWg6G zGv0Q=GGH0_Z47YjE}>pyRBT4C>o-Lu--M%V5XLfu*Hbu%qbzSWpT%6MJikzJDo)M0 z^l#+I&x3rJwSx4VXD3p|;nWVo)2Kh{)Rzxsk_S=JPgOwF>%-;ZEJ}KE)RM!bm#J7^ zZ#Y$_+Nn3j*8 z7a89BX%fri8eV)t1uS$a#XU9v8`&Sp|6Nw^a-K3}@qaO3D#oe(ue5EpmI2GaHyGgi z!G$t<1`Cbq>cF5{03iQHD1ptr1jo1rJ%fct>_CJL1?o^?S`4AXvF;i_&tRcZhm$bP zhcI6jrb7|x>p0(4?j$^ocC-vw2HrAI($hM>|3{zq|8Ilr%ramZ_)`psa@%jWkdk>@ xXOiQ$)`ea{S=g`8s6sH9=U5s16gQwsu;;P?^b8gnu>!F_0*VGZSO$KSffxN{j^F?Q literal 0 HcmV?d00001 diff --git a/docker-compose.yaml b/docker-compose.yaml index 83bb6fe3..03b9c96f 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -14,7 +14,7 @@ services: volumes: - ./src:/app command: > - --rebuild-db always + --rebuild-db only-if-empty --fill-with-examples datasets experiments ml_models organisations persons publications services --reload --limit 10 diff --git a/elasticsearch/.DS_Store b/elasticsearch/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..777cd3a8fdb655761a52a739948b58b760ab5f61 GIT binary patch literal 6148 zcmeHK%}T>S5Z>*NO({YS3OxqA7HqYO;w8lT0!H+pQWFw17_+5G%%K!=))(?gd>&_Z zH_&46C}L+|_nV!c-OLBsAI2DW@?gZ6$rv-BA#zk|1kJUsh7LyLa*ml_q%%Jje$6z0 z(}dsNW+@9<#&Wj${U81;j?!#6{N#;htG(4RI!4d9cb`PzW?q)3jyJ!>(UlOfU+JEI z9fifz+&LFX=0!<3mkCi2Lde}slmw!1M4kkx%ypH+=o;Or*Rjia@jR@_x8^&r_b?ABHk2{92l3ft+9f4P*y5>b>~Sel1H#s8C4`9F+dCu1H`~) zGN8``qqmvmQ`N)(G4KNexIb9X5N(a6Lb-K7hu3HHw-8Z4$F~HcrP0<{Dg+M*SE+z1 zmFp)4SLtB4beyfRRH)J!mn*|Odgbcz!sY5Z6&FH`%- zUr(VCF+dFbGX{8T;!YeWN}sLY%EPnPg7yFn1>;IoKtNx+1i%3Ik-l^)zlA!)*&0iQ UI1AckIv`yHBq7uh1HZt)7h+^f7ytkO literal 0 HcmV?d00001 diff --git a/es/elasticsearch_query.py b/es/elasticsearch_query.py index 2514637c..6b93c1df 100755 --- a/es/elasticsearch_query.py +++ b/es/elasticsearch_query.py @@ -1,5 +1,3 @@ -# TODO: remove this file once the search router works - #!/usr/bin/env python3 # -*- coding: utf-8 -*- From 80b9c3c4c06016a78dcea67f4c4f447a0624a0b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 23 Aug 2023 18:30:23 +0200 Subject: [PATCH 08/79] To rebase develop --- src/routers/.DS_Store | Bin 0 -> 6148 bytes src/routers/__init__.py | 32 ----------------------- src/routers/upload_router_huggingface.py | 29 -------------------- 3 files changed, 61 deletions(-) create mode 100644 src/routers/.DS_Store delete mode 100644 src/routers/__init__.py delete mode 100644 src/routers/upload_router_huggingface.py diff --git a/src/routers/.DS_Store b/src/routers/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 APIRouter: - router = APIRouter() - uploader = HuggingfaceUploader(engine) - - @router.post(url_prefix + "/upload/datasets/{identifier}/huggingface", tags=["upload"]) - def huggingFaceUpload( - identifier: int, - file: UploadFile = File( - ..., title="File", description="This file will be uploaded to HuggingFace" - ), - token: str = Query( - ..., title="Huggingface Token", description="The access token of HuggingFace" - ), - username: str = Query( - ..., title="Huggingface username", description="The username of HuggingFace" - ), - ) -> int: - return uploader.handle_upload(identifier, file, token, username) - - return router From 30d6edaf4beeb42fc05ac033179cfe072ca94139 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 23 Aug 2023 19:20:15 +0200 Subject: [PATCH 09/79] Working but not up-to-date with develop branch --- src/.DS_Store | Bin 0 -> 6148 bytes src/routers/__init__.py | 32 +++++++++++++++++++++++ src/routers/upload_router_huggingface.py | 29 ++++++++++++++++++++ src/tests/.DS_Store | Bin 0 -> 8196 bytes src/tests/connectors/.DS_Store | Bin 0 -> 6148 bytes src/tests/resources/.DS_Store | Bin 0 -> 6148 bytes src/tests/routers/.DS_Store | Bin 0 -> 6148 bytes 7 files changed, 61 insertions(+) create mode 100644 src/.DS_Store create mode 100644 src/routers/__init__.py create mode 100644 src/routers/upload_router_huggingface.py create mode 100644 src/tests/.DS_Store create mode 100644 src/tests/connectors/.DS_Store create mode 100644 src/tests/resources/.DS_Store create mode 100644 src/tests/routers/.DS_Store diff --git a/src/.DS_Store b/src/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..04036f0e09a83eb543455a1785d0b40c92e62441 GIT binary patch literal 6148 zcmeHK%}T>S5T5A|M!fXsabKYpdQg;D;z<#qFCeu=4-F~S-tz*2SHWlT=6m_g&R7!? z!IMatfthbJKbg(9A)6&4o^0krq5%;lP{GbVOp8cdv>_eyNRZ<_7W7E#<^6DaS2VKS z@gEtGwcEq06||uzs;pnWiqqxSQ8ObivTU|2W{4!$XQT1S<;Qtj*8CS)s|R^otXua+ zn)c&N*0A<2eMOJn>e1>vPT!xej&&ZD`n>p_LwiQ8fB!}LyEp^RfHSb`3}DX|Ne&d< zIs?vtGq7bqz7GK^m>QOf@zVhlj{v|P+)*%=vxL+H!_=@;L APIRouter: + router = APIRouter() + uploader = HuggingfaceUploader(engine) + + @router.post(url_prefix + "/upload/datasets/{identifier}/huggingface", tags=["upload"]) + def huggingFaceUpload( + identifier: int, + file: UploadFile = File( + ..., title="File", description="This file will be uploaded to HuggingFace" + ), + token: str = Query( + ..., title="Huggingface Token", description="The access token of HuggingFace" + ), + username: str = Query( + ..., title="Huggingface username", description="The username of HuggingFace" + ), + ) -> int: + return uploader.handle_upload(identifier, file, token, username) + + return router diff --git a/src/tests/.DS_Store b/src/tests/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5705225dbb4696f81dfb4e77deae87f051d69e86 GIT binary patch literal 8196 zcmeHMO-dX=82w5mwuVTS3N8wCBk00yKEWyoEb1}el8HK|VDDWiSr7T9t@91sV@0dYVa5C{GR2XJO{NlMOrJ1VU>AP)SO z4)F6qqcWxrbBpHDfySf&z!6+F1%2cJCPq6<9p)BIDCV@;gL0wDr5MVFV_Y*Gm^#cY z+Hg`foRnKxxeP^_)e)x+om6U3T5&)eC_2Eo`#7@pjK1OHDdV-X zN~PAyY8X#`p3Z+-UAp*K&TTxI+xguaAKu6_7ye2nn=+6M7LUFNAc2EUrnlhFZs zb?yhs)a@^*kFb1gSML@c_BhJys~gM=^bOsyZ=+Df+M+vqb6I5gx{jxg9z1(AkH4Gc ztU!-CVLf9;Nf#|^+N0JZzi9SPoaO8KJ=4<_na|fYm>KA!4c3=HR|l#x++_{^HUzs# z6W1}0ukGt*Z!ZgT`N{_ILS1~HapiXg?J#wiTeJtbod4{G6mj7792mAsPjddhQvUn@ z>j{*V#Q|~P*&R?Lm1<=f%h;p2xE|-)8g&bm3-fY|CIpR1$AKmt2loCjL|=o-F{#7c SBCepwi-55~8gbx79k>Tmb^S5T32Y28+<67a_0EtEX7vNpF1t?Z1$?y%fY#UcjSx5kUkmK8IHyz_SnH zH?x~IF$sDSm6@{hC9^ZxFCS#5OGIX{n)Hc!L=<2!ws+uajOTe&tmPwIY~&eBN@$wQ z`@^xVWNSbbPz8RQ0{rcEXiArKMky7(-*oAyw%)55sg2`NVn>(>Ztkv64vt?A>#`=_ z$XYIVx5!sxI@F~Ft!R$lIbFcdz-^rGD|u${+MHSB_xP~p*YMobZg_s3UBh>qER>sq zYf6dR{c@JO`(Rm`FLJwoT66o9vtiVfQ_JH@SXi2nE%WHy3Xj5kk;mI}fk!E4!=ovu zh6k2X`vVG~XR`%ehw4=URX`P3E5Q3hh`|_n%pA(r0VhWQU>jjETuToJh8zG!9y5oC zz>F&ex>Dnh7{-+&9{Rk(y|wak+-pOOQw%oF%N$AxoNvc+!L4`?Lkx2vUjQSI SnL|Wi`bR)yP^Sv~r~)4=dy&lm literal 0 HcmV?d00001 diff --git a/src/tests/resources/.DS_Store b/src/tests/resources/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..fe6df7af3f15d3abb7d6f367e9d971e2289cc763 GIT binary patch literal 6148 zcmeHKF;2rU6n!q0XaR{13@{*NV}YUUNTCD?!PpCwCKVkLQc=2R-~!x+3vdJu!pO$^ zwkZi|5EDY^|LXfWKi~Gta~j(K%wUo9fgXSkU9i<o0e5+zi$ugtQD-5xBR?jGNdz!pH9$#5*U;4@NPgc!Y6+RdVpDq@dA*FWL zNXUq9eU%e-R%exY?Qb7_XttuiGPmzDtNi5utfmU60;)ix0$j7jI-3r)R0UK4RbZ)r zd>;b3VCb=MXg?h+>=A(2X4o3*cJDz9BtQ&377iJq8P}BPni_w^Fs?b{fzJy)77kr= z7&|ii@gp05LNR{lV*;BF6FSsV6;K7L3aq)yo~-}p&FBAWBfU}uRDpk`fN4j=XuwDE zYisS{WUUS8Bf6O66%Ln8Sg}hnb7d*srCVb>kP0#MSU6;arW*k(gBGg5uPX2k6}WT< literal 0 HcmV?d00001 diff --git a/src/tests/routers/.DS_Store b/src/tests/routers/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..f594c603bf858a27b366a39b8933ca6b03ec0dd1 GIT binary patch literal 6148 zcmeHKu};G<5IvVlq+;m?qko|*lUs$UZ2bY$v?@pqsfxOLMuLeC;3xP5{)v%yzAH3J zKnxY4=uSF6=X@8RUo2ltL~gX14T$s8}urlcH*P zo8d1qz~Ane7Br_@=+5^mSCa12OVgvQ@_f3irWncY-ySZ{$FIXSuh|d0R@b~+bT;fF zNDq0;>5eMepjU8T_S80~G0)uMwVPLY`nb)j)$=pA?>%ez(KF{~Vhk7q#=xO7Ag3*5 zCx^Ze%ikC<2KLSXpAR0&7!;#mIy%s#D*&(ya}xCVF2ONgF(^hsj6j@(0wvVx6~jq5 z>|XtXVic5ca%pC~M`w0=Lvd+#*nM&*7X+;}28@9^14r`M=lXx~_5HsdWKYI`G4QV# zaNT^8kFX@It&PQTt@Y3=C=2^V!H*DhQYnTnm*O*M64*U=fI%?|!UC}$fkcBf#=x&K F@DA9{QLO*~ literal 0 HcmV?d00001 From 28e58fc68dc49f30a291a7e7ee9ec8ad71c69e4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 23 Aug 2023 20:30:35 +0200 Subject: [PATCH 10/79] Service working. Need to get up-to-date with develop --- logstash/pipeline/conf/init_temp_table.conf | 68 +++++++++++++++- logstash/pipeline/conf/sync_temp_table.conf | 77 ++++++++++++++++++- logstash/pipeline/sql/init_temp_dataset.sql | 6 +- .../pipeline/sql/init_temp_experiment.sql | 4 + logstash/pipeline/sql/init_temp_ml_model.sql | 4 + .../pipeline/sql/init_temp_publication.sql | 6 +- logstash/pipeline/sql/init_temp_service.sql | 4 + logstash/pipeline/sql/sync_temp_dataset.sql | 8 +- .../pipeline/sql/sync_temp_experiment.sql | 5 ++ logstash/pipeline/sql/sync_temp_ml_model.sql | 5 ++ .../pipeline/sql/sync_temp_publication.sql | 8 +- logstash/pipeline/sql/sync_temp_service.sql | 5 ++ 12 files changed, 186 insertions(+), 14 deletions(-) create mode 100644 logstash/pipeline/sql/init_temp_experiment.sql create mode 100644 logstash/pipeline/sql/init_temp_ml_model.sql create mode 100644 logstash/pipeline/sql/init_temp_service.sql create mode 100644 logstash/pipeline/sql/sync_temp_experiment.sql create mode 100644 logstash/pipeline/sql/sync_temp_ml_model.sql create mode 100644 logstash/pipeline/sql/sync_temp_service.sql diff --git a/logstash/pipeline/conf/init_temp_table.conf b/logstash/pipeline/conf/init_temp_table.conf index 7dbb4bdb..a9244e8a 100644 --- a/logstash/pipeline/conf/init_temp_table.conf +++ b/logstash/pipeline/conf/init_temp_table.conf @@ -24,6 +24,42 @@ input { statement_filepath => "/usr/share/logstash/sql/init_temp_dataset.sql" type => "dataset" } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_temp_experiment.sql" + type => "experiment" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_temp_ml_model.sql" + type => "ml_model" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_temp_service.sql" + type => "service" + } } # https://www.elastic.co/guide/en/logstash/current/filter-plugins.html+ #filter { @@ -50,7 +86,37 @@ output { password => "changeme" ecs_compatibility => disabled index => "dataset" - document_id => "document_%{identifier}" + document_id => "dataset_%{identifier}" + } + } + if [type] == "experiment" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "experiment" + document_id => "experiment_%{identifier}" + } + } + if [type] == "ml_model" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "ml_model" + document_id => "ml_model_%{identifier}" + } + } + if [type] == "service" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "service" + document_id => "service_%{identifier}" } } } diff --git a/logstash/pipeline/conf/sync_temp_table.conf b/logstash/pipeline/conf/sync_temp_table.conf index 13b90737..142e1784 100644 --- a/logstash/pipeline/conf/sync_temp_table.conf +++ b/logstash/pipeline/conf/sync_temp_table.conf @@ -8,7 +8,7 @@ input { jdbc_password => "ok" #sql_log_level => "debug" use_column_value => true - tracking_column => "date_created" + tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" statement_filepath => "/usr/share/logstash/sql/sync_temp_publication.sql" @@ -28,7 +28,48 @@ input { statement_filepath => "/usr/share/logstash/sql/sync_temp_dataset.sql" type => "dataset" } - + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_temp_experiment.sql" + type => "experiment" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_temp_ml_model.sql" + type => "ml_model" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_temp_service.sql" + type => "service" + } } # https://www.elastic.co/guide/en/logstash/current/filter-plugins.html+ #filter { @@ -55,7 +96,37 @@ output { password => "changeme" ecs_compatibility => disabled index => "dataset" - document_id => "document_%{identifier}" + document_id => "dataset_%{identifier}" + } + } + if [type] == "experiment" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "experiment" + document_id => "experiment_%{identifier}" + } + } + if [type] == "ml_model" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "ml_model" + document_id => "ml_model_%{identifier}" + } + } + if [type] == "service" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "service" + document_id => "service_%{identifier}" } } } diff --git a/logstash/pipeline/sql/init_temp_dataset.sql b/logstash/pipeline/sql/init_temp_dataset.sql index e78b3737..4323af1f 100644 --- a/logstash/pipeline/sql/init_temp_dataset.sql +++ b/logstash/pipeline/sql/init_temp_dataset.sql @@ -1,2 +1,4 @@ -SELECT * FROM aiod.dataset -ORDER BY identifier +SELECT dataset.identifier, dataset.name, dataset.description, dataset.same_as, dataset.resource_id, dataset.date_published, dataset.version, dataset.asset_id, dataset.license_identifier, dataset.issn, dataset.measurement_technique, dataset.temporal_coverage, dataset.size_identifier, dataset.spatial_coverage_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +FROM aiod.dataset INNER JOIN aiod.aiod_entry +ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/init_temp_experiment.sql b/logstash/pipeline/sql/init_temp_experiment.sql new file mode 100644 index 00000000..a2e417a4 --- /dev/null +++ b/logstash/pipeline/sql/init_temp_experiment.sql @@ -0,0 +1,4 @@ +SELECT experiment.identifier, experiment.name, experiment.description, experiment.same_as, experiment.resource_id, experiment.date_published, experiment.version, experiment.asset_id, experiment.license_identifier, experiment.pid, experiment.experimental_workflow, experiment.execution_settings, experiment.reproducibility_explanation, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +FROM aiod.experiment INNER JOIN aiod.aiod_entry +ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/init_temp_ml_model.sql b/logstash/pipeline/sql/init_temp_ml_model.sql new file mode 100644 index 00000000..6a13ed5f --- /dev/null +++ b/logstash/pipeline/sql/init_temp_ml_model.sql @@ -0,0 +1,4 @@ +SELECT ml_model.identifier, ml_model.name, ml_model.description, ml_model.same_as, ml_model.resource_id, ml_model.date_published, ml_model.version, ml_model.asset_id, ml_model.license_identifier, ml_model.pid, ml_model.type_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +FROM aiod.ml_model INNER JOIN aiod.aiod_entry +ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/init_temp_publication.sql b/logstash/pipeline/sql/init_temp_publication.sql index f6262f34..ab0786b5 100644 --- a/logstash/pipeline/sql/init_temp_publication.sql +++ b/logstash/pipeline/sql/init_temp_publication.sql @@ -1,2 +1,4 @@ -SELECT * FROM aiod.publication -ORDER BY identifier +SELECT publication.identifier, publication.name, publication.description, publication.same_as, publication.resource_id, publication.date_published, publication.version, publication.asset_id, publication.license_identifier, publication.knowledge_asset_id, publication.permanent_identifier, publication.isbn, publication.issn, publication.type_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +FROM aiod.publication INNER JOIN aiod.aiod_entry +ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/init_temp_service.sql b/logstash/pipeline/sql/init_temp_service.sql new file mode 100644 index 00000000..a5bd9d34 --- /dev/null +++ b/logstash/pipeline/sql/init_temp_service.sql @@ -0,0 +1,4 @@ +SELECT service.identifier, service.name, service.description, service.same_as, service.resource_id, service.slogan, service.terms_of_service, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +FROM aiod.service INNER JOIN aiod.aiod_entry +ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +ORDER BY aiod.service.identifier diff --git a/logstash/pipeline/sql/sync_temp_dataset.sql b/logstash/pipeline/sql/sync_temp_dataset.sql index 9d37d1db..59eef926 100644 --- a/logstash/pipeline/sql/sync_temp_dataset.sql +++ b/logstash/pipeline/sql/sync_temp_dataset.sql @@ -1,3 +1,5 @@ -SELECT * FROM aiod.dataset -WHERE date_modified > :sql_last_value -ORDER BY identifier +SELECT dataset.identifier, dataset.name, dataset.description, dataset.same_as, dataset.resource_id, dataset.date_published, dataset.version, dataset.asset_id, dataset.license_identifier, dataset.issn, dataset.measurement_technique, dataset.temporal_coverage, dataset.size_identifier, dataset.spatial_coverage_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +FROM aiod.dataset INNER JOIN aiod.aiod_entry +ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +WHERE aiod.aiod_entry.date_modified > :sql_last_value +ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/sync_temp_experiment.sql b/logstash/pipeline/sql/sync_temp_experiment.sql new file mode 100644 index 00000000..da838257 --- /dev/null +++ b/logstash/pipeline/sql/sync_temp_experiment.sql @@ -0,0 +1,5 @@ +SELECT experiment.identifier, experiment.name, experiment.description, experiment.same_as, experiment.resource_id, experiment.date_published, experiment.version, experiment.asset_id, experiment.license_identifier, experiment.pid, experiment.experimental_workflow, experiment.execution_settings, experiment.reproducibility_explanation, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +FROM aiod.experiment INNER JOIN aiod.aiod_entry +ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +WHERE aiod.aiod_entry.date_modified > :sql_last_value +ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/sync_temp_ml_model.sql b/logstash/pipeline/sql/sync_temp_ml_model.sql new file mode 100644 index 00000000..90e3074e --- /dev/null +++ b/logstash/pipeline/sql/sync_temp_ml_model.sql @@ -0,0 +1,5 @@ +SELECT ml_model.identifier, ml_model.name, ml_model.description, ml_model.same_as, ml_model.resource_id, ml_model.date_published, ml_model.version, ml_model.asset_id, ml_model.license_identifier, ml_model.pid, ml_model.type_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +FROM aiod.ml_model INNER JOIN aiod.aiod_entry +ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +WHERE aiod.aiod_entry.date_modified > :sql_last_value +ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/sync_temp_publication.sql b/logstash/pipeline/sql/sync_temp_publication.sql index 63d26fc3..c034e5e0 100644 --- a/logstash/pipeline/sql/sync_temp_publication.sql +++ b/logstash/pipeline/sql/sync_temp_publication.sql @@ -1,3 +1,5 @@ -SELECT * FROM aiod.publication -WHERE date_created > :sql_last_value -ORDER BY identifier +SELECT publication.identifier, publication.name, publication.description, publication.same_as, publication.resource_id, publication.date_published, publication.version, publication.asset_id, publication.license_identifier, publication.knowledge_asset_id, publication.permanent_identifier, publication.isbn, publication.issn, publication.type_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +FROM aiod.publication INNER JOIN aiod.aiod_entry +ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +WHERE aiod.aiod_entry.date_modified > :sql_last_value +ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/sync_temp_service.sql b/logstash/pipeline/sql/sync_temp_service.sql new file mode 100644 index 00000000..2811154c --- /dev/null +++ b/logstash/pipeline/sql/sync_temp_service.sql @@ -0,0 +1,5 @@ +SELECT service.identifier, service.name, service.description, service.same_as, service.resource_id, service.slogan, service.terms_of_service, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +FROM aiod.service INNER JOIN aiod.aiod_entry +ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +WHERE aiod.aiod_entry.date_modified > :sql_last_value +ORDER BY aiod.service.identifier From f2aa463c8cfb099c3d4ae56a63de6721efc52006 Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Thu, 24 Aug 2023 10:40:00 +0200 Subject: [PATCH 11/79] Some bugfixes for the connectors --- connectors/huggingface/datasets.sh | 2 ++ connectors/openml/entry.sh | 3 +++ connectors/zenodo/datasets.sh | 1 + connectors/zenodo/entry.sh | 3 +++ scripts/clean.sh | 11 +++++++++++ src/connectors/synchronization.py | 2 +- src/database/setup.py | 13 ++++++++----- 7 files changed, 29 insertions(+), 6 deletions(-) create mode 100755 scripts/clean.sh diff --git a/connectors/huggingface/datasets.sh b/connectors/huggingface/datasets.sh index 99ba48c7..8b767360 100755 --- a/connectors/huggingface/datasets.sh +++ b/connectors/huggingface/datasets.sh @@ -2,6 +2,8 @@ WORK_DIR=/opt/connectors/data/huggingface/dataset +mkdir -p $WORK_DIR + python3 connectors/synchronization.py \ -c connectors.huggingface.huggingface_dataset_connector.HuggingFaceDatasetConnector \ -w ${WORK_DIR} \ diff --git a/connectors/openml/entry.sh b/connectors/openml/entry.sh index 5974cadc..98e58ba0 100755 --- a/connectors/openml/entry.sh +++ b/connectors/openml/entry.sh @@ -3,5 +3,8 @@ # If this directory does not exist, the cron job cannot log (and cannot run) mkdir -p /opt/connectors/data/openml/dataset +# Run once on startup +bash /opt/connectors/script/datasets.sh >> /opt/connectors/data/openml/dataset/cron.log 2>&1 + # Run cron on the foreground with log level WARN /usr/sbin/cron -f -l 4 diff --git a/connectors/zenodo/datasets.sh b/connectors/zenodo/datasets.sh index 810bba01..be214cf9 100755 --- a/connectors/zenodo/datasets.sh +++ b/connectors/zenodo/datasets.sh @@ -11,6 +11,7 @@ another_instance() exec 9< "$0" flock -n -x 9 || another_instance + echo $(date -u) "Starting synchronization..." PYTHONPATH=/app /usr/local/bin/python3 /app/connectors/synchronization.py \ -c $CONNECTOR \ diff --git a/connectors/zenodo/entry.sh b/connectors/zenodo/entry.sh index c6e5fc08..5f3461fe 100755 --- a/connectors/zenodo/entry.sh +++ b/connectors/zenodo/entry.sh @@ -3,5 +3,8 @@ # If this directory does not exist, the cron job cannot log (and cannot run) mkdir -p /opt/connectors/data/zenodo/dataset +# run once on startup +bash /opt/connectors/script/datasets.sh >> /opt/connectors/data/zenodo/dataset/cron.log 2>&1 + # Run cron on the foreground with log level WARN /usr/sbin/cron -f -l 4 diff --git a/scripts/clean.sh b/scripts/clean.sh new file mode 100755 index 00000000..6acbfaa5 --- /dev/null +++ b/scripts/clean.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +ROOT=$(dirname $SCRIPT_DIR) +DATA=${ROOT}/data + + +sudo rm -rf ${DATA}/* +echo "Deleted everything from $DATA" +mkdir -p ${DATA}/mysql ${DATA}/connectors ${DATA}/elasticsearch + diff --git a/src/connectors/synchronization.py b/src/connectors/synchronization.py index f8e1f2dd..facba3b6 100644 --- a/src/connectors/synchronization.py +++ b/src/connectors/synchronization.py @@ -162,7 +162,7 @@ def main(): if router.resource_class == connector.resource_class ] - engine = sqlmodel_engine(rebuild_db="never") + engine = sqlmodel_engine(rebuild_db="never", create_if_not_exists=False) with Session(engine) as session: for i, item in enumerate(items): diff --git a/src/database/setup.py b/src/database/setup.py index b75b201a..94a6d1a9 100644 --- a/src/database/setup.py +++ b/src/database/setup.py @@ -38,9 +38,10 @@ def connect_to_database( drop_or_create_database(url, delete_first) engine = create_engine(url, echo=False, pool_recycle=3600) - with engine.connect() as connection: - AIoDConcept.metadata.create_all(connection, checkfirst=True) - connection.commit() + if create_if_not_exists: + with engine.connect() as connection: + AIoDConcept.metadata.create_all(connection, checkfirst=True) + connection.commit() return engine @@ -112,7 +113,7 @@ def _create_or_fetch_related_objects(session: Session, item: ResourceWithRelatio item.resource.__setattr__(field_name, identifiers) # E.g. Dataset.keywords = [1, 4] -def sqlmodel_engine(rebuild_db: str) -> Engine: +def sqlmodel_engine(rebuild_db: str, create_if_not_exists=True) -> Engine: """ Return a SQLModel engine, backed by the MySql connection as configured in the configuration file. @@ -126,4 +127,6 @@ def sqlmodel_engine(rebuild_db: str) -> Engine: db_url = f"mysql://{username}:{password}@{host}:{port}/{database}" delete_before_create = rebuild_db == "always" - return connect_to_database(db_url, delete_first=delete_before_create) + return connect_to_database( + db_url, delete_first=delete_before_create, create_if_not_exists=create_if_not_exists + ) From f2893f4c2e78073a71957dd2b692263e83a40c6b Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Thu, 24 Aug 2023 10:44:24 +0200 Subject: [PATCH 12/79] Deleting docker images in clean script --- scripts/clean.sh | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/scripts/clean.sh b/scripts/clean.sh index 6acbfaa5..03b8d524 100755 --- a/scripts/clean.sh +++ b/scripts/clean.sh @@ -1,11 +1,17 @@ #!/bin/bash +docker image rm ai4eu_server +docker image rm ai4eu_openml_connector +docker image rm ai4eu_zenodo_connector +echo "Deleted docker images, so that they will be rebuild on docker up." + + SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) ROOT=$(dirname $SCRIPT_DIR) DATA=${ROOT}/data - -sudo rm -rf ${DATA}/* -echo "Deleted everything from $DATA" +sudo rm -rf ${DATA}/mysql +sudo rm -rf ${DATA}/connectors +sudo rm -rf ${DATA}/elasticsearch mkdir -p ${DATA}/mysql ${DATA}/connectors ${DATA}/elasticsearch - +echo "Deleted everything from $DATA" From 3d17c8c318b92fb9e33362d0a00b12e7865b420e Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Thu, 24 Aug 2023 11:16:18 +0200 Subject: [PATCH 13/79] Fixed issues with authentication --- .env | 2 -- src/authentication.py | 12 ++++++++++++ src/routers/resource_router.py | 8 ++++---- src/routers/search_router.py | 9 +++++---- 4 files changed, 21 insertions(+), 10 deletions(-) diff --git a/.env b/.env index e6cc40b2..352f3968 100644 --- a/.env +++ b/.env @@ -1,5 +1,3 @@ -PYTHONPATH=/app - #MYSQL MYSQL_ROOT_PASSWORD=ok diff --git a/src/authentication.py b/src/authentication.py index a6cd6188..ac8cca1b 100644 --- a/src/authentication.py +++ b/src/authentication.py @@ -75,3 +75,15 @@ async def get_current_user(token=Security(oidc)) -> dict: detail=detail, headers={"WWW-Authenticate": "Bearer"}, ) + + +def has_role(user: dict, role: str | None) -> bool: + if role is None: + raise ValueError("Role should be set.") + if "groups" in user: + roles = user["groups"] + elif "realm_access" in user and "roles" in user["realm_access"]: + roles = user["realm_access"]["roles"] + else: + return False + return role in roles diff --git a/src/routers/resource_router.py b/src/routers/resource_router.py index e1f6c4d3..7b62ddb5 100644 --- a/src/routers/resource_router.py +++ b/src/routers/resource_router.py @@ -14,7 +14,7 @@ from sqlmodel import SQLModel, Session, select from starlette.responses import JSONResponse -from authentication import get_current_user +from authentication import get_current_user, has_role from config import KEYCLOAK_CONFIG from converters.schema_converters.schema_converter import SchemaConverter from database.model.ai_resource.resource import AIResource @@ -330,7 +330,7 @@ def register_resource( user: dict = Depends(get_current_user), ): f"""Register a {self.resource_name} with AIoD.""" - if "groups" not in user or KEYCLOAK_CONFIG.get("role") not in user["groups"]: + if not has_role(user, KEYCLOAK_CONFIG.get("role")): raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to edit Aiod resources.", @@ -372,7 +372,7 @@ def put_resource( user: dict = Depends(get_current_user), ): f"""Update an existing {self.resource_name}.""" - if "groups" not in user or KEYCLOAK_CONFIG.get("role") not in user["groups"]: + if not has_role(user, KEYCLOAK_CONFIG.get("role")): raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to edit Aiod resources.", @@ -411,7 +411,7 @@ def delete_resource_func(self, engine: Engine): """ def delete_resource(identifier: str, user: dict = Depends(get_current_user)): - if "groups" not in user or KEYCLOAK_CONFIG.get("role") not in user["groups"]: + if not has_role(user, KEYCLOAK_CONFIG.get("role")): raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to edit Aiod resources.", diff --git a/src/routers/search_router.py b/src/routers/search_router.py index e46a5869..3468b447 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -7,7 +7,7 @@ from sqlalchemy.engine import Engine from starlette import status -from authentication import get_current_user +from authentication import get_current_user, has_role from database.model.knowledge_asset.publication import Publication from database.model.resource_read_and_create import resource_read from routers.router import AIoDRouter @@ -30,9 +30,9 @@ def __init__(self): def create(self, engine: Engine, url_prefix: str) -> APIRouter: router = APIRouter() - user = os.getenv("ES_USER") + user_name = os.getenv("ES_USER") pw = os.getenv("ES_PASSWORD") - self.client = Elasticsearch("http://localhost:9200", basic_auth=(user, pw)) + self.client = Elasticsearch("http://localhost:9200", basic_auth=(user_name, pw)) publication_class = resource_read(Publication) @@ -49,7 +49,8 @@ def search_publication( detail=f"The limit should be maximum {LIMIT_MAX}. If you want more results, " f"use pagination.", ) - if "groups" not in user or os.getenv("ES_ROLE") not in user["groups"]: + + if not has_role(user, os.getenv("ES_ROLE")): raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to search Aiod resources.", From 575faaf65c81864815e67e5344c178b3987a7ef7 Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Thu, 24 Aug 2023 12:24:15 +0200 Subject: [PATCH 14/79] Publication search seems to work. TODO: test cases and other resourcse --- .env | 2 + docker-compose.yaml | 106 +++++++++--------- .../pipeline/sql/init_temp_publication.sql | 25 ++++- .../pipeline/sql/sync_temp_publication.sql | 25 ++++- scripts/clean.sh | 6 +- src/routers/search_router.py | 28 +++-- 6 files changed, 123 insertions(+), 69 deletions(-) diff --git a/.env b/.env index 352f3968..e6cc40b2 100644 --- a/.env +++ b/.env @@ -1,3 +1,5 @@ +PYTHONPATH=/app + #MYSQL MYSQL_ROOT_PASSWORD=ok diff --git a/docker-compose.yaml b/docker-compose.yaml index 1c2257a2..e68f054d 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -45,59 +45,59 @@ services: app: condition: service_healthy - huggingface-dataset-connector: - image: ai4eu_server - container_name: huggingface-dataset-connector - env_file: .env - environment: - - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - volumes: - - ./src:/app - - ./data/connectors:/opt/connectors/data - - ./connectors/huggingface/:/opt/connectors/script - command: > - /bin/bash -c "/opt/connectors/script/datasets.sh" - depends_on: - app: - condition: service_healthy - - openml-dataset-connector: - build: - context: connectors/openml - dockerfile: Dockerfile - image: ai4eu_openml_connector - container_name: openml-dataset-connector - env_file: .env - environment: - - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - volumes: - - ./src:/app - - ./data/connectors:/opt/connectors/data - - ./connectors/openml/:/opt/connectors/script - command: > - /bin/bash -c "/opt/connectors/script/entry.sh" - depends_on: - app: - condition: service_healthy - - zenodo-dataset-connector: - build: - context: connectors/zenodo - dockerfile: Dockerfile - image: ai4eu_zenodo_connector - container_name: zenodo-dataset-connector - env_file: .env - environment: - - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - volumes: - - ./src:/app - - ./data/connectors:/opt/connectors/data - - ./connectors/zenodo/:/opt/connectors/script - command: > - /bin/bash -c "/opt/connectors/script/entry.sh" - depends_on: - app: - condition: service_healthy +# huggingface-dataset-connector: +# image: ai4eu_server +# container_name: huggingface-dataset-connector +# env_file: .env +# environment: +# - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET +# volumes: +# - ./src:/app +# - ./data/connectors:/opt/connectors/data +# - ./connectors/huggingface/:/opt/connectors/script +# command: > +# /bin/bash -c "/opt/connectors/script/datasets.sh" +# depends_on: +# app: +# condition: service_healthy +# +# openml-dataset-connector: +# build: +# context: connectors/openml +# dockerfile: Dockerfile +# image: ai4eu_openml_connector +# container_name: openml-dataset-connector +# env_file: .env +# environment: +# - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET +# volumes: +# - ./src:/app +# - ./data/connectors:/opt/connectors/data +# - ./connectors/openml/:/opt/connectors/script +# command: > +# /bin/bash -c "/opt/connectors/script/entry.sh" +# depends_on: +# app: +# condition: service_healthy +# +# zenodo-dataset-connector: +# build: +# context: connectors/zenodo +# dockerfile: Dockerfile +# image: ai4eu_zenodo_connector +# container_name: zenodo-dataset-connector +# env_file: .env +# environment: +# - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET +# volumes: +# - ./src:/app +# - ./data/connectors:/opt/connectors/data +# - ./connectors/zenodo/:/opt/connectors/script +# command: > +# /bin/bash -c "/opt/connectors/script/entry.sh" +# depends_on: +# app: +# condition: service_healthy sqlserver: image: mysql diff --git a/logstash/pipeline/sql/init_temp_publication.sql b/logstash/pipeline/sql/init_temp_publication.sql index ab0786b5..d5b27fd8 100644 --- a/logstash/pipeline/sql/init_temp_publication.sql +++ b/logstash/pipeline/sql/init_temp_publication.sql @@ -1,4 +1,23 @@ -SELECT publication.identifier, publication.name, publication.description, publication.same_as, publication.resource_id, publication.date_published, publication.version, publication.asset_id, publication.license_identifier, publication.knowledge_asset_id, publication.permanent_identifier, publication.isbn, publication.issn, publication.type_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created -FROM aiod.publication INNER JOIN aiod.aiod_entry -ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +SELECT publication.identifier, + publication.platform, + publication.platform_identifier, + publication.name, + publication.description, + publication.same_as, + publication.date_published, + publication.version, + license.name AS `license`, + publication.knowledge_asset_id, + publication.permanent_identifier, + publication.isbn, + publication.issn, + publication_type.name AS `publication_type`, + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created +FROM aiod.publication +INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.publication.license_identifier=aiod.license.identifier +LEFT JOIN aiod.publication_type ON aiod.publication.type_identifier=aiod.publication_type.identifier ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/sync_temp_publication.sql b/logstash/pipeline/sql/sync_temp_publication.sql index c034e5e0..e061a2ea 100644 --- a/logstash/pipeline/sql/sync_temp_publication.sql +++ b/logstash/pipeline/sql/sync_temp_publication.sql @@ -1,5 +1,24 @@ -SELECT publication.identifier, publication.name, publication.description, publication.same_as, publication.resource_id, publication.date_published, publication.version, publication.asset_id, publication.license_identifier, publication.knowledge_asset_id, publication.permanent_identifier, publication.isbn, publication.issn, publication.type_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created -FROM aiod.publication INNER JOIN aiod.aiod_entry -ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +SELECT publication.identifier, + publication.platform, + publication.platform_identifier, + publication.name, + publication.description, + publication.same_as, + publication.date_published, + publication.version, + license.name AS `license`, + publication.knowledge_asset_id, + publication.permanent_identifier, + publication.isbn, + publication.issn, + publication_type.name AS `publication_type`, + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created +FROM aiod.publication +INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.publication.license_identifier=aiod.license.identifier +LEFT JOIN aiod.publication_type ON aiod.publication.type_identifier=aiod.publication_type.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value ORDER BY aiod.publication.identifier diff --git a/scripts/clean.sh b/scripts/clean.sh index 03b8d524..c4fe0e5c 100755 --- a/scripts/clean.sh +++ b/scripts/clean.sh @@ -1,8 +1,8 @@ #!/bin/bash -docker image rm ai4eu_server -docker image rm ai4eu_openml_connector -docker image rm ai4eu_zenodo_connector +#docker image rm ai4eu_server +#docker image rm ai4eu_openml_connector +#docker image rm ai4eu_zenodo_connector echo "Deleted docker images, so that they will be rebuild on docker up." diff --git a/src/routers/search_router.py b/src/routers/search_router.py index 3468b447..95457f6e 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -1,5 +1,5 @@ import os -from typing import TypeVar, Generic +from typing import TypeVar, Generic, Any, Type from elasticsearch import Elasticsearch from fastapi import APIRouter, Depends, HTTPException @@ -8,6 +8,7 @@ from starlette import status from authentication import get_current_user, has_role +from database.model.concept.aiod_entry import AIoDEntryRead from database.model.knowledge_asset.publication import Publication from database.model.resource_read_and_create import resource_read from routers.router import AIoDRouter @@ -21,7 +22,7 @@ class SearchResult(BaseModel, Generic[RESOURCE]): total_hits: int resources: list[RESOURCE] - next_offset: str | None + next_offset: list | None class SearchRouter(AIoDRouter): @@ -38,7 +39,7 @@ def create(self, engine: Engine, url_prefix: str) -> APIRouter: @router.get(url_prefix + "/search/publications/v1", tags=["search"]) def search_publication( - title: str = "", + name: str = "", limit: int = 10, offset: str | None = None, # TODO: this should not be a string user: dict = Depends(get_current_user), @@ -60,19 +61,32 @@ def search_publication( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Client not initialized", ) - query = {"bool": {"must": {"match": {"title": title}}}} + query = {"bool": {"must": {"match": {"name": name}}}} result = self.client.search( index="publication", query=query, size=limit, sort=SORT, search_after=offset ) - # TODO: how to get Publications? - resources: list[publication_class] = [] # type: ignore + + total_hits = result["hits"]["total"]["value"] + resources: list[publication_class] = [ # type: ignore + _cast_resource(publication_class, hit["_source"]) for hit in result["hits"]["hits"] + ] next_offset = ( result["hits"]["hits"][-1]["sort"] if len(result["hits"]["hits"]) > 0 else None ) return SearchResult[publication_class]( # type: ignore - total_hits=result["hits"]["total"]["value"], + total_hits=total_hits, next_offset=next_offset, resources=resources, ) return router + + +def _cast_resource(resource_class: RESOURCE, resource_dict: dict[str, Any]) -> Type[RESOURCE]: + resource = resource_class(**resource_dict) # type: ignore + resource.aiod_entry = AIoDEntryRead( + date_modified=resource_dict["date_modified"], + date_created=resource_dict["date_created"], + status=resource_dict["status"], + ) + return resource From afdb897710c07c61336d6db41f732f8f48475ca6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 24 Aug 2023 12:41:32 +0200 Subject: [PATCH 15/79] platform and platform_identifier changed from aiod_entry to each instance --- .DS_Store | Bin 6148 -> 0 bytes logstash/pipeline/sql/init_temp_dataset.sql | 20 +++++++++++++++++- .../pipeline/sql/init_temp_experiment.sql | 19 ++++++++++++++++- logstash/pipeline/sql/init_temp_ml_model.sql | 17 ++++++++++++++- logstash/pipeline/sql/init_temp_service.sql | 13 +++++++++++- logstash/pipeline/sql/sync_temp_dataset.sql | 20 +++++++++++++++++- .../pipeline/sql/sync_temp_experiment.sql | 19 ++++++++++++++++- logstash/pipeline/sql/sync_temp_ml_model.sql | 17 ++++++++++++++- logstash/pipeline/sql/sync_temp_service.sql | 13 +++++++++++- src/.DS_Store | Bin 6148 -> 0 bytes src/routers/.DS_Store | Bin 6148 -> 0 bytes src/tests/.DS_Store | Bin 8196 -> 0 bytes 12 files changed, 130 insertions(+), 8 deletions(-) delete mode 100644 .DS_Store delete mode 100644 src/.DS_Store delete mode 100644 src/routers/.DS_Store delete mode 100644 src/tests/.DS_Store diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index a5ceb7214e5992c025777938c07d4a9259eb1d5d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5T3QwrWTTtm@P?a52cW^zK~Di^Z3o~ z7SyVE644!){bqM(Hp~aJn;|068um7b>O|x~8Ow_pJ`mn#osl`SQU(fjMotlRDWg6G zGv0Q=GGH0_Z47YjE}>pyRBT4C>o-Lu--M%V5XLfu*Hbu%qbzSWpT%6MJikzJDo)M0 z^l#+I&x3rJwSx4VXD3p|;nWVo)2Kh{)Rzxsk_S=JPgOwF>%-;ZEJ}KE)RM!bm#J7^ zZ#Y$_+Nn3j*8 z7a89BX%fri8eV)t1uS$a#XU9v8`&Sp|6Nw^a-K3}@qaO3D#oe(ue5EpmI2GaHyGgi z!G$t<1`Cbq>cF5{03iQHD1ptr1jo1rJ%fct>_CJL1?o^?S`4AXvF;i_&tRcZhm$bP zhcI6jrb7|x>p0(4?j$^ocC-vw2HrAI($hM>|3{zq|8Ilr%ramZ_)`psa@%jWkdk>@ xXOiQ$)`ea{S=g`8s6sH9=U5s16gQwsu;;P?^b8gnu>!F_0*VGZSO$KSffxN{j^F?Q diff --git a/logstash/pipeline/sql/init_temp_dataset.sql b/logstash/pipeline/sql/init_temp_dataset.sql index 4323af1f..6f390d01 100644 --- a/logstash/pipeline/sql/init_temp_dataset.sql +++ b/logstash/pipeline/sql/init_temp_dataset.sql @@ -1,4 +1,22 @@ -SELECT dataset.identifier, dataset.name, dataset.description, dataset.same_as, dataset.resource_id, dataset.date_published, dataset.version, dataset.asset_id, dataset.license_identifier, dataset.issn, dataset.measurement_technique, dataset.temporal_coverage, dataset.size_identifier, dataset.spatial_coverage_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +SELECT dataset.identifier, + dataset.platform, + dataset.platform_identifier, + dataset.name, + dataset.description, + dataset.same_as, + dataset.resource_id, + dataset.date_published, + dataset.version, + dataset.asset_id, + dataset.license_identifier, + dataset.issn, + dataset.measurement_technique, + dataset.temporal_coverage, + dataset.size_identifier, + dataset.spatial_coverage_identifier, + aiod_entry.status_identifier, + aiod_entry.date_modified, + aiod_entry.date_created FROM aiod.dataset INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/init_temp_experiment.sql b/logstash/pipeline/sql/init_temp_experiment.sql index a2e417a4..4cc78648 100644 --- a/logstash/pipeline/sql/init_temp_experiment.sql +++ b/logstash/pipeline/sql/init_temp_experiment.sql @@ -1,4 +1,21 @@ -SELECT experiment.identifier, experiment.name, experiment.description, experiment.same_as, experiment.resource_id, experiment.date_published, experiment.version, experiment.asset_id, experiment.license_identifier, experiment.pid, experiment.experimental_workflow, experiment.execution_settings, experiment.reproducibility_explanation, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +SELECT experiment.identifier, + experiment.platform, + experiment.platform_identifier, + experiment.name, + experiment.description, + experiment.same_as, + experiment.resource_id, + experiment.date_published, + experiment.version, + experiment.asset_id, + experiment.license_identifier, + experiment.pid, + experiment.experimental_workflow, + experiment.execution_settings, + experiment.reproducibility_explanation, + aiod_entry.status_identifier, + aiod_entry.date_modified, + aiod_entry.date_created FROM aiod.experiment INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/init_temp_ml_model.sql b/logstash/pipeline/sql/init_temp_ml_model.sql index 6a13ed5f..83795c77 100644 --- a/logstash/pipeline/sql/init_temp_ml_model.sql +++ b/logstash/pipeline/sql/init_temp_ml_model.sql @@ -1,4 +1,19 @@ -SELECT ml_model.identifier, ml_model.name, ml_model.description, ml_model.same_as, ml_model.resource_id, ml_model.date_published, ml_model.version, ml_model.asset_id, ml_model.license_identifier, ml_model.pid, ml_model.type_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +SELECT ml_model.identifier, + ml_model.platform, + ml_model.platform_identifier, + ml_model.name, + ml_model.description, + ml_model.same_as, + ml_model.resource_id, + ml_model.date_published, + ml_model.version, + ml_model.asset_id, + ml_model.license_identifier, + ml_model.pid, + ml_model.type_identifier, + aiod_entry.status_identifier, + aiod_entry.date_modified, + aiod_entry.date_created FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/init_temp_service.sql b/logstash/pipeline/sql/init_temp_service.sql index a5bd9d34..5de7ce0f 100644 --- a/logstash/pipeline/sql/init_temp_service.sql +++ b/logstash/pipeline/sql/init_temp_service.sql @@ -1,4 +1,15 @@ -SELECT service.identifier, service.name, service.description, service.same_as, service.resource_id, service.slogan, service.terms_of_service, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +SELECT service.identifier, + service.platform, + service.platform_identifier, + service.name, + service.description, + service.same_as, + service.resource_id, + service.slogan, + service.terms_of_service, + aiod_entry.status_identifier, + aiod_entry.date_modified, + aiod_entry.date_created FROM aiod.service INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier ORDER BY aiod.service.identifier diff --git a/logstash/pipeline/sql/sync_temp_dataset.sql b/logstash/pipeline/sql/sync_temp_dataset.sql index 59eef926..8c68ffec 100644 --- a/logstash/pipeline/sql/sync_temp_dataset.sql +++ b/logstash/pipeline/sql/sync_temp_dataset.sql @@ -1,4 +1,22 @@ -SELECT dataset.identifier, dataset.name, dataset.description, dataset.same_as, dataset.resource_id, dataset.date_published, dataset.version, dataset.asset_id, dataset.license_identifier, dataset.issn, dataset.measurement_technique, dataset.temporal_coverage, dataset.size_identifier, dataset.spatial_coverage_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +SELECT dataset.identifier, + dataset.platform, + dataset.platform_identifier, + dataset.name, + dataset.description, + dataset.same_as, + dataset.resource_id, + dataset.date_published, + dataset.version, + dataset.asset_id, + dataset.license_identifier, + dataset.issn, + dataset.measurement_technique, + dataset.temporal_coverage, + dataset.size_identifier, + dataset.spatial_coverage_identifier, + aiod_entry.status_identifier, + aiod_entry.date_modified, + aiod_entry.date_created FROM aiod.dataset INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_temp_experiment.sql b/logstash/pipeline/sql/sync_temp_experiment.sql index da838257..443a6ba4 100644 --- a/logstash/pipeline/sql/sync_temp_experiment.sql +++ b/logstash/pipeline/sql/sync_temp_experiment.sql @@ -1,4 +1,21 @@ -SELECT experiment.identifier, experiment.name, experiment.description, experiment.same_as, experiment.resource_id, experiment.date_published, experiment.version, experiment.asset_id, experiment.license_identifier, experiment.pid, experiment.experimental_workflow, experiment.execution_settings, experiment.reproducibility_explanation, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +SELECT experiment.identifier, + experiment.platform, + experiment.platform_identifier, + experiment.name, + experiment.description, + experiment.same_as, + experiment.resource_id, + experiment.date_published, + experiment.version, + experiment.asset_id, + experiment.license_identifier, + experiment.pid, + experiment.experimental_workflow, + experiment.execution_settings, + experiment.reproducibility_explanation, + aiod_entry.status_identifier, + aiod_entry.date_modified, + aiod_entry.date_created FROM aiod.experiment INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_temp_ml_model.sql b/logstash/pipeline/sql/sync_temp_ml_model.sql index 90e3074e..32a6f360 100644 --- a/logstash/pipeline/sql/sync_temp_ml_model.sql +++ b/logstash/pipeline/sql/sync_temp_ml_model.sql @@ -1,4 +1,19 @@ -SELECT ml_model.identifier, ml_model.name, ml_model.description, ml_model.same_as, ml_model.resource_id, ml_model.date_published, ml_model.version, ml_model.asset_id, ml_model.license_identifier, ml_model.pid, ml_model.type_identifier, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +SELECT ml_model.identifier, + ml_model.platform, + ml_model.platform_identifier, + ml_model.name, + ml_model.description, + ml_model.same_as, + ml_model.resource_id, + ml_model.date_published, + ml_model.version, + ml_model.asset_id, + ml_model.license_identifier, + ml_model.pid, + ml_model.type_identifier, + aiod_entry.status_identifier, + aiod_entry.date_modified, + aiod_entry.date_created FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_temp_service.sql b/logstash/pipeline/sql/sync_temp_service.sql index 2811154c..397d3448 100644 --- a/logstash/pipeline/sql/sync_temp_service.sql +++ b/logstash/pipeline/sql/sync_temp_service.sql @@ -1,4 +1,15 @@ -SELECT service.identifier, service.name, service.description, service.same_as, service.resource_id, service.slogan, service.terms_of_service, aiod_entry.platform, aiod_entry.platform_identifier, aiod_entry.status_identifier, aiod_entry.date_modified, aiod_entry.date_created +SELECT service.identifier, + service.platform, + service.platform_identifier, + service.name, + service.description, + service.same_as, + service.resource_id, + service.slogan, + service.terms_of_service, + aiod_entry.status_identifier, + aiod_entry.date_modified, + aiod_entry.date_created FROM aiod.service INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value diff --git a/src/.DS_Store b/src/.DS_Store deleted file mode 100644 index 04036f0e09a83eb543455a1785d0b40c92e62441..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5T5A|M!fXsabKYpdQg;D;z<#qFCeu=4-F~S-tz*2SHWlT=6m_g&R7!? z!IMatfthbJKbg(9A)6&4o^0krq5%;lP{GbVOp8cdv>_eyNRZ<_7W7E#<^6DaS2VKS z@gEtGwcEq06||uzs;pnWiqqxSQ8ObivTU|2W{4!$XQT1S<;Qtj*8CS)s|R^otXua+ zn)c&N*0A<2eMOJn>e1>vPT!xej&&ZD`n>p_LwiQ8fB!}LyEp^RfHSb`3}DX|Ne&d< zIs?vtGq7bqz7GK^m>QOf@zVhlj{v|P+)*%=vxL+H!_=@;LH1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0EWyoEb1}el8HK|VDDWiSr7T9t@91sV@0dYVa5C{GR2XJO{NlMOrJ1VU>AP)SO z4)F6qqcWxrbBpHDfySf&z!6+F1%2cJCPq6<9p)BIDCV@;gL0wDr5MVFV_Y*Gm^#cY z+Hg`foRnKxxeP^_)e)x+om6U3T5&)eC_2Eo`#7@pjK1OHDdV-X zN~PAyY8X#`p3Z+-UAp*K&TTxI+xguaAKu6_7ye2nn=+6M7LUFNAc2EUrnlhFZs zb?yhs)a@^*kFb1gSML@c_BhJys~gM=^bOsyZ=+Df+M+vqb6I5gx{jxg9z1(AkH4Gc ztU!-CVLf9;Nf#|^+N0JZzi9SPoaO8KJ=4<_na|fYm>KA!4c3=HR|l#x++_{^HUzs# z6W1}0ukGt*Z!ZgT`N{_ILS1~HapiXg?J#wiTeJtbod4{G6mj7792mAsPjddhQvUn@ z>j{*V#Q|~P*&R?Lm1<=f%h;p2xE|-)8g&bm3-fY|CIpR1$AKmt2loCjL|=o-F{#7c SBCepwi-55~8gbx79k>Tmb^ Date: Thu, 24 Aug 2023 13:50:25 +0200 Subject: [PATCH 16/79] Created testcase for publication search --- .../pipeline/sql/init_temp_publication.sql | 4 +- .../pipeline/sql/sync_temp_publication.sql | 3 +- .../model/knowledge_asset/knowledge_asset.py | 8 +++- src/routers/search_router.py | 16 ++++++-- .../elasticsearch/publication_search.json | 41 +++++++++++++++++++ src/tests/routers/test_search_router.py | 41 ++++++++++++++++--- 6 files changed, 101 insertions(+), 12 deletions(-) create mode 100644 src/tests/resources/elasticsearch/publication_search.json diff --git a/logstash/pipeline/sql/init_temp_publication.sql b/logstash/pipeline/sql/init_temp_publication.sql index d5b27fd8..57ab1352 100644 --- a/logstash/pipeline/sql/init_temp_publication.sql +++ b/logstash/pipeline/sql/init_temp_publication.sql @@ -7,7 +7,9 @@ SELECT publication.identifier, publication.date_published, publication.version, license.name AS `license`, - publication.knowledge_asset_id, + publication.resource_id AS `resource_identifier`, + publication.asset_id AS `asset_identifier`, + publication.knowledge_asset_id AS `knowledge_asset_identifier`, publication.permanent_identifier, publication.isbn, publication.issn, diff --git a/logstash/pipeline/sql/sync_temp_publication.sql b/logstash/pipeline/sql/sync_temp_publication.sql index e061a2ea..d9e65588 100644 --- a/logstash/pipeline/sql/sync_temp_publication.sql +++ b/logstash/pipeline/sql/sync_temp_publication.sql @@ -7,11 +7,10 @@ SELECT publication.identifier, publication.date_published, publication.version, license.name AS `license`, - publication.knowledge_asset_id, publication.permanent_identifier, publication.isbn, publication.issn, - publication_type.name AS `publication_type`, + publication_type.name AS `type`, status.name AS `status`, aiod_entry.date_modified, aiod_entry.date_created diff --git a/src/database/model/knowledge_asset/knowledge_asset.py b/src/database/model/knowledge_asset/knowledge_asset.py index 688f25d4..14a0064b 100644 --- a/src/database/model/knowledge_asset/knowledge_asset.py +++ b/src/database/model/knowledge_asset/knowledge_asset.py @@ -6,7 +6,7 @@ from database.model.ai_asset.ai_asset_table import AIAssetTable from database.model.helper_functions import link_factory from database.model.knowledge_asset.knowledge_asset_table import KnowledgeAssetTable -from database.model.relationships import ResourceRelationshipList +from database.model.relationships import ResourceRelationshipList, ResourceRelationshipSingle from database.model.serializers import AttributeSerializer, FindByIdentifierDeserializer @@ -40,6 +40,12 @@ def __init_subclass__(cls): cls.__sqlmodel_relationships__.update(relationships) class RelationshipConfig(AIAsset.RelationshipConfig): + knowledge_asset_identifier: int | None = ResourceRelationshipSingle( + identifier_name="knowledge_asset_id", + serializer=AttributeSerializer("identifier"), + include_in_create=False, + default_factory_orm=lambda type_: KnowledgeAssetTable(type=type_), + ) documents: list[int] = ResourceRelationshipList( description="The identifier of an AI asset for which the Knowledge Asset acts as an " "information source", diff --git a/src/routers/search_router.py b/src/routers/search_router.py index 95457f6e..e90c18d0 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -68,7 +68,10 @@ def search_publication( total_hits = result["hits"]["total"]["value"] resources: list[publication_class] = [ # type: ignore - _cast_resource(publication_class, hit["_source"]) for hit in result["hits"]["hits"] + _cast_resource( + publication_class, hit["_source"], key_translations={"publication_type": "type"} + ) + for hit in result["hits"]["hits"] ] next_offset = ( result["hits"]["hits"][-1]["sort"] if len(result["hits"]["hits"]) > 0 else None @@ -82,8 +85,15 @@ def search_publication( return router -def _cast_resource(resource_class: RESOURCE, resource_dict: dict[str, Any]) -> Type[RESOURCE]: - resource = resource_class(**resource_dict) # type: ignore +def _cast_resource( + resource_class: RESOURCE, resource_dict: dict[str, Any], key_translations: dict[str, str] +) -> Type[RESOURCE]: + kwargs = { + key_translations.get(key, key): val + for key, val in resource_dict.items() + if key != "type" and not key.startswith("@") + } + resource = resource_class(**kwargs) # type: ignore resource.aiod_entry = AIoDEntryRead( date_modified=resource_dict["date_modified"], date_created=resource_dict["date_created"], diff --git a/src/tests/resources/elasticsearch/publication_search.json b/src/tests/resources/elasticsearch/publication_search.json new file mode 100644 index 00000000..da0787f6 --- /dev/null +++ b/src/tests/resources/elasticsearch/publication_search.json @@ -0,0 +1,41 @@ +{ + "took": 1, + "timed_out": false, + "_shards": {"total": 1, "successful": 1, "skipped": 0, "failed": 0}, + "hits": { + "total": {"value": 1, "relation": "eq"}, + "max_score": null, + "hits": [ + { + "_index": "publication", + "_id": "publication_1", + "_score": null, + "_source": { + "platform_identifier": "1", + "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", + "@version": "1", + "status": "draft", + "knowledge_asset_identifier": 1, + "asset_identifier": 1, + "resource_identifier": 1, + "permanent_identifier": "http://dx.doi.org/10.1093/ajae/aaq063", + "description": "A description.", + "isbn": "9783161484100", + "date_created": "2023-08-24T10:14:52.000Z", + "version": "1.1.0", + "date_modified": "2023-08-24T10:14:52.000Z", + "name": "The name of this publication", + "platform": "example", + "publication_type": "journal", + "same_as": "https://www.example.com/resource/this_resource", + "identifier": 1, + "issn": "20493630", + "type": "publication", + "@timestamp": "2023-08-24T10:14:55.452Z", + "date_published": "2022-01-01T15:15:00.000Z" + }, + "sort": [1] + } + ] + } +} \ No newline at end of file diff --git a/src/tests/routers/test_search_router.py b/src/tests/routers/test_search_router.py index 77d15ea2..fa85ae09 100644 --- a/src/tests/routers/test_search_router.py +++ b/src/tests/routers/test_search_router.py @@ -1,14 +1,15 @@ +import json from unittest.mock import Mock -import pytest from sqlalchemy.engine import Engine from starlette.testclient import TestClient from authentication import keycloak_openid +from routers import SearchRouter, other_routers +from tests.testutils.paths import path_test_resources -@pytest.mark.skip(reason="This test isn't finished yet, we need to mock ES") -def test_happy_path( +def test_search_publication_happy_path( client: TestClient, engine: Engine, mocked_privileged_token: Mock, @@ -16,8 +17,38 @@ def test_happy_path( ): keycloak_openid.userinfo = mocked_privileged_token + (search_router,) = [r for r in other_routers if isinstance(r, SearchRouter)] + with open(path_test_resources() / "elasticsearch" / "publication_search.json", "r") as f: + mocked_results = json.load(f) + search_router.client = Mock() + search_router.client.search = Mock(return_value=mocked_results) + response = client.get( - "/search/publications/v1", params={"title": "in"}, headers={"Authorization": "Fake token"} + "/search/publications/v1", + params={"name": "publication"}, + headers={"Authorization": "Fake token"}, ) - # TODO(jos): mock the ES results. But first we need some results assert response.status_code == 200, response.json() + response_json = response.json() + (publication,) = response_json["resources"] + assert publication["platform_identifier"] == "1" + assert ( + publication["license"] == "https://creativecommons.org/share-your-work/public-domain/cc0/" + ) + assert publication["knowledge_asset_identifier"] == 1 + assert publication["asset_identifier"] == 1 + assert publication["resource_identifier"] == 1 + assert publication["permanent_identifier"] == "http://dx.doi.org/10.1093/ajae/aaq063" + assert publication["description"] == "A description." + assert publication["isbn"] == "9783161484100" + assert publication["aiod_entry"]["date_modified"] == "2023-08-24T10:14:52+00:00" + assert publication["aiod_entry"]["date_created"] == "2023-08-24T10:14:52+00:00" + assert publication["aiod_entry"]["status"] == "draft" + assert publication["version"] == "1.1.0" + assert publication["name"] == "The name of this publication" + assert publication["platform"] == "example" + assert publication["type"] == "journal" + assert publication["same_as"] == "https://www.example.com/resource/this_resource" + assert publication["identifier"] == 1 + assert publication["issn"] == "20493630" + assert publication["date_published"] == "2022-01-01T15:15:00+00:00" From 4a2e0f9a3c21400bd22ae609b07b04de02b48415 Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Thu, 24 Aug 2023 15:12:39 +0200 Subject: [PATCH 17/79] Made ElasticSearch router generic, implemented it for dataset --- connectors/fill-examples.sh | 4 + logstash/pipeline/sql/init_temp_dataset.sql | 48 ++++++---- .../pipeline/sql/init_temp_experiment.sql | 48 ++++++---- logstash/pipeline/sql/init_temp_ml_model.sql | 45 +++++---- .../pipeline/sql/init_temp_publication.sql | 45 +++++---- logstash/pipeline/sql/init_temp_service.sql | 34 ++++--- logstash/pipeline/sql/sync_temp_dataset.sql | 48 ++++++---- .../pipeline/sql/sync_temp_experiment.sql | 48 ++++++---- logstash/pipeline/sql/sync_temp_ml_model.sql | 46 +++++---- .../pipeline/sql/sync_temp_publication.sql | 42 ++++---- logstash/pipeline/sql/sync_temp_service.sql | 34 ++++--- .../models_and_experiments/experiment.py | 2 +- .../model/models_and_experiments/ml_model.py | 2 +- src/routers/__init__.py | 27 ++++-- .../{ => resources}/case_study_router.py | 0 .../computational_asset_router.py | 0 .../educational_resource_router.py | 0 src/routers/{ => resources}/team_router.py | 0 src/routers/search_router.py | 96 +++++++++++-------- src/routers/search_routers/__init__.py | 0 .../search_routers/search_router_datasets.py | 16 ++++ .../search_router_publications.py | 20 ++++ src/tests/.env | 4 +- .../elasticsearch/dataset_search.json | 49 ++++++++++ .../resources/test_router_experiment.py | 4 +- .../routers/resources/test_router_ml_model.py | 4 +- src/tests/routers/search/__init__.py | 0 .../search/test_search_router_datasets.py | 45 +++++++++ .../search/test_search_router_publications.py | 46 +++++++++ src/tests/routers/test_search_router.py | 54 ----------- 30 files changed, 521 insertions(+), 290 deletions(-) rename src/routers/{ => resources}/case_study_router.py (100%) rename src/routers/{ => resources}/computational_asset_router.py (100%) rename src/routers/{ => resources}/educational_resource_router.py (100%) rename src/routers/{ => resources}/team_router.py (100%) create mode 100644 src/routers/search_routers/__init__.py create mode 100644 src/routers/search_routers/search_router_datasets.py create mode 100644 src/routers/search_routers/search_router_publications.py create mode 100644 src/tests/resources/elasticsearch/dataset_search.json create mode 100644 src/tests/routers/search/__init__.py create mode 100644 src/tests/routers/search/test_search_router_datasets.py create mode 100644 src/tests/routers/search/test_search_router_publications.py delete mode 100644 src/tests/routers/test_search_router.py diff --git a/connectors/fill-examples.sh b/connectors/fill-examples.sh index 5cb5a7d9..c09f86a4 100755 --- a/connectors/fill-examples.sh +++ b/connectors/fill-examples.sh @@ -9,6 +9,10 @@ python3 connectors/synchronization.py \ -c connectors.example.example.ExampleComputationalAssetConnector \ -w /opt/connectors/data/example/computational_asset +python3 connectors/synchronization.py \ + -c connectors.example.example.ExampleDatasetConnector \ + -w /opt/connectors/data/example/dataset + python3 connectors/synchronization.py \ -c connectors.example.example.ExampleEducationalResourceConnector \ -w /opt/connectors/data/example/educational_resource diff --git a/logstash/pipeline/sql/init_temp_dataset.sql b/logstash/pipeline/sql/init_temp_dataset.sql index 6f390d01..ab6359fd 100644 --- a/logstash/pipeline/sql/init_temp_dataset.sql +++ b/logstash/pipeline/sql/init_temp_dataset.sql @@ -1,22 +1,28 @@ -SELECT dataset.identifier, - dataset.platform, - dataset.platform_identifier, - dataset.name, - dataset.description, - dataset.same_as, - dataset.resource_id, - dataset.date_published, - dataset.version, - dataset.asset_id, - dataset.license_identifier, - dataset.issn, - dataset.measurement_technique, - dataset.temporal_coverage, - dataset.size_identifier, - dataset.spatial_coverage_identifier, - aiod_entry.status_identifier, - aiod_entry.date_modified, - aiod_entry.date_created -FROM aiod.dataset INNER JOIN aiod.aiod_entry -ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +SELECT + -- Concept + dataset.identifier, + dataset.platform, + dataset.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + dataset.resource_id AS `resource_identifier`, + dataset.name, + dataset.description, + dataset.same_as, + -- AIAsset + dataset.asset_id AS `asset_identifier`, + dataset.date_published, + dataset.version, + license.name AS `license`, + -- Dataset + dataset.issn, + dataset.measurement_technique, + dataset.temporal_coverage +FROM aiod.dataset +INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.dataset.license_identifier=aiod.license.identifier ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/init_temp_experiment.sql b/logstash/pipeline/sql/init_temp_experiment.sql index 4cc78648..d72d7fbd 100644 --- a/logstash/pipeline/sql/init_temp_experiment.sql +++ b/logstash/pipeline/sql/init_temp_experiment.sql @@ -1,21 +1,29 @@ -SELECT experiment.identifier, - experiment.platform, - experiment.platform_identifier, - experiment.name, - experiment.description, - experiment.same_as, - experiment.resource_id, - experiment.date_published, - experiment.version, - experiment.asset_id, - experiment.license_identifier, - experiment.pid, - experiment.experimental_workflow, - experiment.execution_settings, - experiment.reproducibility_explanation, - aiod_entry.status_identifier, - aiod_entry.date_modified, - aiod_entry.date_created -FROM aiod.experiment INNER JOIN aiod.aiod_entry -ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +SELECT + -- Concept + experiment.identifier, + experiment.platform, + experiment.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + experiment.resource_id AS `resource_identifier`, + experiment.name, + experiment.description, + experiment.same_as, + -- AIAsset + experiment.asset_id AS `asset_identifier`, + experiment.date_published, + experiment.version, + license.name AS `license`, + -- Experiment + experiment.permanent_identifier, + experiment.experimental_workflow, + experiment.execution_settings, + experiment.reproducibility_explanation +FROM aiod.experiment +INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.experiment.license_identifier=aiod.license.identifier ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/init_temp_ml_model.sql b/logstash/pipeline/sql/init_temp_ml_model.sql index 83795c77..a53659ea 100644 --- a/logstash/pipeline/sql/init_temp_ml_model.sql +++ b/logstash/pipeline/sql/init_temp_ml_model.sql @@ -1,19 +1,28 @@ -SELECT ml_model.identifier, - ml_model.platform, - ml_model.platform_identifier, - ml_model.name, - ml_model.description, - ml_model.same_as, - ml_model.resource_id, - ml_model.date_published, - ml_model.version, - ml_model.asset_id, - ml_model.license_identifier, - ml_model.pid, - ml_model.type_identifier, - aiod_entry.status_identifier, - aiod_entry.date_modified, - aiod_entry.date_created -FROM aiod.ml_model INNER JOIN aiod.aiod_entry -ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +SELECT + -- Concept + ml_model.identifier, + ml_model.platform, + ml_model.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + ml_model.resource_id AS `resource_identifier`, + ml_model.name, + ml_model.description, + ml_model.same_as, + -- AIAsset + ml_model.asset_id AS `asset_identifier`, + ml_model.date_published, + ml_model.version, + license.name AS `license`, + -- MLModel + ml_model.permanent_identifier, + ml_model_type.name AS `ml_model_type` +FROM aiod.ml_model +INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifier +LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/init_temp_publication.sql b/logstash/pipeline/sql/init_temp_publication.sql index 57ab1352..fd69a205 100644 --- a/logstash/pipeline/sql/init_temp_publication.sql +++ b/logstash/pipeline/sql/init_temp_publication.sql @@ -1,22 +1,29 @@ -SELECT publication.identifier, - publication.platform, - publication.platform_identifier, - publication.name, - publication.description, - publication.same_as, - publication.date_published, - publication.version, - license.name AS `license`, - publication.resource_id AS `resource_identifier`, - publication.asset_id AS `asset_identifier`, - publication.knowledge_asset_id AS `knowledge_asset_identifier`, - publication.permanent_identifier, - publication.isbn, - publication.issn, - publication_type.name AS `publication_type`, - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created +SELECT + -- Concept + publication.identifier, + publication.platform, + publication.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + publication.resource_id AS `resource_identifier`, + publication.name, + publication.description, + publication.same_as, + -- AIAsset + publication.asset_id AS `asset_identifier`, + publication.date_published, + publication.version, + license.name AS `license`, + -- KnowledgeAsset + publication.knowledge_asset_id AS `knowledge_asset_identifier`, + -- Publication + publication.permanent_identifier, + publication.isbn, + publication.issn, + publication_type.name AS `publication_type` FROM aiod.publication INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier diff --git a/logstash/pipeline/sql/init_temp_service.sql b/logstash/pipeline/sql/init_temp_service.sql index 5de7ce0f..486160b5 100644 --- a/logstash/pipeline/sql/init_temp_service.sql +++ b/logstash/pipeline/sql/init_temp_service.sql @@ -1,15 +1,21 @@ -SELECT service.identifier, - service.platform, - service.platform_identifier, - service.name, - service.description, - service.same_as, - service.resource_id, - service.slogan, - service.terms_of_service, - aiod_entry.status_identifier, - aiod_entry.date_modified, - aiod_entry.date_created -FROM aiod.service INNER JOIN aiod.aiod_entry -ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +SELECT + -- Concept + service.identifier, + service.platform, + service.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + service.resource_id AS `resource_identifier`, + service.name, + service.description, + service.same_as, + -- Service + service.slogan, + service.terms_of_service +FROM aiod.service +INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier ORDER BY aiod.service.identifier diff --git a/logstash/pipeline/sql/sync_temp_dataset.sql b/logstash/pipeline/sql/sync_temp_dataset.sql index 8c68ffec..73c9d95b 100644 --- a/logstash/pipeline/sql/sync_temp_dataset.sql +++ b/logstash/pipeline/sql/sync_temp_dataset.sql @@ -1,23 +1,29 @@ -SELECT dataset.identifier, - dataset.platform, - dataset.platform_identifier, - dataset.name, - dataset.description, - dataset.same_as, - dataset.resource_id, - dataset.date_published, - dataset.version, - dataset.asset_id, - dataset.license_identifier, - dataset.issn, - dataset.measurement_technique, - dataset.temporal_coverage, - dataset.size_identifier, - dataset.spatial_coverage_identifier, - aiod_entry.status_identifier, - aiod_entry.date_modified, - aiod_entry.date_created -FROM aiod.dataset INNER JOIN aiod.aiod_entry -ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +SELECT + -- Concept + dataset.identifier, + dataset.platform, + dataset.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + dataset.resource_id AS `resource_identifier`, + dataset.name, + dataset.description, + dataset.same_as, + -- AIAsset + dataset.asset_id AS `asset_identifier`, + dataset.date_published, + dataset.version, + license.name AS `license`, + -- Dataset + dataset.issn, + dataset.measurement_technique, + dataset.temporal_coverage +FROM aiod.dataset +INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.dataset.license_identifier=aiod.license.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/sync_temp_experiment.sql b/logstash/pipeline/sql/sync_temp_experiment.sql index 443a6ba4..181520f3 100644 --- a/logstash/pipeline/sql/sync_temp_experiment.sql +++ b/logstash/pipeline/sql/sync_temp_experiment.sql @@ -1,22 +1,30 @@ -SELECT experiment.identifier, - experiment.platform, - experiment.platform_identifier, - experiment.name, - experiment.description, - experiment.same_as, - experiment.resource_id, - experiment.date_published, - experiment.version, - experiment.asset_id, - experiment.license_identifier, - experiment.pid, - experiment.experimental_workflow, - experiment.execution_settings, - experiment.reproducibility_explanation, - aiod_entry.status_identifier, - aiod_entry.date_modified, - aiod_entry.date_created -FROM aiod.experiment INNER JOIN aiod.aiod_entry -ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +SELECT + -- Concept + experiment.identifier, + experiment.platform, + experiment.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + experiment.resource_id AS `resource_identifier`, + experiment.name, + experiment.description, + experiment.same_as, + -- AIAsset + experiment.asset_id AS `asset_identifier`, + experiment.date_published, + experiment.version, + license.name AS `license`, + -- Experiment + experiment.permanent_identifier, + experiment.experimental_workflow, + experiment.execution_settings, + experiment.reproducibility_explanation +FROM aiod.experiment +INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.experiment.license_identifier=aiod.license.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/sync_temp_ml_model.sql b/logstash/pipeline/sql/sync_temp_ml_model.sql index 32a6f360..6ca2880d 100644 --- a/logstash/pipeline/sql/sync_temp_ml_model.sql +++ b/logstash/pipeline/sql/sync_temp_ml_model.sql @@ -1,20 +1,28 @@ -SELECT ml_model.identifier, - ml_model.platform, - ml_model.platform_identifier, - ml_model.name, - ml_model.description, - ml_model.same_as, - ml_model.resource_id, - ml_model.date_published, - ml_model.version, - ml_model.asset_id, - ml_model.license_identifier, - ml_model.pid, - ml_model.type_identifier, - aiod_entry.status_identifier, - aiod_entry.date_modified, - aiod_entry.date_created -FROM aiod.ml_model INNER JOIN aiod.aiod_entry -ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier -WHERE aiod.aiod_entry.date_modified > :sql_last_value +SELECT + -- Concept + ml_model.identifier, + ml_model.platform, + ml_model.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + ml_model.resource_id AS `resource_identifier`, + ml_model.name, + ml_model.description, + ml_model.same_as, + -- AIAsset + ml_model.asset_id AS `asset_identifier`, + ml_model.date_published, + ml_model.version, + license.name AS `license`, + -- MLModel + ml_model.permanent_identifier, + ml_model_type.name AS `ml_model_type` +FROM aiod.ml_model +INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifier +LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/sync_temp_publication.sql b/logstash/pipeline/sql/sync_temp_publication.sql index d9e65588..99b38a68 100644 --- a/logstash/pipeline/sql/sync_temp_publication.sql +++ b/logstash/pipeline/sql/sync_temp_publication.sql @@ -1,19 +1,29 @@ -SELECT publication.identifier, - publication.platform, - publication.platform_identifier, - publication.name, - publication.description, - publication.same_as, - publication.date_published, - publication.version, - license.name AS `license`, - publication.permanent_identifier, - publication.isbn, - publication.issn, - publication_type.name AS `type`, - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created +SELECT + -- Concept + publication.identifier, + publication.platform, + publication.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + publication.resource_id AS `resource_identifier`, + publication.name, + publication.description, + publication.same_as, + -- AIAsset + publication.asset_id AS `asset_identifier`, + publication.date_published, + publication.version, + license.name AS `license`, + -- KnowledgeAsset + publication.knowledge_asset_id AS `knowledge_asset_identifier`, + -- Publication + publication.permanent_identifier, + publication.isbn, + publication.issn, + publication_type.name AS `publication_type` FROM aiod.publication INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier diff --git a/logstash/pipeline/sql/sync_temp_service.sql b/logstash/pipeline/sql/sync_temp_service.sql index 397d3448..d581d037 100644 --- a/logstash/pipeline/sql/sync_temp_service.sql +++ b/logstash/pipeline/sql/sync_temp_service.sql @@ -1,16 +1,22 @@ -SELECT service.identifier, - service.platform, - service.platform_identifier, - service.name, - service.description, - service.same_as, - service.resource_id, - service.slogan, - service.terms_of_service, - aiod_entry.status_identifier, - aiod_entry.date_modified, - aiod_entry.date_created -FROM aiod.service INNER JOIN aiod.aiod_entry -ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +SELECT + -- Concept + service.identifier, + service.platform, + service.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + service.resource_id AS `resource_identifier`, + service.name, + service.description, + service.same_as, + -- Service + service.slogan, + service.terms_of_service +FROM aiod.service +INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value ORDER BY aiod.service.identifier diff --git a/src/database/model/models_and_experiments/experiment.py b/src/database/model/models_and_experiments/experiment.py index 4df43bb2..77c97169 100644 --- a/src/database/model/models_and_experiments/experiment.py +++ b/src/database/model/models_and_experiments/experiment.py @@ -10,7 +10,7 @@ class ExperimentBase(AIAssetBase): - pid: str | None = Field( + permanent_identifier: str | None = Field( description="A permanent identifier for the model, for example a digital object " "identifier (DOI). Ideally a url.", max_length=SHORT, diff --git a/src/database/model/models_and_experiments/ml_model.py b/src/database/model/models_and_experiments/ml_model.py index c6166304..b36c35e5 100644 --- a/src/database/model/models_and_experiments/ml_model.py +++ b/src/database/model/models_and_experiments/ml_model.py @@ -17,7 +17,7 @@ class MLModelBase(AIAssetBase): - pid: str | None = Field( + permanent_identifier: str | None = Field( description="A permanent identifier for the model, for example a digital object " "identifier (DOI). Ideally a url.", max_length=SHORT, diff --git a/src/routers/__init__.py b/src/routers/__init__.py index efcfb1f6..910089f2 100644 --- a/src/routers/__init__.py +++ b/src/routers/__init__.py @@ -1,8 +1,12 @@ -from routers.case_study_router import CaseStudyRouter -from routers.computational_asset_router import ComputationalAssetRouter -from routers.educational_resource_router import EducationalResourceRouter +import os + +from elasticsearch import Elasticsearch + from routers.resource_router import ResourceRouter # noqa:F401 +from routers.resources.case_study_router import CaseStudyRouter +from routers.resources.computational_asset_router import ComputationalAssetRouter from routers.resources.dataset_router import DatasetRouter +from routers.resources.educational_resource_router import EducationalResourceRouter from routers.resources.experiment_router import ExperimentRouter from routers.resources.ml_model_router import MLModelRouter from routers.resources.organisation_router import OrganisationRouter @@ -10,12 +14,13 @@ from routers.resources.platform_router import PlatformRouter from routers.resources.publication_router import PublicationRouter from routers.resources.service_router import ServiceRouter +from routers.resources.team_router import TeamRouter from routers.router import AIoDRouter # noqa:F401 -from routers.search_router import SearchRouter -from routers.team_router import TeamRouter +from routers.search_router import SearchRouter # noqa:F401 +from routers.search_routers.search_router_datasets import SearchRouterDatasets +from routers.search_routers.search_router_publications import SearchRouterPublications from routers.upload_router_huggingface import UploadRouterHuggingface - resource_routers = [ PlatformRouter(), CaseStudyRouter(), @@ -34,4 +39,12 @@ TeamRouter(), ] # type: list[ResourceRouter] -other_routers = [UploadRouterHuggingface(), SearchRouter()] # type: list[AIoDRouter] + +user_name = os.getenv("ES_USER") +pw = os.getenv("ES_PASSWORD") +elasticsearch_client = Elasticsearch("http://localhost:9200", basic_auth=(user_name, pw)) +other_routers = [ + UploadRouterHuggingface(), + SearchRouterDatasets(client=elasticsearch_client), + SearchRouterPublications(client=elasticsearch_client), +] # type: list[AIoDRouter] diff --git a/src/routers/case_study_router.py b/src/routers/resources/case_study_router.py similarity index 100% rename from src/routers/case_study_router.py rename to src/routers/resources/case_study_router.py diff --git a/src/routers/computational_asset_router.py b/src/routers/resources/computational_asset_router.py similarity index 100% rename from src/routers/computational_asset_router.py rename to src/routers/resources/computational_asset_router.py diff --git a/src/routers/educational_resource_router.py b/src/routers/resources/educational_resource_router.py similarity index 100% rename from src/routers/educational_resource_router.py rename to src/routers/resources/educational_resource_router.py diff --git a/src/routers/team_router.py b/src/routers/resources/team_router.py similarity index 100% rename from src/routers/team_router.py rename to src/routers/resources/team_router.py diff --git a/src/routers/search_router.py b/src/routers/search_router.py index e90c18d0..a6b5c920 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -1,3 +1,4 @@ +import abc import os from typing import TypeVar, Generic, Any, Type @@ -9,7 +10,6 @@ from authentication import get_current_user, has_role from database.model.concept.aiod_entry import AIoDEntryRead -from database.model.knowledge_asset.publication import Publication from database.model.resource_read_and_create import resource_read from routers.router import AIoDRouter @@ -25,25 +25,50 @@ class SearchResult(BaseModel, Generic[RESOURCE]): next_offset: list | None -class SearchRouter(AIoDRouter): - def __init__(self): - self.client: Elasticsearch | None = None +class SearchRouter(AIoDRouter, Generic[RESOURCE], abc.ABC): + """ + Providing search functionality in ElasticSearch + """ + + def __init__(self, client: Elasticsearch): + self.client: Elasticsearch = client + + @property + @abc.abstractmethod + def es_index(self) -> str: + """The name of the elasticsearch index""" + + @property + @abc.abstractmethod + def resource_name_plural(self) -> str: + """The name of the resource (plural)""" + + @property + def key_translations(self) -> dict[str, str]: + """If an attribute is called differently in elasticsearch than in our metadata model, + you can define a translation dictionary here. The key should be the name in + elasticsearch, the value the name in our data model.""" + return {} + + @property + @abc.abstractmethod + def resource_class(self) -> RESOURCE: + """The resource class""" def create(self, engine: Engine, url_prefix: str) -> APIRouter: router = APIRouter() - user_name = os.getenv("ES_USER") - pw = os.getenv("ES_PASSWORD") - self.client = Elasticsearch("http://localhost:9200", basic_auth=(user_name, pw)) - - publication_class = resource_read(Publication) + read_class = resource_read(self.resource_class) # type: ignore - @router.get(url_prefix + "/search/publications/v1", tags=["search"]) - def search_publication( + @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", tags=["search"]) + def search( name: str = "", limit: int = 10, offset: str | None = None, # TODO: this should not be a string user: dict = Depends(get_current_user), - ) -> SearchResult[publication_class]: # type: ignore + ) -> SearchResult[read_class]: # type: ignore + f""" + Search for {self.resource_name_plural}. + """ if limit > LIMIT_MAX: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, @@ -56,27 +81,21 @@ def search_publication( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to search Aiod resources.", ) - if self.client is None: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Client not initialized", - ) + query = {"bool": {"must": {"match": {"name": name}}}} result = self.client.search( - index="publication", query=query, size=limit, sort=SORT, search_after=offset + index=self.es_index, query=query, size=limit, sort=SORT, search_after=offset ) total_hits = result["hits"]["total"]["value"] - resources: list[publication_class] = [ # type: ignore - _cast_resource( - publication_class, hit["_source"], key_translations={"publication_type": "type"} - ) + resources: list[read_class] = [ # type: ignore + self._cast_resource(read_class, hit["_source"]) # type: ignore for hit in result["hits"]["hits"] ] next_offset = ( result["hits"]["hits"][-1]["sort"] if len(result["hits"]["hits"]) > 0 else None ) - return SearchResult[publication_class]( # type: ignore + return SearchResult[read_class]( # type: ignore total_hits=total_hits, next_offset=next_offset, resources=resources, @@ -84,19 +103,18 @@ def search_publication( return router - -def _cast_resource( - resource_class: RESOURCE, resource_dict: dict[str, Any], key_translations: dict[str, str] -) -> Type[RESOURCE]: - kwargs = { - key_translations.get(key, key): val - for key, val in resource_dict.items() - if key != "type" and not key.startswith("@") - } - resource = resource_class(**kwargs) # type: ignore - resource.aiod_entry = AIoDEntryRead( - date_modified=resource_dict["date_modified"], - date_created=resource_dict["date_created"], - status=resource_dict["status"], - ) - return resource + def _cast_resource( + self, resource_class: RESOURCE, resource_dict: dict[str, Any] + ) -> Type[RESOURCE]: + kwargs = { + self.key_translations.get(key, key): val + for key, val in resource_dict.items() + if key != "type" and not key.startswith("@") + } + resource = resource_class(**kwargs) # type: ignore + resource.aiod_entry = AIoDEntryRead( + date_modified=resource_dict["date_modified"], + date_created=resource_dict["date_created"], + status=resource_dict["status"], + ) + return resource diff --git a/src/routers/search_routers/__init__.py b/src/routers/search_routers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/routers/search_routers/search_router_datasets.py b/src/routers/search_routers/search_router_datasets.py new file mode 100644 index 00000000..3a837ad3 --- /dev/null +++ b/src/routers/search_routers/search_router_datasets.py @@ -0,0 +1,16 @@ +from database.model.dataset.dataset import Dataset +from routers import SearchRouter + + +class SearchRouterDatasets(SearchRouter[Dataset]): + @property + def es_index(self) -> str: + return "dataset" + + @property + def resource_name_plural(self) -> str: + return "datasets" + + @property + def resource_class(self): + return Dataset diff --git a/src/routers/search_routers/search_router_publications.py b/src/routers/search_routers/search_router_publications.py new file mode 100644 index 00000000..e98983e5 --- /dev/null +++ b/src/routers/search_routers/search_router_publications.py @@ -0,0 +1,20 @@ +from database.model.knowledge_asset.publication import Publication +from routers import SearchRouter + + +class SearchRouterPublications(SearchRouter[Publication]): + @property + def es_index(self) -> str: + return "publication" + + @property + def resource_name_plural(self) -> str: + return "publications" + + @property + def resource_class(self): + return Publication + + @property + def key_translations(self) -> dict: + return {"publication_type": "type"} diff --git a/src/tests/.env b/src/tests/.env index aebf011b..94142d8d 100644 --- a/src/tests/.env +++ b/src/tests/.env @@ -1,4 +1,4 @@ KEYCLOAK_CLIENT_SECRET="mocked_secret" -ES_USER=elastic -ES_PASSWORD=changeme +ES_USER="mocked_user" +ES_PASSWORD="mocked_password" ES_ROLE="edit_aiod_resources" \ No newline at end of file diff --git a/src/tests/resources/elasticsearch/dataset_search.json b/src/tests/resources/elasticsearch/dataset_search.json new file mode 100644 index 00000000..0f293eee --- /dev/null +++ b/src/tests/resources/elasticsearch/dataset_search.json @@ -0,0 +1,49 @@ +{ + "took": 15, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 1, + "relation": "eq" + }, + "max_score": null, + "hits": [ + { + "_index": "dataset", + "_id": "dataset_1", + "_score": null, + "_source": { + "type": "dataset", + "date_created": "2023-08-24T12:48:49.000Z", + "date_modified": "2023-08-24T12:48:49.000Z", + "description": "A description.", + "asset_identifier": 3, + "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", + "name": "The name of this dataset", + "status": "draft", + "@timestamp": "2023-08-24T12:49:00.321Z", + "identifier": 1, + "platform_identifier": "1", + "resource_identifier": 3, + "same_as": "https://www.example.com/resource/this_resource", + "version": "1.1.0", + "issn": "20493630", + "temporal_coverage": "2011/2012", + "@version": "1", + "date_published": "2022-01-01T15:15:00.000Z", + "platform": "example", + "measurement_technique": "mass spectrometry" + }, + "sort": [ + 1 + ] + } + ] + } +} \ No newline at end of file diff --git a/src/tests/routers/resources/test_router_experiment.py b/src/tests/routers/resources/test_router_experiment.py index 1b9ce76b..d62638f9 100644 --- a/src/tests/routers/resources/test_router_experiment.py +++ b/src/tests/routers/resources/test_router_experiment.py @@ -16,7 +16,7 @@ def test_happy_path( keycloak_openid.userinfo = mocked_privileged_token body = copy.copy(body_asset) - body["pid"] = "https://doi.org/10.1000/182" + body["permanent_identifier"] = "https://doi.org/10.1000/182" body["experimental_workflow"] = "Example workflow." body["execution_settings"] = "Example execution settings." body["reproducibility_explanation"] = "Example reproducibility explanation." @@ -51,7 +51,7 @@ def test_happy_path( assert response.status_code == 200, response.json() response_json = response.json() - assert response_json["pid"] == "https://doi.org/10.1000/182" + assert response_json["permanent_identifier"] == "https://doi.org/10.1000/182" assert response_json["experimental_workflow"] == "Example workflow." assert response_json["execution_settings"] == "Example execution settings." assert response_json["reproducibility_explanation"] == "Example reproducibility explanation." diff --git a/src/tests/routers/resources/test_router_ml_model.py b/src/tests/routers/resources/test_router_ml_model.py index eaa94359..13555c29 100644 --- a/src/tests/routers/resources/test_router_ml_model.py +++ b/src/tests/routers/resources/test_router_ml_model.py @@ -23,7 +23,7 @@ def test_happy_path( session.commit() body = copy.copy(body_asset) - body["pid"] = "https://doi.org/10.1000/182" + body["permanent_identifier"] = "https://doi.org/10.1000/182" body["type"] = "Large Language Model" body["related_experiment"] = [1] distribution = { @@ -56,7 +56,7 @@ def test_happy_path( assert response.status_code == 200, response.json() response_json = response.json() - assert response_json["pid"] == "https://doi.org/10.1000/182" + assert response_json["permanent_identifier"] == "https://doi.org/10.1000/182" assert response_json["type"] == "Large Language Model" assert response_json["related_experiment"] == [1] assert response_json["distribution"] == [distribution] diff --git a/src/tests/routers/search/__init__.py b/src/tests/routers/search/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/tests/routers/search/test_search_router_datasets.py b/src/tests/routers/search/test_search_router_datasets.py new file mode 100644 index 00000000..cb14747c --- /dev/null +++ b/src/tests/routers/search/test_search_router_datasets.py @@ -0,0 +1,45 @@ +import json +from unittest.mock import Mock + +from starlette.testclient import TestClient + +from authentication import keycloak_openid +from routers import other_routers, SearchRouterDatasets +from tests.testutils.paths import path_test_resources + + +def test_search_happy_path(client: TestClient, mocked_privileged_token: Mock): + keycloak_openid.userinfo = mocked_privileged_token + + (search_router,) = [r for r in other_routers if isinstance(r, SearchRouterDatasets)] + with open(path_test_resources() / "elasticsearch" / "dataset_search.json", "r") as f: + mocked_results = json.load(f) + search_router.client.search = Mock(return_value=mocked_results) + + response = client.get( + "/search/datasets/v1", + params={"name": "dataset"}, + headers={"Authorization": "Fake token"}, + ) + + assert response.status_code == 200, response.json() + response_json = response.json() + (resource,) = response_json["resources"] + assert resource["platform_identifier"] == "1" + assert resource["license"] == "https://creativecommons.org/share-your-work/public-domain/cc0/" + + assert resource["asset_identifier"] == 3 + assert resource["resource_identifier"] == 3 + assert resource["description"] == "A description." + assert resource["aiod_entry"]["date_modified"] == "2023-08-24T12:48:49+00:00" + assert resource["aiod_entry"]["date_created"] == "2023-08-24T12:48:49+00:00" + assert resource["aiod_entry"]["status"] == "draft" + assert resource["version"] == "1.1.0" + assert resource["name"] == "The name of this dataset" + assert resource["platform"] == "example" + assert resource["same_as"] == "https://www.example.com/resource/this_resource" + assert resource["identifier"] == 1 + assert resource["date_published"] == "2022-01-01T15:15:00+00:00" + assert resource["issn"] == "20493630" + assert resource["measurement_technique"] == "mass spectrometry" + assert resource["temporal_coverage"] == "2011/2012" diff --git a/src/tests/routers/search/test_search_router_publications.py b/src/tests/routers/search/test_search_router_publications.py new file mode 100644 index 00000000..9cb253ad --- /dev/null +++ b/src/tests/routers/search/test_search_router_publications.py @@ -0,0 +1,46 @@ +import json +from unittest.mock import Mock + +from starlette.testclient import TestClient + +from authentication import keycloak_openid +from routers import other_routers, SearchRouterPublications +from tests.testutils.paths import path_test_resources + + +def test_search_happy_path(client: TestClient, mocked_privileged_token: Mock): + keycloak_openid.userinfo = mocked_privileged_token + + (search_router,) = [r for r in other_routers if isinstance(r, SearchRouterPublications)] + with open(path_test_resources() / "elasticsearch" / "publication_search.json", "r") as f: + mocked_results = json.load(f) + search_router.client.search = Mock(return_value=mocked_results) + + response = client.get( + "/search/publications/v1", + params={"name": "resource"}, + headers={"Authorization": "Fake token"}, + ) + + assert response.status_code == 200, response.json() + response_json = response.json() + (resource,) = response_json["resources"] + assert resource["platform_identifier"] == "1" + assert resource["license"] == "https://creativecommons.org/share-your-work/public-domain/cc0/" + assert resource["knowledge_asset_identifier"] == 1 + assert resource["asset_identifier"] == 1 + assert resource["resource_identifier"] == 1 + assert resource["permanent_identifier"] == "http://dx.doi.org/10.1093/ajae/aaq063" + assert resource["description"] == "A description." + assert resource["isbn"] == "9783161484100" + assert resource["aiod_entry"]["date_modified"] == "2023-08-24T10:14:52+00:00" + assert resource["aiod_entry"]["date_created"] == "2023-08-24T10:14:52+00:00" + assert resource["aiod_entry"]["status"] == "draft" + assert resource["version"] == "1.1.0" + assert resource["name"] == "The name of this publication" + assert resource["platform"] == "example" + assert resource["type"] == "journal" + assert resource["same_as"] == "https://www.example.com/resource/this_resource" + assert resource["identifier"] == 1 + assert resource["issn"] == "20493630" + assert resource["date_published"] == "2022-01-01T15:15:00+00:00" diff --git a/src/tests/routers/test_search_router.py b/src/tests/routers/test_search_router.py deleted file mode 100644 index fa85ae09..00000000 --- a/src/tests/routers/test_search_router.py +++ /dev/null @@ -1,54 +0,0 @@ -import json -from unittest.mock import Mock - -from sqlalchemy.engine import Engine -from starlette.testclient import TestClient - -from authentication import keycloak_openid -from routers import SearchRouter, other_routers -from tests.testutils.paths import path_test_resources - - -def test_search_publication_happy_path( - client: TestClient, - engine: Engine, - mocked_privileged_token: Mock, - body_resource: dict, -): - keycloak_openid.userinfo = mocked_privileged_token - - (search_router,) = [r for r in other_routers if isinstance(r, SearchRouter)] - with open(path_test_resources() / "elasticsearch" / "publication_search.json", "r") as f: - mocked_results = json.load(f) - search_router.client = Mock() - search_router.client.search = Mock(return_value=mocked_results) - - response = client.get( - "/search/publications/v1", - params={"name": "publication"}, - headers={"Authorization": "Fake token"}, - ) - assert response.status_code == 200, response.json() - response_json = response.json() - (publication,) = response_json["resources"] - assert publication["platform_identifier"] == "1" - assert ( - publication["license"] == "https://creativecommons.org/share-your-work/public-domain/cc0/" - ) - assert publication["knowledge_asset_identifier"] == 1 - assert publication["asset_identifier"] == 1 - assert publication["resource_identifier"] == 1 - assert publication["permanent_identifier"] == "http://dx.doi.org/10.1093/ajae/aaq063" - assert publication["description"] == "A description." - assert publication["isbn"] == "9783161484100" - assert publication["aiod_entry"]["date_modified"] == "2023-08-24T10:14:52+00:00" - assert publication["aiod_entry"]["date_created"] == "2023-08-24T10:14:52+00:00" - assert publication["aiod_entry"]["status"] == "draft" - assert publication["version"] == "1.1.0" - assert publication["name"] == "The name of this publication" - assert publication["platform"] == "example" - assert publication["type"] == "journal" - assert publication["same_as"] == "https://www.example.com/resource/this_resource" - assert publication["identifier"] == 1 - assert publication["issn"] == "20493630" - assert publication["date_published"] == "2022-01-01T15:15:00+00:00" From d6ce8e932d910529a24e2b665776c66b7289a770 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 24 Aug 2023 16:57:24 +0200 Subject: [PATCH 18/79] Logstash configured for dataset, experiment, ml_model, publication, and service --- es/setup/dataset.json | 78 +++++++++++++++ es/setup/experiment.json | 162 ++++++++++++++++++++++++++++++++ es/setup/ml_model.json | 142 ++++++++++++++++++++++++++++ es/setup/publication.json | 112 ++++++++++++++++++++++ es/setup/service.json | 123 ++++++++++++++++++++++++ logstash/pipeline/.DS_Store | Bin 0 -> 6148 bytes logstash/pipeline/sql/.DS_Store | Bin 0 -> 6148 bytes 7 files changed, 617 insertions(+) create mode 100644 es/setup/experiment.json create mode 100644 es/setup/ml_model.json create mode 100644 es/setup/service.json create mode 100644 logstash/pipeline/.DS_Store create mode 100644 logstash/pipeline/sql/.DS_Store diff --git a/es/setup/dataset.json b/es/setup/dataset.json index 8b497f89..2fec49cc 100644 --- a/es/setup/dataset.json +++ b/es/setup/dataset.json @@ -15,6 +15,21 @@ } } }, + "asset_identifier" : { + "type" : "long", + "index" : false + }, + "date_created" : { + "type" : "date", + "index" : false + }, + "date_modified" : { + "type" : "date" + }, + "date_published" : { + "type" : "date", + "index" : false + }, "description" : { "type" : "text", "fields" : { @@ -32,6 +47,35 @@ "type" : "boolean", "index" : false }, + "issn" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "license" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "measurement_technique" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, "name" : { "type" : "text", "fields" : { @@ -61,6 +105,10 @@ } } }, + "resource_identifier" : { + "type" : "long", + "index" : false + }, "same_as" : { "type" : "text", "index" : false, @@ -71,6 +119,26 @@ } } }, + "status" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "temporal_coverage" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, "type" : { "type" : "text", "index" : false, @@ -80,6 +148,16 @@ "ignore_above" : 256 } } + }, + "version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } } } } diff --git a/es/setup/experiment.json b/es/setup/experiment.json new file mode 100644 index 00000000..6673fe33 --- /dev/null +++ b/es/setup/experiment.json @@ -0,0 +1,162 @@ +{ + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date", + "index" : false + }, + "@version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "asset_identifier" : { + "type" : "long", + "index" : false + }, + "date_created" : { + "type" : "date", + "index" : false + }, + "date_modified" : { + "type" : "date" + }, + "date_published" : { + "type" : "date", + "index" : false + }, + "description" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "execution_settings" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "experimental_workflow" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "identifier" : { + "type" : "long", + "index" : false + }, + "license" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform_identifier" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "reproducibility_explanation" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "resource_identifier" : { + "type" : "long", + "index" : false + }, + "same_as" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "status" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "type" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + } + } + } +} + diff --git a/es/setup/ml_model.json b/es/setup/ml_model.json new file mode 100644 index 00000000..381a182e --- /dev/null +++ b/es/setup/ml_model.json @@ -0,0 +1,142 @@ +{ + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date", + "index" : false + }, + "@version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "asset_identifier" : { + "type" : "long", + "index" : false + }, + "date_created" : { + "type" : "date", + "index" : false + }, + "date_modified" : { + "type" : "date" + }, + "date_published" : { + "type" : "date", + "index" : false + }, + "description" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "identifier" : { + "type" : "long", + "index" : false + }, + "license" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "ml_model_type" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform_identifier" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "resource_identifier" : { + "type" : "long", + "index" : false + }, + "same_as" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "status" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "type" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + } + } + } +} + diff --git a/es/setup/publication.json b/es/setup/publication.json index 151ba894..c24e2772 100644 --- a/es/setup/publication.json +++ b/es/setup/publication.json @@ -25,6 +25,10 @@ } } }, + "asset_identifier" : { + "type" : "long", + "index" : false + }, "creators" : { "type" : "text", "index" : false, @@ -39,10 +43,22 @@ "type" : "date", "index" : false }, + "date_modified" : { + "type" : "date" + }, "date_published" : { "type" : "date", "index" : false }, + "description" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, "doi" : { "type" : "text", "index" : false, @@ -57,10 +73,61 @@ "type" : "long", "index" : false }, + "isbn" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "issn" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "knowledge_asset_identifier" : { + "type" : "long", + "index" : false + }, + "license" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, "license_identifier" : { "type" : "long", "index" : false }, + "name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "permanent_identifier" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, "platform" : { "type" : "text", "index" : false, @@ -73,6 +140,17 @@ }, "platform_identifier" : { "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "publication_type" : { + "type" : "text", + "index" : false, "fields" : { "keyword" : { "type" : "keyword", @@ -80,10 +158,34 @@ } } }, + "resource_identifier" : { + "type" : "long", + "index" : false + }, "resource_type_identifier" : { "type" : "long", "index" : false }, + "same_as" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "status" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, "title" : { "type" : "text", "fields" : { @@ -112,6 +214,16 @@ "ignore_above" : 256 } } + }, + "version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } } } } diff --git a/es/setup/service.json b/es/setup/service.json new file mode 100644 index 00000000..22b57190 --- /dev/null +++ b/es/setup/service.json @@ -0,0 +1,123 @@ +{ + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date", + "index" : false + }, + "@version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "date_created" : { + "type" : "date", + "index" : false + }, + "date_modified" : { + "type" : "date" + }, + "description" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "identifier" : { + "type" : "long", + "index" : false + }, + "name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform_identifier" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "resource_identifier" : { + "type" : "long", + "index" : false + }, + "same_as" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "slogan" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "status" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "terms_of_service" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "type" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + } + } + } +} + diff --git a/logstash/pipeline/.DS_Store b/logstash/pipeline/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..1bb1d1e2036c92c78c2a1220bf20738972486ec6 GIT binary patch literal 6148 zcmeH~K?=e^3`G;|qTr@Wm$UHz-e3?tK`$UEx{($HUC+_|$ppdbT10*z`IF3q(zoa| zBBGnyaV^q`NDDWWm4%5Z@+s;vtg>cH1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 Date: Thu, 24 Aug 2023 17:00:32 +0200 Subject: [PATCH 19/79] Logstash configured for dataset, experiment, ml_model, publication, and service --- logstash/pipeline/.DS_Store | Bin 6148 -> 0 bytes logstash/pipeline/sql/.DS_Store | Bin 6148 -> 0 bytes 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 logstash/pipeline/.DS_Store delete mode 100644 logstash/pipeline/sql/.DS_Store diff --git a/logstash/pipeline/.DS_Store b/logstash/pipeline/.DS_Store deleted file mode 100644 index 1bb1d1e2036c92c78c2a1220bf20738972486ec6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~K?=e^3`G;|qTr@Wm$UHz-e3?tK`$UEx{($HUC+_|$ppdbT10*z`IF3q(zoa| zBBGnyaV^q`NDDWWm4%5Z@+s;vtg>cH1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 Date: Thu, 24 Aug 2023 17:08:47 +0200 Subject: [PATCH 20/79] Logstash waits until fill-db-with-examples ends --- docker-compose.yaml | 108 ++++++++++++++++++++++---------------------- 1 file changed, 55 insertions(+), 53 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index e68f054d..cea674c7 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -45,59 +45,59 @@ services: app: condition: service_healthy -# huggingface-dataset-connector: -# image: ai4eu_server -# container_name: huggingface-dataset-connector -# env_file: .env -# environment: -# - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET -# volumes: -# - ./src:/app -# - ./data/connectors:/opt/connectors/data -# - ./connectors/huggingface/:/opt/connectors/script -# command: > -# /bin/bash -c "/opt/connectors/script/datasets.sh" -# depends_on: -# app: -# condition: service_healthy -# -# openml-dataset-connector: -# build: -# context: connectors/openml -# dockerfile: Dockerfile -# image: ai4eu_openml_connector -# container_name: openml-dataset-connector -# env_file: .env -# environment: -# - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET -# volumes: -# - ./src:/app -# - ./data/connectors:/opt/connectors/data -# - ./connectors/openml/:/opt/connectors/script -# command: > -# /bin/bash -c "/opt/connectors/script/entry.sh" -# depends_on: -# app: -# condition: service_healthy -# -# zenodo-dataset-connector: -# build: -# context: connectors/zenodo -# dockerfile: Dockerfile -# image: ai4eu_zenodo_connector -# container_name: zenodo-dataset-connector -# env_file: .env -# environment: -# - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET -# volumes: -# - ./src:/app -# - ./data/connectors:/opt/connectors/data -# - ./connectors/zenodo/:/opt/connectors/script -# command: > -# /bin/bash -c "/opt/connectors/script/entry.sh" -# depends_on: -# app: -# condition: service_healthy + huggingface-dataset-connector: + image: ai4eu_server + container_name: huggingface-dataset-connector + env_file: .env + environment: + - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + volumes: + - ./src:/app + - ./data/connectors:/opt/connectors/data + - ./connectors/huggingface/:/opt/connectors/script + command: > + /bin/bash -c "/opt/connectors/script/datasets.sh" + depends_on: + app: + condition: service_healthy + + openml-dataset-connector: + build: + context: connectors/openml + dockerfile: Dockerfile + image: ai4eu_openml_connector + container_name: openml-dataset-connector + env_file: .env + environment: + - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + volumes: + - ./src:/app + - ./data/connectors:/opt/connectors/data + - ./connectors/openml/:/opt/connectors/script + command: > + /bin/bash -c "/opt/connectors/script/entry.sh" + depends_on: + app: + condition: service_healthy + + zenodo-dataset-connector: + build: + context: connectors/zenodo + dockerfile: Dockerfile + image: ai4eu_zenodo_connector + container_name: zenodo-dataset-connector + env_file: .env + environment: + - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + volumes: + - ./src:/app + - ./data/connectors:/opt/connectors/data + - ./connectors/zenodo/:/opt/connectors/script + command: > + /bin/bash -c "/opt/connectors/script/entry.sh" + depends_on: + app: + condition: service_healthy sqlserver: image: mysql @@ -217,3 +217,5 @@ services: condition: service_healthy elasticsearch_setup: condition: service_completed_successfully + fill-db-with-examples: + condition: service_completed_successfully From 5de434cbaca89ad50350450c99b231094955327b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 30 Aug 2023 16:21:21 +0200 Subject: [PATCH 21/79] take src from develop --- src/authentication.py | 12 -- src/connectors/example/enum.py | 61 +++++++++ src/connectors/example/enum_fill_connector.py | 32 +++++ src/connectors/example/example.py | 35 +++-- .../resources/enum/application_areas.json | 18 +++ .../enum/educational_resource_types.json | 4 + .../example/resources/enum/event_modes.json | 5 + .../example/resources/enum/event_status.json | 7 + .../example/resources/enum/languages.json | 5 + .../example/resources/enum/licenses.json | 8 ++ .../resources/enum/organisation_types.json | 7 + .../example/resources/enum/status.json | 5 + .../{ => resource}/case_studies.json | 0 .../{ => resource}/computational_assets.json | 0 .../resources/{ => resource}/datasets.json | 0 .../{ => resource}/educational_resources.json | 0 .../resources/{ => resource}/events.json | 0 .../resources/{ => resource}/experiments.json | 0 .../resources/{ => resource}/ml_models.json | 0 .../resources/{ => resource}/news.json | 0 .../{ => resource}/organisations.json | 0 .../resources/{ => resource}/persons.json | 0 .../example/resources/resource/projects.json | 82 ++++++++++++ .../{ => resource}/publications.json | 0 .../resources/{ => resource}/services.json | 0 .../resources/{ => resource}/teams.json | 0 src/connectors/synchronization.py | 7 +- src/database/model/agent/agent.py | 2 +- src/database/model/ai_asset/ai_asset.py | 10 +- src/database/model/ai_resource/resource.py | 8 +- .../model/knowledge_asset/knowledge_asset.py | 10 +- .../models_and_experiments/experiment.py | 2 +- .../model/models_and_experiments/ml_model.py | 2 +- .../model/project}/__init__.py | 0 src/database/model/project/project.py | 92 ++++++++++++++ src/database/setup.py | 32 ++--- src/main.py | 8 +- src/routers/__init__.py | 48 +------ src/routers/enum_routers/__init__.py | 13 ++ src/routers/enum_routers/enum_router.py | 54 ++++++++ src/routers/parent_router.py | 104 +++++++++++++++ src/routers/parent_routers/__init__.py | 6 + src/routers/parent_routers/agent_router.py | 23 ++++ src/routers/parent_routers/ai_asset_router.py | 23 ++++ .../parent_routers/ai_resource_router.py | 23 ++++ src/routers/resource_router.py | 11 +- src/routers/resource_routers/__init__.py | 34 +++++ .../case_study_router.py | 0 .../computational_asset_router.py | 0 .../dataset_router.py | 1 - .../educational_resource_router.py | 0 .../event_router.py | 0 .../experiment_router.py | 0 .../ml_model_router.py | 0 .../news_router.py | 0 .../organisation_router.py | 0 .../person_router.py | 0 .../platform_router.py | 0 .../resource_routers/project_router.py | 20 +++ .../publication_router.py | 0 .../service_router.py | 0 .../team_router.py | 0 src/routers/router.py | 10 -- src/routers/search_router.py | 120 ------------------ .../search_routers/search_router_datasets.py | 16 --- .../search_router_publications.py | 20 --- src/routers/upload_router_huggingface.py | 3 +- src/tests/.env | 3 - src/tests/connectors/.DS_Store | Bin 6148 -> 0 bytes .../connectors/example/test_enum_connector.py | 7 + src/tests/resources/.DS_Store | Bin 6148 -> 0 bytes .../elasticsearch/dataset_search.json | 49 ------- .../elasticsearch/publication_search.json | 41 ------ src/tests/routers/.DS_Store | Bin 6148 -> 0 bytes .../routers/enum_routers}/__init__.py | 0 .../enum_routers/test_license_router.py | 22 ++++ .../{resources => parent_routers}/__init__.py | 0 .../parent_routers/test_agent_router.py | 35 +++++ .../parent_routers/test_ai_asset_router.py | 35 +++++ .../parent_routers/test_ai_resource_router.py | 35 +++++ .../{search => resource_routers}/__init__.py | 0 .../test_router_case_study.py | 0 .../test_router_computational_asset.py | 0 .../test_router_dataset.py | 0 .../test_router_dataset_generic_fields.py | 8 +- .../test_router_educational_resource.py | 0 .../test_router_event.py | 0 .../test_router_experiment.py | 4 +- .../test_router_ml_model.py | 4 +- .../test_router_news.py | 0 .../test_router_organisation.py | 2 +- .../test_router_person.py | 2 +- .../test_router_platform.py | 0 .../resource_routers/test_router_project.py | 58 +++++++++ .../test_router_publication.py | 0 .../test_router_service.py | 2 +- .../test_router_team.py | 0 .../search/test_search_router_datasets.py | 45 ------- .../search/test_search_router_publications.py | 46 ------- src/tests/testutils/test_resource.py | 2 +- 100 files changed, 900 insertions(+), 483 deletions(-) create mode 100644 src/connectors/example/enum.py create mode 100644 src/connectors/example/enum_fill_connector.py create mode 100644 src/connectors/example/resources/enum/application_areas.json create mode 100644 src/connectors/example/resources/enum/educational_resource_types.json create mode 100644 src/connectors/example/resources/enum/event_modes.json create mode 100644 src/connectors/example/resources/enum/event_status.json create mode 100644 src/connectors/example/resources/enum/languages.json create mode 100644 src/connectors/example/resources/enum/licenses.json create mode 100644 src/connectors/example/resources/enum/organisation_types.json create mode 100644 src/connectors/example/resources/enum/status.json rename src/connectors/example/resources/{ => resource}/case_studies.json (100%) rename src/connectors/example/resources/{ => resource}/computational_assets.json (100%) rename src/connectors/example/resources/{ => resource}/datasets.json (100%) rename src/connectors/example/resources/{ => resource}/educational_resources.json (100%) rename src/connectors/example/resources/{ => resource}/events.json (100%) rename src/connectors/example/resources/{ => resource}/experiments.json (100%) rename src/connectors/example/resources/{ => resource}/ml_models.json (100%) rename src/connectors/example/resources/{ => resource}/news.json (100%) rename src/connectors/example/resources/{ => resource}/organisations.json (100%) rename src/connectors/example/resources/{ => resource}/persons.json (100%) create mode 100644 src/connectors/example/resources/resource/projects.json rename src/connectors/example/resources/{ => resource}/publications.json (100%) rename src/connectors/example/resources/{ => resource}/services.json (100%) rename src/connectors/example/resources/{ => resource}/teams.json (100%) rename src/{routers/resources => database/model/project}/__init__.py (100%) create mode 100644 src/database/model/project/project.py create mode 100644 src/routers/enum_routers/__init__.py create mode 100644 src/routers/enum_routers/enum_router.py create mode 100644 src/routers/parent_router.py create mode 100644 src/routers/parent_routers/__init__.py create mode 100644 src/routers/parent_routers/agent_router.py create mode 100644 src/routers/parent_routers/ai_asset_router.py create mode 100644 src/routers/parent_routers/ai_resource_router.py create mode 100644 src/routers/resource_routers/__init__.py rename src/routers/{resources => resource_routers}/case_study_router.py (100%) rename src/routers/{resources => resource_routers}/computational_asset_router.py (100%) rename src/routers/{resources => resource_routers}/dataset_router.py (99%) rename src/routers/{resources => resource_routers}/educational_resource_router.py (100%) rename src/routers/{resources => resource_routers}/event_router.py (100%) rename src/routers/{resources => resource_routers}/experiment_router.py (100%) rename src/routers/{resources => resource_routers}/ml_model_router.py (100%) rename src/routers/{resources => resource_routers}/news_router.py (100%) rename src/routers/{resources => resource_routers}/organisation_router.py (100%) rename src/routers/{resources => resource_routers}/person_router.py (100%) rename src/routers/{resources => resource_routers}/platform_router.py (100%) create mode 100644 src/routers/resource_routers/project_router.py rename src/routers/{resources => resource_routers}/publication_router.py (100%) rename src/routers/{resources => resource_routers}/service_router.py (100%) rename src/routers/{resources => resource_routers}/team_router.py (100%) delete mode 100644 src/routers/router.py delete mode 100644 src/routers/search_router.py delete mode 100644 src/routers/search_routers/search_router_datasets.py delete mode 100644 src/routers/search_routers/search_router_publications.py delete mode 100644 src/tests/connectors/.DS_Store create mode 100644 src/tests/connectors/example/test_enum_connector.py delete mode 100644 src/tests/resources/.DS_Store delete mode 100644 src/tests/resources/elasticsearch/dataset_search.json delete mode 100644 src/tests/resources/elasticsearch/publication_search.json delete mode 100644 src/tests/routers/.DS_Store rename src/{routers/search_routers => tests/routers/enum_routers}/__init__.py (100%) create mode 100644 src/tests/routers/enum_routers/test_license_router.py rename src/tests/routers/{resources => parent_routers}/__init__.py (100%) create mode 100644 src/tests/routers/parent_routers/test_agent_router.py create mode 100644 src/tests/routers/parent_routers/test_ai_asset_router.py create mode 100644 src/tests/routers/parent_routers/test_ai_resource_router.py rename src/tests/routers/{search => resource_routers}/__init__.py (100%) rename src/tests/routers/{ => resource_routers}/test_router_case_study.py (100%) rename src/tests/routers/{ => resource_routers}/test_router_computational_asset.py (100%) rename src/tests/routers/{resources => resource_routers}/test_router_dataset.py (100%) rename src/tests/routers/{resources => resource_routers}/test_router_dataset_generic_fields.py (95%) rename src/tests/routers/{ => resource_routers}/test_router_educational_resource.py (100%) rename src/tests/routers/{ => resource_routers}/test_router_event.py (100%) rename src/tests/routers/{resources => resource_routers}/test_router_experiment.py (94%) rename src/tests/routers/{resources => resource_routers}/test_router_ml_model.py (93%) rename src/tests/routers/{ => resource_routers}/test_router_news.py (100%) rename src/tests/routers/{resources => resource_routers}/test_router_organisation.py (97%) rename src/tests/routers/{resources => resource_routers}/test_router_person.py (96%) rename src/tests/routers/{resources => resource_routers}/test_router_platform.py (100%) create mode 100644 src/tests/routers/resource_routers/test_router_project.py rename src/tests/routers/{resources => resource_routers}/test_router_publication.py (100%) rename src/tests/routers/{resources => resource_routers}/test_router_service.py (94%) rename src/tests/routers/{ => resource_routers}/test_router_team.py (100%) delete mode 100644 src/tests/routers/search/test_search_router_datasets.py delete mode 100644 src/tests/routers/search/test_search_router_publications.py diff --git a/src/authentication.py b/src/authentication.py index ac8cca1b..a6cd6188 100644 --- a/src/authentication.py +++ b/src/authentication.py @@ -75,15 +75,3 @@ async def get_current_user(token=Security(oidc)) -> dict: detail=detail, headers={"WWW-Authenticate": "Bearer"}, ) - - -def has_role(user: dict, role: str | None) -> bool: - if role is None: - raise ValueError("Role should be set.") - if "groups" in user: - roles = user["groups"] - elif "realm_access" in user and "roles" in user["realm_access"]: - roles = user["realm_access"]["roles"] - else: - return False - return role in roles diff --git a/src/connectors/example/enum.py b/src/connectors/example/enum.py new file mode 100644 index 00000000..b567ccec --- /dev/null +++ b/src/connectors/example/enum.py @@ -0,0 +1,61 @@ +import pathlib + +from connectors.example.enum_fill_connector import EnumConnector +from database.model.agent.language import Language +from database.model.agent.organisation_type import OrganisationType +from database.model.ai_asset.license import License +from database.model.ai_resource.application_area import ApplicationArea +from database.model.concept.status import Status +from database.model.educational_resource.educational_resource_type import EducationalResourceType +from database.model.event.event_mode import EventMode +from database.model.event.event_status import EventStatus + +ENUM_PATH = pathlib.Path(__file__).parent.parent / "example" / "resources" / "enum" + + +class EnumConnectorApplicationArea(EnumConnector[ApplicationArea]): + def __init__(self): + json_path = ENUM_PATH / "application_areas.json" + super().__init__(json_path, ApplicationArea) + + +class EnumConnectorEducationalResourceType(EnumConnector[EducationalResourceType]): + def __init__(self): + json_path = ENUM_PATH / "educational_resource_types.json" + super().__init__(json_path, EducationalResourceType) + + +class EnumConnectorEventMode(EnumConnector[EventMode]): + def __init__(self): + json_path = ENUM_PATH / "event_modes.json" + super().__init__(json_path, EventMode) + + +class EnumConnectorEventStatus(EnumConnector[EventStatus]): + def __init__(self): + json_path = ENUM_PATH / "event_status.json" + super().__init__(json_path, EventStatus) + + +class EnumConnectorLanguage(EnumConnector[Language]): + def __init__(self): + json_path = ENUM_PATH / "languages.json" + super().__init__(json_path, Language) + + +class EnumConnectorLicense(EnumConnector[License]): + def __init__(self): + json_path = ENUM_PATH / "licenses.json" + super().__init__(json_path, License) + + +class EnumConnectorOrganisationType(EnumConnector[OrganisationType]): + def __init__(self): + json_path = ENUM_PATH / "organisation_types.json" + super().__init__(json_path, OrganisationType) + + +class EnumConnectorStatus(EnumConnector[Status]): + def __init__(self): + json_path = ENUM_PATH / "status.json" + super().__init__(json_path, Status) diff --git a/src/connectors/example/enum_fill_connector.py b/src/connectors/example/enum_fill_connector.py new file mode 100644 index 00000000..353e08c6 --- /dev/null +++ b/src/connectors/example/enum_fill_connector.py @@ -0,0 +1,32 @@ +import json +import pathlib +from typing import Iterator, TypeVar + +from connectors.abstract.resource_connector_on_start_up import ResourceConnectorOnStartUp +from database.model.named_relation import NamedRelation +from database.model.platform.platform_names import PlatformName + +RESOURCE = TypeVar("RESOURCE", bound=NamedRelation) + + +class EnumConnector(ResourceConnectorOnStartUp[RESOURCE]): + """ + Filling enums using a hard-coded json + """ + + def __init__(self, json_path: pathlib.Path, resource_class: type[RESOURCE]): + self.json_path = json_path + self._resource_class = resource_class + + @property + def resource_class(self) -> type[RESOURCE]: + return self._resource_class + + @property + def platform_name(self) -> PlatformName: + return PlatformName.example + + def fetch(self, limit: int | None = None) -> Iterator[RESOURCE]: + with open(self.json_path) as f: + json_data = json.load(f) + yield from json_data[:limit] diff --git a/src/connectors/example/example.py b/src/connectors/example/example.py index c066560a..beef1c66 100644 --- a/src/connectors/example/example.py +++ b/src/connectors/example/example.py @@ -13,84 +13,91 @@ from database.model.models_and_experiments.experiment import Experiment from database.model.models_and_experiments.ml_model import MLModel from database.model.news.news import News +from database.model.project.project import Project from database.model.service.service import Service -_path_example_resources = pathlib.Path(__file__).parent.parent / "example" / "resources" +RESOURCE_PATH = pathlib.Path(__file__).parent.parent / "example" / "resources" / "resource" class ExampleCaseStudyConnector(ExampleConnector[CaseStudy]): def __init__(self): - json_path = _path_example_resources / "case_studies.json" + json_path = RESOURCE_PATH / "case_studies.json" super().__init__(json_path, CaseStudy) class ExampleComputationalAssetConnector(ExampleConnector[ComputationalAsset]): def __init__(self): - json_path = _path_example_resources / "computational_assets.json" + json_path = RESOURCE_PATH / "computational_assets.json" super().__init__(json_path, ComputationalAsset) class ExampleDatasetConnector(ExampleConnector[Dataset]): def __init__(self): - json_path = _path_example_resources / "datasets.json" + json_path = RESOURCE_PATH / "datasets.json" super().__init__(json_path, Dataset) class ExampleEducationalResourceConnector(ExampleConnector[EducationalResource]): def __init__(self): - json_path = _path_example_resources / "educational_resources.json" + json_path = RESOURCE_PATH / "educational_resources.json" super().__init__(json_path, EducationalResource) class ExampleEventConnector(ExampleConnector[Event]): def __init__(self): - json_path = _path_example_resources / "events.json" + json_path = RESOURCE_PATH / "events.json" super().__init__(json_path, Event) class ExampleExperimentConnector(ExampleConnector[Experiment]): def __init__(self): - json_path = _path_example_resources / "experiments.json" + json_path = RESOURCE_PATH / "experiments.json" super().__init__(json_path, Experiment) class ExampleMLModelConnector(ExampleConnector[MLModel]): def __init__(self): - json_path = _path_example_resources / "ml_models.json" + json_path = RESOURCE_PATH / "ml_models.json" super().__init__(json_path, MLModel) class ExampleNewsConnector(ExampleConnector[News]): def __init__(self): - json_path = _path_example_resources / "news.json" + json_path = RESOURCE_PATH / "news.json" super().__init__(json_path, News) class ExampleOrganisationConnector(ExampleConnector[Organisation]): def __init__(self): - json_path = _path_example_resources / "organisations.json" + json_path = RESOURCE_PATH / "organisations.json" super().__init__(json_path, Organisation) class ExamplePersonConnector(ExampleConnector[Person]): def __init__(self): - json_path = _path_example_resources / "persons.json" + json_path = RESOURCE_PATH / "persons.json" super().__init__(json_path, Person) +class ExampleProjectConnector(ExampleConnector[Project]): + def __init__(self): + json_path = RESOURCE_PATH / "projects.json" + super().__init__(json_path, Project) + + class ExamplePublicationConnector(ExampleConnector[Publication]): def __init__(self): - json_path = _path_example_resources / "publications.json" + json_path = RESOURCE_PATH / "publications.json" super().__init__(json_path, Publication) class ExampleServiceConnector(ExampleConnector[Service]): def __init__(self): - json_path = _path_example_resources / "services.json" + json_path = RESOURCE_PATH / "services.json" super().__init__(json_path, Service) class ExampleTeamConnector(ExampleConnector[Team]): def __init__(self): - json_path = _path_example_resources / "teams.json" + json_path = RESOURCE_PATH / "teams.json" super().__init__(json_path, Team) diff --git a/src/connectors/example/resources/enum/application_areas.json b/src/connectors/example/resources/enum/application_areas.json new file mode 100644 index 00000000..35a9493a --- /dev/null +++ b/src/connectors/example/resources/enum/application_areas.json @@ -0,0 +1,18 @@ +[ + "AerospaceDefence", + "AgriFood", + "Construction", + "CulturalCreativeIndustries", + "Digital", + "Electronics", + "EnergyIntensiveIndustries", + "EnergyRenewables", + "Health", + "MobilityTransportAutomotive", + "PostalServices", + "ProximitySocialeconomyCivilSecurity", + "Retail", + "Textile", + "Tourism", + "Other" +] \ No newline at end of file diff --git a/src/connectors/example/resources/enum/educational_resource_types.json b/src/connectors/example/resources/enum/educational_resource_types.json new file mode 100644 index 00000000..1e57e32f --- /dev/null +++ b/src/connectors/example/resources/enum/educational_resource_types.json @@ -0,0 +1,4 @@ +[ + "presentation", + "book" +] \ No newline at end of file diff --git a/src/connectors/example/resources/enum/event_modes.json b/src/connectors/example/resources/enum/event_modes.json new file mode 100644 index 00000000..fb9f2374 --- /dev/null +++ b/src/connectors/example/resources/enum/event_modes.json @@ -0,0 +1,5 @@ +[ + "offline", + "online", + "hybrid" +] \ No newline at end of file diff --git a/src/connectors/example/resources/enum/event_status.json b/src/connectors/example/resources/enum/event_status.json new file mode 100644 index 00000000..a3dab67c --- /dev/null +++ b/src/connectors/example/resources/enum/event_status.json @@ -0,0 +1,7 @@ +[ + "cancelled", + "scheduled", + "rescheduled", + "postponed", + "moved online" +] \ No newline at end of file diff --git a/src/connectors/example/resources/enum/languages.json b/src/connectors/example/resources/enum/languages.json new file mode 100644 index 00000000..5015f904 --- /dev/null +++ b/src/connectors/example/resources/enum/languages.json @@ -0,0 +1,5 @@ +[ + "eng", + "fra", + "spa" +] \ No newline at end of file diff --git a/src/connectors/example/resources/enum/licenses.json b/src/connectors/example/resources/enum/licenses.json new file mode 100644 index 00000000..7e961aeb --- /dev/null +++ b/src/connectors/example/resources/enum/licenses.json @@ -0,0 +1,8 @@ +[ + "https://creativecommons.org/share-your-work/public-domain/cc0/", + "https://creativecommons.org/licenses/by/1.0/", + "https://creativecommons.org/licenses/by/2.0/", + "https://creativecommons.org/licenses/by/2.5/", + "https://creativecommons.org/licenses/by/3.0/", + "https://creativecommons.org/licenses/by/4.0/" +] \ No newline at end of file diff --git a/src/connectors/example/resources/enum/organisation_types.json b/src/connectors/example/resources/enum/organisation_types.json new file mode 100644 index 00000000..571cbc0d --- /dev/null +++ b/src/connectors/example/resources/enum/organisation_types.json @@ -0,0 +1,7 @@ +[ + "Association", + "Centre", + "Company", + "Education Institution", + "Research Institution" +] \ No newline at end of file diff --git a/src/connectors/example/resources/enum/status.json b/src/connectors/example/resources/enum/status.json new file mode 100644 index 00000000..8cbd0036 --- /dev/null +++ b/src/connectors/example/resources/enum/status.json @@ -0,0 +1,5 @@ +[ + "published", + "draft", + "rejected" +] \ No newline at end of file diff --git a/src/connectors/example/resources/case_studies.json b/src/connectors/example/resources/resource/case_studies.json similarity index 100% rename from src/connectors/example/resources/case_studies.json rename to src/connectors/example/resources/resource/case_studies.json diff --git a/src/connectors/example/resources/computational_assets.json b/src/connectors/example/resources/resource/computational_assets.json similarity index 100% rename from src/connectors/example/resources/computational_assets.json rename to src/connectors/example/resources/resource/computational_assets.json diff --git a/src/connectors/example/resources/datasets.json b/src/connectors/example/resources/resource/datasets.json similarity index 100% rename from src/connectors/example/resources/datasets.json rename to src/connectors/example/resources/resource/datasets.json diff --git a/src/connectors/example/resources/educational_resources.json b/src/connectors/example/resources/resource/educational_resources.json similarity index 100% rename from src/connectors/example/resources/educational_resources.json rename to src/connectors/example/resources/resource/educational_resources.json diff --git a/src/connectors/example/resources/events.json b/src/connectors/example/resources/resource/events.json similarity index 100% rename from src/connectors/example/resources/events.json rename to src/connectors/example/resources/resource/events.json diff --git a/src/connectors/example/resources/experiments.json b/src/connectors/example/resources/resource/experiments.json similarity index 100% rename from src/connectors/example/resources/experiments.json rename to src/connectors/example/resources/resource/experiments.json diff --git a/src/connectors/example/resources/ml_models.json b/src/connectors/example/resources/resource/ml_models.json similarity index 100% rename from src/connectors/example/resources/ml_models.json rename to src/connectors/example/resources/resource/ml_models.json diff --git a/src/connectors/example/resources/news.json b/src/connectors/example/resources/resource/news.json similarity index 100% rename from src/connectors/example/resources/news.json rename to src/connectors/example/resources/resource/news.json diff --git a/src/connectors/example/resources/organisations.json b/src/connectors/example/resources/resource/organisations.json similarity index 100% rename from src/connectors/example/resources/organisations.json rename to src/connectors/example/resources/resource/organisations.json diff --git a/src/connectors/example/resources/persons.json b/src/connectors/example/resources/resource/persons.json similarity index 100% rename from src/connectors/example/resources/persons.json rename to src/connectors/example/resources/resource/persons.json diff --git a/src/connectors/example/resources/resource/projects.json b/src/connectors/example/resources/resource/projects.json new file mode 100644 index 00000000..9a1250b1 --- /dev/null +++ b/src/connectors/example/resources/resource/projects.json @@ -0,0 +1,82 @@ +[ + { + "platform": "example", + "platform_identifier": "1", + "name": "Name of the Project", + "description": "A description.", + "same_as": "https://www.example.com/resource/this_resource", + "date_published": "2022-01-01T15:15:00.000", + "version": "1.1.0", + "pid": "https://doi.org/10.1000/182", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [ + "alias 1", + "alias 2" + ], + "application_area": [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "citation": [], + "contact": [], + "creator": [], + "distribution": [ + { + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/case_study/file.pdf", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "application/pdf", + "name": "Name of this file.", + "technology_readiness_level": 1 + } + ], + "has_part": [], + "industrial_sector": [ + "Finance", + "eCommerce", + "Healthcare" + ], + "is_part_of": [], + "keyword": [ + "keyword1", + "keyword2" + ], + "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", + "media": [ + { + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/image.jpeg", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "image/jpeg", + "name": "Name of this file." + } + ], + "note": [ + "A brief record of points or ideas about this AI resource." + ], + "research_area": [ + "Explainable AI", + "Physical AI" + ], + "scientific_domain": [ + "Anomaly Detection", + "Voice Recognition", + "Computer Vision." + ], + "start_date": "2021-02-02T15:15:00", + "end_date": "2021-02-03T15:15:00", + "total_cost_euro": 10000000 + } +] \ No newline at end of file diff --git a/src/connectors/example/resources/publications.json b/src/connectors/example/resources/resource/publications.json similarity index 100% rename from src/connectors/example/resources/publications.json rename to src/connectors/example/resources/resource/publications.json diff --git a/src/connectors/example/resources/services.json b/src/connectors/example/resources/resource/services.json similarity index 100% rename from src/connectors/example/resources/services.json rename to src/connectors/example/resources/resource/services.json diff --git a/src/connectors/example/resources/teams.json b/src/connectors/example/resources/resource/teams.json similarity index 100% rename from src/connectors/example/resources/teams.json rename to src/connectors/example/resources/resource/teams.json diff --git a/src/connectors/synchronization.py b/src/connectors/synchronization.py index facba3b6..3d229b9c 100644 --- a/src/connectors/synchronization.py +++ b/src/connectors/synchronization.py @@ -10,13 +10,12 @@ from sqlmodel import Session -import routers from connectors.abstract.resource_connector import ResourceConnector, RESOURCE from connectors.record_error import RecordError from connectors.resource_with_relations import ResourceWithRelations from database.model.concept.concept import AIoDConcept from database.setup import _create_or_fetch_related_objects, _get_existing_resource, sqlmodel_engine -from routers import ResourceRouter +from routers import ResourceRouter, resource_routers, enum_routers RELATIVE_PATH_STATE_JSON = pathlib.Path("state.json") RELATIVE_PATH_ERROR_CSV = pathlib.Path("errors.csv") @@ -158,11 +157,11 @@ def main(): (router,) = [ router - for router in routers.resource_routers + for router in resource_routers.router_list + enum_routers.router_list if router.resource_class == connector.resource_class ] - engine = sqlmodel_engine(rebuild_db="never", create_if_not_exists=False) + engine = sqlmodel_engine(rebuild_db="never") with Session(engine) as session: for i, item in enumerate(items): diff --git a/src/database/model/agent/agent.py b/src/database/model/agent/agent.py index e1a09728..62468839 100644 --- a/src/database/model/agent/agent.py +++ b/src/database/model/agent/agent.py @@ -20,7 +20,7 @@ class AgentBase(AIResourceBase): class Agent(AgentBase, AIResource): - agent_id: int | None = Field(foreign_key=AgentTable.__tablename__ + ".identifier") + agent_id: int | None = Field(foreign_key=AgentTable.__tablename__ + ".identifier", index=True) agent_identifier: AgentTable | None = Relationship( sa_relationship_kwargs={"cascade": "all, delete"} ) diff --git a/src/database/model/ai_asset/ai_asset.py b/src/database/model/ai_asset/ai_asset.py index 0d15229d..ae6b585a 100644 --- a/src/database/model/ai_asset/ai_asset.py +++ b/src/database/model/ai_asset/ai_asset.py @@ -47,8 +47,10 @@ class AIAssetBase(AIResourceBase, metaclass=abc.ABCMeta): class AIAsset(AIAssetBase, AIResource, metaclass=abc.ABCMeta): - asset_id: int | None = Field(foreign_key=AIAssetTable.__tablename__ + ".identifier") - asset_identifier: AIAssetTable | None = Relationship() + ai_asset_id: int | None = Field( + foreign_key=AIAssetTable.__tablename__ + ".identifier", index=True + ) + ai_asset_identifier: AIAssetTable | None = Relationship() citation: list["Publication"] = Relationship() distribution: list = Relationship(sa_relationship_kwargs={"cascade": "all, delete"}) @@ -71,8 +73,8 @@ def __init_subclass__(cls): cls.__sqlmodel_relationships__.update(relationships) class RelationshipConfig(AIResource.RelationshipConfig): - asset_identifier: int | None = ResourceRelationshipSingle( - identifier_name="asset_id", + ai_asset_identifier: int | None = ResourceRelationshipSingle( + identifier_name="ai_asset_id", serializer=AttributeSerializer("identifier"), include_in_create=False, default_factory_orm=lambda type_: AIAssetTable(type=type_), diff --git a/src/database/model/ai_resource/resource.py b/src/database/model/ai_resource/resource.py index 6d9c380d..2833fd19 100644 --- a/src/database/model/ai_resource/resource.py +++ b/src/database/model/ai_resource/resource.py @@ -45,8 +45,8 @@ class AIResourceBase(AIoDConceptBase, metaclass=abc.ABCMeta): class AIResource(AIResourceBase, AIoDConcept, metaclass=abc.ABCMeta): - resource_id: int | None = Field(foreign_key="ai_resource.identifier") - resource_identifier: AIResourceTable | None = Relationship() + ai_resource_id: int | None = Field(foreign_key="ai_resource.identifier", index=True) + ai_resource_identifier: AIResourceTable | None = Relationship() alternate_name: list[AlternateName] = Relationship() keyword: list[Keyword] = Relationship() @@ -79,10 +79,10 @@ def __init_subclass__(cls): cls.__sqlmodel_relationships__.update(relationships) class RelationshipConfig(AIoDConcept.RelationshipConfig): - resource_identifier: int | None = ResourceRelationshipSingle( + ai_resource_identifier: int | None = ResourceRelationshipSingle( description="This resource can be identified by its own identifier, but also by the " "resource_identifier.", - identifier_name="resource_id", + identifier_name="ai_resource_id", serializer=AttributeSerializer("identifier"), include_in_create=False, default_factory_orm=lambda type_: AIResourceTable(type=type_), diff --git a/src/database/model/knowledge_asset/knowledge_asset.py b/src/database/model/knowledge_asset/knowledge_asset.py index 14a0064b..fb601eec 100644 --- a/src/database/model/knowledge_asset/knowledge_asset.py +++ b/src/database/model/knowledge_asset/knowledge_asset.py @@ -6,7 +6,7 @@ from database.model.ai_asset.ai_asset_table import AIAssetTable from database.model.helper_functions import link_factory from database.model.knowledge_asset.knowledge_asset_table import KnowledgeAssetTable -from database.model.relationships import ResourceRelationshipList, ResourceRelationshipSingle +from database.model.relationships import ResourceRelationshipList from database.model.serializers import AttributeSerializer, FindByIdentifierDeserializer @@ -16,7 +16,7 @@ class KnowledgeAssetBase(AIAssetBase): class KnowledgeAsset(KnowledgeAssetBase, AIAsset): knowledge_asset_id: int | None = Field( - foreign_key=KnowledgeAssetTable.__tablename__ + ".identifier" + foreign_key=KnowledgeAssetTable.__tablename__ + ".identifier", index=True ) knowledge_asset_identifier: KnowledgeAssetTable | None = Relationship( sa_relationship_kwargs={"cascade": "all, delete"} @@ -40,12 +40,6 @@ def __init_subclass__(cls): cls.__sqlmodel_relationships__.update(relationships) class RelationshipConfig(AIAsset.RelationshipConfig): - knowledge_asset_identifier: int | None = ResourceRelationshipSingle( - identifier_name="knowledge_asset_id", - serializer=AttributeSerializer("identifier"), - include_in_create=False, - default_factory_orm=lambda type_: KnowledgeAssetTable(type=type_), - ) documents: list[int] = ResourceRelationshipList( description="The identifier of an AI asset for which the Knowledge Asset acts as an " "information source", diff --git a/src/database/model/models_and_experiments/experiment.py b/src/database/model/models_and_experiments/experiment.py index 77c97169..4df43bb2 100644 --- a/src/database/model/models_and_experiments/experiment.py +++ b/src/database/model/models_and_experiments/experiment.py @@ -10,7 +10,7 @@ class ExperimentBase(AIAssetBase): - permanent_identifier: str | None = Field( + pid: str | None = Field( description="A permanent identifier for the model, for example a digital object " "identifier (DOI). Ideally a url.", max_length=SHORT, diff --git a/src/database/model/models_and_experiments/ml_model.py b/src/database/model/models_and_experiments/ml_model.py index b36c35e5..c6166304 100644 --- a/src/database/model/models_and_experiments/ml_model.py +++ b/src/database/model/models_and_experiments/ml_model.py @@ -17,7 +17,7 @@ class MLModelBase(AIAssetBase): - permanent_identifier: str | None = Field( + pid: str | None = Field( description="A permanent identifier for the model, for example a digital object " "identifier (DOI). Ideally a url.", max_length=SHORT, diff --git a/src/routers/resources/__init__.py b/src/database/model/project/__init__.py similarity index 100% rename from src/routers/resources/__init__.py rename to src/database/model/project/__init__.py diff --git a/src/database/model/project/project.py b/src/database/model/project/project.py new file mode 100644 index 00000000..5a817c22 --- /dev/null +++ b/src/database/model/project/project.py @@ -0,0 +1,92 @@ +from datetime import datetime +from typing import Optional + +from pydantic import condecimal +from sqlmodel import Field, Relationship + +from database.model.agent.organisation import Organisation +from database.model.ai_asset.ai_asset_table import AIAssetTable +from database.model.ai_resource.resource import AIResourceBase, AIResource +from database.model.helper_functions import link_factory +from database.model.relationships import ResourceRelationshipList, ResourceRelationshipSingle +from database.model.serializers import ( + AttributeSerializer, + FindByIdentifierDeserializer, +) + + +class ProjectBase(AIResourceBase): + start_date: datetime = Field( + description="The start date and time of the project as ISO 8601.", + default=None, + schema_extra={"example": "2021-02-03T15:15:00"}, + ) + end_date: datetime | None = Field( + description="The end date and time of the project as ISO 8601.", + default=None, + schema_extra={"example": "2022-01-01T15:15:00"}, + ) + total_cost_euro: condecimal(max_digits=12, decimal_places=2) | None = Field( # type: ignore + description="The total budget of the project in euros.", + schema_extra={"example": 1000000}, + default=None, + ) + + +class Project(ProjectBase, AIResource, table=True): # type: ignore [call-arg] + __tablename__ = "project" + + funder: list[Organisation] = Relationship( + sa_relationship_kwargs={"cascade": "all, delete"}, + link_model=link_factory("project", Organisation.__tablename__, table_prefix="funder"), + ) + participant: list[Organisation] = Relationship( + sa_relationship_kwargs={"cascade": "all, delete"}, + link_model=link_factory("project", Organisation.__tablename__, table_prefix="participant"), + ) + coordinator_identifier: int | None = Field( + foreign_key=Organisation.__tablename__ + ".identifier" + ) + coordinator: Optional[Organisation] = Relationship() + produced: list[AIAssetTable] = Relationship( + link_model=link_factory("project", AIAssetTable.__tablename__, table_prefix="produced"), + ) + used: list[AIAssetTable] = Relationship( + link_model=link_factory("project", AIAssetTable.__tablename__, table_prefix="used"), + ) + + class RelationshipConfig(AIResource.RelationshipConfig): + funder: list[int] = ResourceRelationshipList( + description="Identifiers of organizations that support this project through some kind " + "of financial contribution. ", + serializer=AttributeSerializer("identifier"), + deserializer=FindByIdentifierDeserializer(Organisation), + default_factory_pydantic=list, + example=[], + ) + participant: list[int] = ResourceRelationshipList( + description="Identifiers of members of this project. ", + serializer=AttributeSerializer("identifier"), + deserializer=FindByIdentifierDeserializer(Organisation), + default_factory_pydantic=list, + example=[], + ) + coordinator: Optional[int] = ResourceRelationshipSingle( + identifier_name="coordinator_identifier", + description="The coordinating organisation of this project.", + serializer=AttributeSerializer("identifier"), + ) + produced: list[int] = ResourceRelationshipList( + description="Identifiers of AIAssets that are created in this project.", + serializer=AttributeSerializer("identifier"), + deserializer=FindByIdentifierDeserializer(AIAssetTable), + default_factory_pydantic=list, + example=[], + ) + used: list[int] = ResourceRelationshipList( + description="Identifiers of AIAssets that are used (but not created) in this project.", + serializer=AttributeSerializer("identifier"), + deserializer=FindByIdentifierDeserializer(AIAssetTable), + default_factory_pydantic=list, + example=[], + ) diff --git a/src/database/setup.py b/src/database/setup.py index 94a6d1a9..f23d5bff 100644 --- a/src/database/setup.py +++ b/src/database/setup.py @@ -8,11 +8,12 @@ from sqlalchemy.engine import Engine from sqlmodel import create_engine, Session, SQLModel, select -import routers from config import DB_CONFIG from connectors.resource_with_relations import ResourceWithRelations from database.model.concept.concept import AIoDConcept +from database.model.named_relation import NamedRelation from database.model.platform.platform_names import PlatformName +from routers import resource_routers def connect_to_database( @@ -38,10 +39,9 @@ def connect_to_database( drop_or_create_database(url, delete_first) engine = create_engine(url, echo=False, pool_recycle=3600) - if create_if_not_exists: - with engine.connect() as connection: - AIoDConcept.metadata.create_all(connection, checkfirst=True) - connection.commit() + with engine.connect() as connection: + AIoDConcept.metadata.create_all(connection, checkfirst=True) + connection.commit() return engine @@ -61,12 +61,16 @@ def _get_existing_resource( session: Session, resource: AIoDConcept, clazz: type[SQLModel] ) -> AIoDConcept | None: """Selecting a resource based on platform and platform_identifier""" - query = select(clazz).where( - and_( - clazz.platform == resource.platform, - clazz.platform_identifier == resource.platform_identifier, + is_enum = NamedRelation in clazz.__mro__ + if is_enum: + query = select(clazz).where(clazz.name == resource) + else: + query = select(clazz).where( + and_( + clazz.platform == resource.platform, + clazz.platform_identifier == resource.platform_identifier, + ) ) - ) return session.scalars(query).first() @@ -95,7 +99,7 @@ def _create_or_fetch_related_objects(session: Session, item: ResourceWithRelatio resource_read_str = type(resource).__name__ # E.g. DatasetRead (router,) = [ router - for router in routers.resource_routers + for router in resource_routers.router_list if resource_read_str.startswith(router.resource_class.__name__) # E.g. "DatasetRead".startswith("Dataset") ] @@ -113,7 +117,7 @@ def _create_or_fetch_related_objects(session: Session, item: ResourceWithRelatio item.resource.__setattr__(field_name, identifiers) # E.g. Dataset.keywords = [1, 4] -def sqlmodel_engine(rebuild_db: str, create_if_not_exists=True) -> Engine: +def sqlmodel_engine(rebuild_db: str) -> Engine: """ Return a SQLModel engine, backed by the MySql connection as configured in the configuration file. @@ -127,6 +131,4 @@ def sqlmodel_engine(rebuild_db: str, create_if_not_exists=True) -> Engine: db_url = f"mysql://{username}:{password}@{host}:{port}/{database}" delete_before_create = rebuild_db == "always" - return connect_to_database( - db_url, delete_first=delete_before_create, create_if_not_exists=create_if_not_exists - ) + return connect_to_database(db_url, delete_first=delete_before_create) diff --git a/src/main.py b/src/main.py index dff1e4ed..73c565dc 100644 --- a/src/main.py +++ b/src/main.py @@ -19,6 +19,7 @@ from database.model.platform.platform import Platform from database.model.platform.platform_names import PlatformName from database.setup import sqlmodel_engine +from routers import resource_routers, parent_routers, enum_routers def _parse_args() -> argparse.Namespace: @@ -64,7 +65,12 @@ def test_authorization(user: Json = Depends(get_current_user)) -> dict: """ return {"msg": "success", "user": user} - for router in routers.resource_routers + routers.other_routers: + for router in ( + resource_routers.router_list + + routers.other_routers + + parent_routers.router_list + + enum_routers.router_list + ): app.include_router(router.create(engine, url_prefix)) diff --git a/src/routers/__init__.py b/src/routers/__init__.py index 44315f43..2d170ad1 100644 --- a/src/routers/__init__.py +++ b/src/routers/__init__.py @@ -1,51 +1,5 @@ -import os - -from elasticsearch import Elasticsearch - from .resource_router import ResourceRouter # noqa:F401 -from .resources.case_study_router import CaseStudyRouter -from .resources.computational_asset_router import ComputationalAssetRouter -from .resources.dataset_router import DatasetRouter -from .resources.educational_resource_router import EducationalResourceRouter -from .resources.event_router import EventRouter -from .resources.experiment_router import ExperimentRouter -from .resources.ml_model_router import MLModelRouter -from .resources.news_router import NewsRouter -from .resources.organisation_router import OrganisationRouter -from .resources.person_router import PersonRouter -from .resources.platform_router import PlatformRouter -from .resources.publication_router import PublicationRouter -from .resources.service_router import ServiceRouter -from .resources.team_router import TeamRouter -from .router import AIoDRouter # noqa:F401 -from .search_routers.search_router_datasets import SearchRouterDatasets -from .search_routers.search_router_publications import SearchRouterPublications from .upload_router_huggingface import UploadRouterHuggingface -resource_routers = [ - PlatformRouter(), - CaseStudyRouter(), - ComputationalAssetRouter(), - DatasetRouter(), - EducationalResourceRouter(), - EventRouter(), - ExperimentRouter(), - MLModelRouter(), - NewsRouter(), - OrganisationRouter(), - PersonRouter(), - PublicationRouter(), - # ProjectRouter(), - ServiceRouter(), - TeamRouter(), -] # type: list[ResourceRouter] - -user_name = os.getenv("ES_USER") -pw = os.getenv("ES_PASSWORD") -elasticsearch_client = Elasticsearch("http://localhost:9200", basic_auth=(user_name, pw)) -other_routers = [ - UploadRouterHuggingface(), - SearchRouterDatasets(client=elasticsearch_client), - SearchRouterPublications(client=elasticsearch_client), -] # type: list[AIoDRouter] +other_routers = [UploadRouterHuggingface()] diff --git a/src/routers/enum_routers/__init__.py b/src/routers/enum_routers/__init__.py new file mode 100644 index 00000000..ea78f32d --- /dev/null +++ b/src/routers/enum_routers/__init__.py @@ -0,0 +1,13 @@ +from database.model.named_relation import NamedRelation +from routers.enum_routers.enum_router import EnumRouter +from routers.parent_router import non_abstract_subclasses + + +# Excluding some enums that should not get a router. TODO: make it configurable on the NamedRelation +__exclusion_list = ("alternate_name", "email", "note", "telephone") + + +__named_relations = sorted(non_abstract_subclasses(NamedRelation), key=lambda n: n.__tablename__) +__filtered_relations = (n for n in __named_relations if n.__tablename__ not in __exclusion_list) + +router_list: list[EnumRouter] = [EnumRouter(n) for n in __filtered_relations] diff --git a/src/routers/enum_routers/enum_router.py b/src/routers/enum_routers/enum_router.py new file mode 100644 index 00000000..0efb1b88 --- /dev/null +++ b/src/routers/enum_routers/enum_router.py @@ -0,0 +1,54 @@ +import abc +from typing import Type + +from fastapi import APIRouter +from sqlalchemy.engine import Engine +from sqlmodel import select, Session + +from database.model.named_relation import NamedRelation + + +class EnumRouter(abc.ABC): + """ + Abstract class for FastAPI enum routers. These are routers for, for example, Language, + making it possible to get all existing values of the Language. + """ + + def __init__(self, resource_class: Type[NamedRelation]): + self.resource_class = resource_class + self.resource_name = resource_class.__tablename__ + self.resource_name_plural = ( + self.resource_name + "s" if not self.resource_name.endswith("s") else self.resource_name + ) + + def create(self, engine: Engine, url_prefix: str) -> APIRouter: + router = APIRouter() + version = "v1" + default_kwargs = { + "response_model_exclude_none": True, + "tags": ["enums"], + } + router.add_api_route( + path=url_prefix + f"/{self.resource_name_plural}/{version}", + endpoint=self.get_resources_func(engine), + response_model=list[str], + name=self.resource_name, + **default_kwargs, + ) + return router + + def get_resources_func(self, engine: Engine): + def get_resources(): + with Session(engine) as session: + query = select(self.resource_class) + resources = session.scalars(query).all() + return [r.name for r in resources] + + return get_resources + + def create_resource(self, session: Session, resource_create_instance: str): + # Used by synchronization.py: router.create_resource + resource = self.resource_class(name=resource_create_instance) + session.add(resource) + session.commit() + return resource diff --git a/src/routers/parent_router.py b/src/routers/parent_router.py new file mode 100644 index 00000000..849aac1c --- /dev/null +++ b/src/routers/parent_router.py @@ -0,0 +1,104 @@ +import abc +from typing import Union + +from fastapi import APIRouter, HTTPException +from sqlalchemy.engine import Engine +from sqlmodel import SQLModel, select, Session +from starlette.status import HTTP_404_NOT_FOUND, HTTP_500_INTERNAL_SERVER_ERROR + +from routers import resource_routers + + +class ParentRouter(abc.ABC): + """ + Abstract class for FastAPI parent-class routers. These are routers for, for example, Agent, + making it possible to perform a GET request based on the agent_identifier, retrieving either + an Organisation or a Person. + """ + + @property + @abc.abstractmethod + def resource_name(self) -> str: + """The name of the resource. E.g. 'agent'""" + + @property + @abc.abstractmethod + def resource_name_plural(self) -> str: + """The plural of the name of the resource. E.g. 'agents'""" + + @property + @abc.abstractmethod + def parent_class(self): + """The resource type. E.g. Agent""" + + @property + @abc.abstractmethod + def parent_class_table(self): + """The table class of the resource. E.g. AgentTable""" + + def create(self, engine: Engine, url_prefix: str) -> APIRouter: + router = APIRouter() + version = "v1" + default_kwargs = { + "response_model_exclude_none": True, + "tags": ["parents"], + } + available_schemas: list[SQLModel] = list(non_abstract_subclasses(self.parent_class)) + classes_dict = {clz.__tablename__: clz for clz in available_schemas if clz.__tablename__} + routers = {router.resource_name: router for router in resource_routers.router_list} + read_classes_dict = {name: routers[name].resource_class_read for name in classes_dict} + response_model = Union[*read_classes_dict.values()] # type:ignore + + router.add_api_route( + path=url_prefix + f"/{self.resource_name_plural}/{version}/{{identifier}}", + endpoint=self.get_resource_func(engine, classes_dict, read_classes_dict), + response_model=response_model, # type: ignore + name=self.resource_name, + **default_kwargs, + ) + return router + + def get_resource_func(self, engine: Engine, classes_dict: dict, read_classes_dict: dict): + def get_resource(identifier: int): + with Session(engine) as session: + query = select(self.parent_class_table).where( + self.parent_class_table.identifier == identifier + ) + parent_resource = session.scalars(query).first() + if not parent_resource: + raise HTTPException( + status_code=HTTP_404_NOT_FOUND, + detail=f"{self.resource_name} with identifier {identifier} not found.", + ) + child_type: str = parent_resource.type + child_class = classes_dict[child_type] + child_class_read = read_classes_dict[child_type] + query_child = select(child_class).where( + getattr(child_class, self.resource_name + "_id") == identifier + ) + child = session.scalars(query_child).first() + if not child: + raise HTTPException( + status_code=HTTP_500_INTERNAL_SERVER_ERROR, + detail=f"The parent could be found, but the child ({child_type}) was not " + f"found in our database", + ) + return child_class_read.from_orm(child) + + return get_resource + + +def non_abstract_subclasses(cls): + """ + All non-abstract subclasses of the class. + + To check if a class is abstract, we check if it has any children itself. This will break if + we ever inherit from a non-abstract class. + """ + for child in cls.__subclasses__(): + has_grandchild = False + for grand_child in non_abstract_subclasses(child): + has_grandchild = True + yield grand_child + if not has_grandchild: + yield child diff --git a/src/routers/parent_routers/__init__.py b/src/routers/parent_routers/__init__.py new file mode 100644 index 00000000..b03e8fff --- /dev/null +++ b/src/routers/parent_routers/__init__.py @@ -0,0 +1,6 @@ +from .agent_router import AgentRouter +from .ai_asset_router import AIAssetRouter +from .ai_resource_router import AIResourceRouter +from ..parent_router import ParentRouter + +router_list: list[ParentRouter] = [AgentRouter(), AIAssetRouter(), AIResourceRouter()] diff --git a/src/routers/parent_routers/agent_router.py b/src/routers/parent_routers/agent_router.py new file mode 100644 index 00000000..e51e9494 --- /dev/null +++ b/src/routers/parent_routers/agent_router.py @@ -0,0 +1,23 @@ +from typing import Type + +from database.model.agent.agent import Agent +from database.model.agent.agent_table import AgentTable +from routers.parent_router import ParentRouter + + +class AgentRouter(ParentRouter): + @property + def resource_name(self) -> str: + return "agent" + + @property + def resource_name_plural(self) -> str: + return "agents" + + @property + def parent_class(self) -> Type[Agent]: + return Agent + + @property + def parent_class_table(self) -> Type[AgentTable]: + return AgentTable diff --git a/src/routers/parent_routers/ai_asset_router.py b/src/routers/parent_routers/ai_asset_router.py new file mode 100644 index 00000000..82051ec9 --- /dev/null +++ b/src/routers/parent_routers/ai_asset_router.py @@ -0,0 +1,23 @@ +from typing import Type + +from database.model.ai_asset.ai_asset import AIAsset +from database.model.ai_asset.ai_asset_table import AIAssetTable +from routers.parent_router import ParentRouter + + +class AIAssetRouter(ParentRouter): + @property + def resource_name(self) -> str: + return "ai_asset" + + @property + def resource_name_plural(self) -> str: + return "ai_assets" + + @property + def parent_class(self) -> Type[AIAsset]: + return AIAsset + + @property + def parent_class_table(self) -> Type[AIAssetTable]: + return AIAssetTable diff --git a/src/routers/parent_routers/ai_resource_router.py b/src/routers/parent_routers/ai_resource_router.py new file mode 100644 index 00000000..7f51dd17 --- /dev/null +++ b/src/routers/parent_routers/ai_resource_router.py @@ -0,0 +1,23 @@ +from typing import Type + +from database.model.ai_resource.resource import AIResource +from database.model.ai_resource.resource_table import AIResourceTable +from routers.parent_router import ParentRouter + + +class AIResourceRouter(ParentRouter): + @property + def resource_name(self) -> str: + return "ai_resource" + + @property + def resource_name_plural(self) -> str: + return "ai_resources" + + @property + def parent_class(self) -> Type[AIResource]: + return AIResource + + @property + def parent_class_table(self) -> Type[AIResourceTable]: + return AIResourceTable diff --git a/src/routers/resource_router.py b/src/routers/resource_router.py index 7b62ddb5..262db575 100644 --- a/src/routers/resource_router.py +++ b/src/routers/resource_router.py @@ -14,7 +14,7 @@ from sqlmodel import SQLModel, Session, select from starlette.responses import JSONResponse -from authentication import get_current_user, has_role +from authentication import get_current_user from config import KEYCLOAK_CONFIG from converters.schema_converters.schema_converter import SchemaConverter from database.model.ai_resource.resource import AIResource @@ -25,7 +25,6 @@ resource_read, ) from database.model.serializers import deserialize_resource_relationships -from routers.router import AIoDRouter class Pagination(BaseModel): @@ -38,7 +37,7 @@ class Pagination(BaseModel): RESOURCE_READ = TypeVar("RESOURCE_READ", bound=SQLModel) -class ResourceRouter(AIoDRouter, abc.ABC): +class ResourceRouter(abc.ABC): """ Abstract class for FastAPI resource router. @@ -330,7 +329,7 @@ def register_resource( user: dict = Depends(get_current_user), ): f"""Register a {self.resource_name} with AIoD.""" - if not has_role(user, KEYCLOAK_CONFIG.get("role")): + if "groups" in user and KEYCLOAK_CONFIG.get("role") not in user["groups"]: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to edit Aiod resources.", @@ -372,7 +371,7 @@ def put_resource( user: dict = Depends(get_current_user), ): f"""Update an existing {self.resource_name}.""" - if not has_role(user, KEYCLOAK_CONFIG.get("role")): + if "groups" in user and KEYCLOAK_CONFIG.get("role") not in user["groups"]: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to edit Aiod resources.", @@ -411,7 +410,7 @@ def delete_resource_func(self, engine: Engine): """ def delete_resource(identifier: str, user: dict = Depends(get_current_user)): - if not has_role(user, KEYCLOAK_CONFIG.get("role")): + if "groups" in user and KEYCLOAK_CONFIG.get("role") not in user["groups"]: raise HTTPException( status_code=status.HTTP_403_FORBIDDEN, detail="You do not have permission to edit Aiod resources.", diff --git a/src/routers/resource_routers/__init__.py b/src/routers/resource_routers/__init__.py new file mode 100644 index 00000000..baaa8412 --- /dev/null +++ b/src/routers/resource_routers/__init__.py @@ -0,0 +1,34 @@ +from .case_study_router import CaseStudyRouter +from .computational_asset_router import ComputationalAssetRouter +from .dataset_router import DatasetRouter +from .educational_resource_router import EducationalResourceRouter +from .event_router import EventRouter +from .experiment_router import ExperimentRouter +from .ml_model_router import MLModelRouter +from .news_router import NewsRouter +from .organisation_router import OrganisationRouter +from .person_router import PersonRouter +from .platform_router import PlatformRouter +from .project_router import ProjectRouter +from .publication_router import PublicationRouter +from .service_router import ServiceRouter +from .team_router import TeamRouter +from .. import ResourceRouter + +router_list: list[ResourceRouter] = [ + PlatformRouter(), + CaseStudyRouter(), + ComputationalAssetRouter(), + DatasetRouter(), + EducationalResourceRouter(), + EventRouter(), + ExperimentRouter(), + MLModelRouter(), + NewsRouter(), + OrganisationRouter(), + PersonRouter(), + PublicationRouter(), + ProjectRouter(), + ServiceRouter(), + TeamRouter(), +] diff --git a/src/routers/resources/case_study_router.py b/src/routers/resource_routers/case_study_router.py similarity index 100% rename from src/routers/resources/case_study_router.py rename to src/routers/resource_routers/case_study_router.py diff --git a/src/routers/resources/computational_asset_router.py b/src/routers/resource_routers/computational_asset_router.py similarity index 100% rename from src/routers/resources/computational_asset_router.py rename to src/routers/resource_routers/computational_asset_router.py diff --git a/src/routers/resources/dataset_router.py b/src/routers/resource_routers/dataset_router.py similarity index 99% rename from src/routers/resources/dataset_router.py rename to src/routers/resource_routers/dataset_router.py index e440a586..9ae6c57c 100644 --- a/src/routers/resources/dataset_router.py +++ b/src/routers/resource_routers/dataset_router.py @@ -6,7 +6,6 @@ ) from converters.schema_converters.schema_converter import SchemaConverter from database.model.dataset.dataset import Dataset - from routers.resource_router import ResourceRouter diff --git a/src/routers/resources/educational_resource_router.py b/src/routers/resource_routers/educational_resource_router.py similarity index 100% rename from src/routers/resources/educational_resource_router.py rename to src/routers/resource_routers/educational_resource_router.py diff --git a/src/routers/resources/event_router.py b/src/routers/resource_routers/event_router.py similarity index 100% rename from src/routers/resources/event_router.py rename to src/routers/resource_routers/event_router.py diff --git a/src/routers/resources/experiment_router.py b/src/routers/resource_routers/experiment_router.py similarity index 100% rename from src/routers/resources/experiment_router.py rename to src/routers/resource_routers/experiment_router.py diff --git a/src/routers/resources/ml_model_router.py b/src/routers/resource_routers/ml_model_router.py similarity index 100% rename from src/routers/resources/ml_model_router.py rename to src/routers/resource_routers/ml_model_router.py diff --git a/src/routers/resources/news_router.py b/src/routers/resource_routers/news_router.py similarity index 100% rename from src/routers/resources/news_router.py rename to src/routers/resource_routers/news_router.py diff --git a/src/routers/resources/organisation_router.py b/src/routers/resource_routers/organisation_router.py similarity index 100% rename from src/routers/resources/organisation_router.py rename to src/routers/resource_routers/organisation_router.py diff --git a/src/routers/resources/person_router.py b/src/routers/resource_routers/person_router.py similarity index 100% rename from src/routers/resources/person_router.py rename to src/routers/resource_routers/person_router.py diff --git a/src/routers/resources/platform_router.py b/src/routers/resource_routers/platform_router.py similarity index 100% rename from src/routers/resources/platform_router.py rename to src/routers/resource_routers/platform_router.py diff --git a/src/routers/resource_routers/project_router.py b/src/routers/resource_routers/project_router.py new file mode 100644 index 00000000..779363f4 --- /dev/null +++ b/src/routers/resource_routers/project_router.py @@ -0,0 +1,20 @@ +from database.model.project.project import Project +from routers.resource_router import ResourceRouter + + +class ProjectRouter(ResourceRouter): + @property + def version(self) -> int: + return 1 + + @property + def resource_name(self) -> str: + return "project" + + @property + def resource_name_plural(self) -> str: + return "projects" + + @property + def resource_class(self) -> type[Project]: + return Project diff --git a/src/routers/resources/publication_router.py b/src/routers/resource_routers/publication_router.py similarity index 100% rename from src/routers/resources/publication_router.py rename to src/routers/resource_routers/publication_router.py diff --git a/src/routers/resources/service_router.py b/src/routers/resource_routers/service_router.py similarity index 100% rename from src/routers/resources/service_router.py rename to src/routers/resource_routers/service_router.py diff --git a/src/routers/resources/team_router.py b/src/routers/resource_routers/team_router.py similarity index 100% rename from src/routers/resources/team_router.py rename to src/routers/resource_routers/team_router.py diff --git a/src/routers/router.py b/src/routers/router.py deleted file mode 100644 index b2de2fc0..00000000 --- a/src/routers/router.py +++ /dev/null @@ -1,10 +0,0 @@ -import abc - -from fastapi import APIRouter -from sqlalchemy.engine import Engine - - -class AIoDRouter(abc.ABC): - @abc.abstractmethod - def create(self, engine: Engine, url_prefix: str) -> APIRouter: - pass diff --git a/src/routers/search_router.py b/src/routers/search_router.py deleted file mode 100644 index a6b5c920..00000000 --- a/src/routers/search_router.py +++ /dev/null @@ -1,120 +0,0 @@ -import abc -import os -from typing import TypeVar, Generic, Any, Type - -from elasticsearch import Elasticsearch -from fastapi import APIRouter, Depends, HTTPException -from pydantic import BaseModel -from sqlalchemy.engine import Engine -from starlette import status - -from authentication import get_current_user, has_role -from database.model.concept.aiod_entry import AIoDEntryRead -from database.model.resource_read_and_create import resource_read -from routers.router import AIoDRouter - -SORT = {"identifier": "asc"} -LIMIT_MAX = 1000 - -RESOURCE = TypeVar("RESOURCE") - - -class SearchResult(BaseModel, Generic[RESOURCE]): - total_hits: int - resources: list[RESOURCE] - next_offset: list | None - - -class SearchRouter(AIoDRouter, Generic[RESOURCE], abc.ABC): - """ - Providing search functionality in ElasticSearch - """ - - def __init__(self, client: Elasticsearch): - self.client: Elasticsearch = client - - @property - @abc.abstractmethod - def es_index(self) -> str: - """The name of the elasticsearch index""" - - @property - @abc.abstractmethod - def resource_name_plural(self) -> str: - """The name of the resource (plural)""" - - @property - def key_translations(self) -> dict[str, str]: - """If an attribute is called differently in elasticsearch than in our metadata model, - you can define a translation dictionary here. The key should be the name in - elasticsearch, the value the name in our data model.""" - return {} - - @property - @abc.abstractmethod - def resource_class(self) -> RESOURCE: - """The resource class""" - - def create(self, engine: Engine, url_prefix: str) -> APIRouter: - router = APIRouter() - read_class = resource_read(self.resource_class) # type: ignore - - @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", tags=["search"]) - def search( - name: str = "", - limit: int = 10, - offset: str | None = None, # TODO: this should not be a string - user: dict = Depends(get_current_user), - ) -> SearchResult[read_class]: # type: ignore - f""" - Search for {self.resource_name_plural}. - """ - if limit > LIMIT_MAX: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The limit should be maximum {LIMIT_MAX}. If you want more results, " - f"use pagination.", - ) - - if not has_role(user, os.getenv("ES_ROLE")): - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You do not have permission to search Aiod resources.", - ) - - query = {"bool": {"must": {"match": {"name": name}}}} - result = self.client.search( - index=self.es_index, query=query, size=limit, sort=SORT, search_after=offset - ) - - total_hits = result["hits"]["total"]["value"] - resources: list[read_class] = [ # type: ignore - self._cast_resource(read_class, hit["_source"]) # type: ignore - for hit in result["hits"]["hits"] - ] - next_offset = ( - result["hits"]["hits"][-1]["sort"] if len(result["hits"]["hits"]) > 0 else None - ) - return SearchResult[read_class]( # type: ignore - total_hits=total_hits, - next_offset=next_offset, - resources=resources, - ) - - return router - - def _cast_resource( - self, resource_class: RESOURCE, resource_dict: dict[str, Any] - ) -> Type[RESOURCE]: - kwargs = { - self.key_translations.get(key, key): val - for key, val in resource_dict.items() - if key != "type" and not key.startswith("@") - } - resource = resource_class(**kwargs) # type: ignore - resource.aiod_entry = AIoDEntryRead( - date_modified=resource_dict["date_modified"], - date_created=resource_dict["date_created"], - status=resource_dict["status"], - ) - return resource diff --git a/src/routers/search_routers/search_router_datasets.py b/src/routers/search_routers/search_router_datasets.py deleted file mode 100644 index b0b2ed43..00000000 --- a/src/routers/search_routers/search_router_datasets.py +++ /dev/null @@ -1,16 +0,0 @@ -from database.model.dataset.dataset import Dataset -from routers.search_router import SearchRouter - - -class SearchRouterDatasets(SearchRouter[Dataset]): - @property - def es_index(self) -> str: - return "dataset" - - @property - def resource_name_plural(self) -> str: - return "datasets" - - @property - def resource_class(self): - return Dataset diff --git a/src/routers/search_routers/search_router_publications.py b/src/routers/search_routers/search_router_publications.py deleted file mode 100644 index bf213445..00000000 --- a/src/routers/search_routers/search_router_publications.py +++ /dev/null @@ -1,20 +0,0 @@ -from database.model.knowledge_asset.publication import Publication -from routers.search_router import SearchRouter - - -class SearchRouterPublications(SearchRouter[Publication]): - @property - def es_index(self) -> str: - return "publication" - - @property - def resource_name_plural(self) -> str: - return "publications" - - @property - def resource_class(self): - return Publication - - @property - def key_translations(self) -> dict: - return {"publication_type": "type"} diff --git a/src/routers/upload_router_huggingface.py b/src/routers/upload_router_huggingface.py index e16dd269..c3e254ca 100644 --- a/src/routers/upload_router_huggingface.py +++ b/src/routers/upload_router_huggingface.py @@ -2,11 +2,10 @@ from fastapi import File, Query, UploadFile from sqlalchemy.engine import Engine -from routers.router import AIoDRouter from uploader.hugging_face_uploader import handle_upload -class UploadRouterHuggingface(AIoDRouter): +class UploadRouterHuggingface: def create(self, engine: Engine, url_prefix: str) -> APIRouter: router = APIRouter() diff --git a/src/tests/.env b/src/tests/.env index 94142d8d..803f6c1e 100644 --- a/src/tests/.env +++ b/src/tests/.env @@ -1,4 +1 @@ KEYCLOAK_CLIENT_SECRET="mocked_secret" -ES_USER="mocked_user" -ES_PASSWORD="mocked_password" -ES_ROLE="edit_aiod_resources" \ No newline at end of file diff --git a/src/tests/connectors/.DS_Store b/src/tests/connectors/.DS_Store deleted file mode 100644 index a46fda6b8e323be4e6fa33fe6ee338210b52254b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5T32Y28+<67a_0EtEX7vNpF1t?Z1$?y%fY#UcjSx5kUkmK8IHyz_SnH zH?x~IF$sDSm6@{hC9^ZxFCS#5OGIX{n)Hc!L=<2!ws+uajOTe&tmPwIY~&eBN@$wQ z`@^xVWNSbbPz8RQ0{rcEXiArKMky7(-*oAyw%)55sg2`NVn>(>Ztkv64vt?A>#`=_ z$XYIVx5!sxI@F~Ft!R$lIbFcdz-^rGD|u${+MHSB_xP~p*YMobZg_s3UBh>qER>sq zYf6dR{c@JO`(Rm`FLJwoT66o9vtiVfQ_JH@SXi2nE%WHy3Xj5kk;mI}fk!E4!=ovu zh6k2X`vVG~XR`%ehw4=URX`P3E5Q3hh`|_n%pA(r0VhWQU>jjETuToJh8zG!9y5oC zz>F&ex>Dnh7{-+&9{Rk(y|wak+-pOOQw%oF%N$AxoNvc+!L4`?Lkx2vUjQSI SnL|Wi`bR)yP^Sv~r~)4=dy&lm diff --git a/src/tests/connectors/example/test_enum_connector.py b/src/tests/connectors/example/test_enum_connector.py new file mode 100644 index 00000000..c0f98426 --- /dev/null +++ b/src/tests/connectors/example/test_enum_connector.py @@ -0,0 +1,7 @@ +from connectors.example.enum import EnumConnectorStatus + + +def test_fetch_happy_path(): + connector = EnumConnectorStatus() + resources = list(connector.fetch()) + assert set(resources) == {"published", "draft", "rejected"} diff --git a/src/tests/resources/.DS_Store b/src/tests/resources/.DS_Store deleted file mode 100644 index fe6df7af3f15d3abb7d6f367e9d971e2289cc763..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKF;2rU6n!q0XaR{13@{*NV}YUUNTCD?!PpCwCKVkLQc=2R-~!x+3vdJu!pO$^ zwkZi|5EDY^|LXfWKi~Gta~j(K%wUo9fgXSkU9i<o0e5+zi$ugtQD-5xBR?jGNdz!pH9$#5*U;4@NPgc!Y6+RdVpDq@dA*FWL zNXUq9eU%e-R%exY?Qb7_XttuiGPmzDtNi5utfmU60;)ix0$j7jI-3r)R0UK4RbZ)r zd>;b3VCb=MXg?h+>=A(2X4o3*cJDz9BtQ&377iJq8P}BPni_w^Fs?b{fzJy)77kr= z7&|ii@gp05LNR{lV*;BF6FSsV6;K7L3aq)yo~-}p&FBAWBfU}uRDpk`fN4j=XuwDE zYisS{WUUS8Bf6O66%Ln8Sg}hnb7d*srCVb>kP0#MSU6;arW*k(gBGg5uPX2k6}WT< diff --git a/src/tests/resources/elasticsearch/dataset_search.json b/src/tests/resources/elasticsearch/dataset_search.json deleted file mode 100644 index 0f293eee..00000000 --- a/src/tests/resources/elasticsearch/dataset_search.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "took": 15, - "timed_out": false, - "_shards": { - "total": 1, - "successful": 1, - "skipped": 0, - "failed": 0 - }, - "hits": { - "total": { - "value": 1, - "relation": "eq" - }, - "max_score": null, - "hits": [ - { - "_index": "dataset", - "_id": "dataset_1", - "_score": null, - "_source": { - "type": "dataset", - "date_created": "2023-08-24T12:48:49.000Z", - "date_modified": "2023-08-24T12:48:49.000Z", - "description": "A description.", - "asset_identifier": 3, - "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", - "name": "The name of this dataset", - "status": "draft", - "@timestamp": "2023-08-24T12:49:00.321Z", - "identifier": 1, - "platform_identifier": "1", - "resource_identifier": 3, - "same_as": "https://www.example.com/resource/this_resource", - "version": "1.1.0", - "issn": "20493630", - "temporal_coverage": "2011/2012", - "@version": "1", - "date_published": "2022-01-01T15:15:00.000Z", - "platform": "example", - "measurement_technique": "mass spectrometry" - }, - "sort": [ - 1 - ] - } - ] - } -} \ No newline at end of file diff --git a/src/tests/resources/elasticsearch/publication_search.json b/src/tests/resources/elasticsearch/publication_search.json deleted file mode 100644 index da0787f6..00000000 --- a/src/tests/resources/elasticsearch/publication_search.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "took": 1, - "timed_out": false, - "_shards": {"total": 1, "successful": 1, "skipped": 0, "failed": 0}, - "hits": { - "total": {"value": 1, "relation": "eq"}, - "max_score": null, - "hits": [ - { - "_index": "publication", - "_id": "publication_1", - "_score": null, - "_source": { - "platform_identifier": "1", - "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", - "@version": "1", - "status": "draft", - "knowledge_asset_identifier": 1, - "asset_identifier": 1, - "resource_identifier": 1, - "permanent_identifier": "http://dx.doi.org/10.1093/ajae/aaq063", - "description": "A description.", - "isbn": "9783161484100", - "date_created": "2023-08-24T10:14:52.000Z", - "version": "1.1.0", - "date_modified": "2023-08-24T10:14:52.000Z", - "name": "The name of this publication", - "platform": "example", - "publication_type": "journal", - "same_as": "https://www.example.com/resource/this_resource", - "identifier": 1, - "issn": "20493630", - "type": "publication", - "@timestamp": "2023-08-24T10:14:55.452Z", - "date_published": "2022-01-01T15:15:00.000Z" - }, - "sort": [1] - } - ] - } -} \ No newline at end of file diff --git a/src/tests/routers/.DS_Store b/src/tests/routers/.DS_Store deleted file mode 100644 index f594c603bf858a27b366a39b8933ca6b03ec0dd1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKu};G<5IvVlq+;m?qko|*lUs$UZ2bY$v?@pqsfxOLMuLeC;3xP5{)v%yzAH3J zKnxY4=uSF6=X@8RUo2ltL~gX14T$s8}urlcH*P zo8d1qz~Ane7Br_@=+5^mSCa12OVgvQ@_f3irWncY-ySZ{$FIXSuh|d0R@b~+bT;fF zNDq0;>5eMepjU8T_S80~G0)uMwVPLY`nb)j)$=pA?>%ez(KF{~Vhk7q#=xO7Ag3*5 zCx^Ze%ikC<2KLSXpAR0&7!;#mIy%s#D*&(ya}xCVF2ONgF(^hsj6j@(0wvVx6~jq5 z>|XtXVic5ca%pC~M`w0=Lvd+#*nM&*7X+;}28@9^14r`M=lXx~_5HsdWKYI`G4QV# zaNT^8kFX@It&PQTt@Y3=C=2^V!H*DhQYnTnm*O*M64*U=fI%?|!UC}$fkcBf#=x&K F@DA9{QLO*~ diff --git a/src/routers/search_routers/__init__.py b/src/tests/routers/enum_routers/__init__.py similarity index 100% rename from src/routers/search_routers/__init__.py rename to src/tests/routers/enum_routers/__init__.py diff --git a/src/tests/routers/enum_routers/test_license_router.py b/src/tests/routers/enum_routers/test_license_router.py new file mode 100644 index 00000000..6289bea8 --- /dev/null +++ b/src/tests/routers/enum_routers/test_license_router.py @@ -0,0 +1,22 @@ +from sqlalchemy.engine import Engine +from sqlmodel import Session +from starlette.testclient import TestClient + +from database.model.ai_asset.license import License +from database.model.dataset.dataset import Dataset +from database.model.knowledge_asset.publication import Publication + + +def test_happy_path(client: TestClient, engine: Engine, dataset: Dataset, publication: Publication): + + dataset.license = License(name="license 1") + publication.license = License(name="license 2") + with Session(engine) as session: + session.add(dataset) + session.merge(publication) + session.commit() + + response = client.get("/licenses/v1") + assert response.status_code == 200, response.json() + response_json = response.json() + assert set(response_json).issuperset({"license 1", "license 2"}) diff --git a/src/tests/routers/resources/__init__.py b/src/tests/routers/parent_routers/__init__.py similarity index 100% rename from src/tests/routers/resources/__init__.py rename to src/tests/routers/parent_routers/__init__.py diff --git a/src/tests/routers/parent_routers/test_agent_router.py b/src/tests/routers/parent_routers/test_agent_router.py new file mode 100644 index 00000000..f7739afd --- /dev/null +++ b/src/tests/routers/parent_routers/test_agent_router.py @@ -0,0 +1,35 @@ +from sqlalchemy.engine import Engine +from sqlmodel import Session +from starlette.testclient import TestClient + +from database.model.agent.organisation import Organisation +from database.model.agent.person import Person + + +def test_happy_path( + client: TestClient, + engine: Engine, + organisation: Organisation, + person: Person, +): + + organisation.name = "Organisation" + person.name = "Person" + with Session(engine) as session: + session.add(organisation) + session.merge(person) + session.commit() + + response = client.get("/agents/v1/1") + assert response.status_code == 200, response.json() + response_json = response.json() + assert response_json["identifier"] == 1 + assert response_json["agent_identifier"] == 1 + assert response_json["name"] == "Organisation" + + response = client.get("/agents/v1/2") + assert response.status_code == 200, response.json() + response_json = response.json() + assert response_json["identifier"] == 1 + assert response_json["agent_identifier"] == 2 + assert response_json["name"] == "Person" diff --git a/src/tests/routers/parent_routers/test_ai_asset_router.py b/src/tests/routers/parent_routers/test_ai_asset_router.py new file mode 100644 index 00000000..9cc227ad --- /dev/null +++ b/src/tests/routers/parent_routers/test_ai_asset_router.py @@ -0,0 +1,35 @@ +from sqlalchemy.engine import Engine +from sqlmodel import Session +from starlette.testclient import TestClient + +from database.model.dataset.dataset import Dataset +from database.model.knowledge_asset.publication import Publication + + +def test_happy_path( + client: TestClient, + engine: Engine, + dataset: Dataset, + publication: Publication, +): + + dataset.name = "Dataset" + publication.name = "Publication" + with Session(engine) as session: + session.add(dataset) + session.merge(publication) + session.commit() + + response = client.get("/ai_assets/v1/1") + assert response.status_code == 200, response.json() + response_json = response.json() + assert response_json["identifier"] == 1 + assert response_json["ai_asset_identifier"] == 1 + assert response_json["name"] == "Dataset" + + response = client.get("/ai_assets/v1/2") + assert response.status_code == 200, response.json() + response_json = response.json() + assert response_json["identifier"] == 1 + assert response_json["ai_asset_identifier"] == 2 + assert response_json["name"] == "Publication" diff --git a/src/tests/routers/parent_routers/test_ai_resource_router.py b/src/tests/routers/parent_routers/test_ai_resource_router.py new file mode 100644 index 00000000..14b10e42 --- /dev/null +++ b/src/tests/routers/parent_routers/test_ai_resource_router.py @@ -0,0 +1,35 @@ +from sqlalchemy.engine import Engine +from sqlmodel import Session +from starlette.testclient import TestClient + +from database.model.agent.organisation import Organisation +from database.model.agent.person import Person + + +def test_happy_path( + client: TestClient, + engine: Engine, + organisation: Organisation, + person: Person, +): + + organisation.name = "Organisation" + person.name = "Person" + with Session(engine) as session: + session.add(organisation) + session.merge(person) + session.commit() + + response = client.get("/ai_resources/v1/1") + assert response.status_code == 200, response.json() + response_json = response.json() + assert response_json["identifier"] == 1 + assert response_json["ai_resource_identifier"] == 1 + assert response_json["name"] == "Organisation" + + response = client.get("/ai_resources/v1/2") + assert response.status_code == 200, response.json() + response_json = response.json() + assert response_json["identifier"] == 1 + assert response_json["ai_resource_identifier"] == 2 + assert response_json["name"] == "Person" diff --git a/src/tests/routers/search/__init__.py b/src/tests/routers/resource_routers/__init__.py similarity index 100% rename from src/tests/routers/search/__init__.py rename to src/tests/routers/resource_routers/__init__.py diff --git a/src/tests/routers/test_router_case_study.py b/src/tests/routers/resource_routers/test_router_case_study.py similarity index 100% rename from src/tests/routers/test_router_case_study.py rename to src/tests/routers/resource_routers/test_router_case_study.py diff --git a/src/tests/routers/test_router_computational_asset.py b/src/tests/routers/resource_routers/test_router_computational_asset.py similarity index 100% rename from src/tests/routers/test_router_computational_asset.py rename to src/tests/routers/resource_routers/test_router_computational_asset.py diff --git a/src/tests/routers/resources/test_router_dataset.py b/src/tests/routers/resource_routers/test_router_dataset.py similarity index 100% rename from src/tests/routers/resources/test_router_dataset.py rename to src/tests/routers/resource_routers/test_router_dataset.py diff --git a/src/tests/routers/resources/test_router_dataset_generic_fields.py b/src/tests/routers/resource_routers/test_router_dataset_generic_fields.py similarity index 95% rename from src/tests/routers/resources/test_router_dataset_generic_fields.py rename to src/tests/routers/resource_routers/test_router_dataset_generic_fields.py index 65f1296a..9239dfc6 100644 --- a/src/tests/routers/resources/test_router_dataset_generic_fields.py +++ b/src/tests/routers/resource_routers/test_router_dataset_generic_fields.py @@ -44,8 +44,8 @@ def test_happy_path( response_json = response.json() assert response_json["identifier"] == 1 - assert response_json["resource_identifier"] == 3 - assert response_json["asset_identifier"] == 2 + assert response_json["ai_resource_identifier"] == 3 + assert response_json["ai_asset_identifier"] == 2 assert response_json["platform"] == "example" assert response_json["platform_identifier"] == "1" @@ -111,8 +111,8 @@ def test_happy_path( response = client.get("/datasets/v1/1") response_json = response.json() assert response_json["identifier"] == 1 - assert response_json["resource_identifier"] == 3 - assert response_json["asset_identifier"] == 2 + assert response_json["ai_resource_identifier"] == 3 + assert response_json["ai_asset_identifier"] == 2 date_created = dateutil.parser.parse(response_json["aiod_entry"]["date_created"] + "Z") date_modified = dateutil.parser.parse(response_json["aiod_entry"]["date_modified"] + "Z") diff --git a/src/tests/routers/test_router_educational_resource.py b/src/tests/routers/resource_routers/test_router_educational_resource.py similarity index 100% rename from src/tests/routers/test_router_educational_resource.py rename to src/tests/routers/resource_routers/test_router_educational_resource.py diff --git a/src/tests/routers/test_router_event.py b/src/tests/routers/resource_routers/test_router_event.py similarity index 100% rename from src/tests/routers/test_router_event.py rename to src/tests/routers/resource_routers/test_router_event.py diff --git a/src/tests/routers/resources/test_router_experiment.py b/src/tests/routers/resource_routers/test_router_experiment.py similarity index 94% rename from src/tests/routers/resources/test_router_experiment.py rename to src/tests/routers/resource_routers/test_router_experiment.py index d62638f9..1b9ce76b 100644 --- a/src/tests/routers/resources/test_router_experiment.py +++ b/src/tests/routers/resource_routers/test_router_experiment.py @@ -16,7 +16,7 @@ def test_happy_path( keycloak_openid.userinfo = mocked_privileged_token body = copy.copy(body_asset) - body["permanent_identifier"] = "https://doi.org/10.1000/182" + body["pid"] = "https://doi.org/10.1000/182" body["experimental_workflow"] = "Example workflow." body["execution_settings"] = "Example execution settings." body["reproducibility_explanation"] = "Example reproducibility explanation." @@ -51,7 +51,7 @@ def test_happy_path( assert response.status_code == 200, response.json() response_json = response.json() - assert response_json["permanent_identifier"] == "https://doi.org/10.1000/182" + assert response_json["pid"] == "https://doi.org/10.1000/182" assert response_json["experimental_workflow"] == "Example workflow." assert response_json["execution_settings"] == "Example execution settings." assert response_json["reproducibility_explanation"] == "Example reproducibility explanation." diff --git a/src/tests/routers/resources/test_router_ml_model.py b/src/tests/routers/resource_routers/test_router_ml_model.py similarity index 93% rename from src/tests/routers/resources/test_router_ml_model.py rename to src/tests/routers/resource_routers/test_router_ml_model.py index 13555c29..eaa94359 100644 --- a/src/tests/routers/resources/test_router_ml_model.py +++ b/src/tests/routers/resource_routers/test_router_ml_model.py @@ -23,7 +23,7 @@ def test_happy_path( session.commit() body = copy.copy(body_asset) - body["permanent_identifier"] = "https://doi.org/10.1000/182" + body["pid"] = "https://doi.org/10.1000/182" body["type"] = "Large Language Model" body["related_experiment"] = [1] distribution = { @@ -56,7 +56,7 @@ def test_happy_path( assert response.status_code == 200, response.json() response_json = response.json() - assert response_json["permanent_identifier"] == "https://doi.org/10.1000/182" + assert response_json["pid"] == "https://doi.org/10.1000/182" assert response_json["type"] == "Large Language Model" assert response_json["related_experiment"] == [1] assert response_json["distribution"] == [distribution] diff --git a/src/tests/routers/test_router_news.py b/src/tests/routers/resource_routers/test_router_news.py similarity index 100% rename from src/tests/routers/test_router_news.py rename to src/tests/routers/resource_routers/test_router_news.py diff --git a/src/tests/routers/resources/test_router_organisation.py b/src/tests/routers/resource_routers/test_router_organisation.py similarity index 97% rename from src/tests/routers/resources/test_router_organisation.py rename to src/tests/routers/resource_routers/test_router_organisation.py index 88a0fe61..03f9052d 100644 --- a/src/tests/routers/resources/test_router_organisation.py +++ b/src/tests/routers/resource_routers/test_router_organisation.py @@ -41,7 +41,7 @@ def test_happy_path( response_json = response.json() assert response_json["identifier"] == 2 - assert response_json["resource_identifier"] == 2 + assert response_json["ai_resource_identifier"] == 2 assert response_json["agent_identifier"] == 2 assert response_json["date_founded"] == "2023-01-01" diff --git a/src/tests/routers/resources/test_router_person.py b/src/tests/routers/resource_routers/test_router_person.py similarity index 96% rename from src/tests/routers/resources/test_router_person.py rename to src/tests/routers/resource_routers/test_router_person.py index 41fb13d9..58731284 100644 --- a/src/tests/routers/resources/test_router_person.py +++ b/src/tests/routers/resource_routers/test_router_person.py @@ -37,7 +37,7 @@ def test_happy_path( response_json = response.json() assert response_json["identifier"] == 2 - assert response_json["resource_identifier"] == 2 + assert response_json["ai_resource_identifier"] == 2 assert response_json["agent_identifier"] == 2 assert response_json["contact"] == [1] diff --git a/src/tests/routers/resources/test_router_platform.py b/src/tests/routers/resource_routers/test_router_platform.py similarity index 100% rename from src/tests/routers/resources/test_router_platform.py rename to src/tests/routers/resource_routers/test_router_platform.py diff --git a/src/tests/routers/resource_routers/test_router_project.py b/src/tests/routers/resource_routers/test_router_project.py new file mode 100644 index 00000000..1b019a94 --- /dev/null +++ b/src/tests/routers/resource_routers/test_router_project.py @@ -0,0 +1,58 @@ +import copy +from unittest.mock import Mock + +from sqlalchemy.engine import Engine +from sqlmodel import Session +from starlette.testclient import TestClient + +from authentication import keycloak_openid +from database.model.agent.organisation import Organisation +from database.model.agent.person import Person +from database.model.dataset.dataset import Dataset +from database.model.knowledge_asset.publication import Publication + + +def test_happy_path( + client: TestClient, + engine: Engine, + mocked_privileged_token: Mock, + body_resource: dict, + person: Person, + organisation: Organisation, + publication: Publication, + dataset: Dataset, +): + keycloak_openid.userinfo = mocked_privileged_token + + with Session(engine) as session: + session.add(person) + session.merge(organisation) + session.merge(dataset) + session.merge(publication) + session.commit() + + body = copy.deepcopy(body_resource) + body["start_date"] = "2021-02-02T15:15:00" + body["end_date"] = "2021-02-03T15:15:00" + body["total_cost_euro"] = 10000000.53 + body["funder"] = [1] + body["participant"] = [1] + body["coordinator"] = 1 + body["produced"] = [1] # the dataset + body["used"] = [2] # the publication + + response = client.post("/projects/v1", json=body, headers={"Authorization": "Fake token"}) + assert response.status_code == 200, response.json() + + response = client.get("/projects/v1/1") + assert response.status_code == 200, response.json() + + response_json = response.json() + assert response_json["start_date"] == "2021-02-02T15:15:00" + assert response_json["end_date"] == "2021-02-03T15:15:00" + assert response_json["total_cost_euro"] == 10000000.53 + assert response_json["funder"] == [1] + assert response_json["participant"] == [1] + assert response_json["coordinator"] == 1 + assert response_json["produced"] == [1] # the dataset + assert response_json["used"] == [2] # the publication diff --git a/src/tests/routers/resources/test_router_publication.py b/src/tests/routers/resource_routers/test_router_publication.py similarity index 100% rename from src/tests/routers/resources/test_router_publication.py rename to src/tests/routers/resource_routers/test_router_publication.py diff --git a/src/tests/routers/resources/test_router_service.py b/src/tests/routers/resource_routers/test_router_service.py similarity index 94% rename from src/tests/routers/resources/test_router_service.py rename to src/tests/routers/resource_routers/test_router_service.py index a2155e97..e7cd0914 100644 --- a/src/tests/routers/resources/test_router_service.py +++ b/src/tests/routers/resource_routers/test_router_service.py @@ -26,7 +26,7 @@ def test_happy_path( response_json = response.json() assert response_json["identifier"] == 1 - assert response_json["resource_identifier"] == 1 + assert response_json["ai_resource_identifier"] == 1 assert response_json["slogan"] == "Smart Blockchains for everyone!" assert response_json["terms_of_service"] == "Some text here" diff --git a/src/tests/routers/test_router_team.py b/src/tests/routers/resource_routers/test_router_team.py similarity index 100% rename from src/tests/routers/test_router_team.py rename to src/tests/routers/resource_routers/test_router_team.py diff --git a/src/tests/routers/search/test_search_router_datasets.py b/src/tests/routers/search/test_search_router_datasets.py deleted file mode 100644 index cb14747c..00000000 --- a/src/tests/routers/search/test_search_router_datasets.py +++ /dev/null @@ -1,45 +0,0 @@ -import json -from unittest.mock import Mock - -from starlette.testclient import TestClient - -from authentication import keycloak_openid -from routers import other_routers, SearchRouterDatasets -from tests.testutils.paths import path_test_resources - - -def test_search_happy_path(client: TestClient, mocked_privileged_token: Mock): - keycloak_openid.userinfo = mocked_privileged_token - - (search_router,) = [r for r in other_routers if isinstance(r, SearchRouterDatasets)] - with open(path_test_resources() / "elasticsearch" / "dataset_search.json", "r") as f: - mocked_results = json.load(f) - search_router.client.search = Mock(return_value=mocked_results) - - response = client.get( - "/search/datasets/v1", - params={"name": "dataset"}, - headers={"Authorization": "Fake token"}, - ) - - assert response.status_code == 200, response.json() - response_json = response.json() - (resource,) = response_json["resources"] - assert resource["platform_identifier"] == "1" - assert resource["license"] == "https://creativecommons.org/share-your-work/public-domain/cc0/" - - assert resource["asset_identifier"] == 3 - assert resource["resource_identifier"] == 3 - assert resource["description"] == "A description." - assert resource["aiod_entry"]["date_modified"] == "2023-08-24T12:48:49+00:00" - assert resource["aiod_entry"]["date_created"] == "2023-08-24T12:48:49+00:00" - assert resource["aiod_entry"]["status"] == "draft" - assert resource["version"] == "1.1.0" - assert resource["name"] == "The name of this dataset" - assert resource["platform"] == "example" - assert resource["same_as"] == "https://www.example.com/resource/this_resource" - assert resource["identifier"] == 1 - assert resource["date_published"] == "2022-01-01T15:15:00+00:00" - assert resource["issn"] == "20493630" - assert resource["measurement_technique"] == "mass spectrometry" - assert resource["temporal_coverage"] == "2011/2012" diff --git a/src/tests/routers/search/test_search_router_publications.py b/src/tests/routers/search/test_search_router_publications.py deleted file mode 100644 index 9cb253ad..00000000 --- a/src/tests/routers/search/test_search_router_publications.py +++ /dev/null @@ -1,46 +0,0 @@ -import json -from unittest.mock import Mock - -from starlette.testclient import TestClient - -from authentication import keycloak_openid -from routers import other_routers, SearchRouterPublications -from tests.testutils.paths import path_test_resources - - -def test_search_happy_path(client: TestClient, mocked_privileged_token: Mock): - keycloak_openid.userinfo = mocked_privileged_token - - (search_router,) = [r for r in other_routers if isinstance(r, SearchRouterPublications)] - with open(path_test_resources() / "elasticsearch" / "publication_search.json", "r") as f: - mocked_results = json.load(f) - search_router.client.search = Mock(return_value=mocked_results) - - response = client.get( - "/search/publications/v1", - params={"name": "resource"}, - headers={"Authorization": "Fake token"}, - ) - - assert response.status_code == 200, response.json() - response_json = response.json() - (resource,) = response_json["resources"] - assert resource["platform_identifier"] == "1" - assert resource["license"] == "https://creativecommons.org/share-your-work/public-domain/cc0/" - assert resource["knowledge_asset_identifier"] == 1 - assert resource["asset_identifier"] == 1 - assert resource["resource_identifier"] == 1 - assert resource["permanent_identifier"] == "http://dx.doi.org/10.1093/ajae/aaq063" - assert resource["description"] == "A description." - assert resource["isbn"] == "9783161484100" - assert resource["aiod_entry"]["date_modified"] == "2023-08-24T10:14:52+00:00" - assert resource["aiod_entry"]["date_created"] == "2023-08-24T10:14:52+00:00" - assert resource["aiod_entry"]["status"] == "draft" - assert resource["version"] == "1.1.0" - assert resource["name"] == "The name of this publication" - assert resource["platform"] == "example" - assert resource["type"] == "journal" - assert resource["same_as"] == "https://www.example.com/resource/this_resource" - assert resource["identifier"] == 1 - assert resource["issn"] == "20493630" - assert resource["date_published"] == "2022-01-01T15:15:00+00:00" diff --git a/src/tests/testutils/test_resource.py b/src/tests/testutils/test_resource.py index 2a3b1ce3..d7f76de5 100644 --- a/src/tests/testutils/test_resource.py +++ b/src/tests/testutils/test_resource.py @@ -9,7 +9,7 @@ from database.model.concept.aiod_entry import AIoDEntryORM from database.model.concept.concept import AIoDConcept, AIoDConceptBase from database.model.concept.status import Status -from routers import ResourceRouter +from routers.resource_router import ResourceRouter class TestResourceBase(AIoDConceptBase): From 5ff449822b44677921ab568ecc41d6af300a0ad5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 30 Aug 2023 16:36:45 +0200 Subject: [PATCH 22/79] Copied entire develop branch --- .dockerignore | 2 +- .gitignore | 1 + connectors/fill-examples.sh | 45 ++++++++++++++++++++++++++---- connectors/huggingface/datasets.sh | 2 -- connectors/openml/entry.sh | 3 -- connectors/zenodo/datasets.sh | 1 - connectors/zenodo/entry.sh | 3 -- pyproject.toml | 21 +++++++------- scripts/clean.sh | 17 ----------- 9 files changed, 51 insertions(+), 44 deletions(-) delete mode 100755 scripts/clean.sh diff --git a/.dockerignore b/.dockerignore index d93d9aa1..fe58a30e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,4 @@ scripts venv data -**.DS_Store \ No newline at end of file +**.DS_Store diff --git a/.gitignore b/.gitignore index 2910b82d..a38142c5 100644 --- a/.gitignore +++ b/.gitignore @@ -113,6 +113,7 @@ venv/ ENV/ env.bak/ venv.bak/ +**.DS_Store # Spyder project settings .spyderproject diff --git a/connectors/fill-examples.sh b/connectors/fill-examples.sh index 7cf22f50..cabd3a6f 100755 --- a/connectors/fill-examples.sh +++ b/connectors/fill-examples.sh @@ -4,15 +4,10 @@ python3 connectors/synchronization.py \ -c connectors.example.example.ExampleCaseStudyConnector \ -w /opt/connectors/data/example/case_study - python3 connectors/synchronization.py \ -c connectors.example.example.ExampleComputationalAssetConnector \ -w /opt/connectors/data/example/computational_asset -python3 connectors/synchronization.py \ - -c connectors.example.example.ExampleDatasetConnector \ - -w /opt/connectors/data/example/dataset - python3 connectors/synchronization.py \ -c connectors.example.example.ExampleEducationalResourceConnector \ -w /opt/connectors/data/example/educational_resource @@ -41,6 +36,10 @@ python3 connectors/synchronization.py \ -c connectors.example.example.ExamplePersonConnector \ -w /opt/connectors/data/example/person +python3 connectors/synchronization.py \ + -c connectors.example.example.ExampleProjectConnector \ + -w /opt/connectors/data/example/project + python3 connectors/synchronization.py \ -c connectors.example.example.ExamplePublicationConnector \ -w /opt/connectors/data/example/publication @@ -51,4 +50,38 @@ python3 connectors/synchronization.py \ python3 connectors/synchronization.py \ -c connectors.example.example.ExampleTeamConnector \ - -w /opt/connectors/data/example/team \ No newline at end of file + -w /opt/connectors/data/example/team + +# Enums + +python3 connectors/synchronization.py \ + -c connectors.example.enum.EnumConnectorApplicationArea \ + -w /opt/connectors/data/enum/application_area + +python3 connectors/synchronization.py \ + -c connectors.example.enum.EnumConnectorEducationalResourceType \ + -w /opt/connectors/data/enum/educational_resource_type + +python3 connectors/synchronization.py \ + -c connectors.example.enum.EnumConnectorEventMode \ + -w /opt/connectors/data/enum/event_mode + +python3 connectors/synchronization.py \ + -c connectors.example.enum.EnumConnectorEventStatus \ + -w /opt/connectors/data/enum/event_status + +python3 connectors/synchronization.py \ + -c connectors.example.enum.EnumConnectorLanguage \ + -w /opt/connectors/data/enum/language + +python3 connectors/synchronization.py \ + -c connectors.example.enum.EnumConnectorLicense \ + -w /opt/connectors/data/enum/license + +python3 connectors/synchronization.py \ + -c connectors.example.enum.EnumConnectorOrganisationType \ + -w /opt/connectors/data/enum/organisation_type + +python3 connectors/synchronization.py \ + -c connectors.example.enum.EnumConnectorStatus \ + -w /opt/connectors/data/enum/status \ No newline at end of file diff --git a/connectors/huggingface/datasets.sh b/connectors/huggingface/datasets.sh index 8b767360..99ba48c7 100755 --- a/connectors/huggingface/datasets.sh +++ b/connectors/huggingface/datasets.sh @@ -2,8 +2,6 @@ WORK_DIR=/opt/connectors/data/huggingface/dataset -mkdir -p $WORK_DIR - python3 connectors/synchronization.py \ -c connectors.huggingface.huggingface_dataset_connector.HuggingFaceDatasetConnector \ -w ${WORK_DIR} \ diff --git a/connectors/openml/entry.sh b/connectors/openml/entry.sh index 98e58ba0..5974cadc 100755 --- a/connectors/openml/entry.sh +++ b/connectors/openml/entry.sh @@ -3,8 +3,5 @@ # If this directory does not exist, the cron job cannot log (and cannot run) mkdir -p /opt/connectors/data/openml/dataset -# Run once on startup -bash /opt/connectors/script/datasets.sh >> /opt/connectors/data/openml/dataset/cron.log 2>&1 - # Run cron on the foreground with log level WARN /usr/sbin/cron -f -l 4 diff --git a/connectors/zenodo/datasets.sh b/connectors/zenodo/datasets.sh index be214cf9..810bba01 100755 --- a/connectors/zenodo/datasets.sh +++ b/connectors/zenodo/datasets.sh @@ -11,7 +11,6 @@ another_instance() exec 9< "$0" flock -n -x 9 || another_instance - echo $(date -u) "Starting synchronization..." PYTHONPATH=/app /usr/local/bin/python3 /app/connectors/synchronization.py \ -c $CONNECTOR \ diff --git a/connectors/zenodo/entry.sh b/connectors/zenodo/entry.sh index 5f3461fe..c6e5fc08 100755 --- a/connectors/zenodo/entry.sh +++ b/connectors/zenodo/entry.sh @@ -3,8 +3,5 @@ # If this directory does not exist, the cron job cannot log (and cannot run) mkdir -p /opt/connectors/data/zenodo/dataset -# run once on startup -bash /opt/connectors/script/datasets.sh >> /opt/connectors/data/zenodo/dataset/cron.log 2>&1 - # Run cron on the foreground with log level WARN /usr/sbin/cron -f -l 4 diff --git a/pyproject.toml b/pyproject.toml index e101b693..e1875d65 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,25 +13,24 @@ authors = [ {name = "Taniya Das", email = "t.das@tue.nl"} ] dependencies = [ + "urllib3== 2.0.4", "bibtexparser==1.4.0", "datasets==2.14.4", - "elasticsearch==8.9.0", - "fastapi==0.101.1", - "httpx==0.24.1", - "mysql-connector-python==8.1.0", + "fastapi==0.103.0", + "uvicorn==0.23.2", + "requests==2.31.0", "mysqlclient==2.2.0", "oic==1.6.0", + "python-keycloak==3.3.0", + "python-dotenv==1.0.0", "pydantic_schemaorg==1.0.6", "python-dateutil==2.8.2", - "python-dotenv==1.0.0", - "python-keycloak==3.3.0", - "python-multipart==0.0.6", - "requests==2.31.0", - "sickle==0.7.0", "sqlmodel==0.0.8", - "urllib3== 1.26.16", - "uvicorn==0.23.2", + "httpx==0.24.1", + "sickle==0.7.0", "xmltodict==0.13.0", + "python-multipart==0.0.6", + "mysql-connector-python==8.1.0", ] readme = "README.md" diff --git a/scripts/clean.sh b/scripts/clean.sh deleted file mode 100755 index c4fe0e5c..00000000 --- a/scripts/clean.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -#docker image rm ai4eu_server -#docker image rm ai4eu_openml_connector -#docker image rm ai4eu_zenodo_connector -echo "Deleted docker images, so that they will be rebuild on docker up." - - -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -ROOT=$(dirname $SCRIPT_DIR) -DATA=${ROOT}/data - -sudo rm -rf ${DATA}/mysql -sudo rm -rf ${DATA}/connectors -sudo rm -rf ${DATA}/elasticsearch -mkdir -p ${DATA}/mysql ${DATA}/connectors ${DATA}/elasticsearch -echo "Deleted everything from $DATA" From cae4321d23003f33d7af429808a605f29c18a471 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 30 Aug 2023 16:47:27 +0200 Subject: [PATCH 23/79] Logstash configuration readapted to new names --- logstash/pipeline/sql/init_temp_dataset.sql | 4 ++-- logstash/pipeline/sql/init_temp_experiment.sql | 4 ++-- logstash/pipeline/sql/init_temp_ml_model.sql | 4 ++-- logstash/pipeline/sql/init_temp_publication.sql | 4 ++-- logstash/pipeline/sql/init_temp_service.sql | 2 +- logstash/pipeline/sql/sync_temp_dataset.sql | 4 ++-- logstash/pipeline/sql/sync_temp_experiment.sql | 4 ++-- logstash/pipeline/sql/sync_temp_ml_model.sql | 4 ++-- logstash/pipeline/sql/sync_temp_publication.sql | 4 ++-- logstash/pipeline/sql/sync_temp_service.sql | 2 +- 10 files changed, 18 insertions(+), 18 deletions(-) diff --git a/logstash/pipeline/sql/init_temp_dataset.sql b/logstash/pipeline/sql/init_temp_dataset.sql index ab6359fd..e39acd5e 100644 --- a/logstash/pipeline/sql/init_temp_dataset.sql +++ b/logstash/pipeline/sql/init_temp_dataset.sql @@ -8,12 +8,12 @@ SELECT aiod_entry.date_modified, aiod_entry.date_created, -- Resource - dataset.resource_id AS `resource_identifier`, + dataset.ai_resource_id AS `resource_identifier`, dataset.name, dataset.description, dataset.same_as, -- AIAsset - dataset.asset_id AS `asset_identifier`, + dataset.ai_asset_id AS `asset_identifier`, dataset.date_published, dataset.version, license.name AS `license`, diff --git a/logstash/pipeline/sql/init_temp_experiment.sql b/logstash/pipeline/sql/init_temp_experiment.sql index d72d7fbd..709b7f19 100644 --- a/logstash/pipeline/sql/init_temp_experiment.sql +++ b/logstash/pipeline/sql/init_temp_experiment.sql @@ -8,12 +8,12 @@ SELECT aiod_entry.date_modified, aiod_entry.date_created, -- Resource - experiment.resource_id AS `resource_identifier`, + experiment.ai_resource_id AS `resource_identifier`, experiment.name, experiment.description, experiment.same_as, -- AIAsset - experiment.asset_id AS `asset_identifier`, + experiment.ai_asset_id AS `asset_identifier`, experiment.date_published, experiment.version, license.name AS `license`, diff --git a/logstash/pipeline/sql/init_temp_ml_model.sql b/logstash/pipeline/sql/init_temp_ml_model.sql index a53659ea..a090c99c 100644 --- a/logstash/pipeline/sql/init_temp_ml_model.sql +++ b/logstash/pipeline/sql/init_temp_ml_model.sql @@ -8,12 +8,12 @@ SELECT aiod_entry.date_modified, aiod_entry.date_created, -- Resource - ml_model.resource_id AS `resource_identifier`, + ml_model.ai_resource_id AS `resource_identifier`, ml_model.name, ml_model.description, ml_model.same_as, -- AIAsset - ml_model.asset_id AS `asset_identifier`, + ml_model.ai_asset_id AS `asset_identifier`, ml_model.date_published, ml_model.version, license.name AS `license`, diff --git a/logstash/pipeline/sql/init_temp_publication.sql b/logstash/pipeline/sql/init_temp_publication.sql index fd69a205..4119115b 100644 --- a/logstash/pipeline/sql/init_temp_publication.sql +++ b/logstash/pipeline/sql/init_temp_publication.sql @@ -8,12 +8,12 @@ SELECT aiod_entry.date_modified, aiod_entry.date_created, -- Resource - publication.resource_id AS `resource_identifier`, + publication.ai_resource_id AS `resource_identifier`, publication.name, publication.description, publication.same_as, -- AIAsset - publication.asset_id AS `asset_identifier`, + publication.ai_asset_id AS `asset_identifier`, publication.date_published, publication.version, license.name AS `license`, diff --git a/logstash/pipeline/sql/init_temp_service.sql b/logstash/pipeline/sql/init_temp_service.sql index 486160b5..8c3a7d16 100644 --- a/logstash/pipeline/sql/init_temp_service.sql +++ b/logstash/pipeline/sql/init_temp_service.sql @@ -8,7 +8,7 @@ SELECT aiod_entry.date_modified, aiod_entry.date_created, -- Resource - service.resource_id AS `resource_identifier`, + service.ai_resource_id AS `resource_identifier`, service.name, service.description, service.same_as, diff --git a/logstash/pipeline/sql/sync_temp_dataset.sql b/logstash/pipeline/sql/sync_temp_dataset.sql index 73c9d95b..38b9d024 100644 --- a/logstash/pipeline/sql/sync_temp_dataset.sql +++ b/logstash/pipeline/sql/sync_temp_dataset.sql @@ -8,12 +8,12 @@ SELECT aiod_entry.date_modified, aiod_entry.date_created, -- Resource - dataset.resource_id AS `resource_identifier`, + dataset.ai_resource_id AS `resource_identifier`, dataset.name, dataset.description, dataset.same_as, -- AIAsset - dataset.asset_id AS `asset_identifier`, + dataset.ai_asset_id AS `asset_identifier`, dataset.date_published, dataset.version, license.name AS `license`, diff --git a/logstash/pipeline/sql/sync_temp_experiment.sql b/logstash/pipeline/sql/sync_temp_experiment.sql index 181520f3..c81b4efb 100644 --- a/logstash/pipeline/sql/sync_temp_experiment.sql +++ b/logstash/pipeline/sql/sync_temp_experiment.sql @@ -8,12 +8,12 @@ SELECT aiod_entry.date_modified, aiod_entry.date_created, -- Resource - experiment.resource_id AS `resource_identifier`, + experiment.ai_resource_id AS `resource_identifier`, experiment.name, experiment.description, experiment.same_as, -- AIAsset - experiment.asset_id AS `asset_identifier`, + experiment.ai_asset_id AS `asset_identifier`, experiment.date_published, experiment.version, license.name AS `license`, diff --git a/logstash/pipeline/sql/sync_temp_ml_model.sql b/logstash/pipeline/sql/sync_temp_ml_model.sql index 6ca2880d..5fb0137c 100644 --- a/logstash/pipeline/sql/sync_temp_ml_model.sql +++ b/logstash/pipeline/sql/sync_temp_ml_model.sql @@ -8,12 +8,12 @@ SELECT aiod_entry.date_modified, aiod_entry.date_created, -- Resource - ml_model.resource_id AS `resource_identifier`, + ml_model.ai_resource_id AS `resource_identifier`, ml_model.name, ml_model.description, ml_model.same_as, -- AIAsset - ml_model.asset_id AS `asset_identifier`, + ml_model.ai_asset_id AS `asset_identifier`, ml_model.date_published, ml_model.version, license.name AS `license`, diff --git a/logstash/pipeline/sql/sync_temp_publication.sql b/logstash/pipeline/sql/sync_temp_publication.sql index 99b38a68..24f199d9 100644 --- a/logstash/pipeline/sql/sync_temp_publication.sql +++ b/logstash/pipeline/sql/sync_temp_publication.sql @@ -8,12 +8,12 @@ SELECT aiod_entry.date_modified, aiod_entry.date_created, -- Resource - publication.resource_id AS `resource_identifier`, + publication.ai_resource_id AS `resource_identifier`, publication.name, publication.description, publication.same_as, -- AIAsset - publication.asset_id AS `asset_identifier`, + publication.ai_asset_id AS `asset_identifier`, publication.date_published, publication.version, license.name AS `license`, diff --git a/logstash/pipeline/sql/sync_temp_service.sql b/logstash/pipeline/sql/sync_temp_service.sql index d581d037..f06aa76f 100644 --- a/logstash/pipeline/sql/sync_temp_service.sql +++ b/logstash/pipeline/sql/sync_temp_service.sql @@ -8,7 +8,7 @@ SELECT aiod_entry.date_modified, aiod_entry.date_created, -- Resource - service.resource_id AS `resource_identifier`, + service.ai_resource_id AS `resource_identifier`, service.name, service.description, service.same_as, From 80aef7d51b677df37761042d08c1446e0d69d9e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 30 Aug 2023 16:57:02 +0200 Subject: [PATCH 24/79] Logstash configuration readapted to new names --- logstash/pipeline/sql/init_temp_experiment.sql | 1 - logstash/pipeline/sql/init_temp_ml_model.sql | 1 - logstash/pipeline/sql/sync_temp_experiment.sql | 1 - logstash/pipeline/sql/sync_temp_ml_model.sql | 1 - 4 files changed, 4 deletions(-) diff --git a/logstash/pipeline/sql/init_temp_experiment.sql b/logstash/pipeline/sql/init_temp_experiment.sql index 709b7f19..6811d178 100644 --- a/logstash/pipeline/sql/init_temp_experiment.sql +++ b/logstash/pipeline/sql/init_temp_experiment.sql @@ -18,7 +18,6 @@ SELECT experiment.version, license.name AS `license`, -- Experiment - experiment.permanent_identifier, experiment.experimental_workflow, experiment.execution_settings, experiment.reproducibility_explanation diff --git a/logstash/pipeline/sql/init_temp_ml_model.sql b/logstash/pipeline/sql/init_temp_ml_model.sql index a090c99c..de8ce17a 100644 --- a/logstash/pipeline/sql/init_temp_ml_model.sql +++ b/logstash/pipeline/sql/init_temp_ml_model.sql @@ -18,7 +18,6 @@ SELECT ml_model.version, license.name AS `license`, -- MLModel - ml_model.permanent_identifier, ml_model_type.name AS `ml_model_type` FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier diff --git a/logstash/pipeline/sql/sync_temp_experiment.sql b/logstash/pipeline/sql/sync_temp_experiment.sql index c81b4efb..f100fc39 100644 --- a/logstash/pipeline/sql/sync_temp_experiment.sql +++ b/logstash/pipeline/sql/sync_temp_experiment.sql @@ -18,7 +18,6 @@ SELECT experiment.version, license.name AS `license`, -- Experiment - experiment.permanent_identifier, experiment.experimental_workflow, experiment.execution_settings, experiment.reproducibility_explanation diff --git a/logstash/pipeline/sql/sync_temp_ml_model.sql b/logstash/pipeline/sql/sync_temp_ml_model.sql index 5fb0137c..b61d35fa 100644 --- a/logstash/pipeline/sql/sync_temp_ml_model.sql +++ b/logstash/pipeline/sql/sync_temp_ml_model.sql @@ -18,7 +18,6 @@ SELECT ml_model.version, license.name AS `license`, -- MLModel - ml_model.permanent_identifier, ml_model_type.name AS `ml_model_type` FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier From 69ffa34b4cb8e1f2860a6349f94831ad19f2ac24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 31 Aug 2023 20:41:34 +0200 Subject: [PATCH 25/79] added ai4experiments to platform names --- src/database/model/platform/platform_names.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/database/model/platform/platform_names.py b/src/database/model/platform/platform_names.py index 1a74161e..dadb61d4 100644 --- a/src/database/model/platform/platform_names.py +++ b/src/database/model/platform/platform_names.py @@ -13,3 +13,4 @@ class PlatformName(str, enum.Enum): openml = "openml" huggingface = "huggingface" zenodo = "zenodo" + ai4experiments = "ai4experiments" From ba49bbbfdda5148a3bba4cdfb82d62d09058ea7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 5 Sep 2023 16:14:17 +0200 Subject: [PATCH 26/79] Copied initial search routers to start creating them --- connectors/fill-examples.sh | 6 +- es/setup/curl.sh | 5 +- es/setup/curl_dockerfile | 5 +- es/setup/dataset.json | 4 - es/setup/publication.json | 61 --------- src/routers/search_router.py | 120 ++++++++++++++++++ src/routers/search_routers/__init__.py | 0 .../search_routers/search_router_datasets.py | 16 +++ .../search_router_publications.py | 20 +++ 9 files changed, 169 insertions(+), 68 deletions(-) create mode 100644 src/routers/search_router.py create mode 100644 src/routers/search_routers/__init__.py create mode 100644 src/routers/search_routers/search_router_datasets.py create mode 100644 src/routers/search_routers/search_router_publications.py diff --git a/connectors/fill-examples.sh b/connectors/fill-examples.sh index cabd3a6f..320a5867 100755 --- a/connectors/fill-examples.sh +++ b/connectors/fill-examples.sh @@ -8,6 +8,10 @@ python3 connectors/synchronization.py \ -c connectors.example.example.ExampleComputationalAssetConnector \ -w /opt/connectors/data/example/computational_asset +python3 connectors/synchronization.py \ + -c connectors.example.example.ExampleDatasetConnector \ + -w /opt/connectors/data/example/datasset + python3 connectors/synchronization.py \ -c connectors.example.example.ExampleEducationalResourceConnector \ -w /opt/connectors/data/example/educational_resource @@ -84,4 +88,4 @@ python3 connectors/synchronization.py \ python3 connectors/synchronization.py \ -c connectors.example.enum.EnumConnectorStatus \ - -w /opt/connectors/data/enum/status \ No newline at end of file + -w /opt/connectors/data/enum/status diff --git a/es/setup/curl.sh b/es/setup/curl.sh index c7510b3c..8e92b07f 100755 --- a/es/setup/curl.sh +++ b/es/setup/curl.sh @@ -1,2 +1,5 @@ -curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/publication?pretty -H 'Content-Type: application/json' -d @/publication.json curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/dataset?pretty -H 'Content-Type: application/json' -d @/dataset.json +curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/experiment?pretty -H 'Content-Type: application/json' -d @/experiment.json +curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/ml_model?pretty -H 'Content-Type: application/json' -d @/ml_model.json +curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/publication?pretty -H 'Content-Type: application/json' -d @/publication.json +curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/service?pretty -H 'Content-Type: application/json' -d @/service.json diff --git a/es/setup/curl_dockerfile b/es/setup/curl_dockerfile index cda3901b..183cea06 100644 --- a/es/setup/curl_dockerfile +++ b/es/setup/curl_dockerfile @@ -2,8 +2,11 @@ FROM ubuntu:22.04 RUN apt-get update && apt-get install -y curl -COPY publication.json /publication.json COPY dataset.json /dataset.json +COPY experiment.json /experiment.json +COPY ml_model.json /ml_model.json +COPY publication.json /publication.json +COPY service.json /service.json COPY curl.sh /curl.sh ENTRYPOINT ["/bin/bash", "/curl.sh"] diff --git a/es/setup/dataset.json b/es/setup/dataset.json index 2fec49cc..e779ead0 100644 --- a/es/setup/dataset.json +++ b/es/setup/dataset.json @@ -43,10 +43,6 @@ "type" : "long", "index" : false }, - "is_accessible_for_free" : { - "type" : "boolean", - "index" : false - }, "issn" : { "type" : "text", "fields" : { diff --git a/es/setup/publication.json b/es/setup/publication.json index c24e2772..4a4641f2 100644 --- a/es/setup/publication.json +++ b/es/setup/publication.json @@ -15,30 +15,10 @@ } } }, - "access_right" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, "asset_identifier" : { "type" : "long", "index" : false }, - "creators" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, "date_created" : { "type" : "date", "index" : false @@ -59,16 +39,6 @@ } } }, - "doi" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, "identifier" : { "type" : "long", "index" : false @@ -91,10 +61,6 @@ } } }, - "knowledge_asset_identifier" : { - "type" : "long", - "index" : false - }, "license" : { "type" : "text", "index" : false, @@ -105,10 +71,6 @@ } } }, - "license_identifier" : { - "type" : "long", - "index" : false - }, "name" : { "type" : "text", "fields" : { @@ -162,10 +124,6 @@ "type" : "long", "index" : false }, - "resource_type_identifier" : { - "type" : "long", - "index" : false - }, "same_as" : { "type" : "text", "index" : false, @@ -186,15 +144,6 @@ } } }, - "title" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, "type" : { "type" : "text", "index" : false, @@ -205,16 +154,6 @@ } } }, - "url" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, "version" : { "type" : "text", "index" : false, diff --git a/src/routers/search_router.py b/src/routers/search_router.py new file mode 100644 index 00000000..a6b5c920 --- /dev/null +++ b/src/routers/search_router.py @@ -0,0 +1,120 @@ +import abc +import os +from typing import TypeVar, Generic, Any, Type + +from elasticsearch import Elasticsearch +from fastapi import APIRouter, Depends, HTTPException +from pydantic import BaseModel +from sqlalchemy.engine import Engine +from starlette import status + +from authentication import get_current_user, has_role +from database.model.concept.aiod_entry import AIoDEntryRead +from database.model.resource_read_and_create import resource_read +from routers.router import AIoDRouter + +SORT = {"identifier": "asc"} +LIMIT_MAX = 1000 + +RESOURCE = TypeVar("RESOURCE") + + +class SearchResult(BaseModel, Generic[RESOURCE]): + total_hits: int + resources: list[RESOURCE] + next_offset: list | None + + +class SearchRouter(AIoDRouter, Generic[RESOURCE], abc.ABC): + """ + Providing search functionality in ElasticSearch + """ + + def __init__(self, client: Elasticsearch): + self.client: Elasticsearch = client + + @property + @abc.abstractmethod + def es_index(self) -> str: + """The name of the elasticsearch index""" + + @property + @abc.abstractmethod + def resource_name_plural(self) -> str: + """The name of the resource (plural)""" + + @property + def key_translations(self) -> dict[str, str]: + """If an attribute is called differently in elasticsearch than in our metadata model, + you can define a translation dictionary here. The key should be the name in + elasticsearch, the value the name in our data model.""" + return {} + + @property + @abc.abstractmethod + def resource_class(self) -> RESOURCE: + """The resource class""" + + def create(self, engine: Engine, url_prefix: str) -> APIRouter: + router = APIRouter() + read_class = resource_read(self.resource_class) # type: ignore + + @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", tags=["search"]) + def search( + name: str = "", + limit: int = 10, + offset: str | None = None, # TODO: this should not be a string + user: dict = Depends(get_current_user), + ) -> SearchResult[read_class]: # type: ignore + f""" + Search for {self.resource_name_plural}. + """ + if limit > LIMIT_MAX: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"The limit should be maximum {LIMIT_MAX}. If you want more results, " + f"use pagination.", + ) + + if not has_role(user, os.getenv("ES_ROLE")): + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="You do not have permission to search Aiod resources.", + ) + + query = {"bool": {"must": {"match": {"name": name}}}} + result = self.client.search( + index=self.es_index, query=query, size=limit, sort=SORT, search_after=offset + ) + + total_hits = result["hits"]["total"]["value"] + resources: list[read_class] = [ # type: ignore + self._cast_resource(read_class, hit["_source"]) # type: ignore + for hit in result["hits"]["hits"] + ] + next_offset = ( + result["hits"]["hits"][-1]["sort"] if len(result["hits"]["hits"]) > 0 else None + ) + return SearchResult[read_class]( # type: ignore + total_hits=total_hits, + next_offset=next_offset, + resources=resources, + ) + + return router + + def _cast_resource( + self, resource_class: RESOURCE, resource_dict: dict[str, Any] + ) -> Type[RESOURCE]: + kwargs = { + self.key_translations.get(key, key): val + for key, val in resource_dict.items() + if key != "type" and not key.startswith("@") + } + resource = resource_class(**kwargs) # type: ignore + resource.aiod_entry = AIoDEntryRead( + date_modified=resource_dict["date_modified"], + date_created=resource_dict["date_created"], + status=resource_dict["status"], + ) + return resource diff --git a/src/routers/search_routers/__init__.py b/src/routers/search_routers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/routers/search_routers/search_router_datasets.py b/src/routers/search_routers/search_router_datasets.py new file mode 100644 index 00000000..b0b2ed43 --- /dev/null +++ b/src/routers/search_routers/search_router_datasets.py @@ -0,0 +1,16 @@ +from database.model.dataset.dataset import Dataset +from routers.search_router import SearchRouter + + +class SearchRouterDatasets(SearchRouter[Dataset]): + @property + def es_index(self) -> str: + return "dataset" + + @property + def resource_name_plural(self) -> str: + return "datasets" + + @property + def resource_class(self): + return Dataset diff --git a/src/routers/search_routers/search_router_publications.py b/src/routers/search_routers/search_router_publications.py new file mode 100644 index 00000000..bf213445 --- /dev/null +++ b/src/routers/search_routers/search_router_publications.py @@ -0,0 +1,20 @@ +from database.model.knowledge_asset.publication import Publication +from routers.search_router import SearchRouter + + +class SearchRouterPublications(SearchRouter[Publication]): + @property + def es_index(self) -> str: + return "publication" + + @property + def resource_name_plural(self) -> str: + return "publications" + + @property + def resource_class(self): + return Publication + + @property + def key_translations(self) -> dict: + return {"publication_type": "type"} From ce08b0abc1aef8b707d573e2dd3fbe887b978b0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 6 Sep 2023 16:14:53 +0200 Subject: [PATCH 27/79] Examples of ml_model, dataset and experiment used to insert ai4experiment data --- .../example/resources/resource/datasets.json | 6253 ++++++++++++- .../resources/resource/experiments.json | 2072 ++++- .../example/resources/resource/ml_models.json | 8172 ++++++++++++++++- 3 files changed, 16246 insertions(+), 251 deletions(-) diff --git a/src/connectors/example/resources/resource/datasets.json b/src/connectors/example/resources/resource/datasets.json index 37c4411a..b225f03b 100644 --- a/src/connectors/example/resources/resource/datasets.json +++ b/src/connectors/example/resources/resource/datasets.json @@ -1,101 +1,6212 @@ [ { - "platform": "example", - "platform_identifier": "1", - "name": "The name of this dataset", - "description": "A description.", - "same_as": "https://www.example.com/resource/this_resource", - "date_published": "2022-01-01T15:15:00.000", - "version": "1.1.0", - "issn": "20493630", - "measurement_technique": "mass spectrometry", - "temporal_coverage": "2011/2012", + "platform": "ai4experiments", + "platform_identifier": "5", + "name": "autoUniv-au1-1000", + "description": "https://openml.org

Author: Ray. J. Hickey

Source: UCI

Please cite:



  • Dataset Title:


AutoUniv Dataset

data problem: autoUniv-au1-1000



  • Abstract:


AutoUniv is an advanced data generator for classifications tasks. The aim is to reflect the nuances and heterogeneity of real data. Data can be generated in .csv, ARFF or C4.5 formats.



  • Source:


AutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com
AutoUniv web-site: http://sites.google.com/site/autouniv/.



  • Data Set Information:


The user first creates a classification model and then generates classified examples from it. To create a model, the following are specified: the number of attributes (up to 1000) and their type (discrete or continuous), the number of classes (up to 10), the complexity of the underlying rules and the noise level. AutoUniv then produces a model through a process of constrained randomised search to satisfy the user's requirements. A model can have up to 3000 rules. Rare class models can be designed. A sequence of models can be designed to reflect concept and/or population drift.


AutoUniv creates three text files for a model: a Prolog specification of the model used to generate examples (.aupl); a user-friendly statement of the classification rules in an 'if ... then' format (.aurules); a statistical summary of the main properties of the model, including its Bayes rate (.auprops).



  • Attribute Information:


Attributes may be discrete with up to 10 values or continuous. A discrete attribute ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=01dd6e8a-f4b1-4c5e-9206-0e40c8031be6&revisionId=39245fff-57fa-45c5-9f6a-49abf71e99b6&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", "aiod_entry": { "editor": [], "status": "draft" }, - "alternate_name": [ - "alias 1", - "alias 2" + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=01dd6e8a-f4b1-4c5e-9206-0e40c8031be6&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/01dd6e8a-f4b1-4c5e-9206-0e40c8031be6/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "6", + "name": "schizo", + "description": "https://openml.org

Author:

Source: Unknown - Date unknown

Please cite:


Schizophrenic Eye-Tracking Data in Rubin and Wu (1997)
Biometrics. Yingnian Wu (wu@hustat.harvard.edu) [14/Oct/97]


Information about the dataset
CLASSTYPE: nominal
CLASSINDEX: last

", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0247b1c5-2161-4367-96ea-4aa9370b8bb6&revisionId=9c637a1f-22db-49df-ab7d-d0763058e9e9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0247b1c5-2161-4367-96ea-4aa9370b8bb6&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0247b1c5-2161-4367-96ea-4aa9370b8bb6/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "7", + "name": "calendarDOW", + "description": "https://openml.org

calendarDOW-pmlb

", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=026c2720-4045-48c9-87ec-9791c120bb85&revisionId=b2df4780-1459-41ed-abbb-fb38f2697a04&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=026c2720-4045-48c9-87ec-9791c120bb85&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/026c2720-4045-48c9-87ec-9791c120bb85/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "8", + "name": "GesturePhaseSegmentationProcessed", + "description": "https://openml.org

Author: Renata Cristina Barros Madeo (Madeo\",\"R. C. B.) Priscilla Koch Wagner (Wagner\",\"P. K.) Sarajane Marques Peres (Peres\",\"S. M.) {renata.si\",\"priscilla.wagner\",\"sarajane} at usp.br http://each.uspnet.usp.br/sarajane/

Source: UCI

Please cite: Please refer to the Machine Learning Repository's citation policy. Additionally, the authors require a citation to one or more publications from those cited as relevant papers.


Creators:
Renata Cristina Barros Madeo (Madeo, R. C. B.)
Priscilla Koch Wagner (Wagner, P. K.)
Sarajane Marques Peres (Peres, S. M.)
{renata.si, priscilla.wagner, sarajane} at usp.br
http://each.uspnet.usp.br/sarajane/


Donor:
University of Sao Paulo - Brazil


Data Set Information:


The dataset is composed by features extracted from 7 videos with people gesticulating, aiming at studying Gesture Phase Segmentation.
Each video is represented by two files: a raw file, which contains the position of hands, wrists, head and spine of the user in each frame; and a processed file, which contains velocity and acceleration of hands and wrists. See the data set description for more information on the dataset.


Attribute Information:


Raw files: 18 numeric attributes (double), a timestamp and a class attribute (nominal).
Processed files: 32 numeric attributes (double) and a class attribute (nominal).
A feature vector with up to 50 numeric attributes can be generated with the two files mentioned above.


This is the processe", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=03a938ee-181d-4409-a806-199034e5172b&revisionId=73ee8559-e5f5-45c4-b15b-56392843644f&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=03a938ee-181d-4409-a806-199034e5172b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/03a938ee-181d-4409-a806-199034e5172b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "9", + "name": "haberman", + "description": "https://openml.org

Author:

Source: Unknown -

Please cite:




  1. Title: Haberman's Survival Data




  2. Sources:
    (a) Donor: Tjen-Sien Lim (limt@stat.wisc.edu)
    (b) Date: March 4, 1999




  3. Past Usage:



    1. Haberman, S. J. (1976). Generalized Residuals for Log-Linear
      Models, Proceedings of the 9th International Biometrics
      Conference, Boston, pp. 104-122.

    2. Landwehr, J. M., Pregibon, D., and Shoemaker, A. C. (1984),
      Graphical Models for Assessing Logistic Regression Models (with
      discussion), Journal of the American Statistical Association 79:
      61-83.

    3. Lo, W.-D. (1993). Logistic Regression Trees, PhD thesis,
      Department of Statistics, University of Wisconsin, Madison, WI.




  4. Relevant Information:
    The dataset contains cases from a study that was conducted between
    1958 and 1970 at the University of Chicago's Billings Hospital on
    the survival of patients who had undergone surgery for breast
    cancer.




  5. Number of Instances: 306




  6. Number of Attributes: 4 (including the class attribute)




  7. Attribute Information:



    1. Age of patient at time of operation (numerical)

    2. Patient's year of operation (year - 1900, numerical)

    3. Number of positive axillary nodes detected (numerical)

    4. Survival status (class attribute)
      1 = the patient survived 5 years or longer
      2 = the patient died within 5 year




  8. Missing Attribute Values: None


    <", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=03cc6248-6087-45e0-a732-6d34e299934e&revisionId=e36bfd43-b146-47b4-ad8b-f1cca7ef09c0&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=03cc6248-6087-45e0-a732-6d34e299934e&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/03cc6248-6087-45e0-a732-6d34e299934e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "13", + "name": "sudoku-tutorial-gui-stream", + "description": "



    This is the streaming version User Interface component of the AI4EU Experiments Sudoku Hello World!


    For more details, see the corresponding entry in the AI4EU Asset Catalog: https://www.ai4europe.eu/research/ai-catalog/sudoku-design-assistant-gui










    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=06c6909b-7c7d-4a09-8199-e3d647ba144d&revisionId=edc4ecbd-8189-4021-83ca-44e046f41127&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=06c6909b-7c7d-4a09-8199-e3d647ba144d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/06c6909b-7c7d-4a09-8199-e3d647ba144d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "17", + "name": "GAMETES_Epistasis_2-Way_20atts_0.1H_EDM-1_1", + "description": "https://openml.org

    GAMETES_Epistasis_2-Way_20atts_0.1H_EDM-1_1-pmlb

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=08819c99-9458-48de-84e1-83290b73caa7&revisionId=a718624c-d501-459f-8ad6-7628dbcf60a9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=08819c99-9458-48de-84e1-83290b73caa7&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/08819c99-9458-48de-84e1-83290b73caa7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "20", + "name": "mushroom", + "description": "https://openml.org

    Author: Jeff Schlimmer

    Source: UCI - 1981

    Please cite: The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf


    Description


    This dataset describes mushrooms in terms of their physical characteristics. They are classified into: poisonous or edible.


    Source


    ```
    (a) Origin:
    Mushroom records are drawn from The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf


    (b) Donor:
    Jeff Schlimmer (Jeffrey.Schlimmer '@' a.gp.cs.cmu.edu)
    ```


    Dataset description


    This dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family. Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like ``leaflets three, let it be'' for Poisonous Oak and Ivy.


    Attributes Information


    1. cap-shape: bell=b,conical=c,convex=x,flat=f, knobbed=k,sunken=s
    2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s
    3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r, pink=p,purple=u,red=e,white=w,yellow=y
    4. bruises?: bruises=t,no=f
    5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f, musty=m,none=n,pungent=p,spicy=s
    6. gill-attachment: attached=a,descending=d,", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0a6e6072-441e-4274-bf2a-6216def228bd&revisionId=d6acfed4-6030-4b57-ac62-277a78f4592d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0a6e6072-441e-4274-bf2a-6216def228bd&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0a6e6072-441e-4274-bf2a-6216def228bd/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "21", + "name": "ecoli", + "description": "https://openml.org

    ecoli-pmlb

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0c052358-19dd-4904-a209-f58f7457623e&revisionId=bbd47da9-e81c-43da-97ba-490c32c80089&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0c052358-19dd-4904-a209-f58f7457623e&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0c052358-19dd-4904-a209-f58f7457623e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "22", + "name": "AudioFileBroker", + "description": "

    This model is used for the beginning of an audio mining pipeline and dispachtes the audio files.

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0c4d6ad9-c9df-4054-a030-e8d22613afc5&revisionId=b3a2910a-0c19-47e8-9521-8482c203b49f&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0c4d6ad9-c9df-4054-a030-e8d22613afc5&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0c4d6ad9-c9df-4054-a030-e8d22613afc5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "23", + "name": "ai4iot-data-source", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&revisionId=016bab07-2b2b-4bbd-8384-fb489403012b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0e228e69-9703-4445-b1f9-e6d1da1446da/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "24", + "name": "ai4iot-data-source", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&revisionId=663f1188-9f5a-4cf0-8d9e-b3f2aaaf863b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0e228e69-9703-4445-b1f9-e6d1da1446da/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "25", + "name": "ai4iot-data-source", + "description": "

    The Data Source component serves as an aggregator of data incoming from different services, and which is useful for the AI4IoT pipeline. In particular, it connects to external APIs and provides data in an unified (and standardized through protobuf message definition) way. The AI4IoT tackles air quality in the city of Trondheim, Norway. Therefore, the current version of this component fetches data for this city. The structure can, however, be replicated to any other place by extending the scripts with the given API calls for the place of interest. Currently, available data through this component is pollution measurements both from a network of low-cost sensors, a (much smaller) network of industrial sensors and meteorological data.

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&revisionId=a68fc42c-2e73-4328-97dc-34424eec75c5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.3", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&version=1.0.3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0e228e69-9703-4445-b1f9-e6d1da1446da/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "26", + "name": "ai4iot-data-source", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&revisionId=b42fb848-ad2d-408c-897e-b25932fe2b93&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0e228e69-9703-4445-b1f9-e6d1da1446da/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "27", + "name": "wdbc", + "description": "https://openml.org

    Author: William H. Wolberg, W. Nick Street, Olvi L. Mangasarian

    Source: UCI, University of Wisconsin - 1995

    Please cite: UCI


    Breast Cancer Wisconsin (Diagnostic) Data Set (WDBC). Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. The target feature records the prognosis (benign (1) or malignant (2)). Original data available here


    Current dataset was adapted to ARFF format from the UCI version. Sample code ID's were removed.


    ! Note that there is also a related Breast Cancer Wisconsin (Original) Data Set with a different set of features, better known as breast-w.


    Feature description


    Ten real-valued features are computed for each of 3 cell nuclei, yielding a total of 30 descriptive features. See the papers below for more details on how they were computed. The 10 features (in order) are:


    a) radius (mean of distances from center to points on the perimeter)

    b) texture (standard deviation of gray-scale values)

    c) perimeter

    d) area

    e) smoothness (local variation in radius lengths)

    f) compactness (perimeter^2 / area - 1.0)

    g) concavity (severity of concave portions of the contour)

    h) conca", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0f467230-f8cf-4e8e-8ef0-1428d5147b29&revisionId=7b0db765-cd12-44cc-b22a-b9b92b31bdf4&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0f467230-f8cf-4e8e-8ef0-1428d5147b29&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0f467230-f8cf-4e8e-8ef0-1428d5147b29/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "29", + "name": "liver-disorders", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1061beb3-646c-458a-bb10-6bea01fce9d7&revisionId=678ca961-e726-4070-9a38-c19602648ecf&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1061beb3-646c-458a-bb10-6bea01fce9d7&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/1061beb3-646c-458a-bb10-6bea01fce9d7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "30", + "name": "liver-disorders", + "description": "https://openml.org

    Author: BUPA Medical Research Ltd. Donor: Richard S. Forsyth

    Source: UCI - 5/15/1990

    Please cite:


    BUPA liver disorders


    The first 5 variables are all blood tests which are thought to be sensitive to liver disorders that might arise from excessive alcohol consumption. Each line in the dataset constitutes the record of a single male individual.


    Important note: The 7th field (selector) has been widely misinterpreted in the past as a dependent variable representing presence or absence of a liver disorder. This is incorrect [1]. The 7th field was created by BUPA researchers as a train/test selector. It is not suitable as a dependent variable for classification. The dataset does not contain any variable representing presence or absence of a liver disorder. Researchers who wish to use this dataset as a classification benchmark should follow the method used in experiments by the donor (Forsyth & Rada, 1986, Machine learning: applications in expert systems and information retrieval) and others (e.g. Turney, 1995, Cost-sensitive classification: Empirical evaluation of a hybrid genetic decision tree induction algorithm), who used the 6th field (drinks), after dichotomising, as a dependent variable for classification. Because of widespread misinterpretation in the past, researchers should take care to state their method clearly.


    Attribute information

    1. mcv mean corpuscular volume

    2. alkphos alkaline phosphotase

    3. sgpt alanine aminotransferase

    4. sgot aspartate aminotra", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1061beb3-646c-458a-bb10-6bea01fce9d7&revisionId=94f838e4-944a-401d-84a7-49b5582a540b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1061beb3-646c-458a-bb10-6bea01fce9d7&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/1061beb3-646c-458a-bb10-6bea01fce9d7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "34", + "name": "grpc_hydro_hubeau", + "description": "

    Connector to get French hydrology data.


    The API makes it possible to interrogate the French hydrometric reference system (sites and observation stations of the French measurement network) as well as the observations of water level (H) and flow (Q) called \"real time\".


    The API is updated every 2 minutes over 24 hours deep and maintains a one month history.


    The data disseminated is the raw data measured in the field, without expertise or improvements made by hydrometers.


    Observations are expressed in the following units:


    • mm for water heights (divide by 1000 to convert to meters);
    • l / s for flow rates (divide by 1000 to convert to m3 / s).

    Dates are expressed in Coordinated Universal Time (UTC) in ISO 8601 format.


    In metropolitan France, add 1 hour to UTC time during winter time, and 2 hours during summer time. In Guadeloupe and Martinique, subtract 4 hours from UTC time; In Guyana subtract 3 hours from UTC time; In Mayotte add 3 hours to UTC time; In Reunion, add 4 hours to UTC time.

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=11b6681c-d8df-49c0-ba38-480b3ee2f63c&revisionId=ad13c11d-9d68-4101-a325-e9da62142ce0&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.3", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=11b6681c-d8df-49c0-ba38-480b3ee2f63c&version=1.0.3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/11b6681c-d8df-49c0-ba38-480b3ee2f63c/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "38", + "name": "wilt", + "description": "https://openml.org

    Author: Brian Johnson

    Source: [UCI] (https://archive.ics.uci.edu/ml/datasets/Wilt)

    Please cite: Johnson, B., Tateishi, R., Hoan, N., 2013. A hybrid pansharpening approach and multiscale object-based image analysis for mapping diseased pine and oak trees. International Journal of Remote Sensing, 34 (20), 6969-6982.


    Changes w.r.t. version 1: renamed variables such that they match description.


    Dataset:


    Wilt Data Set


    Abstract:


    High-resolution Remote Sensing data set (Quickbird). Small number of training samples of diseased trees, large number for other land cover. Testing data set from stratified random sample of image.


    Source:


    Brian Johnson;
    Institute for Global Environmental Strategies;
    2108-11 Kamiyamaguchi, Hayama, Kanagawa,240-0115 Japan;
    Email: Johnson '@' iges.or.jp


    Data Set Information:


    This data set contains some training and testing data from a remote sensing study by Johnson et al. (2013) that involved detecting diseased trees in Quickbird imagery. There are few training samples for the 'diseased trees' class (74) and many for 'other land cover' class (4265).


    The data set consists of image segments, generated by segmenting the pansharpened image. The segments contain spectral information from the Quickbird multispectral image bands and texture information from the panchromatic (Pan) image band. The testing data set is for the row with \u00e2\u20ac\u0153Segmentation scale 15\u00e2\u20ac\u009d segments and \u00e2\u20ac\u0153original multi-spectral image\u00e2\u20ac\u009d Spectral information in Table 2 of the reference (i.e. row 5). Please see the reference below for more information on", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1413584f-738b-4241-9b60-80228e509fb7&revisionId=0bc075c6-0a74-48a0-97fb-b1dd62870920&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1413584f-738b-4241-9b60-80228e509fb7&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/1413584f-738b-4241-9b60-80228e509fb7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "39", + "name": "cjs", + "description": "https://openml.org

    Author: Dr. Fernando Camacho

    Source: Unknown - 1995

    Please cite: Camacho, F. and Arron, G. (1995) Effects of the regulators paclobutrazol and flurprimidol on the growth of terminal sprouts formed on trimmed silver maple trees. Canadian Journal of Statistics 3(23).


    Data on tree growth used in the Case Study published in the September, 1995 issue of the Canadian Journal of Statistics. This data set was been provided by Dr. Fernando Camacho, Ontario Hydro Technologies, 800 Kipling Ave, Toronto Canada M3Z 5S4. It forms the basis of the Case Study in Data Analysis published in the Canadian Journal of Statistics, September 1995. It can be freely used for noncommercial purposes, as long as proper acknowledgement to the source and to the Canadian Journal of Statistics is made.


    Description


    The effects of the Growth Regulators Paclobutrazol (PP 333)
    and Flurprimidol (EL-500) on the Number and Length of Internodes
    in Terminal Sprouts Formed on Trimmed Silver Maple Trees.


    Introduction:


    The trimming of trees under distribution lines on city streets and
    in rural areas is a major problem and expense for electrical
    utilities. Such operations are routinely performed at intervals of
    one to eight years depending upon the individual species growth rate
    and the amount of clearance required. Ontario Hydro trims about
    500,000 trees per year at a cost of about $25 per tree.


    Much effort has been spent in developing chemicals for the horticultural
    industry to retard the growth of woody and herbaceous plants. Recently,
    a group of new growth regulators was introduced which was shown to be
    effective in controlli", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=141de15b-91a7-4dcb-9eb3-4297e217c3de&revisionId=62ae0822-9a5e-4003-afb5-3fef610694cd&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=141de15b-91a7-4dcb-9eb3-4297e217c3de&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/141de15b-91a7-4dcb-9eb3-4297e217c3de/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "40", + "name": "credit-g", + "description": "https://openml.org

    Author: Dr. Hans Hofmann

    Source: UCI - 1994

    Please cite: UCI


    German Credit data

    This dataset classifies people described by a set of attributes as good or bad credit risks.


    This dataset comes with a cost matrix:
    Good Bad (predicted)
    Good 0 1 (actual)
    Bad 5 0


    It is worse to class a customer as good when they are bad (5), than it is to class a customer as bad when they are good (1).


    Attribute description



    1. Status of existing checking account, in Deutsche Mark.

    2. Duration in months

    3. Credit history (credits taken, paid back duly, delays, critical accounts)

    4. Purpose of the credit (car, television,...)

    5. Credit amount

    6. Status of savings account/bonds, in Deutsche Mark.

    7. Present employment, in number of years.

    8. Installment rate in percentage of disposable income

    9. Personal status (married, single,...) and sex

    10. Other debtors / guarantors

    11. Present residence since X years

    12. Property (e.g. real estate)

    13. Age in years

    14. Other installment plans (banks, stores)

    15. Housing (rent, own,...)

    16. Number of existing credits at this bank

    17. Job

    18. Number of people being liable to provide maintenance for

    19. Telephone (yes,no)

    20. Foreign worker (yes,no)

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=14578085-5f08-4275-a790-5a9cfbefb412&revisionId=ce377185-b3f0-4f39-8910-d6296ddef03b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=14578085-5f08-4275-a790-5a9cfbefb412&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/14578085-5f08-4275-a790-5a9cfbefb412/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "41", + "name": "GAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_75_EDM-2_001", + "description": "https://openml.org

    GAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_75_EDM-2_001-pmlb

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=14f91a0e-6262-454d-8edf-90e68eb8de15&revisionId=7b852968-64e8-417f-947b-487a4b0ffca8&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=14f91a0e-6262-454d-8edf-90e68eb8de15&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/14f91a0e-6262-454d-8edf-90e68eb8de15/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "44", + "name": "FileUploadDataBroker", + "description": "

    This is a simple file upload data broker. It can be used as a starting point for pipelines which process files. It offers a web interface with a simple file upload dialog. The uploaded files are saved on a shared volume, then the corresponding paths are sent to the next model in the pipeline. For example, this data broker can be used in connection with the following models: MusicDetection, SpeechDection, MusicAnnotation, and ObjectDetection. In the current version, only single files are supported.













    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1681c927-ae2c-41f6-9ee4-51ece5e80806&revisionId=f5f3b0cc-2486-45ac-8928-8769b89c8825&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1681c927-ae2c-41f6-9ee4-51ece5e80806&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/1681c927-ae2c-41f6-9ee4-51ece5e80806/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "50", + "name": "led7", + "description": "https://openml.org

    led7-pmlb

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1bb76aa6-45df-4944-b2bf-6c6de92df1cc&revisionId=d6a09a23-a730-4298-93cb-76a00cc4d1ea&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1bb76aa6-45df-4944-b2bf-6c6de92df1cc&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/1bb76aa6-45df-4944-b2bf-6c6de92df1cc/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "55", + "name": "vehicle", + "description": "https://openml.org

    Author: Dr. Pete Mowforth and Dr. Barry Shepherd

    Source: UCI
    Please cite: Siebert,JP. Turing Institute Research Memorandum TIRM-87-018 \"Vehicle Recognition Using Rule Based Methods\" (March 1987)


    NAME
    vehicle silhouettes


    PURPOSE
    to classify a given silhouette as one of four types of vehicle,
    using a set of features extracted from the silhouette. The
    vehicle may be viewed from one of many different angles.


    PROBLEM TYPE
    classification


    SOURCE
    Drs.Pete Mowforth and Barry Shepherd
    Turing Institute
    George House
    36 North Hanover St.
    Glasgow
    G1 2AD


    CONTACT
    Alistair Sutherland
    Statistics Dept.
    Strathclyde University
    Livingstone Tower
    26 Richmond St.
    GLASGOW G1 1XH
    Great Britain


         Tel: 041 552 4400 x3033

    Fax: 041 552 4711

    e-mail: alistair@uk.ac.strathclyde.stams

    HISTORY
    This data was originally gathered at the TI in 1986-87 by
    JP Siebert. It was partially financed by Barr and Stroud Ltd.
    The original purpose was to find a method of distinguishing
    3D objects within a 2D image by application of an ensemble of
    shape feature extractors to the 2D silhouettes of the objects.
    Measures of shape features extracted from example silhouettes
    of objects to be discriminated were used to generate a class-
    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=201367ca-d077-4a98-be44-bff9bee718b6&revisionId=d36b7554-5acf-4f6f-a3c1-702b540faf51&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=201367ca-d077-4a98-be44-bff9bee718b6&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/201367ca-d077-4a98-be44-bff9bee718b6/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "58", + "name": "audio-file-broker", + "description": "

    Audio File Broker is a Python component that exposes an endpoint to receive an audio file (i.e., wav) through a POST endpoint reachable using the command:

    minikube service \\--url audio-file-broker1webui\n

    The output is an audio file with a static ID that can be used for further elaboration.


    Details and source code can be found here: https://github.com/Engineering-Research-and-Development/audio-file-broker

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=228e7550-ddc8-4774-89c8-e2b9638b72fa&revisionId=0fb523a2-61ea-4348-9b66-1ea7a9c28056&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=228e7550-ddc8-4774-89c8-e2b9638b72fa&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/228e7550-ddc8-4774-89c8-e2b9638b72fa/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "62", + "name": "analcatdata_dmft", + "description": "https://openml.org

    Author: Unknown

    Source: Jeffrey S. Simonoff - 2003

    Please cite: Jeffrey S. Simonoff, Analyzing Categorical Data, Springer-Verlag, 2003


    One of the datasets used in the book \"Analyzing Categorical Data,\"
    by Jeffrey S. Simonoff. It contains data on the DMFT Index (Decayed, Missing, and Filled Teeth) before and after different prevention strategies. The prevention strategy is commonly used as the (categorical) target.


    Attribute information



    • DMFT.Begin and DMFT.End: DMFT index before and after the prevention strategy

    • Gender of the individual

    • Ethnicity of the individual

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2580db21-2cd8-405b-8912-e9881ada1454&revisionId=49ed507f-ad20-469b-a293-43628d39546c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2580db21-2cd8-405b-8912-e9881ada1454&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2580db21-2cd8-405b-8912-e9881ada1454/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "64", + "name": "ai4eu-robotics-pump-6144-fft-broker", + "description": "



    The robotic pump demonstrator represents a hydraulic pump that can be mounted on an industrial robot, for example, to pump liquid paint for spray painting. On this pump, one accelerometer is mounted for vibration monitoring and recording.

    The pump can be controlled in terms of speed (rotations per minute, rpm), affecting the throughput of paint and the pressure in and out of the pump.

    The dataset consists of 380 million measurements of several sensor data of the pump system in 1-second intervals over two months in 2020.





    The complete dataset & documentation is available on Zenodo.

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2764acc6-f82f-4b9c-ada8-fcc4edffa180&revisionId=822f9bd2-a5f7-42f7-b39d-01161ad2af1c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2764acc6-f82f-4b9c-ada8-fcc4edffa180&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2764acc6-f82f-4b9c-ada8-fcc4edffa180/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "66", + "name": "parity5_plus_5", + "description": "https://openml.org

    parity5_plus_5-pmlb

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2863f408-3bf5-46e5-a5e8-2c1d49547a73&revisionId=0ce18abf-1767-4bb5-b7fe-351aeaa74102&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2863f408-3bf5-46e5-a5e8-2c1d49547a73&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2863f408-3bf5-46e5-a5e8-2c1d49547a73/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "67", + "name": "profb", + "description": "https://openml.org

    Author: Hal Stern, Robin Lock

    Source: StatLib

    Please cite:


    PRO FOOTBALL SCORES (raw data appears after the description below)


    How well do the oddsmakers of Las Vegas predict the outcome of
    professional football games? Is there really a home field advantage - if
    so how large is it? Are teams that play the Monday Night game at a
    disadvantage when they play again the following Sunday? Do teams benefit
    from having a \"bye\" week off in the current schedule? These questions and
    a host of others can be investigated using this data set.


    Hal Stern from the Statistics Department at Harvard University has
    made available his compilation of scores for all National Football League
    games from the 1989, 1990, and 1991 seasons. Dr. Stern used these data as
    part of his presentation \"Who's Number One?\" in the special \"Best of
    Boston\" session at the 1992 Joint Statistics Meetings.


    Several variables in the data are keyed to the oddsmakers \"point
    spread\" for each game. The point spread is a value assigned before each
    game to serve as a handicap for whichever is perceived to be the better
    team. Thus, to win against the point spread, the \"favorite\" team must beat
    the \"underdog\" team by more points than the spread. The underdog \"wins\"
    against the spread if it wins the game outright or manages to lose by fewer
    points than the spread. In theory, the point spread should represent the
    \"expert\" prediction as to the game's outcome. In practice, it more usually
    denotes a point at which an equal amount of money will be wagered both for
    and against th", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2b1bb308-9b5f-4ba9-afa0-0ac42878bf1b&revisionId=5f66eea5-684f-451a-902e-f8a85d3cac02&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2b1bb308-9b5f-4ba9-afa0-0ac42878bf1b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2b1bb308-9b5f-4ba9-afa0-0ac42878bf1b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "75", + "name": "PersistentVolumeProvider", + "description": "

    The Persistent Volume Provider can be used to provide a common file storage for elements of a pipeline. The name of the node should be the absolute directory path.

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2f20e0ad-bc67-4629-9c8b-89f40a8c12d6&revisionId=4a8e7107-be77-4fd3-b1ec-c00afea2b4e6&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2f20e0ad-bc67-4629-9c8b-89f40a8c12d6&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2f20e0ad-bc67-4629-9c8b-89f40a8c12d6/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "88", + "name": "threeOf9", + "description": "https://openml.org

    threeOf9-pmlb

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=35271ff6-47b3-488c-9021-b0c5f893abd0&revisionId=c4c2d7bd-c07f-44e0-be8e-9711db0fb44a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=35271ff6-47b3-488c-9021-b0c5f893abd0&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/35271ff6-47b3-488c-9021-b0c5f893abd0/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "89", + "name": "monks-problems-2", + "description": "https://openml.org

    Author: Sebastian Thrun (Carnegie Mellon University)

    Source: UCI - October 1992

    Please cite: UCI


    The Monk's Problems: Problem 2

    Once upon a time, in July 1991, the monks of Corsendonk Priory were faced with a school held in their priory, namely the 2nd European Summer School on Machine Learning. After listening more than one week to a wide variety of learning algorithms, they felt rather confused: Which algorithm would be optimal? And which one to avoid? As a consequence of this dilemma, they created a simple task on which all learning algorithms ought to be compared: the three MONK's problems.


    The target concept associated with the 2nd Monk's problem is the binary outcome of the logical formula:

    MONK-2: EXACTLY TWO of {a1 = 1, a2 = 1, a3 = 1, a4 = 1, a5 = 1, a6 = 1}


    In this dataset, the original train and test sets were merged to allow other sampling procedures. However, the original train-test splits can be found as one of the OpenML tasks.


    Attribute information:



    • attr1: 1, 2, 3

    • attr2: 1, 2, 3

    • attr3: 1, 2

    • attr4: 1, 2, 3

    • attr5: 1, 2, 3, 4

    • attr6: 1, 2


    Relevant papers


    The MONK's Problems - A Performance Comparison of Different Learning Algorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J. Cheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K. Kaufman, S. Keller, I. Kononenko, J. Kreuziger, R.S. Michalski, T. Mitch", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=35915a44-ff16-4bdb-a6d6-fa88df61bf26&revisionId=549f1574-d126-42c0-8197-64ec12cbc567&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=35915a44-ff16-4bdb-a6d6-fa88df61bf26&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/35915a44-ff16-4bdb-a6d6-fa88df61bf26/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "90", + "name": "AI4Agri-frontend", + "description": "

    GUI and back-end logic for the AI4Agri models

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=35d9681b-c182-466b-9edf-1a9c962d0888&revisionId=6f92c9f4-b497-411d-8a4b-38e2b32251be&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=35d9681b-c182-466b-9edf-1a9c962d0888&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/35d9681b-c182-466b-9edf-1a9c962d0888/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "96", + "name": "mofn-3-7-10", + "description": "https://openml.org

    Author: Unknown

    Source: PMLB Supposedly from UCI originally, but can't find it there.

    Please cite


    The origin is not clear, but presumably this is an artificial problem representing M-of-N rules. The target is 1 if a certain M 'bits' are '1'? (Joaquin Vanschoren)

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=39846e3e-27c7-47c4-a613-55469ec5bd39&revisionId=9a0ab46a-219c-43c2-9f7d-464f8fb1da02&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=39846e3e-27c7-47c4-a613-55469ec5bd39&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/39846e3e-27c7-47c4-a613-55469ec5bd39/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "97", + "name": "monks-problems-3", + "description": "https://openml.org

    Author: Sebastian Thrun (Carnegie Mellon University)

    Source: UCI - October 1992

    Please cite: UCI


    The Monk's Problems: Problem 3

    Once upon a time, in July 1991, the monks of Corsendonk Priory were faced with a school held in their priory, namely the 2nd European Summer School on Machine Learning. After listening more than one week to a wide variety of learning algorithms, they felt rather confused: Which algorithm would be optimal? And which one to avoid? As a consequence of this dilemma, they created a simple task on which all learning algorithms ought to be compared: the three MONK's problems.


    The target concept associated with the 3rd Monk's problem is the binary outcome of the logical formula:

    MONK-3: (a5 = 3 and a4 = 1) or (a5 /= 4 and a2 /= 3)

    In addition, 5% class noise was added to the training set


    In this dataset, the original train and test sets were merged to allow other sampling procedures. However, the original train-test splits can be found as one of the OpenML tasks.


    Attribute information:



    • attr1: 1, 2, 3

    • attr2: 1, 2, 3

    • attr3: 1, 2

    • attr4: 1, 2, 3

    • attr5: 1, 2, 3, 4

    • attr6: 1, 2


    Relevant papers


    The MONK's Problems - A Performance Comparison of Different Learning Algorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J. Cheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K. Kaufman, S. Keller, I. K", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3c42202a-5c1f-4ebf-954f-b54ad0fb03e5&revisionId=dd7fa7d1-b185-460a-999e-8e792943ca7e&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=3c42202a-5c1f-4ebf-954f-b54ad0fb03e5&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/3c42202a-5c1f-4ebf-954f-b54ad0fb03e5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "98", + "name": "zoo", + "description": "https://openml.org

    Author: Richard S. Forsyth

    Source: UCI - 5/15/1990

    Please cite:


    Zoo database

    A simple database containing 17 Boolean-valued attributes describing animals. The \"type\" attribute appears to be the class attribute.


    Notes:

    * I find it unusual that there are 2 instances of \"frog\" and one of \"girl\"!
    * feature 'animal' is an identifier (though not unique) and should be ignored when modeling

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=41098614-571a-4c70-b45d-6a7fbcabdcf8&revisionId=eccea8f4-cc22-4962-934f-1dbf3da9f983&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=41098614-571a-4c70-b45d-6a7fbcabdcf8&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/41098614-571a-4c70-b45d-6a7fbcabdcf8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "100", + "name": "breast-w", + "description": "https://openml.org

    Author: Dr. William H. Wolberg, University of Wisconsin

    Source: UCI, University of Wisconsin - 1995

    Please cite: See below, plus UCI


    Breast Cancer Wisconsin (Original) Data Set. Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. The target feature records the prognosis (malignant or benign). Original data available here


    Current dataset was adapted to ARFF format from the UCI version. Sample code ID's were removed.


    ! Note that there is also a related Breast Cancer Wisconsin (Diagnosis) Data Set with a different set of features, better known as wdbc.


    Relevant Papers


    W.N. Street, W.H. Wolberg and O.L. Mangasarian. Nuclear feature extraction for breast tumor diagnosis. IS&T/SPIE 1993 International Symposium on Electronic Imaging: Science and Technology, volume 1905, pages 861-870, San Jose, CA, 1993.


    O.L. Mangasarian, W.N. Street and W.H. Wolberg. Breast cancer diagnosis and prognosis via linear programming. Operations Research, 43(4), pages 570-577, July-August 1995.


    Citation request


    This breast cancer database was obtained from the University of Wisconsin Hospitals, Madison from Dr. William H. Wolberg. If you publish ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=42cec034-786e-4b26-b299-c28e428c7b40&revisionId=3a85905c-0034-4a87-b284-b7eac431cf28&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=42cec034-786e-4b26-b299-c28e428c7b40&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/42cec034-786e-4b26-b299-c28e428c7b40/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "103", + "name": "mux6", + "description": "https://openml.org

    mux6-pmlb

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=45e6dcba-4163-4613-8443-2333d958b9a5&revisionId=aa8d762f-b679-4687-9d96-33b887a3d39c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=45e6dcba-4163-4613-8443-2333d958b9a5&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/45e6dcba-4163-4613-8443-2333d958b9a5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "104", + "name": "MyIris", + "description": "https://openml.org

    Author:

    Source: Unknown - Date unknown

    Please cite:


    MyExampleIris

    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4787776f-fd13-43cd-afab-eb863338f6e5&revisionId=9c95ba8a-2f03-41a4-8499-6421229acc9a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4787776f-fd13-43cd-afab-eb863338f6e5&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/4787776f-fd13-43cd-afab-eb863338f6e5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "123", + "name": "steel-plates-fault", + "description": "https://openml.org

    Author: Semeion, Research Center of Sciences of Communication, Rome, Italy.

    Source: UCI

    Please cite: Dataset provided by Semeion, Research Center of Sciences of Communication, Via Sersale 117, 00128, Rome, Italy.


    Steel Plates Faults Data Set

    A dataset of steel plates' faults, classified into 7 different types. The goal was to train machine learning for automatic pattern recognition.


    The dataset consists of 27 features describing each fault (location, size, ...) and 7 binary features indicating the type of fault (on of 7: Pastry, Z_Scratch, K_Scatch, Stains, Dirtiness, Bumps, Other_Faults). The latter is commonly used as a binary classification target ('common' or 'other' fault.)


    Attribute Information



    • V1: X_Minimum

    • V2: X_Maximum

    • V3: Y_Minimum

    • V4: Y_Maximum

    • V5: Pixels_Areas

    • V6: X_Perimeter

    • V7: Y_Perimeter

    • V8: Sum_of_Luminosity

    • V9: Minimum_of_Luminosity

    • V10: Maximum_of_Luminosity

    • V11: Length_of_Conveyer

    • V12: TypeOfSteel_A300

    • V13: TypeOfSteel_A400

    • V14: Steel_Plate_Thickness

    • V15: Edges_Index

    • V16: Empty_Index

    • V17: Square_Index

    • V18: Outside_X_Index

    • V19: Edges_X_Index

    • V20: Edges_Y_Index

    • V21: Outside_Global_Index

    • V22: LogOfAreas

    • V23: Log_X_Index

    • V24: Log_Y_Index

    • V25: Orientation_Index

    • V", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5636ef7b-50d6-49e1-8e78-5b68f24274c5&revisionId=731152cd-a431-4c78-9e65-02f74b6c5c0a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5636ef7b-50d6-49e1-8e78-5b68f24274c5&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5636ef7b-50d6-49e1-8e78-5b68f24274c5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "124", + "name": "ai4eu-robotics-pump-1024-raw-broker", + "description": "

      The robotic pump demonstrator represents a hydraulic pump that can be mounted on an industrial robot, for example, to pump liquid paint for spray painting. On this pump, one accelerometer is mounted for vibration monitoring and recording.

      The pump can be controlled in terms of speed (rotations per minute, rpm), affecting the throughput of paint and the pressure in and out of the pump.

      The dataset consists of 380 million measurements of several sensor data of the pump system in 1-second intervals over two months in 2020.

      The complete dataset & documentation is available on Zenodo.

      ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=57617093-3530-44fc-a72e-b5f6f83630cd&revisionId=a25721c1-88bf-4146-b219-3a4db5c00059&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=57617093-3530-44fc-a72e-b5f6f83630cd&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/57617093-3530-44fc-a72e-b5f6f83630cd/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "126", + "name": "irish", + "description": "https://openml.org

      Author: Vincent Greaney, Thomas Kelleghan (St. Patrick's College, Dublin)

      Source: StatLib - 1984

      Please cite: StatLib


      Irish Educational Transitions Data

      Data on educational transitions for a sample of 500 Irish schoolchildren aged 11 in 1967. The data were collected by Greaney and Kelleghan (1984), and reanalyzed by Raftery and Hout (1985, 1993).


      Attribute information



      • Sex: 1=male; 2=female.

      • DVRT (Drumcondra Verbal Reasoning Test Score).

      • Educational level attained

      • Leaving Certificate. 1 if Leaving Certificate not taken; 2 if taken.

      • Prestige score for father's occupation (calculated by Raftery and Hout, 1985).

      • Type of school: 1=secondary; 2=vocational; 9=primary terminal leaver.


      Relevant papers


      Greaney, V. and Kelleghan, T. (1984). Equality of Opportunity in Irish
      Schools. Dublin: Educational Company.


      Kass, R.E. and Raftery, A.E. (1993). Bayes factors and model uncertainty.
      Technical Report no. 254, Department of Statistics, University of Washington.
      Revised version to appear in Journal of the American Statistical
      Association.


      Raftery, A.E. (1988). Approximate Bayes factors for generalized linear models.
      Technical Report no. 121, Department of Statistics, University of Washington.


      Raftery, A.E. and Hout, M. (1985). Does Irish education approach the
      meritocratic ideal? A logistic analysis.
      Economic and Social Review, 16, 115-140.


      Raftery, A.E. and Hout, M. (1", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5a43bbed-a905-4af3-840b-eec565f2165b&revisionId=920c28eb-e743-4ef1-9606-04b382db90c5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5a43bbed-a905-4af3-840b-eec565f2165b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5a43bbed-a905-4af3-840b-eec565f2165b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "128", + "name": "meta", + "description": "https://openml.org

      Author:

      Source: Unknown - Date unknown

      Please cite:




      1. Title: meta-data




      2. Sources:
        (a) Creator:
        LIACC - University of Porto
        R.Campo Alegre 823
        4150 PORTO
        (b) Donor: P.B.Brazdil or J.Gama Tel.: +351 600 1672
        LIACC, University of Porto Fax.: +351 600 3654
        Rua Campo Alegre 823 Email: statlog-adm@ncc.up.pt
        4150 Porto, Portugal
        (c) Date: March, 1996




      (d) Acknowlegements:
      LIACC wishes to thank Commission of European Communities
      for their support. Also, we wish to thank the following partners
      for providing the individual test results:



      • Dept. of Statistics, University of Strathclyde, Glasgow, UK

      • Dept. of Statistics, University of Leeds, UK

      • Aston University, Birmingham, UK

      • Forschungszentrum Ulm, Daimler-Benz AG, Germany

      • Brainware GmbH, Berlin, Germany

      • Frauenhofer Gesellschaft IITB-EPO, Berlin, Germany

      • Institut fuer Kybernetik, Bochum, Germany

      • ISoft, Gif sur Yvette, France


      • Dept. of CS and AI, University of Granada, Spain




      • Past Usage:




      Meta-Data was used in order to give advice about which classification
      method is appropriate for a particular dataset.
      This work is described in:


      -\"Machine Learning, Neural and Statistical Learning\"
      Eds. D.Michie,D.J.Spiegelhalter and C.Taylor
      Ellis Horwood-1994



      • \"Characterizing the Applicability of
        Classification Algorithms Using Meta-Level Learning\",
        P. Brazdil, J.Gama and B.Hen", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5bdd6ed1-601e-482b-904e-886921963a2d&revisionId=eb64f31c-2e72-4bd9-a60d-0598b8e83b33&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5bdd6ed1-601e-482b-904e-886921963a2d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5bdd6ed1-601e-482b-904e-886921963a2d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "130", + "name": "glass", + "description": "https://openml.org

        Author:

        Source: Unknown -

        Please cite:




        1. Title: Glass Identification Database




        2. Sources:
          (a) Creator: B. German
          -- Central Research Establishment
          Home Office Forensic Science Service
          Aldermaston, Reading, Berkshire RG7 4PN
          (b) Donor: Vina Spiehler, Ph.D., DABFT
          Diagnostic Products Corporation
          (213) 776-0180 (ext 3014)
          (c) Date: September, 1987




        3. Past Usage:
          -- Rule Induction in Forensic Science
          -- Ian W. Evett and Ernest J. Spiehler
          -- Central Research Establishment
          Home Office Forensic Science Service
          Aldermaston, Reading, Berkshire RG7 4PN
          -- Unknown technical note number (sorry, not listed here)
          -- General Results: nearest neighbor held its own with respect to the
          rule-based system




        4. Relevant Information:n
          Vina conducted a comparison test of her rule-based system, BEAGLE, the
          nearest-neighbor algorithm, and discriminant analysis. BEAGLE is
          a product available through VRS Consulting, Inc.; 4676 Admiralty Way,
          Suite 206; Marina Del Ray, CA 90292 (213) 827-7890 and FAX: -3189.
          In determining whether the glass was a type of \"float\" glass or not,
          the following results were obtained (# incorrect answers):


                Type of Sample                            Beagle   NN    DA
          Windows that were float processed (87) 10 12 21
          Windows that were not: (76) 19 16 ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5c5599f7-73c7-4874-ace1-4c6e312409c4&revisionId=64523754-bb18-406c-827d-4fe090d0e5e6&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5c5599f7-73c7-4874-ace1-4c6e312409c4&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5c5599f7-73c7-4874-ace1-4c6e312409c4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "131", + "name": "wall-robot-navigation", + "description": "https://openml.org

          Author: Ananda Freire, Marcus Veloso and Guilherme Barreto

          Source: UCI - 2010

          Please cite: UCI


          Wall-Following Robot Navigation Data Data Set

          The data were collected as the SCITOS G5 robot navigates through the room following the wall in a clockwise direction, for 4 rounds, using 24 ultrasound sensors arranged circularly around its 'waist'.


          The data consists of raw values of the measurements of all 24 ultrasound sensors and the corresponding class label. Sensor readings are sampled at a rate of 9 samples per second.


          The class labels are:

          1. Move-Forward,

          2. Slight-Right-Turn,

          3. Sharp-Right-Turn,

          4. Slight-Left-Turn


          It is worth mentioning that the 24 ultrasound readings and the simplified distances were collected at the same time step, so each file has the same number of rows (one for each sampling time step).


          The wall-following task and data gathering were designed to test the hypothesis that this apparently simple navigation task is indeed a non-linearly separable classification task. Thus, linear classifiers, such as the Perceptron network, are not able to learn the task and command the robot around the room without collisions. Nonlinear neural classifiers, such as the MLP network, are able to learn the task and command the robot successfully without collisions.


          Attribute Information:



          1. US1: ultrasound sensor at the front of the robot (reference angle: 180\u00b0)

          2. US2", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5d6161e5-1cbb-46fc-a005-85607fd7caea&revisionId=7df9f5eb-70a2-4480-901f-7a2f2783520a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5d6161e5-1cbb-46fc-a005-85607fd7caea&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5d6161e5-1cbb-46fc-a005-85607fd7caea/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "132", + "name": "cleve", + "description": "https://openml.org

            cleve-pmlb

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5d938cc4-8cff-4e09-80cf-d8b08461d9c4&revisionId=ef0a6892-61d1-4ef8-9d98-3f29b71c15bf&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5d938cc4-8cff-4e09-80cf-d8b08461d9c4&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5d938cc4-8cff-4e09-80cf-d8b08461d9c4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "133", + "name": "GAMETES_Epistasis_3-Way_20atts_0.2H_EDM-1_1", + "description": "https://openml.org

            GAMETES_Epistasis_3-Way_20atts_0.2H_EDM-1_1-pmlb

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5e840f29-a572-47c2-acdf-c1b8c0b4b8b7&revisionId=3e1cfa13-826c-4672-852e-438ec491a045&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5e840f29-a572-47c2-acdf-c1b8c0b4b8b7&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5e840f29-a572-47c2-acdf-c1b8c0b4b8b7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "136", + "name": "monks-problems-1", + "description": "https://openml.org

            Author: Sebastian Thrun (Carnegie Mellon University)

            Source: UCI - October 1992

            Please cite: UCI


            The Monk's Problems: Problem 1

            Once upon a time, in July 1991, the monks of Corsendonk Priory were faced with a school held in their priory, namely the 2nd European Summer School on Machine Learning. After listening more than one week to a wide variety of learning algorithms, they felt rather confused: Which algorithm would be optimal? And which one to avoid? As a consequence of this dilemma, they created a simple task on which all learning algorithms ought to be compared: the three MONK's problems.


            The target concept associated with the 1st Monk's problem is the binary outcome of the logical formula:

            MONK-1: (a1 == a2) or (a5 == 1)


            In this dataset, the original train and test sets were merged to allow other sampling procedures. However, the original train-test splits can be found as one of the OpenML tasks.


            Attribute information:



            • attr1: 1, 2, 3

            • attr2: 1, 2, 3

            • attr3: 1, 2

            • attr4: 1, 2, 3

            • attr5: 1, 2, 3, 4

            • attr6: 1, 2


            Relevant papers


            The MONK's Problems - A Performance Comparison of Different Learning Algorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J. Cheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K. Kaufman, S. Keller, I. Kononenko, J. Kreuziger, R.S. Michalski, T. Mitchell, P. Pachowicz, Y. Reich H. Vafaie, W", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6265676d-b001-4bd6-949c-05b7db6affae&revisionId=1375bd8f-18ca-4971-9a7b-c7dcb7a27c0c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6265676d-b001-4bd6-949c-05b7db6affae&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6265676d-b001-4bd6-949c-05b7db6affae/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "138", + "name": "hayes-roth", + "description": "https://openml.org

            Author: Barbara and Frederick Hayes-Roth


            Source: original -

            Please cite:


            Hayes-Roth Database


            This is a merged version of the separate train and test set which are usually distributed. On OpenML this train-test split can be found as one of the possible tasks.


            Source Information:
            (a) Creators: Barbara and Frederick Hayes-Roth
            (b) Donor: David W. Aha (aha@ics.uci.edu) (714) 856-8779

            (c) Date: March, 1989


            Attribute Information:
            -- 1. name: distinct for each instance and represented numerically
            -- 2. hobby: nominal values ranging between 1 and 3
            -- 3. age: nominal values ranging between 1 and 4
            -- 4. educational level: nominal values ranging between 1 and 4
            -- 5. marital status: nominal values ranging between 1 and 4
            -- 6. class: nominal value between 1 and 3


            Detailed description of the experiment:
            1. 3 categories (1, 2, and neither -- which I call 3)
            -- some of the instances could be classified in either class 1 or 2, and they have been evenly distributed between the two classes
            2. 5 Attributes
            -- A. name (a randomly-generated number between 1 and 132)
            -- B. hobby (a randomly-generated number between 1 and 3)
            -- C. age (a number between 1 and 4)
            -- D. education level (a number between 1 and 4)
            -- E. marital status (a number between 1 and 4)
            3. Classification:

            -- only attributes C-E are diagnostic; values for A and B are ignored
            -- Class Neither: if a 4 occurs for any attribute C-E
            -- Class 1: Otherwise, if (# of 1's)>(# of 2's) for attributes C-E
            -- Class 2", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=62f47eea-85d2-44a6-a3b2-cd3ac3d2ff32&revisionId=6df90024-afec-494b-b59e-724b350d5eab&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=62f47eea-85d2-44a6-a3b2-cd3ac3d2ff32&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/62f47eea-85d2-44a6-a3b2-cd3ac3d2ff32/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "148", + "name": "ai4eu-robotics-wrist-1024-raw-broker", + "description": "

            The robotic wrist demonstrator represents a mechanical wrist with three axes that can hold tools, e.g. for spray painting in combination with a pump. On this robotic wrist, two accelerometers are mounted for vibration monitoring and recording: one in the movable front part of the wrist and one in the shaft. The wrist can be controlled through the torque or the designated position of each axis\u2019 motor.

            The dataset consists of 1.8 billion measurements of several sensor data of the robotic wrist in 1-second intervals over six months in 2020.

            The complete dataset & description is available on Zenodo

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6819ff36-f57d-459d-a5f7-11e1e8e096fe&revisionId=6400e2d0-ed8f-48fd-8aab-50504461c72b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6819ff36-f57d-459d-a5f7-11e1e8e096fe&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6819ff36-f57d-459d-a5f7-11e1e8e096fe/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "152", + "name": "allbp", + "description": "https://openml.org

            allbp-pmlb

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6a6f5d38-3775-485d-a6d6-1b90952daee9&revisionId=35d8f990-459e-41b0-918c-07895c554e3d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6a6f5d38-3775-485d-a6d6-1b90952daee9&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6a6f5d38-3775-485d-a6d6-1b90952daee9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "153", + "name": "xd6", + "description": "https://openml.org

            Author: Unknown

            Source: PMLB - Supposedly originates from UCI, but can't find it there anymore.

            Please cite:


            XD6 Dataset
            Dataset used by Buntine and Niblett (1992). Composed of 10 features, one of which is irrelevant. The target is a disjunctive normal form formula over the nine other attributes, with additional classification noise.


            More info.

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6af5c9cf-73bf-406b-a250-5bbf7d0e5e47&revisionId=c3c334c0-d744-4b9a-96aa-d4333c5d3e8a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6af5c9cf-73bf-406b-a250-5bbf7d0e5e47&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6af5c9cf-73bf-406b-a250-5bbf7d0e5e47/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "163", + "name": "ucrsuite-config", + "description": "

            Data broker for subsequence search in time series

            This data broker offers a web interface for uploading files and setting search parameters. It saves the uploaded files on a shared volume and sends the corresponding paths to the next model in the pipeline. It was created to be used with ucrsuite-dtw and ucrsuite-ed models, and supports data and query files in txt format.

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6e64762b-97e0-4278-8dad-c9d1513fabb4&revisionId=e41459ef-3143-4ead-a1c0-907b136f6e9a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6e64762b-97e0-4278-8dad-c9d1513fabb4&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6e64762b-97e0-4278-8dad-c9d1513fabb4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "164", + "name": "iris", + "description": "https://openml.org

            Author: R.A. Fisher

            Source: UCI - 1936 - Donated by Michael Marshall

            Please cite:


            Iris Plants Database

            This is perhaps the best known database to be found in the pattern recognition literature. Fisher's paper is a classic in the field and is referenced frequently to this day. (See Duda & Hart, for example.) The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. One class is linearly separable from the other 2; the latter are NOT linearly separable from each other.


            Predicted attribute: class of iris plant.

            This is an exceedingly simple domain.


            Attribute Information:


            1. sepal length in cm
            2. sepal width in cm
            3. petal length in cm
            4. petal width in cm
            5. class:
            -- Iris Setosa
            -- Iris Versicolour
            -- Iris Virginica
            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6e9c598d-8928-437b-9013-d698f3321a37&revisionId=d3cee283-9ba0-40c2-b502-aa7ab4871ecf&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6e9c598d-8928-437b-9013-d698f3321a37&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6e9c598d-8928-437b-9013-d698f3321a37/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "168", + "name": "cars1", + "description": "https://openml.org

            cars1-pmlb

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6feea5d8-c6fb-40d9-99bb-c8ad4ad4d242&revisionId=2699c172-24e4-4d32-aca3-2f74eb6dc968&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6feea5d8-c6fb-40d9-99bb-c8ad4ad4d242&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6feea5d8-c6fb-40d9-99bb-c8ad4ad4d242/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "172", + "name": "ner-databroker", + "description": "

            This is the databroker component of the NER pipeline.

            Through the Web UI of the ner-databroker, you can provide the text to be received as an input for the entity recognizer. The language of the text should be German, since the NER model is trained on German data. More than one sentence can be given as input.

            Make sure to run ner-pipeline, instead of ner-databroker as a standalone component. As ner-pipeline is successfully deployed, open the WEB UI and follow the instructions to submit the text.

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=73244125-66e5-4087-9fe8-8229a39944c2&revisionId=e586beb7-322e-4a3e-82a7-b96bbbf49464&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=73244125-66e5-4087-9fe8-8229a39944c2&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/73244125-66e5-4087-9fe8-8229a39944c2/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "173", + "name": "corral", + "description": "https://openml.org

            corral-pmlb

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7337b1db-a3e8-4e24-8ab1-130d86f032c8&revisionId=a9a6ebfb-485b-4678-8f3a-00b27877c492&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7337b1db-a3e8-4e24-8ab1-130d86f032c8&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7337b1db-a3e8-4e24-8ab1-130d86f032c8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "175", + "name": "autoUniv-au7-1100", + "description": "https://openml.org

            Author: Ray. J. Hickey

            Source: UCI

            Please cite:



            • Dataset Title:


            AutoUniv Dataset

            data problem: autoUniv-au7-300-drift-au7-cpd1-800



            • Abstract:


            AutoUniv is an advanced data generator for classifications tasks. The aim is to reflect the nuances and heterogeneity of real data. Data can be generated in .csv, ARFF or C4.5 formats.



            • Source:


            AutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com
            AutoUniv web-site: http://sites.google.com/site/autouniv/.



            • Data Set Information:


            The user first creates a classification model and then generates classified examples from it. To create a model, the following are specified: the number of attributes (up to 1000) and their type (discrete or continuous), the number of classes (up to 10), the complexity of the underlying rules and the noise level. AutoUniv then produces a model through a process of constrained randomised search to satisfy the user's requirements. A model can have up to 3000 rules. Rare class models can be designed. A sequence of models can be designed to reflect concept and/or population drift.


            AutoUniv creates three text files for a model: a Prolog specification of the model used to generate examples (.aupl); a user-friendly statement of the classification rules in an 'if ... then' format (.aurules); a statistical summary of the main properties of the model, including its Bayes rate (.auprops).



            • Attribute Information:


            Attributes may be discrete with up to 10 values or continuous. A dis", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7422c0f9-0fda-41ab-8bc0-91233a3455e1&revisionId=739ac852-a2b4-45fc-84ca-f93ca4c4d17f&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7422c0f9-0fda-41ab-8bc0-91233a3455e1&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7422c0f9-0fda-41ab-8bc0-91233a3455e1/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "176", + "name": "ai4eu-robotics-wrist-1024-fft-broker", + "description": "

            The robotic wrist demonstrator represents a mechanical wrist with three axes that can hold tools, e.g. for spray painting in combination with a pump. On this robotic wrist, two accelerometers are mounted for vibration monitoring and recording: one in the movable front part of the wrist and one in the shaft. The wrist can be controlled through the torque or the designated position of each axis\u2019 motor.

            The dataset consists of 1.8 billion measurements of several sensor data of the robotic wrist in 1-second intervals over six months in 2020.

            The complete dataset & description is available on Zenodo

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=74b69064-462f-4176-a8ce-7719638f237a&revisionId=1933cb96-3d47-4700-a73a-09692385ad69&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=74b69064-462f-4176-a8ce-7719638f237a&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/74b69064-462f-4176-a8ce-7719638f237a/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "177", + "name": "grpc_piezo_hubeau", + "description": "

            Data from the \"Piezometry\" API come from the ADES portal (national portal for access to French groundwater data). They relate to piezometric measurements (water level in groundwater tables), throughout France, from all the partners of the water information system (see metadata).


            The updates are integrated daily into the API.


            Data is expressed

            • in NGF meters for levels (or ratings);
            • in meters in relation to the measurement mark for the depths.


            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=76fda708-9907-4241-9d35-4d18a406eb35&revisionId=e3ff0320-a93a-4358-b13d-949df627c0b0&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=76fda708-9907-4241-9d35-4d18a406eb35&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/76fda708-9907-4241-9d35-4d18a406eb35/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "180", + "name": "teachingAssistant", + "description": "https://openml.org

            Author:

            Source: Unknown - Date unknown

            Please cite:


            Dataset from the MLRR repository: http://axon.cs.byu.edu:5000/

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7bc5051d-f852-4547-a317-e1c510f66332&revisionId=5f2ac1b6-7a8f-4762-9c64-82a14dea66b1&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7bc5051d-f852-4547-a317-e1c510f66332&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7bc5051d-f852-4547-a317-e1c510f66332/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "181", + "name": "wine-quality-red", + "description": "https://openml.org

            wine-quality-red-pmlb

            ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7c5315b5-ca3c-488c-b235-f7f4d0534b16&revisionId=cecbcde7-4870-4ed3-9bb4-af01655e0c27&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7c5315b5-ca3c-488c-b235-f7f4d0534b16&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7c5315b5-ca3c-488c-b235-f7f4d0534b16/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "183", + "name": "diabetes", + "description": "https://openml.org

            Author: Vincent Sigillito


            Source: Obtained from UCI


            Please cite: UCI citation policy




            1. Title: Pima Indians Diabetes Database




            2. Sources:
              (a) Original owners: National Institute of Diabetes and Digestive and
              Kidney Diseases
              (b) Donor of database: Vincent Sigillito (vgs@aplcen.apl.jhu.edu)
              Research Center, RMI Group Leader
              Applied Physics Laboratory
              The Johns Hopkins University
              Johns Hopkins Road
              Laurel, MD 20707
              (301) 953-6231
              (c) Date received: 9 May 1990




            3. Past Usage:




              1. Smith,~J.~W., Everhart,~J.~E., Dickson,~W.~C., Knowler,~W.~C., &
                Johannes,~R.~S. (1988). Using the ADAP learning algorithm to forecast
                the onset of diabetes mellitus. In {it Proceedings of the Symposium
                on Computer Applications and Medical Care} (pp. 261--265). IEEE
                Computer Society Press.


                The diagnostic, binary-valued variable investigated is whether the
                patient shows signs of diabetes according to World Health Organization
                criteria (i.e., if the 2 hour post-load plasma glucose was at least
                200 mg/dl at any survey examination or if found during routine medical
                care). The population lives near Phoenix, Arizona, USA.


                Results: Their ADAP al", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7d5de8fb-5f22-4b99-9ce3-9ae00f6c86db&revisionId=72ecabe9-fd16-4c78-954a-c7e86585d15c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7d5de8fb-5f22-4b99-9ce3-9ae00f6c86db&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7d5de8fb-5f22-4b99-9ce3-9ae00f6c86db/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "184", + "name": "dis", + "description": "https://openml.org

                dis-pmlb

                ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7d5f6906-f781-4b68-93cc-95e733010b75&revisionId=6b69f0c0-9e4f-437c-8563-55b3b177ef2a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7d5f6906-f781-4b68-93cc-95e733010b75&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7d5f6906-f781-4b68-93cc-95e733010b75/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "186", + "name": "lymph", + "description": "https://openml.org

                Author:

                Source: Unknown -

                Please cite:


                Citation Request:
                This lymphography domain was obtained from the University Medical Centre,
                Institute of Oncology, Ljubljana, Yugoslavia. Thanks go to M. Zwitter and
                M. Soklic for providing the data. Please include this citation if you plan
                to use this database.




                1. Title: Lymphography Domain




                2. Sources:
                  (a) See Above.
                  (b) Donors: Igor Kononenko,
                  University E.Kardelj
                  Faculty for electrical engineering
                  Trzaska 25
                  61000 Ljubljana (tel.: (38)(+61) 265-161


                           Bojan Cestnik
                  Jozef Stefan Institute
                  Jamova 39
                  61000 Ljubljana
                  Yugoslavia (tel.: (38)(+61) 214-399 ext.287)

                  (c) Date: November 1988




                3. Past Usage: (sveral)



                  1. Cestnik,G., Konenenko,I, & Bratko,I. (1987). Assistant-86: A
                    Knowledge-Elicitation Tool for Sophisticated Users. In I.Bratko
                    & N.Lavrac (Eds.) Progress in Machine Learning, 31-45, Sigma Press.
                    -- Assistant-86: 76% accuracy

                  2. Clark,P. & Niblett,T. (1987). Induction in Noisy Domains. In
                    I.Bratko & N.Lavrac (Eds.) Progress in Machine Learning, 11-30,
                    Sigma Press.
                    -- Simple Bayes: 83% accuracy
                    -- CN2 (99% threshold): 82%

                  3. Michalski,R., Mozetic,I. Hong,J., & Lavrac,N. (1986). The Multi-Purpose
                    Incremental Learning System AQ15 and its Testing Applications to Three
                    Medical Domains. In Proceedings of the Fifth Nat", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7f5388ed-8ec4-4f00-8230-e5624404ed95&revisionId=306ff0fb-0cee-48f7-ba80-d9567d62f039&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7f5388ed-8ec4-4f00-8230-e5624404ed95&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7f5388ed-8ec4-4f00-8230-e5624404ed95/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "187", + "name": "allrep", + "description": "https://openml.org

                    allrep-pmlb

                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7f976866-58a9-41a2-a2c4-b66ee2ebb502&revisionId=dd968b72-c353-4de1-9da6-bbaaa6083b6d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7f976866-58a9-41a2-a2c4-b66ee2ebb502&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7f976866-58a9-41a2-a2c4-b66ee2ebb502/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "189", + "name": "ai4eu-robotics-wrist-6144-raw-broker", + "description": "

                    The robotic wrist demonstrator represents a mechanical wrist with three axes that can hold tools, e.g. for spray painting in combination with a pump. On this robotic wrist, two accelerometers are mounted for vibration monitoring and recording: one in the movable front part of the wrist and one in the shaft. The wrist can be controlled through the torque or the designated position of each axis\u2019 motor.

                    The dataset consists of 1.8 billion measurements of several sensor data of the robotic wrist in 1-second intervals over six months in 2020.

                    The complete dataset & description is available on Zenodo

                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8521c122-91e5-4748-aacd-c99e0cc7549e&revisionId=de99b386-a460-4eb9-96f0-7d53f01e3801&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8521c122-91e5-4748-aacd-c99e0cc7549e&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8521c122-91e5-4748-aacd-c99e0cc7549e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "198", + "name": "JapaneseVowels", + "description": "https://openml.org

                    Author: Mineichi Kudo, Jun Toyama, Masaru Shimbo

                    Source: UCI

                    Please cite:


                    Japanese vowels

                    This dataset records 640 time series of 12 LPC cepstrum coefficients taken from nine male speakers.


                    The data was collected for examining our newly developed classifier for multidimensional curves (multidimensional time series). Nine male speakers uttered two Japanese vowels /ae/ successively. For each utterance, with the analysis parameters described below, we applied 12-degree linear prediction analysis to it to obtain a discrete-time series with 12 LPC cepstrum coefficients. This means that one utterance by a speaker forms a time series whose length is in the range 7-29 and each point of a time series is of 12 features (12 coefficients).


                    Similar data are available for different utterances /ei/, /iu/, /uo/, /oa/ in addition to /ae/. Please contact the donor if you are interested in using this data.


                    The number of the time series is 640 in total. We used one set of 270 time series for training and the other set of 370 time series for testing.


                    Analysis parameters:

                    * Sampling rate : 10kHz
                    * Frame length : 25.6 ms
                    * Shift length : 6.4ms
                    * Degree of LPC coefficients : 12


                    Each line represents 12 LPC coefficients in the increasing order separated by spaces. This corresponds to one analysis
                    frame. Lines are organized into blocks, which are a set of 7-29 lines separated by blank lines and corresponds to a single speech utterance of /ae/ with 7-29 frames.


                    Each speaker is a set of consecutive blocks. In ae.t", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=898883b9-a6b7-47a1-ae2c-cdf9012ceaaf&revisionId=e5a5e2dc-1c77-4853-91a8-f559a2c8346a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=898883b9-a6b7-47a1-ae2c-cdf9012ceaaf&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/898883b9-a6b7-47a1-ae2c-cdf9012ceaaf/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "199", + "name": "badges2", + "description": "https://openml.org

                    Author:

                    Source: Unknown - Date unknown

                    Please cite:


                    Dataset from the MLRR repository: http://axon.cs.byu.edu:5000/

                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8abffb54-85d2-40d6-9428-dbd62ffa345d&revisionId=49191518-c230-4f13-81b5-b64ba49d0621&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8abffb54-85d2-40d6-9428-dbd62ffa345d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8abffb54-85d2-40d6-9428-dbd62ffa345d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "200", + "name": "Augmented_data_registry", + "description": "

                    Description of the solution

                    The most important requirement for machine learning based tools is the presence of a robust and reliable data pipeline.  A data pipeline is a series of (possibly automated) data transformations needed before such data can be used by any machine learning model. 


                    Figure 1: Example of a typical machine learning data pipeline


                    As Figure 1 shows the main steps to prepare data are 1) Data preparation which ensures that the raw data collected via different streams is properly cleaned and associated with a certain quality. 2) Data processing which transforms cleaned data into a format compatible with standard machine learning algorithms.

                    The presence of an automated pipeline of this kind makes sure that the same data transformation process can be repeated in time, for example while using the model in real life or when re-training the same model. Data pipelines should be reproducible and reliable and should therefore be properly include", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8b133ef7-6353-480e-82e4-5d66dad7ced8&revisionId=fa47a809-eaaf-44ee-9f21-636290983357&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8b133ef7-6353-480e-82e4-5d66dad7ced8&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8b133ef7-6353-480e-82e4-5d66dad7ced8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "202", + "name": "VideoFileBroker", + "description": "

                    The Video file broker feeds video files to video models, typically starting with segmentation.

                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8eaa811e-46ff-4577-a88d-b203f7757338&revisionId=b102e42d-5a16-4e96-9fa6-fba8dab9616b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8eaa811e-46ff-4577-a88d-b203f7757338&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8eaa811e-46ff-4577-a88d-b203f7757338/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "203", + "name": "autoUniv-au7-500", + "description": "https://openml.org

                    Author: Ray. J. Hickey

                    Source: UCI

                    Please cite:



                    • Dataset Title:


                    AutoUniv Dataset

                    data problem: autoUniv-au7-cpd1-500



                    • Abstract:


                    AutoUniv is an advanced data generator for classifications tasks. The aim is to reflect the nuances and heterogeneity of real data. Data can be generated in .csv, ARFF or C4.5 formats.



                    • Source:


                    AutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com
                    AutoUniv web-site: http://sites.google.com/site/autouniv/.



                    • Data Set Information:


                    The user first creates a classification model and then generates classified examples from it. To create a model, the following are specified: the number of attributes (up to 1000) and their type (discrete or continuous), the number of classes (up to 10), the complexity of the underlying rules and the noise level. AutoUniv then produces a model through a process of constrained randomised search to satisfy the user's requirements. A model can have up to 3000 rules. Rare class models can be designed. A sequence of models can be designed to reflect concept and/or population drift.


                    AutoUniv creates three text files for a model: a Prolog specification of the model used to generate examples (.aupl); a user-friendly statement of the classification rules in an 'if ... then' format (.aurules); a statistical summary of the main properties of the model, including its Bayes rate (.auprops).



                    • Attribute Information:


                    Attributes may be discrete with up to 10 values or continuous. A discrete attri", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8ef7f983-b1d2-4891-b76c-6f4ee2202248&revisionId=66cc456d-3bb0-476f-976a-e96562a3545b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8ef7f983-b1d2-4891-b76c-6f4ee2202248&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8ef7f983-b1d2-4891-b76c-6f4ee2202248/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "204", + "name": "IoTxKG_TEST", + "description": "

                    IoTxKG Ontology Analysis Model

                    identify main concepts based on clustering


                    The Internet of Things (IoT) primary objective is to make a hyper-connected world for various application domains. However, IoT suffers from a lack of interoperability leading to a substantial threat to the predicted economic value. Schema.org provides semantic interoperability to structure heterogeneous data on the Web. An extension of this vocabulary for the IoT domain (iot.schema.org) is an ongoing research effort to address semantic interoperability for the Web of Things (WoT). To design this vocabulary, a central challenge is to identify the main topics (concepts and properties) automatically from existing knowledge in IoT applications. IoTxKG automatically identifies the most important topics from ontologies of the 4 KE4WoT challenge domains \u2013 smart building, mobility, energy and weather \u2013 based on suitable language models.


                    The following technologies are employed in IoTxKG

                    • W3C Semantic Web Technologies (e.g. RDF, OWL, SPARQL, SKOS)
                    • Deep Learning Models (language models)
                    • Clustering Algorithms (e.g. k-means clustering)



                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=900e0378-1f94-4727-b3ba-2907f7cdd818&revisionId=8d0f6c80-b67e-43db-ab6f-3646ed2f57b1&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=900e0378-1f94-4727-b3ba-2907f7cdd818&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/900e0378-1f94-4727-b3ba-2907f7cdd818/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "206", + "name": "dsc-text", + "description": "

                    This is a simple integration of an IDS Data Connector into a AI4EU Model.

                    The source code is available in the tutorials repository on Github: https://github.com/ai4eu/tutorials/tree/master/DSC_Data_Exchange



                    To configure the what data the Model should download from an DSC one can use the providet REST-Api accessable through the path /api/v1/ of the webui container.




                    The following Endpoints are provided:


                    recipient (address of the DSC that provides the Data), resourceId, artifactId, contract, customDSC (address of the DSC that should download the Data)



                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=926bd2eb-51b6-4e64-8a76-b6544cce5162&revisionId=d764d260-491b-4e55-8476-29b2a2598aa5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=926bd2eb-51b6-4e64-8a76-b6544cce5162&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/926bd2eb-51b6-4e64-8a76-b6544cce5162/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "209", + "name": "GAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1", + "description": "https://openml.org

                    GAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1-pmlb

                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=93f29f2f-1fd0-4d24-a057-544397af20bf&revisionId=216e926a-76c7-4c6f-aee9-7c005eb2d6a1&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=93f29f2f-1fd0-4d24-a057-544397af20bf&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/93f29f2f-1fd0-4d24-a057-544397af20bf/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "210", + "name": "ai4eu-security-pilot-databroker", + "description": "

                    This container provides data for Thread Prediction in Network Traffic.

                    Therefore, this container can deliver test and training data.


                    You can connect the training data output of the ai4eu-security-pilot-databroker container with the training data input of the ai4eu-security-pilot.model container. This data will be used to train the model. It only contains benign traffic. To test your model you can connect the prediction data output of the ai4eu-security-pilot-databroker container with the prediction data input of the ai4eu-security-pilot.model container. This data will be used to test the model. It contains benign and fraud traffic.

                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=95c624d4-05ed-40c0-ad1d-a833e35da282&revisionId=653b3402-027c-4fac-96ce-ce8fa0969bce&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=95c624d4-05ed-40c0-ad1d-a833e35da282&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/95c624d4-05ed-40c0-ad1d-a833e35da282/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "214", + "name": "advice-img-databroker", + "description": "

                    advice-img-databroker collects the user's images placed on the shared folder and releases them into the pipeline

                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9a0944ce-a5d3-4e01-8da0-d44be9b42814&revisionId=c754d039-d083-4997-abb2-6d67b1d6f3f5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9a0944ce-a5d3-4e01-8da0-d44be9b42814&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/9a0944ce-a5d3-4e01-8da0-d44be9b42814/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "215", + "name": "moon_rl", + "description": "

                    This document contains information regarding the developments done within the MOON project. Such project took place within the AI4EU Open Call for the Alph -D challenge, addressing machining control optimization through Reinforcement Learning. The content of the document can be summarized with the following points that hold all the information and are ordered in a logical way, going from the problem presentation to the solution proposed to face it. The last point contains comments related to problems found within the project, how MOON has adapted to such scenario and possible future steps. See README file for details on notebook.


                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9c0ed8db-b9e3-4a8f-8c63-b6350d951337&revisionId=9e63d89f-6525-48dc-8aba-36f6a6b04f81&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9c0ed8db-b9e3-4a8f-8c63-b6350d951337&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/9c0ed8db-b9e3-4a8f-8c63-b6350d951337/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "217", + "name": "vowel", + "description": "https://openml.org

                    Author: Peter Turney (peter@ai.iit.nrc.ca)

                    Source: UCI - date unknown

                    Please cite: UCI citation policy


                    Vowel Recognition (Deterding data)
                    Speaker independent recognition of the eleven steady state vowels of British English using a specified training set of lpc derived log area ratios.
                    Collected by David Deterding (data and non-connectionist analysis), Mahesan Niranjan (first connectionist analysis), Tony Robinson (description, program, data, and results)


                    A very comprehensive description including comments by the authors can be found here


                    The problem is specified by the accompanying data file, \"vowel.data\". This
                    consists of a three dimensional array: voweldata [speaker, vowel, input].
                    The speakers are indexed by integers 0-89. (Actually, there are fifteen
                    individual speakers, each saying each vowel six times.) The vowels are
                    indexed by integers 0-10. For each utterance, there are ten floating-point
                    input values, with array indices 0-9.


                    The problem is to train the network as well as possible using only on data
                    from \"speakers\" 0-47, and then to test the network on speakers 48-89,
                    reporting the number of correct classifications in the test set.


                    For a more detailed explanation of the problem, see the excerpt from Tony
                    Robinson's Ph.D. thesis in the COMMENTS section. In Robinson's opin", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9d05f3f0-d155-4dc4-84a7-b7551bcba3e2&revisionId=7295e950-aa23-4a8e-bd1d-075622985ae5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9d05f3f0-d155-4dc4-84a7-b7551bcba3e2&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/9d05f3f0-d155-4dc4-84a7-b7551bcba3e2/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "221", + "name": "NewsDatabroker", + "description": "

                    Overview:

                    Provides textual data to the news-classifier


                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a0588b74-603d-4c6d-bed7-fef41bdaa8eb&revisionId=0cd9b307-60c3-48f4-9308-07108854cf09&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a0588b74-603d-4c6d-bed7-fef41bdaa8eb&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a0588b74-603d-4c6d-bed7-fef41bdaa8eb/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "224", + "name": "rebase-data-broker", + "description": "

                    This data broker can load open datasets from https://www.rebase.energy/datasets. This will enable access to all upcoming open datasets in the Rebase Platform. The goal of this broker is to make it easy for anyone on the AIOD platform to access additional open energy datasets. 

                    The broker provides a user interface to download train and validation sets in a unified way that can quickly be used to evaluate your model. It also exposes LoadData rpc method to get data. A demonstration video can be found here. Please refer to this readme to understand more about how to use and install.

                    This project has received funding from the European Union's Horizon 2020 research and innovation programme within the framework of the I-NERGY Project, funded under grant agreement No 101016508

                    Author:

                    Source: Unknown - Date unknown

                    Please cite:


                    February 23, 1982


                    The 1982 annual meetings of the American Statistical Association (ASA)
                    will be held August 16-19, 1982 in Cincinnati. At that meeting, the ASA
                    Committee on Statistical Graphics plans to sponsor an \"Exposition of
                    Statistical Graphics Technology.\" The purpose of this activity is to
                    more fully inform the ASA membership about the capabilities and uses of
                    computer graphcis in statistical work. This letter is to invite you to
                    participate in the Exposition.


                    Attached is a set of biomedical data containing 209 observations (134
                    for \"normals\" and 75 for \"carriers\"). Each vendor of provider of
                    statistical graphics software participating in the Exposition is to
                    analyze these data using their software and to prepare tabular, graphical
                    and text output illustrating the use of graphics in these analyses and
                    summarizing their conclusions. The tabular and graphical materials must be
                    direct computer output from the statistical graphics software; the
                    textual descriptions and summaries need not be. The total display space
                    available to each participant at the meeting will be a standard poster-
                    board (approximately 4' x 2 1/2'). All entries will be displayed in one
                    location at the meetings, together with brief written commentary by
                    the committee summarizing the results of this activity.


                    Reference


                    Exposition of Statistical Graphics Technology,
                    L. H. Cox, M. M. Johnson, K. Kafadar,
                    ASA Proc Stat. Comp Section, 1982, pp 55-56.
                    Enclosures


                    THE DATA


                    The following data arose in a study to develop ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a6b3cc75-5ff7-4293-b1b7-36731c797020&revisionId=d1323bad-7098-462e-b402-6b6c6f77cfce&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a6b3cc75-5ff7-4293-b1b7-36731c797020&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a6b3cc75-5ff7-4293-b1b7-36731c797020/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "226", + "name": "kc2", + "description": "https://openml.org

                    Author: Mike Chapman, NASA

                    Source: tera-PROMISE - 2004

                    Please cite: Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE Repository of Software Engineering Databases. School of Information Technology and Engineering, University of Ottawa, Canada.


                    KC2 Software defect prediction

                    One of the NASA Metrics Data Program defect data sets. Data from software for science data processing. Data comes from McCabe and Halstead features extractors of source code. These features were defined in the 70s in an attempt to objectively characterize code features that are associated with software quality.


                    Attribute Information



                    1. loc : numeric % McCabe's line count of code

                    2. v(g) : numeric % McCabe \"cyclomatic complexity\"

                    3. ev(g) : numeric % McCabe \"essential complexity\"

                    4. iv(g) : numeric % McCabe \"design complexity\"

                    5. n : numeric % Halstead total operators + operands

                    6. v : numeric % Halstead \"volume\"

                    7. l : numeric % Halstead \"program length\"

                    8. d : numeric % Halstead \"difficulty\"

                    9. i : numeric % Halstead \"intelligence\"

                    10. e : numeric % Halstead \"effort\"

                    11. b : numeric % Halstead

                    12. t : numeric % Halstead's time estimator

                    13. lOCode : numeric % Halstead's line count

                    14. lOComment : numeric % Halstead's count of lines of comments

                    15. lOBlank : numeric % Ha", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a80a6f2d-b129-4ae0-bfce-22f7631801fe&revisionId=066db903-f64c-4bf9-9118-28ed77006e9a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a80a6f2d-b129-4ae0-bfce-22f7631801fe&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a80a6f2d-b129-4ae0-bfce-22f7631801fe/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "227", + "name": "autoUniv-au7-700", + "description": "https://openml.org

                      Author: Ray. J. Hickey

                      Source: UCI

                      Please cite:



                      • Dataset Title:


                      AutoUniv Dataset

                      data problem: autoUniv-au7-700



                      • Abstract:


                      AutoUniv is an advanced data generator for classifications tasks. The aim is to reflect the nuances and heterogeneity of real data. Data can be generated in .csv, ARFF or C4.5 formats.



                      • Source:


                      AutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com
                      AutoUniv web-site: http://sites.google.com/site/autouniv/.



                      • Data Set Information:


                      The user first creates a classification model and then generates classified examples from it. To create a model, the following are specified: the number of attributes (up to 1000) and their type (discrete or continuous), the number of classes (up to 10), the complexity of the underlying rules and the noise level. AutoUniv then produces a model through a process of constrained randomised search to satisfy the user's requirements. A model can have up to 3000 rules. Rare class models can be designed. A sequence of models can be designed to reflect concept and/or population drift.


                      AutoUniv creates three text files for a model: a Prolog specification of the model used to generate examples (.aupl); a user-friendly statement of the classification rules in an 'if ... then' format (.aurules); a statistical summary of the main properties of the model, including its Bayes rate (.auprops).



                      • Attribute Information:


                      Attributes may be discrete with up to 10 values or continuous. A discrete attribute", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a853cebc-f57d-4e28-afa8-88b8f7a27e9f&revisionId=45d90a0e-8de7-44a8-b04f-c05c0ec44b32&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a853cebc-f57d-4e28-afa8-88b8f7a27e9f&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a853cebc-f57d-4e28-afa8-88b8f7a27e9f/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "231", + "name": "cleveland-nominal", + "description": "https://openml.org

                      cleveland-nominal-pmlb

                      ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ae60613f-f66e-4250-b9ee-92784a85ed89&revisionId=6e9c6eea-42b0-4bd1-8d7d-ecc7c170af17&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ae60613f-f66e-4250-b9ee-92784a85ed89&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ae60613f-f66e-4250-b9ee-92784a85ed89/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "238", + "name": "i-nergy-load-forecasting-databroker", + "description": "

                      This is a Databroker service used for Timeseries. This service is implemented in context of the I-NERGY project. A User Interface is included where the users can upload their Timeseries in a csv format. For more information on how to use the solution, please see README.pdf in the Documents section. 

                      ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b303991f-d5cf-40b0-a941-1d0c0292f4f9&revisionId=fa3adc1a-1cee-40df-aea7-628a4942b01b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b303991f-d5cf-40b0-a941-1d0c0292f4f9&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b303991f-d5cf-40b0-a941-1d0c0292f4f9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "239", + "name": "ai4eu-robotics-pump-1024-fft-broker", + "description": "

                      The robotic pump demonstrator represents a hydraulic pump that can be mounted on an industrial robot, for example, to pump liquid paint for spray painting. On this pump, one accelerometer is mounted for vibration monitoring and recording.

                      The pump can be controlled in terms of speed (rotations per minute, rpm), affecting the throughput of paint and the pressure in and out of the pump.

                      The dataset consists of 380 million measurements of several sensor data of the pump system in 1-second intervals over two months in 2020.

                      The complete dataset & documentation is available on Zenodo.

                      ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b3bef42d-b521-4d63-866b-26b6a4b1e053&revisionId=191d8798-2b8b-4ebb-9c4b-9e58caf91bdc&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b3bef42d-b521-4d63-866b-26b6a4b1e053&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b3bef42d-b521-4d63-866b-26b6a4b1e053/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "245", + "name": "car-evaluation", + "description": "https://openml.org

                      car-evaluation-pmlb

                      ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b70b266d-8c03-4f01-b809-668eb6ad4d89&revisionId=61420377-785c-4b22-8344-f04eeda911b7&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b70b266d-8c03-4f01-b809-668eb6ad4d89&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b70b266d-8c03-4f01-b809-668eb6ad4d89/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "248", + "name": "phoneme", + "description": "https://openml.org

                      Author: Dominique Van Cappel, THOMSON-SINTRA

                      Source: KEEL, ELENA - 1993

                      Please cite: None


                      The aim of this dataset is to distinguish between nasal (class 0) and oral sounds (class 1). Five different attributes were chosen to characterize each vowel: they are the amplitudes of the five first harmonics AHi, normalised by the total energy Ene (integrated on all the frequencies): AHi/Ene. The phonemes are transcribed as follows: sh as in she, dcl as in dark, iy as the vowel in she, aa as the vowel in dark, and ao as the first vowel in water.


                      Source


                      The current dataset was formatted by the KEEL repository, but originally hosted by the ELENA Project. The dataset originates from the European ESPRIT 5516 project: ROARS. The aim of this project was the development and the implementation of a real time analytical system for French and Spanish speech recognition.


                      Relevant information


                      Most of the already existing speech recognition systems are global systems (typically Hidden Markov Models and Time Delay Neural Networks) which recognizes signals and do not really use the speech
                      specificities. On the contrary, analytical systems take into account the articulatory process leading to the different phonemes of a given language, the idea being to deduce the presence of each of the
                      phonetic features from the acoustic observation.


                      The main difficulty of analytical sy", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b824ceef-6647-4286-999c-6e175cebc886&revisionId=4517efb8-1b0a-485f-9603-1667a3738dc4&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b824ceef-6647-4286-999c-6e175cebc886&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b824ceef-6647-4286-999c-6e175cebc886/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "249", + "name": "tic-tac-toe", + "description": "https://openml.org

                      Author: David W. Aha

                      Source: UCI - 1991

                      Please cite: UCI


                      Tic-Tac-Toe Endgame database

                      This database encodes the complete set of possible board configurations at the end of tic-tac-toe games, where \"x\" is assumed to have played first. The target concept is \"win for x\" (i.e., true when \"x\" has one of 8 possible ways to create a \"three-in-a-row\").


                      Attribute Information


                       (x=player x has taken, o=player o has taken, b=blank)
                      1. top-left-square: {x,o,b}
                      2. top-middle-square: {x,o,b}
                      3. top-right-square: {x,o,b}
                      4. middle-left-square: {x,o,b}
                      5. middle-middle-square: {x,o,b}
                      6. middle-right-square: {x,o,b}
                      7. bottom-left-square: {x,o,b}
                      8. bottom-middle-square: {x,o,b}
                      9. bottom-right-square: {x,o,b}
                      10. Class: {positive,negative}
                      ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b8a104fa-968e-4492-aca6-7ea4b6de9a2d&revisionId=ebb899ed-1abb-4f88-9d7a-f85922b29557&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b8a104fa-968e-4492-aca6-7ea4b6de9a2d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b8a104fa-968e-4492-aca6-7ea4b6de9a2d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "250", + "name": "wine", + "description": "https://openml.org

                      Author:

                      Source: Unknown -

                      Please cite:




                      1. Title of Database: Wine recognition data
                        Updated Sept 21, 1998 by C.Blake : Added attribute information




                      2. Sources:
                        (a) Forina, M. et al, PARVUS - An Extendible Package for Data
                        Exploration, Classification and Correlation. Institute of Pharmaceutical
                        and Food Analysis and Technologies, Via Brigata Salerno,
                        16147 Genoa, Italy.


                        (b) Stefan Aeberhard, email: stefan@coral.cs.jcu.edu.au
                        (c) July 1991
                        3. Past Usage:


                        (1)
                        S. Aeberhard, D. Coomans and O. de Vel,
                        Comparison of Classifiers in High Dimensional Settings,
                        Tech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of
                        Mathematics and Statistics, James Cook University of North Queensland.
                        (Also submitted to Technometrics).


                        The data was used with many others for comparing various
                        classifiers. The classes are separable, though only RDA
                        has achieved 100% correct classification.
                        (RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data))
                        (All results using the leave-one-out technique)


                        In a classification context, this is a well posed problem
                        with \"well behaved\" class structures. A good data set
                        for first testing of a new classifier, but not very
                        challenging.


                        (2)
                        S. Aeberhard, D. Coomans and O. de Vel,
                        \"THE CLASSIFICATION PERFORMANCE OF RDA\"
                        Tech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of
                        Mathematics and Statistics, James Cook University of North Queensland.
                        (Also submitted to Journal of Chemometrics).


                        Here, the data was used to illustr", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b937a775-61e8-4522-8511-09597c6b40c9&revisionId=9adb25dd-4ded-4104-a593-f5aaad1ff3c2&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b937a775-61e8-4522-8511-09597c6b40c9&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b937a775-61e8-4522-8511-09597c6b40c9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "253", + "name": "file-viewer", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=268020c8-c4fb-4137-953a-d5dd59f70e8a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/bb1c9198-b230-4cd5-bda5-866c689fc1b4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "254", + "name": "file-viewer", + "description": "

                        A simple file viewer that lists provided files with download links.

                        To connect with other components a link to SharedFolderProvider is needed. The viewer will show a list with recent files and their download link. The content of last file will be presented if its text or an image.


                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=811faf16-86aa-41a0-8720-4e4dcc352074&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.3", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&version=1.0.3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/bb1c9198-b230-4cd5-bda5-866c689fc1b4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "255", + "name": "file-viewer", + "description": "

                        A simple file viewer that lists provided files with download links.

                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=aedc2371-cf0d-433a-8878-8b5ab4aec112&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/bb1c9198-b230-4cd5-bda5-866c689fc1b4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "256", + "name": "file-viewer", + "description": "

                        A simple file viewer that lists provided files with download links.

                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=f8389c64-a5e0-4ce4-b97d-ef63de60db19&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/bb1c9198-b230-4cd5-bda5-866c689fc1b4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "258", + "name": "recognaize-ui", + "description": "

                        Recognaize UI

                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bc867fa6-eb1d-4905-bb76-2ebe413c2e91&revisionId=c7add00b-b4b4-46ee-8594-bd0e067f5665&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bc867fa6-eb1d-4905-bb76-2ebe413c2e91&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/bc867fa6-eb1d-4905-bb76-2ebe413c2e91/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "260", + "name": "autos", + "description": "https://openml.org

                        Author: Jeffrey C. Schlimmer (Jeffrey.Schlimmer@a.gp.cs.cmu.edu)

                        Source: UCI - 1987

                        Please cite:


                        1985 Auto Imports Database

                        This data set consists of three types of entities: (a) the specification of an auto in terms of various characteristics, (b) its assigned insurance risk rating, (c) its normalized losses in use as compared to other cars. The second rating corresponds to the degree to which the auto is more risky than its price indicates. Cars are initially assigned a risk factor symbol associated with its price. Then, if it is more risky (or less), this symbol is adjusted by moving it up (or down) the scale. Actuarians call this process \"symboling\". A value of +3 indicates that the auto is risky, -3 that it is probably pretty safe.


                        The third factor is the relative average loss payment per insured vehicle year. This value is normalized for all autos within a particular size classification (two-door small, station wagons, sports/speciality, etc...), and represents the average loss per car per year.


                        Several of the attributes in the database could be used as a \"class\" attribute.


                        Sources:

                        1) 1985 Model Import Car and Truck Specifications, 1985 Ward's Automotive Yearbook.
                        2) Personal Auto Manuals, Insurance Services Office, 160 Water Street, New York, NY 10038
                        3) Insurance Collision Report, Insurance Institute for Highway Safety, Watergate 600, Washington, DC 20037


                        Past Usage:

                        Kibler,~D., Aha,~D.~W., & Albert,~M. (1989). Instance-based prediction of real-valued attributes. {it Computational Intelli", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c3822188-8928-4e20-b604-4a274ff34503&revisionId=d1574d67-64d0-4b00-8dfa-7b35d810ddb1&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c3822188-8928-4e20-b604-4a274ff34503&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c3822188-8928-4e20-b604-4a274ff34503/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "277", + "name": "seismic-bumps", + "description": "https://openml.org

                        Author: Sikora M., Wrobel L.

                        Source: UCI

                        Please cite: Sikora M., Wrobel L.: Application of rule induction algorithms for analysis of data collected by seismic hazard monitoring systems in coal mines. Archives of Mining Sciences, 55(1), 2010, 91-114.



                        • Title:


                        seismic-bumps Data Set



                        • Abstract:


                        The data describe the problem of high energy (higher than 10^4 J) seismic bumps forecasting in a coal mine. Data come from two of longwalls located in a Polish coal mine.



                        • Source:


                        Marek Sikora^{1,2} (marek.sikora '@' polsl.pl), Lukasz Wrobel^{1} (lukasz.wrobel '@' polsl.pl)
                        (1) Institute of Computer Science, Silesian University of Technology, 44-100 Gliwice, Poland
                        (2) Institute of Innovative Technologies EMAG, 40-189 Katowice, Poland



                        • Data Set Information:


                        Mining activity was and is always connected with the occurrence of dangers which are commonly called mining hazards. A special case of such threat is a seismic hazard which frequently occurs in many underground mines. Seismic hazard is the hardest detectable and predictable of natural hazards and in this respect it is comparable to an earthquake. More and more advanced seismic and seismoacoustic monitoring systems allow a better understanding rock mass processes and definition of seismic hazard
                        prediction methods. Accuracy of so far created methods is however far from perfect. Complexity of seismic processes and big disproportion between the number of low-energy seismic events and the number of high-energy phenomena (e.g. > 10^4J) causes the statistical technique", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ce2033a8-a389-435d-a64c-90a173e6775f&revisionId=97be56b0-b72d-41cd-8821-99a6a38e7285&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ce2033a8-a389-435d-a64c-90a173e6775f&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ce2033a8-a389-435d-a64c-90a173e6775f/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "278", + "name": "car", + "description": "https://openml.org

                        Author: Marko Bohanec, Blaz Zupan

                        Source: UCI - 1997

                        Please cite: UCI


                        Car Evaluation Database

                        This database was derived from a simple hierarchical decision model originally developed for the demonstration of DEX (M. Bohanec, V. Rajkovic: Expert system for decision making. Sistemica 1(1), pp. 145-157, 1990.).


                        The model evaluates cars according to the following concept structure:


                        CAR                      car acceptability
                        . PRICE overall price
                        . . buying buying price
                        . . maint price of the maintenance
                        . TECH technical characteristics
                        . . COMFORT comfort
                        . . . doors number of doors
                        . . . persons capacity in terms of persons to carry
                        . . . lug_boot the size of luggage boot
                        . . safety estimated safety of the car

                        Input attributes are printed in lowercase. Besides the target concept (CAR), the model includes three intermediate concepts: PRICE, TECH, COMFORT. Every concept is in the original model related to its lower level descendants by a set of examples (for
                        these examples sets see http://www-ai.ijs.si/BlazZupan/car.html).


                        The Car Evaluation Database contains examples with the structural information removed, i.e., directly relates CAR to the six input attributes: buying, maint, doors, persons, lug_boot, safety. Because of known underlying concept structure, this database may be particularly useful f", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cf25ba45-37d5-4548-b3d1-103c5cbbf24c&revisionId=105d6390-095f-4d54-bb6d-5e5c24cc5d88&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=cf25ba45-37d5-4548-b3d1-103c5cbbf24c&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/cf25ba45-37d5-4548-b3d1-103c5cbbf24c/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "279", + "name": "banknote-authentication", + "description": "https://openml.org

                        Author: Volker Lohweg (University of Applied Sciences, Ostwestfalen-Lippe)

                        Source: UCI - 2012

                        Please cite: UCI


                        Dataset about distinguishing genuine and forged banknotes. Data were extracted from images that were taken from genuine and forged banknote-like specimens. For digitization, an industrial camera usually used for print inspection was used. The final images have 400x 400 pixels. Due to the object lens and distance to the investigated object gray-scale pictures with a resolution of about 660 dpi were gained. A Wavelet Transform tool was used to extract features from these images.


                        Attribute Information


                        V1. variance of Wavelet Transformed image (continuous)

                        V2. skewness of Wavelet Transformed image (continuous)

                        V3. curtosis of Wavelet Transformed image (continuous)

                        V4. entropy of image (continuous)


                        Class (target). Presumably 1 for genuine and 2 for forged

                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cfd561b4-1973-40a1-a572-b70ffdf4d4a0&revisionId=d507733b-9e93-4bef-9161-01dbd46a505a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=cfd561b4-1973-40a1-a572-b70ffdf4d4a0&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/cfd561b4-1973-40a1-a572-b70ffdf4d4a0/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "282", + "name": "Idiap_BEAT_Databroker_-_M-NIST", + "description": "

                        This data broker provides the public MNIST database as a series of image.The data broker itself is standalone as it relies on the BOB mnist database package to provide the data.

                        There is no need for any configuration to be done in order to use it.

                        It can be used as input to benchmark other Acumos models.There is not image processing done in it. The output is a two dimensional numpy array that is stored as a binary type in order to avoid complex type creation as there's no notion of array size with protobuf.

                        The corresponding BEAT experiment can be found on the BEAT platform



                        .

                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d1bb94b1-9a27-47d2-a36a-7fceb57a8a9d&revisionId=082d9988-6731-48a9-aa03-22e8ca420541&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d1bb94b1-9a27-47d2-a36a-7fceb57a8a9d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d1bb94b1-9a27-47d2-a36a-7fceb57a8a9d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "284", + "name": "kc1", + "description": "https://openml.org

                        Author: Mike Chapman, NASA

                        Source: tera-PROMISE - 2004

                        Please cite: Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE Repository of Software Engineering Databases. School of Information Technology and Engineering, University of Ottawa, Canada.


                        KC1 Software defect prediction

                        One of the NASA Metrics Data Program defect data sets. Data from software for storage management for receiving and processing ground data. Data comes from McCabe and Halstead features extractors of source code. These features were defined in the 70s in an attempt to objectively characterize code features that are associated with software quality.


                        Attribute Information



                        1. loc : numeric % McCabe's line count of code

                        2. v(g) : numeric % McCabe \"cyclomatic complexity\"

                        3. ev(g) : numeric % McCabe \"essential complexity\"

                        4. iv(g) : numeric % McCabe \"design complexity\"

                        5. n : numeric % Halstead total operators + operands

                        6. v : numeric % Halstead \"volume\"

                        7. l : numeric % Halstead \"program length\"

                        8. d : numeric % Halstead \"difficulty\"

                        9. i : numeric % Halstead \"intelligence\"

                        10. e : numeric % Halstead \"effort\"

                        11. b : numeric % Halstead

                        12. t : numeric % Halstead's time estimator

                        13. lOCode : numeric % Halstead's line count

                        14. lOComment : numeric % Halstead's count of lines of comments
                        15. SUMO/RL implements a pipeline with a traffic simulator of the city of Trondheim, Norway, and a reinforcement learning autonomous agent that learns and implements traffic control policies with the goal of minimizing the number of pollution peaks above a given threshold. Each component can be ran stand alone.

                          The simulator is a wrapper of the Sumo simulator, that provides more functionality. The simulator is directly targeted to Trondheim city, with the goal to study the traffic related emissions.

                          For a more detailed description check the github repository of the resouce: https://github.com/tsveiga/AI4EU-RL-Trondheim

                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d40fdc2b-fe34-4de3-979d-507b55e96a0f&revisionId=a7ca617c-c274-4500-aff0-3bff21a24298&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d40fdc2b-fe34-4de3-979d-507b55e96a0f&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d40fdc2b-fe34-4de3-979d-507b55e96a0f/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "289", + "name": "edm-env", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&revisionId=44b5ac74-bf4d-42c9-b187-3d827c240553&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5ef8d03-8b7a-471e-b958-4c4a3192eaf8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "290", + "name": "edm-env", + "description": "

                          EDM env component is a numpy based EDM environment that follows the gym API. It emulates the rib machining pattern, with 4 available actions for z-axis control: 0 (stay), 1 (lower by 10\u03bcm), 2 (raise by 10\u03bcm), 3 (flush). Environment returns observed average voltage of the sparks, and the frequency of sparking (both are normalized)

                          This component exposes a protobuf based control API via 8061 port. Using this API it can be controlled by the demo EDM agent (edm-agent component in AI4EU platform). For instructions to run the agent and the enviroment together see the component repository at https://github.com/threethirds/edm


                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&revisionId=6fe2ae46-9234-4ce6-843b-adbf4e963c63&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.3", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&version=1.0.3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5ef8d03-8b7a-471e-b958-4c4a3192eaf8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "291", + "name": "edm-env", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&revisionId=7d04d645-ac32-4751-b953-b461d82e305d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5ef8d03-8b7a-471e-b958-4c4a3192eaf8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "292", + "name": "edm-env", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&revisionId=f7d265c5-821e-4c55-9410-837af3c9d9ab&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5ef8d03-8b7a-471e-b958-4c4a3192eaf8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "294", + "name": "ionosphere", + "description": "https://openml.org

                          Author: Space Physics Group, Applied Physics Laboratory, Johns Hopkins University. Donated by Vince Sigillito.

                          Source: UCI Machine Learning Repository

                          Please cite: UCI


                          Johns Hopkins University Ionosphere database

                          This radar data was collected by a system in Goose Bay, Labrador. This system consists of a phased array of 16 high-frequency antennas with a total transmitted power on the order of 6.4 kilowatts. See the paper for more details.


                          Attribute information


                          Received signals were processed using an autocorrelation function whose arguments are the time of a pulse and the pulse number. There were 17 pulse numbers for the Goose Bay system. Instances in this database are described by 2 attributes per pulse number, corresponding to the complex values returned by the function resulting from the complex electromagnetic signal.


                          The targets were free electrons in the ionosphere. \"Good\" (g) radar returns are those showing evidence of some type of structure in the ionosphere. \"Bad\" (b) returns are those that do not; their signals pass through the ionosphere.


                          Relevant papers


                          Sigillito, V. G., Wing, S. P., Hutton, L. V., & Baker, K. B. (1989). Classification of radar returns from the ionosphere using neural networks. Johns Hopkins APL Technical Digest, 10, 262-266.

                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d6b959e6-59c4-4311-a0b2-550b9a1bd407&revisionId=48b02822-24ca-4e2e-9e05-f606db3b6be2&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d6b959e6-59c4-4311-a0b2-550b9a1bd407&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d6b959e6-59c4-4311-a0b2-550b9a1bd407/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "304", + "name": "credit-approval", + "description": "https://openml.org

                          Author: Confidential - Donated by Ross Quinlan

                          Source: UCI - 1987

                          Please cite: UCI


                          Credit Approval
                          This file concerns credit card applications. All attribute names and values have been changed to meaningless symbols to protect the confidentiality of the data.


                          This dataset is interesting because there is a good mix of attributes -- continuous, nominal with small numbers of values, and nominal with larger numbers of values. There are also a few missing values.

                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dc36f026-d89b-4017-943e-560012105d3d&revisionId=9238fdfe-0824-45cf-933d-d51cb54deb54&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=dc36f026-d89b-4017-943e-560012105d3d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/dc36f026-d89b-4017-943e-560012105d3d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "309", + "name": "segment", + "description": "https://openml.org

                          Author: University of Massachusetts Vision Group, Carla Brodley

                          Source: UCI - 1990

                          Please cite: UCI


                          Image Segmentation Data Set
                          The instances were drawn randomly from a database of 7 outdoor images. The images were hand-segmented to create a classification for every pixel. Each instance is a 3x3 region.


                          Attribute Information



                          1. region-centroid-col: the column of the center pixel of the region.

                          2. region-centroid-row: the row of the center pixel of the region.

                          3. region-pixel-count: the number of pixels in a region = 9.

                          4. short-line-density-5: the results of a line extractoin algorithm that
                            counts how many lines of length 5 (any orientation) with
                            low contrast, less than or equal to 5, go through the region.

                          5. short-line-density-2: same as short-line-density-5 but counts lines
                            of high contrast, greater than 5.

                          6. vedge-mean: measure the contrast of horizontally
                            adjacent pixels in the region. There are 6, the mean and
                            standard deviation are given. This attribute is used as
                            a vertical edge detector.

                          7. vegde-sd: (see 6)

                          8. hedge-mean: measures the contrast of vertically adjacent
                            pixels. Used for horizontal line detection.

                          9. hedge-sd: (see 8).

                          10. intensity-mean: the average over the region of (R + G + B)/3

                          11. rawred-mean: the average over the region of the R value.

                          12. rawblue-mean: th", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df660739-9aee-423a-a44e-df9b637cfe1b&revisionId=6f1d61b0-1028-44ee-ac03-ce7b562550c3&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df660739-9aee-423a-a44e-df9b637cfe1b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/df660739-9aee-423a-a44e-df9b637cfe1b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "310", + "name": "segment", + "description": "https://openml.org

                            Author: University of Massachusetts Vision Group, Carla Brodley

                            Source: UCI - 1990

                            Please cite: UCI


                            Image Segmentation Data Set
                            The instances were drawn randomly from a database of 7 outdoor images. The images were hand-segmented to create a classification for every pixel. Each instance is a 3x3 region.


                            Major changes w.r.t. version 2: ignored first two variables as they do not fit the classification task (they reflect the location of the sample in the original image). The 3rd is constant, so should also be ignored.


                            Attribute Information



                            1. short-line-density-5: the results of a line extractoin algorithm that
                              counts how many lines of length 5 (any orientation) with
                              low contrast, less than or equal to 5, go through the region.

                            2. short-line-density-2: same as short-line-density-5 but counts lines
                              of high contrast, greater than 5.

                            3. vedge-mean: measure the contrast of horizontally
                              adjacent pixels in the region. There are 6, the mean and
                              standard deviation are given. This attribute is used as
                              a vertical edge detector.

                            4. vegde-sd: (see 6)

                            5. hedge-mean: measures the contrast of vertically adjacent
                              pixels. Used for horizontal line detection.

                            6. hedge-sd: (see 8).

                            7. intensity-mean: the average over the region of (R + G + B)/3

                            8. rawred-mean: the average over the region of the R value.

                            9. r", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df660739-9aee-423a-a44e-df9b637cfe1b&revisionId=a0a9b64d-774e-438d-b13c-c9c20e220da0&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df660739-9aee-423a-a44e-df9b637cfe1b&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/df660739-9aee-423a-a44e-df9b637cfe1b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "313", + "name": "pc1", + "description": "https://openml.org

                              Author: Mike Chapman, NASA

                              Source: tera-PROMISE - 2004

                              Please cite: Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE Repository of Software Engineering Databases. School of Information Technology and Engineering, University of Ottawa, Canada.


                              PC1 Software defect prediction

                              One of the NASA Metrics Data Program defect data sets. Data from flight software for earth orbiting satellite. Data comes from McCabe and Halstead features extractors of source code. These features were defined in the 70s in an attempt to objectively characterize code features that are associated with software quality.


                              Attribute Information



                              1. loc : numeric % McCabe's line count of code

                              2. v(g) : numeric % McCabe \"cyclomatic complexity\"

                              3. ev(g) : numeric % McCabe \"essential complexity\"

                              4. iv(g) : numeric % McCabe \"design complexity\"

                              5. n : numeric % Halstead total operators + operands

                              6. v : numeric % Halstead \"volume\"

                              7. l : numeric % Halstead \"program length\"

                              8. d : numeric % Halstead \"difficulty\"

                              9. i : numeric % Halstead \"intelligence\"

                              10. e : numeric % Halstead \"effort\"

                              11. b : numeric % Halstead

                              12. t : numeric % Halstead's time estimator

                              13. lOCode : numeric % Halstead's line count

                              14. lOComment : numeric % Halstead's count of lines of comments

                              15. lOBlank : nume", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e20b38c6-c46c-4cf6-96cf-c3ce14285c88&revisionId=c63c438d-ba5f-4544-94f2-8be84fb8e252&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e20b38c6-c46c-4cf6-96cf-c3ce14285c88&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e20b38c6-c46c-4cf6-96cf-c3ce14285c88/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "318", + "name": "SensorThings_API_connector", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&revisionId=5d3aee4b-03e3-4e99-8fe4-80193da4a04e&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e4e10524-917f-4515-860f-46d7d90106e5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "319", + "name": "SensorThings_API_connector", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&revisionId=a8705f9d-18cd-4d6f-b1a6-ed9a5dfa2d54&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e4e10524-917f-4515-860f-46d7d90106e5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "320", + "name": "SensorThings_API_connector", + "description": "

                                This is a generic connector for the SensorThings API. It will allow you to connect to any SensorThings API in the world and therefore potentially recover data on any domain. For example, this would facilitate the retrieval of public Covid19 data, harvested from various sources including Johns Hopkins and RKI, or from near-real-time air quality across Europe, from both national sources (harvested from AT SOS and WFS) and Europe (EEA).

                                To illustrate the potential uses of these different SensorThings API (with a single connector), one can take a look at these different applications: a visualisation tool[1] bringing together French and German flow data, a covid-19 dashboard[2] and the Windy Web site[3] focused on the weather forecast.


                                [1] https://wg-brgm.k8s.ilt-dmz.iosb.fraunhofer.de/servlet/is/110/


                                [2] http://www.covid19dashboard.org/


                                [3] https://www.windy.com/fr/-NO2-no2?camsEu,no2,47.905,1.908,5

                                ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&revisionId=baf4c46b-673b-48d0-ac27-1fa2a87ba625&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.3", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&version=1.0.3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e4e10524-917f-4515-860f-46d7d90106e5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "321", + "name": "SensorThings_API_connector", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&revisionId=f7bd8ec8-795c-471b-b4d8-5339f907b241&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e4e10524-917f-4515-860f-46d7d90106e5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "327", + "name": "IoTxKG", + "description": "

                                The Internet of Things (IoT) primary objective is to make a hyper-connected world for various application domains. However, IoT suffers from a lack of interoperability leading to a substantial threat to the predicted economic value. Schema.org provides semantic interoperability to structure heterogeneous data on the Web. An extension of this vocabulary for the IoT domain (iot.schema.org) is an ongoing research effort to address semantic interoperability for the Web of Things (WoT). To design this vocabulary, a central challenge is to identify the main topics (concepts and properties) automatically from existing knowledge in IoT applications. IoTxKG automatically 1) identifies the most important topics from existing ontologies of the 4 KE4WoT challenge domains \u2013 smart building, mobility, energy and weather \u2013 based on suitable language models and 2) visualises the topics using both wordclouds and interactive graph-based word clouds. 


                                The following technologies are employed in IoTxKG

                                • W3C Semantic Web Technologies (e.g. RDF, OWL, SPARQL, SKOS)
                                • Deep Learning Language Models (Word2vec, BERT, ERNIE, GPT)
                                • Clustering Algorithms (e.g. k-means clustering)
                                • Graph-based Visualization
                                ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e55074a8-d66b-4e83-84c9-e0cd4371c79b&revisionId=75ce6a2f-1762-4907-8b94-a12ec9607f23&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e55074a8-d66b-4e83-84c9-e0cd4371c79b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e55074a8-d66b-4e83-84c9-e0cd4371c79b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "328", + "name": "ai4eu-robotics-pump-6144-raw-broker", + "description": "

                                The robotic pump demonstrator represents a hydraulic pump that can be mounted on an industrial robot, for example, to pump liquid paint for spray painting. On this pump, one accelerometer is mounted for vibration monitoring and recording.

                                The pump can be controlled in terms of speed (rotations per minute, rpm), affecting the throughput of paint and the pressure in and out of the pump.

                                The dataset consists of 380 million measurements of several sensor data of the pump system in 1-second intervals over two months in 2020.

                                The complete dataset & documentation is available on Zenodo.

                                ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e5be6960-bea7-4d62-8301-be494ab1ac46&revisionId=2626b3dc-d3a3-4f3c-b7b9-e523758dd5b5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e5be6960-bea7-4d62-8301-be494ab1ac46&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e5be6960-bea7-4d62-8301-be494ab1ac46/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "329", + "name": "ai4eu-robotics-pump-6144-raw-broker", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e5be6960-bea7-4d62-8301-be494ab1ac46&revisionId=65423d0c-1238-47a8-94fb-98d39df1d460&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e5be6960-bea7-4d62-8301-be494ab1ac46&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e5be6960-bea7-4d62-8301-be494ab1ac46/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "330", + "name": "TEK_THOR_DATA_CURATION", + "description": "

                                AI4EU - THOR THOR solution consists in a Hybrid optimization solution to make the right decision on the amount spare parts in stock, considering past sales and forecasts. The purchase decision considers as input information current stock status, production needs, production forecast, sales forecast, variability Price of stock material and several restriction parameters.

                                Data-Curation. Different datasets extracted from company ERP are analyzed and normalized by a \u2018Quality\u2019 module, which uses different statistical techniques to calculate quality metrics and fix missing values.

                                ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e6d5038c-424a-44ce-9415-34fa129bf9a5&revisionId=bf98fd1e-fdf2-4ada-9a9a-c30fb1a90fea&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e6d5038c-424a-44ce-9415-34fa129bf9a5&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e6d5038c-424a-44ce-9415-34fa129bf9a5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "331", + "name": "breast-cancer", + "description": "https://openml.org

                                Author:

                                Source: Unknown -

                                Please cite:


                                Citation Request:
                                This breast cancer domain was obtained from the University Medical Centre,
                                Institute of Oncology, Ljubljana, Yugoslavia. Thanks go to M. Zwitter and
                                M. Soklic for providing the data. Please include this citation if you plan
                                to use this database.




                                1. Title: Breast cancer data (Michalski has used this)




                                2. Sources:
                                  -- Matjaz Zwitter & Milan Soklic (physicians)
                                  Institute of Oncology
                                  University Medical Center
                                  Ljubljana, Yugoslavia
                                  -- Donors: Ming Tan and Jeff Schlimmer (Jeffrey.Schlimmer@a.gp.cs.cmu.edu)
                                  -- Date: 11 July 1988




                                3. Past Usage: (Several: here are some)
                                  -- Michalski,R.S., Mozetic,I., Hong,J., & Lavrac,N. (1986). The
                                  Multi-Purpose Incremental Learning System AQ15 and its Testing
                                  Application to Three Medical Domains. In Proceedings of the
                                  Fifth National Conference on Artificial Intelligence, 1041-1045,
                                  Philadelphia, PA: Morgan Kaufmann.
                                  -- accuracy range: 66%-72%
                                  -- Clark,P. & Niblett,T. (1987). Induction in Noisy Domains. In
                                  Progress in Machine Learning (from the Proceedings of the 2nd
                                  European Working Session on Learning), 11-30, Bled,
                                  Yugoslavia: Sigma Press.
                                  -- 8 test results given: 65%-72% accuracy range
                                  -- Tan, M., & Eshelman, L. (1988). Using weighted networks to
                                  represent classification knowledge in noisy domains. Proceedings
                                  of the Fifth International Confere", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e76a359c-ad44-48f2-a5be-f969434c0079&revisionId=62a3f013-f8ae-46b8-9887-aadd4b079659&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e76a359c-ad44-48f2-a5be-f969434c0079&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e76a359c-ad44-48f2-a5be-f969434c0079/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "335", + "name": "GAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_50_EDM-2_001", + "description": "https://openml.org

                                  GAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_50_EDM-2_001-pmlb

                                  ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ea98c298-5fcb-4b37-8262-828d3605cfaf&revisionId=70f884e0-9a7e-458b-bdf0-ad3bba0667dc&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ea98c298-5fcb-4b37-8262-828d3605cfaf&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ea98c298-5fcb-4b37-8262-828d3605cfaf/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "347", + "name": "thyroid-new", + "description": "https://openml.org

                                  new-thyroid-pmlb

                                  ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=edc15172-70fb-489b-bff1-c1c28c61ce6b&revisionId=96529752-d961-4e5e-8f0f-b104c3e1b603&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=edc15172-70fb-489b-bff1-c1c28c61ce6b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/edc15172-70fb-489b-bff1-c1c28c61ce6b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "348", + "name": "churn", + "description": "https://openml.org

                                  Author: Unknown

                                  Source: PMLB, BigML, Supposedly from UCI but I can't find it there.

                                  Please cite:


                                  A dataset relating characteristics of telephony account features and usage and whether or not the customer churned. Originally used in Discovering Knowledge in Data: An Introduction to Data Mining.

                                  ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ee42788e-0ec5-45a9-97e4-6a0634ac84e2&revisionId=8cf5e565-aff3-41fd-ac89-c428b59a0a21&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ee42788e-0ec5-45a9-97e4-6a0634ac84e2&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ee42788e-0ec5-45a9-97e4-6a0634ac84e2/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "349", + "name": "blood-transfusion-service-center", + "description": "https://openml.org

                                  Author: Prof. I-Cheng Yeh

                                  Source: UCI

                                  Please cite: Yeh, I-Cheng, Yang, King-Jang, and Ting, Tao-Ming, \"Knowledge discovery on RFM model using Bernoulli sequence\", Expert Systems with Applications, 2008.


                                  Blood Transfusion Service Center Data Set

                                  Data taken from the Blood Transfusion Service Center in Hsin-Chu City in Taiwan -- this is a classification problem.


                                  To demonstrate the RFMTC marketing model (a modified version of RFM), this study adopted the donor database of Blood Transfusion Service Center in Hsin-Chu City in Taiwan. The center passes their blood transfusion service bus to one university in Hsin-Chu City to gather blood donated about every three months. To build an FRMTC model, we selected 748 donors at random from the donor database.


                                  Attribute Information



                                  • V1: Recency - months since last donation

                                  • V2: Frequency - total number of donation

                                  • V3: Monetary - total blood donated in c.c.

                                  • V4: Time - months since first donation), and a binary variable representing whether he/she donated blood in March 2007 (1 stand for donating blood; 0 stands for not donating blood).


                                  The target attribute is a binary variable representing whether he/she donated blood in March 2007 (2 stands for donating blood; 1 stands for not donating blood).

                                  ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eea25848-33cf-4b43-9677-1e932d8e710a&revisionId=9b1bfbf7-438a-45a7-99b0-c3c470a2551c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eea25848-33cf-4b43-9677-1e932d8e710a&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/eea25848-33cf-4b43-9677-1e932d8e710a/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } ], - "application_area": [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "351", + "name": "edm_aad_data_node_cl", + "description": "

                                  EDM RL Controller predictions (Solution Provider: Artificialy SA)


                                  Reinforcement learning applied to Electrical discharge machining (EDM) control for the AI4EU project with Agie Charmilles SA. 


                                  The solution consist of two nodes: `data_node` server which streams a DataFrame of observations (EDM machine states) read from the path provided by the client (`infile`); and an `agent_node` server which predicts control actions based on the agent / controller specified by the client. Output predictions are stored inside the `./data_predictions/` folder of the `agent_node` Docker container.


                                  To use this solution, please use the Docker container and the additional files (which are in the Documents tap of the model in the marketplace) from both the `data_node` and `agent_node`. They are both in the AI4EU platform market place named as `edm_aad_agent_node_cl` and `edm_aad_data_node_cl`

                                  ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f257ab28-e846-4d05-8fc1-9e53cddab23a&revisionId=0672b76d-0046-4ff5-afc1-5e7a64554451&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f257ab28-e846-4d05-8fc1-9e53cddab23a&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f257ab28-e846-4d05-8fc1-9e53cddab23a/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "352", + "name": "led24", + "description": "https://openml.org

                                  led24-pmlb

                                  ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f7d84667-d8e6-4dc3-af68-0845d7e984e2&revisionId=27254760-7bc4-4b93-b466-3e5c93490461&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], "citation": [], "contact": [], "creator": [], - "distribution": [ + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f7d84667-d8e6-4dc3-af68-0845d7e984e2&version=1.0.0", + "media": [ { - "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "checksum_algorithm": "sha256", - "copyright": "2010-2020 Example Company. All rights reserved.", - "content_url": "https://www.example.com/dataset/file.csv", - "content_size_kb": 10000, - "date_published": "2022-01-01T15:15:00.000", - "description": "Description of this file.", - "encoding_format": "text/csv", - "name": "Name of this file.", - "technology_readiness_level": 1 + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f7d84667-d8e6-4dc3-af68-0845d7e984e2/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" } ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "354", + "name": "ilpd", + "description": "https://openml.org

                                  Author: Bendi Venkata Ramana, M. Surendra Prasad Babu, N. B. Venkateswarlu

                                  Source: UCI - 2012

                                  Please cite: UCI


                                  Indian Liver Patient Dataset

                                  This data set contains 416 liver patient records and 167 non liver patient records.The data set was collected from north east of Andhra Pradesh, India. The class label divides the patients into 2 groups (liver patient or not). This data set contains 441 male patient records and 142 female patient records.


                                  Attribute Information


                                  V1. Age of the patient. Any patient whose age exceeded 89 is listed as being of age \"90\".

                                  V2. Gender of the patient

                                  V3. Total Bilirubin

                                  V4. Direct Bilirubin

                                  V5. Alkphos Alkaline Phosphatase

                                  V6. Sgpt Alanine Aminotransferase

                                  V7. Sgot Aspartate Aminotransferase

                                  V8. Total Proteins

                                  V9. Albumin

                                  V10. A/G Ratio Albumin and Globulin Ratio


                                  A feature indicating a train-test split has been removed.


                                  Relevant Papers



                                  1. Bendi Venkata Ramana, Prof. M. S. Prasad Babu and Prof. N. B. Venkateswarlu, A Critical Comparative Study of Liver Patients from USA and INDIA: An Exploratory Analysis\u009d, International Journal of Computer Science Issues, ISSN:1694-0784, May 2012.

                                  2. Bendi Venkata Ramana, Prof. M. S. Prasad Babu and Prof. N. B. Venkateswarlu, A Critical Study of Selected Classification Algorithms for Liver Disease Diagnosis, International Journal of Database Management Systems (IJDMS), Vo", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8016853-8e2c-45f3-8326-bd38387351e7&revisionId=050f2f0a-629d-4f41-a381-14220bd76465&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], "funder": [], "has_part": [], - "industrial_sector": [ - "Finance", - "eCommerce", - "Healthcare" + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f8016853-8e2c-45f3-8326-bd38387351e7&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f8016853-8e2c-45f3-8326-bd38387351e7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "355", + "name": "ModelInitializer", + "description": "

                                    The Model Initializer is an infrastructure node that can provide initial config parameters to a model.

                                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f888ec3c-1076-4e57-b56a-05f055aa4760&revisionId=76c80c0b-1883-4cb3-8f6c-4857c77ac4d5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], "is_part_of": [], - "keyword": [ - "keyword1", - "keyword2" + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f888ec3c-1076-4e57-b56a-05f055aa4760&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f888ec3c-1076-4e57-b56a-05f055aa4760/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } ], - "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "356", + "name": "solar-flare", + "description": "https://openml.org

                                    flare-pmlb

                                    ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&revisionId=276fb3d2-00a8-4695-abdc-bbcc8d8ed604&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&version=1.0.2", "media": [ { - "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "checksum_algorithm": "sha256", - "copyright": "2010-2020 Example Company. All rights reserved.", - "content_url": "https://www.example.com/image.jpeg", - "content_size_kb": 10000, - "date_published": "2022-01-01T15:15:00.000", - "description": "Description of this file.", - "encoding_format": "image/jpeg", - "name": "Name of this file." + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" } ], - "note": [ - "A brief record of points or ideas about this AI resource." + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "357", + "name": "solar-flare", + "description": "https://openml.org

                                    Author: Gary Bradshaw

                                    Source: UCI

                                    Please cite:


                                    Solar Flare database
                                    Relevant Information:
                                    -- The database contains 3 potential classes, one for the number of times a
                                    certain type of solar flare occured in a 24 hour period.
                                    -- Each instance represents captured features for 1 active region on the
                                    sun.
                                    -- The data are divided into two sections. The second section (flare.data2)
                                    has had much more error correction applied to the it, and has
                                    consequently been treated as more reliable.


                                    Number of Instances: flare.data1: 323, flare.data2: 1066


                                    Number of attributes: 13 (includes 3 class attributes)


                                    Attribute Information


                                    1. Code for class (modified Zurich class)  (A,B,C,D,E,F,H)
                                    2. Code for largest spot size (X,R,S,A,H,K)
                                    3. Code for spot distribution (X,O,I,C)
                                    4. Activity (1 = reduced, 2 = unchanged)
                                    5. Evolution (1 = decay, 2 = no growth,
                                    3 = growth)
                                    6. Previous 24 hour flare activity code (1 = nothing as big as an M1,
                                    2 = one M1,
                                    3 = more activity than one M1)
                                    7. Historically-complex (1 = Yes, 2 = No)
                                    8. Did region become historically complex (1 = yes, 2 = no)
                                    on this pass across the sun's disk
                                    9. Area (1 = small, 2 = large)


                                    1. Area", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&revisionId=91ca0a1e-60e1-45ce-a2c0-7c3f79498739&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } ], - "research_area": [ - "Explainable AI", - "Physical AI" + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "358", + "name": "solar-flare", + "description": "https://openml.org

                                      Author: Gary Bradshaw

                                      Source: UCI

                                      Please cite:


                                      Solar Flare database
                                      Relevant Information:
                                      -- The database contains 3 potential classes, one for the number of times a
                                      certain type of solar flare occured in a 24 hour period.
                                      -- Each instance represents captured features for 1 active region on the
                                      sun.
                                      -- The data are divided into two sections. The second section (flare.data2)
                                      has had much more error correction applied to the it, and has
                                      consequently been treated as more reliable.


                                      Number of Instances: flare.data1: 323, flare.data2: 1066


                                      Number of attributes: 13 (includes 3 class attributes)


                                      Attribute Information


                                      1. Code for class (modified Zurich class)  (A,B,C,D,E,F,H)
                                      2. Code for largest spot size (X,R,S,A,H,K)
                                      3. Code for spot distribution (X,O,I,C)
                                      4. Activity (1 = reduced, 2 = unchanged)
                                      5. Evolution (1 = decay, 2 = no growth,
                                      3 = growth)
                                      6. Previous 24 hour flare activity code (1 = nothing as big as an M1,
                                      2 = one M1,
                                      3 = more activity than one M1)
                                      7. Historically-complex (1 = Yes, 2 = No)
                                      8. Did region become historically complex (1 = yes, 2 = no)
                                      on this pass across the sun's disk
                                      9. Area (1 = small, 2 = large)


                                      1. Area", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&revisionId=f333bc3c-b87a-42b8-a5e9-5290036cc520&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } ], - "scientific_domain": [ - "Anomaly Detection", - "Voice Recognition", - "Computer Vision." + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "360", + "name": "ai4eu-robotics-wrist-6144-fft-broker", + "description": "

                                        The robotic wrist demonstrator represents a mechanical wrist with three axes that can hold tools, e.g. for spray painting in combination with a pump. On this robotic wrist, two accelerometers are mounted for vibration monitoring and recording: one in the movable front part of the wrist and one in the shaft. The wrist can be controlled through the torque or the designated position of each axis\u2019 motor.

                                        The dataset consists of 1.8 billion measurements of several sensor data of the robotic wrist in 1-second intervals over six months in 2020.

                                        The complete dataset & description is available on Zenodo

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f9425d71-4d33-4af5-b4ba-25d6fa8aa3c4&revisionId=2399eb3e-67fb-419f-a630-df48c3cf138a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f9425d71-4d33-4af5-b4ba-25d6fa8aa3c4&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f9425d71-4d33-4af5-b4ba-25d6fa8aa3c4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } ], - "size": { - "unit": "Rows", - "value": 100 + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} + }, + { + "platform": "ai4experiments", + "platform_identifier": "363", + "name": "climate-model-simulation-crashes", + "description": "https://openml.org

                                        Author: D. Lucas, R. Klein, J. Tannahill, D. Ivanova, S. Brandon, D. Domyancic, Y. Zhang.


                                        Source: UCI


                                        Please Cite:
                                        Lucas, D. D., Klein, R., Tannahill, J., Ivanova, D., Brandon, S., Domyancic, D., and Zhang, Y.: Failure analysis of parameter-induced simulation crashes in climate models, Geosci. Model Dev. Discuss., 6, 585-623, Web Link, 2013.


                                        Source:


                                        D. Lucas (ddlucas .at. alum.mit.edu), Lawrence Livermore National Laboratory; R. Klein (rklein .at. astron.berkeley.edu), Lawrence Livermore National Laboratory & U.C. Berkeley; J. Tannahill (tannahill1 .at. llnl.gov), Lawrence Livermore National Laboratory; D. Ivanova (ivanova2 .at. llnl.gov), Lawrence Livermore National Laboratory; S. Brandon (brandon1 .at. llnl.gov), Lawrence Livermore National Laboratory; D. Domyancic (domyancic1 .at. llnl.gov), Lawrence Livermore National Laboratory; Y. Zhang (zhang24 .at. llnl.gov), Lawrence Livermore National Laboratory .


                                        This data was constructed using LLNL's UQ Pipeline, was created under the auspices of the US Department of Energy by Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344, was funded by LLNL's Uncertainty Quantification Strategic Initiative Laboratory Directed Research and Development Project under tracking code 10-SI-013, and is released under UCRL number LLNL-MISC-633994.


                                        Data Set Information:


                                        This dataset contains records of simulation crashes encountered during climate model uncertainty quantification (UQ) ensembles. Ensemb", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fba9e526-edb4-4fb0-9cb1-31ea29f07a2f&revisionId=6b6905e7-2855-43c9-a344-c01991e4efca&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "issn": "00000000", + "measurement_technique": "", + "temporal_coverage": "", + "aiod_entry": { + "editor": [], + "status": "draft" }, - "spatial_coverage": { - "address": { - "region": "California", - "locality": "Paris", - "street": "Wetstraat 170", - "postal_code": "1040 AA", - "address": "Wetstraat 170, 1040 Brussel", - "country": "BEL" - }, - "geo": { - "latitude": 37.42242, - "longitude": -122.08585, - "elevation_millimeters": 0 + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "funder": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fba9e526-edb4-4fb0-9cb1-31ea29f07a2f&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/fba9e526-edb4-4fb0-9cb1-31ea29f07a2f/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" } - } + ], + "note": [], + "research_area": [], + "scientific_domain": [], + "size": {}, + "spatial_coverage": {} } ] \ No newline at end of file diff --git a/src/connectors/example/resources/resource/experiments.json b/src/connectors/example/resources/resource/experiments.json index 094a777a..39ee9194 100644 --- a/src/connectors/example/resources/resource/experiments.json +++ b/src/connectors/example/resources/resource/experiments.json @@ -1,94 +1,1982 @@ [ - { - "platform": "example", - "platform_identifier": "1", - "name": "The name of this experiment", - "description": "A description.", - "same_as": "https://www.example.com/resource/this_resource", - "date_published": "2022-01-01T15:15:00.000", - "version": "1.1.0", - "pid": "https://doi.org/10.1000/182", - "experimental_workflow": "1) Load the dataset 2) run preprocessing code found in ... 3) run the model on the data.", - "execution_settings": "string", - "reproducibility_explanation": "string", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [ - "alias 1", - "alias 2" - ], - "application_area": [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" - ], - "badge": [ - "ACM Artifacts Evaluated - Reusable" - ], - "citation": [], - "contact": [], - "creator": [], - "distribution": [ - { - "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "checksum_algorithm": "sha256", - "copyright": "2010-2020 Example Company. All rights reserved.", - "content_url": "https://www.example.com/experiment.zip", - "content_size_kb": 10000, - "date_published": "2022-01-01T15:15:00.000", - "description": "Description of this file.", - "encoding_format": "application/zip", - "name": "Name of this file.", - "technology_readiness_level": 1, - "installation_script": "./install.sh", - "installation": "Build the Dockerfile", - "installation_time_milliseconds": 100, - "deployment_script": "./run.sh", - "deployment": "You can run the run.py file using python3. See README.md for required arguments.", - "deployment_time_milliseconds": 100, - "os_requirement": "Windows 11.", - "dependency": "Python packages as listed in requirements.txt.", - "hardware_requirement": "4GB RAM; 100MB storage; 1GHz processor with 8 cores." - } - ], - "has_part": [], - "industrial_sector": [ - "Finance", - "eCommerce", - "Healthcare" - ], - "is_part_of": [], - "keyword": [ - "keyword1", - "keyword2" - ], - "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", - "media": [ - { - "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "checksum_algorithm": "sha256", - "copyright": "2010-2020 Example Company. All rights reserved.", - "content_url": "https://www.example.com/image.jpeg", - "content_size_kb": 10000, - "date_published": "2022-01-01T15:15:00.000", - "description": "Description of this file.", - "encoding_format": "image/jpeg", - "name": "Name of this file." - } - ], - "note": [ - "A brief record of points or ideas about this AI resource." - ], - "research_area": [ - "Explainable AI", - "Physical AI" - ], - "scientific_domain": [ - "Anomaly Detection", - "Voice Recognition", - "Computer Vision." - ] - } + { + "platform": "ai4experiments", + "platform_identifier": "366", + "name": "Sudoku Tutorial", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=00aff3ab-94cb-4969-93c3-a95be53c05d2&revisionId=2f7c7ef1-262c-4a73-8393-aef1ded7cad3&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "2", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=00aff3ab-94cb-4969-93c3-a95be53c05d2&version=2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/00aff3ab-94cb-4969-93c3-a95be53c05d2/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "367", + "name": "Sudoku Tutorial", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=00aff3ab-94cb-4969-93c3-a95be53c05d2&revisionId=2f7c7ef1-262c-4a73-8393-aef1ded7cad3&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=00aff3ab-94cb-4969-93c3-a95be53c05d2&version=1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/00aff3ab-94cb-4969-93c3-a95be53c05d2/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "368", + "name": "AI4IoT-Calibration-Solution", + "description": "

                                        This solution implements a pipeline to air quality sensor calibration in the context of the AI4IoT pilot, consisting of three modules which, together, offer a solution for calibration of low-cost air quality sensors in the city of Trondheim, Norway. The modules are: a data source which fetches data from several external APIs and concatenates them, a calibration which predicts the true value at the sensor location and a simple visualization module which implements a web interface to analyze the output of the calibration procedure.


                                        More info on the pipeline can be found at the github repository: https://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&revisionId=478028bb-1c58-4641-9bc0-eba716119aec&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.2", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&version=1.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0fbc7cc0-843a-489b-bab1-40e4d2700680/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "369", + "name": "AI4IoT-Calibration-Solution", + "description": "

                                        This solution implements a pipeline to air quality sensor calibration in the context of the AI4IoT pilot, consisting of three modules which, together, offer a solution for calibration of low-cost air quality sensors in the city of Trondheim, Norway. The modules are: a data source which fetches data from several external APIs and concatenates them, a calibration which predicts the true value at the sensor location and a simple visualization module which implements a web interface to analyze the output of the calibration procedure.


                                        More info on the pipeline can be found at the github repository: https://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&revisionId=478028bb-1c58-4641-9bc0-eba716119aec&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0fbc7cc0-843a-489b-bab1-40e4d2700680/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "370", + "name": "AI4IoT-Calibration-Solution", + "description": "

                                        This solution implements a pipeline to air quality sensor calibration in the context of the AI4IoT pilot, consisting of three modules which, together, offer a solution for calibration of low-cost air quality sensors in the city of Trondheim, Norway. The modules are: a data source which fetches data from several external APIs and concatenates them, a calibration which predicts the true value at the sensor location and a simple visualization module which implements a web interface to analyze the output of the calibration procedure.


                                        More info on the pipeline can be found at the github repository: https://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&revisionId=478028bb-1c58-4641-9bc0-eba716119aec&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&version=1.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0fbc7cc0-843a-489b-bab1-40e4d2700680/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "371", + "name": "MusicDetection-pipeline", + "description": "

                                        This simple pipeline automatically analyzes audio content with the MusicDetection model and annotates music attributes like genre and tempo.

                                        Content to be analyzed can be provided via file upload, detection results will be presented in WebUI and can be downloaded.


                                        Remark: Since MusicDetection model is not publicly accessible, for the deployment of this pipeline it is necessary to acquire access credentials. Please send you requests to ai-assets@idmt.fraunhofer.de


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fc0b6dc-46e5-468b-9adf-841d9b062e51&revisionId=1b067b23-4730-4dc1-95aa-0bfc78b0a6ce&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "0.9.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0fc0b6dc-46e5-468b-9adf-841d9b062e51&version=0.9.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0fc0b6dc-46e5-468b-9adf-841d9b062e51/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "372", + "name": "clinical_evida_text_classifier", + "description": "

                                        This model let us to classify clinical text related to colon cancer or non-colon cancer texts based on ICD10 categories. The main objective is to get a label (1 or 0) depending if the input text belongs to C18 ICD category, which corresponds to Colon Cancer Category. The model is based on distilBERT transformer model and was trained using CodiEsp dataset. The input is a plain text and the output will be a number label.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=13648d7f-5002-4fd8-98f7-27d50d2d964e&revisionId=65657060-5fac-48d5-bdf8-e75dab26ae23&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=13648d7f-5002-4fd8-98f7-27d50d2d964e&version=1.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/13648d7f-5002-4fd8-98f7-27d50d2d964e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "373", + "name": "sentiment-analysis-pipeline", + "description": "

                                        Sentiment analysis pipeline.



                                        It takes the query text from the user and connects to the prediction model. The results can then be viewed on the Prediction model's UI.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=24269432-3dcf-42a8-a04e-463ed0c59757&revisionId=a951dffc-98f8-4914-a1d5-0fa79cb76640&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=24269432-3dcf-42a8-a04e-463ed0c59757&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/24269432-3dcf-42a8-a04e-463ed0c59757/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "374", + "name": "TrainingPipeline", + "description": "

                                        Overview:

                                        The training pipeline for the news_training example consists of 4 main nodes,

                                        1) News-Classifier - The core of the pipeline

                                        2) trainer-model - Facilitates the training process

                                        3) Tensorboard - Provides diagnostics preview

                                        4) News-Databroker - Starting point for data feed

                                         

                                        Note:

                                        Apart from demonstrating a training scenario, this example also shows the use of a shared folder for common file access for pipeline nodes.

                                        Each of the 4 mentioned nodes are also available as independent models here.

                                         

                                        Repository link:

                                        Please refer the following link for the code that represents the training pipeline in the Eclipse Graphene platform - https://gitlab.eclipse.org/eclipse/graphene/tutorials/-/tree/main/news_training

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2d4b9dff-c822-4fb6-9b5f-06f761fcbe2c&revisionId=7e95c907-2bdf-405d-8da4-4961e785514b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "v2", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2d4b9dff-c822-4fb6-9b5f-06f761fcbe2c&version=v2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2d4b9dff-c822-4fb6-9b5f-06f761fcbe2c/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "375", + "name": "ucrsuite-dtw-pip", + "description": "

                                        Overview

                                        The UCR Suite DTW pipeline ucrsuite-dtw-pip implements fast nearest-neighbor retrieval under the dynamic time warping (DTW)

                                        ucrsuite-config data broker is a starting point for the pipeline that process files and parameters to perform subsequence search in time series. ucrsuite-dtw calculates the nearest neighbor of a times series in a larger time series expressed as location and distance, using the UCR suite DTW algorithm.

                                        Usage

                                        To use the ucrsuite-dtw-pip solution, you can either download it from the Marketplace or run it on the Playground. Once the solution is deployed in the Playground, open the Web-UI of the ucrsuite-config model and enter the following information:


                                        * Data file: The path to the file containing the long time series.

                                        * Query file: The path to the file containing the query time series.

                                        * R: The size of the warping window. The value is in range 0-1, e.g., R=0.05 means windows of size +/-5%.


                                        Then, Run the solution. The distance calculation will start in the background.

                                        The result of calculation, expressed as location and distance, will be stored in the shared folder as a `dtw_distance.txt` file.

                                        Detailed result also available in the logs of the ucrsuite-dtw model in the following format:

                                        ------------------------

                                        Location: 756562

                                        Distance: 3.77562

                                        This is the assembled Solution of the AI4Industry Pilot of the AI4EU project. To run the solution, please use \"Deploy to local\" in the AI4EU Experiments Platform on this solution and follow the readme in the package or the YouTube Tutorial (Deploy and Run).


                                        This solution is the result of a collaboration between

                                        • Siemens Germany - Ivan Gocev
                                        • Fraunhofer IAIS - Raoul Blankertz, Nico H\u00f6ft
                                        • Technische Universit\u00e4t Wien - Peter Sch\u00fcller





                                        Contact:

                                        Peter Sch\u00fcller

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=33b984f4-fa6e-42e3-9af7-8cb3464ae10b&revisionId=1f58db48-282b-4629-a596-c379c9550f66&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "2", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=33b984f4-fa6e-42e3-9af7-8cb3464ae10b&version=2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/33b984f4-fa6e-42e3-9af7-8cb3464ae10b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "377", + "name": "AI4IndustryPilot", + "description": "

                                        This is the assembled Solution of the AI4Industry Pilot of the AI4EU project. To run the solution, please use \"Deploy to local\" in the AI4EU Experiments Platform on this solution and follow the readme in the package or the YouTube Tutorial (Deploy and Run).


                                        This solution is the result of a collaboration between

                                        • Siemens Germany - Ivan Gocev
                                        • Fraunhofer IAIS - Raoul Blankertz, Nico H\u00f6ft
                                        • Technische Universit\u00e4t Wien - Peter Sch\u00fcller





                                        Contact:

                                        Peter Sch\u00fcller

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=33b984f4-fa6e-42e3-9af7-8cb3464ae10b&revisionId=1f58db48-282b-4629-a596-c379c9550f66&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=33b984f4-fa6e-42e3-9af7-8cb3464ae10b&version=1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/33b984f4-fa6e-42e3-9af7-8cb3464ae10b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "378", + "name": "house-prices-pipeline", + "description": "

                                        Overview description

                                        The House Prices Pipeline is a simple example pipeline that predicts house prices. The pipeline illustrates how the price development is predicted by entering relevant parameters that provide information about the status of a property.

                                         

                                        Use case example

                                        As an interested house owner, an estimate can be made based on the AI forecast, how much the property will increase in value or not.

                                         

                                        Usage

                                        Select the \"houseprice-pipeline\" solution in the Marketplace or in the Design Studio. It is possible to download the solution or to run it on the Playground for testing purposes. When the solution is deployed in the Playground, select the Web-UI of the databroker and fill in the parameters. Then go back to the Playground and run the solution once and open the Web-UI (interface) of the model. In the second interface you will get the prediction based on your input.

                                         

                                        Support

                                        The solution is part of the tutorials with developer documentation and source code available. For further construction feel free to reach out to the AI.Lab team ai-lab@iais.fraunhofer.de or directly with the developer of the technology. The developer teams are generally open for feedback and happy about co-creation opportunities.


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=42bdc41c-6144-4c7b-88b6-4509999bff6d&revisionId=ec4a4a98-d37a-49c5-aaa1-97437d8a5a31&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=42bdc41c-6144-4c7b-88b6-4509999bff6d&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/42bdc41c-6144-4c7b-88b6-4509999bff6d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "379", + "name": "Sudoku-Tutorial-Stream", + "description": "

                                        This is the streaming version of the deployable Solution of the AI4EU Experiments Sudoku Hello World!

                                        It is a Proof of Concept for a Sudoku design assistant based on ASP, gRPC, and Protobuf, deployable in the AI4EU Experiments Platform.

                                        The Git repository holding this component of the Hello World is publicly available here: https://github.com/peschue/ai4eu-sudoku

                                        A Tutorial video about this \"Sudoku Hello World\" can be found here: https://www.youtube.com/watch?v=gM-HRMNOi4w

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=516d1afa-44ae-4315-be0a-88232698778d&revisionId=72489923-f34e-454a-85ef-2a0b8a54ed54&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=516d1afa-44ae-4315-be0a-88232698778d&version=1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/516d1afa-44ae-4315-be0a-88232698778d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "380", + "name": "Hubeau_Piezo_Stations", + "description": "

                                        This is an example of solution to access data of the French groundwater level stations (piezometer sensor).

                                        Data from the \"Piezometry\" API come from the ADES portal (national portal for access to French groundwater data). They relate to piezometric measurements (water level in groundwater tables), throughout France, from all the partners of the water information system (see metadata).


                                        The updates are integrated daily into the API.


                                        Data is expressed


                                        in NGF meters for levels (or ratings);

                                        in meters in relation to the measurement mark for the depths.


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5a56cb42-bfc5-48c6-a92b-92bb06a2b308&revisionId=780ab7bd-c541-4e36-9493-f80dcd67f743&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5a56cb42-bfc5-48c6-a92b-92bb06a2b308&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5a56cb42-bfc5-48c6-a92b-92bb06a2b308/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "381", + "name": "Iris_Pipeline", + "description": "

                                        Iris Pipeline: Made use of generic data broker to connect to iris dataset.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5b367470-e405-44de-b930-4c1e5f3e7161&revisionId=8b2b253f-3bd1-4719-8d0a-9f1084bf15bf&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5b367470-e405-44de-b930-4c1e5f3e7161&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5b367470-e405-44de-b930-4c1e5f3e7161/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "382", + "name": "ner-pipeline", + "description": "

                                        This is the ner-pipeline, which represents a deep learning Entity Recognizer in German.

                                        After successfully deploying ner-pipeline in the KI.NRW Playground, submit the desired text via ner-databroker's Web UI first (1), then RUN the pipeline (2) and go to the Web UI of the ner-model (3). You will see a list of processed texts, with the most recent provided text on top of the list.


                                        For each new NER request to the deployed ner-pipeline, repeat the steps from 1 to 3.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=612a819c-66fe-4ac4-86ae-b04e95ef4624&revisionId=a63bc9db-1691-45ca-a022-98e89ff43fd5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=612a819c-66fe-4ac4-86ae-b04e95ef4624&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/612a819c-66fe-4ac4-86ae-b04e95ef4624/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "383", + "name": "advice-inference-pipeline", + "description": "

                                        The process is divided into two independent workflows, the first one is the prediction, and includes the advice-img-databroker, advice-road-crop and advice-yolo nodes, which will perform the whole process of label prediction. On the other hand, the advice-label-assitant node allows the user to perform the relabelling task while the inference process is performed in the background

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=659ee5a9-0fbe-4676-8b1f-bb27d8379c30&revisionId=bae9c467-8208-47cc-b46f-ba6c97e9930d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "st3", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=659ee5a9-0fbe-4676-8b1f-bb27d8379c30&version=st3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/659ee5a9-0fbe-4676-8b1f-bb27d8379c30/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "384", + "name": "ADVICE", + "description": "

                                        AI-baseD predictiVe road maIntenanCe for a safer Europe (ADVICE) consist of a two stages pipeline for pothole detection, pothole size estimation and pothole formation forecasting. The pipeline is expected to evolve into a hybrid solution of edge and cloud computing.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6a58218e-ae25-446e-96b0-ebbb954f76e9&revisionId=5487352a-0934-465d-a9bd-feb927033a82&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "0.0.1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6a58218e-ae25-446e-96b0-ebbb954f76e9&version=0.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6a58218e-ae25-446e-96b0-ebbb954f76e9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "385", + "name": "Hubeau_Piezo_Chroniques", + "description": "

                                        This is an example of solution to access data of the French groundwater level observations timeseries (from piezometer sensor).

                                        Data from the \"Piezometry\" API come from the ADES portal (national portal for access to French groundwater data). They relate to piezometric measurements (water level in groundwater tables), throughout France, from all the partners of the water information system (see metadata).


                                        The updates are integrated daily into the API.


                                        Data is expressed


                                        in NGF meters for levels (or ratings);

                                        in meters in relation to the measurement mark for the depths.


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7694139a-dabf-4aa3-98ba-40ffe4c5fcad&revisionId=19527676-2736-419a-be52-0fa6895b2c50&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7694139a-dabf-4aa3-98ba-40ffe4c5fcad&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7694139a-dabf-4aa3-98ba-40ffe4c5fcad/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "386", + "name": "AI_REGIO_NLP_DSS", + "description": "

                                        AI Regio Pipeline structured to receive natural language text from a mic client over internet, transforming audio into text and using the produced text to help an operator in manufacturing domain.

                                        NLP is coupled with a self-learning DSS system that updates probability tables based on past answers given by the operator. It is able to understand short answers, like yes / no / don't know. NLP module, instead, maps a full sentence into well-know problem, allowing the system to ask the right first question

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8556ecaf-35ef-4b40-91bb-699165f89d71&revisionId=41386cb6-d281-429a-9415-b9c20c0cc9cb&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8556ecaf-35ef-4b40-91bb-699165f89d71&version=1.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8556ecaf-35ef-4b40-91bb-699165f89d71/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "387", + "name": "AI_REGIO_NLP_DSS", + "description": "

                                        AI Regio Pipeline structured to receive natural language text from a mic client over internet, transforming audio into text and using the produced text to help an operator in manufacturing domain.

                                        NLP is coupled with a self-learning DSS system that updates probability tables based on past answers given by the operator. It is able to understand short answers, like yes / no / don't know. NLP module, instead, maps a full sentence into well-know problem, allowing the system to ask the right first question

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8556ecaf-35ef-4b40-91bb-699165f89d71&revisionId=41386cb6-d281-429a-9415-b9c20c0cc9cb&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8556ecaf-35ef-4b40-91bb-699165f89d71&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8556ecaf-35ef-4b40-91bb-699165f89d71/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "388", + "name": "Video-Pipeline", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&revisionId=e91bdfc4-0c68-464c-831f-4970ab155386&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "V1.1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&version=V1.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/92a8e46f-d80a-4e68-a485-5ef5a74efa17/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "389", + "name": "Video-Pipeline", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&revisionId=e91bdfc4-0c68-464c-831f-4970ab155386&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "V1.3", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&version=V1.3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/92a8e46f-d80a-4e68-a485-5ef5a74efa17/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "390", + "name": "Video-Pipeline", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&revisionId=e91bdfc4-0c68-464c-831f-4970ab155386&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "V1.2", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&version=V1.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/92a8e46f-d80a-4e68-a485-5ef5a74efa17/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "391", + "name": "Standard_STA_Flow", + "description": "

                                        This is an example of solution exploiting the generic connector for the SensorThings API. This connector allows to connect to any SensorThings API in the world and therefore potentially recover data on any domain. For example, this would facilitate the retrieval of public Covid19 data, harvested from various sources including Johns Hopkins and RKI, or from near-real-time air quality across Europe, from both national sources (harvested from AT SOS and WFS) and Europe (EEA).

                                        To illustrate the potential uses of these different SensorThings API (with a single connector), one can take a look at these different applications: a visualisation tool[1] bringing together French and German flow data, a covid-19 dashboard[2] and the Windy Web site[3] focused on the weather forecast.


                                        [1] https://wg-brgm.k8s.ilt-dmz.iosb.fraunhofer.de/servlet/is/110/


                                        [2] http://www.covid19dashboard.org/


                                        [3] https://www.windy.com/fr/-NO2-no2?camsEu,no2,47.905,1.908,5

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a5a36ff2-f9f7-4272-abde-b81cf4cbbb80&revisionId=8caf7a53-d01e-4ea7-8c43-fc5dc27fcbc3&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a5a36ff2-f9f7-4272-abde-b81cf4cbbb80&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a5a36ff2-f9f7-4272-abde-b81cf4cbbb80/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "392", + "name": "AI4Media Demo", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a87cb119-168c-45b0-9a3e-6963396c1acf&revisionId=ca6125ff-b507-4c9a-b223-5440316a15d4&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a87cb119-168c-45b0-9a3e-6963396c1acf&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a87cb119-168c-45b0-9a3e-6963396c1acf/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "393", + "name": "AI4Media Demo", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a87cb119-168c-45b0-9a3e-6963396c1acf&revisionId=ca6125ff-b507-4c9a-b223-5440316a15d4&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a87cb119-168c-45b0-9a3e-6963396c1acf&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a87cb119-168c-45b0-9a3e-6963396c1acf/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "394", + "name": "aiplan4eu-demo", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ad53fc7d-7110-4b45-a4ed-b79324fa44e1&revisionId=3a9591b8-a644-4343-83ae-a765e88b7109&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "v1.1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ad53fc7d-7110-4b45-a4ed-b79324fa44e1&version=v1.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ad53fc7d-7110-4b45-a4ed-b79324fa44e1/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "395", + "name": "aiplan4eu-demo", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ad53fc7d-7110-4b45-a4ed-b79324fa44e1&revisionId=3a9591b8-a644-4343-83ae-a765e88b7109&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "v1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ad53fc7d-7110-4b45-a4ed-b79324fa44e1&version=v1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ad53fc7d-7110-4b45-a4ed-b79324fa44e1/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "396", + "name": "ObjectDetectionP", + "description": "

                                        This is a simple pipeline wrapping the object detection model.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b08401ec-f24a-452b-bf42-c57cb91b21e8&revisionId=490b5ed8-b498-4ddb-a99b-0cb1662f533c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b08401ec-f24a-452b-bf42-c57cb91b21e8&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b08401ec-f24a-452b-bf42-c57cb91b21e8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "397", + "name": "aqpredvisualize", + "description": "

                                        Air Quality Prediction and Visualization Pipeline for the area of Trondheim. The pipeline consists of 3 modules, a databroker module, a prediction module based on a pre-trained machine learning model and a visualization module with a web interface. More information and instructions can be found in the github repository: https://github.com/EliasKal/ai4eu_pipeline_visualization

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c4d4ec8b-1e43-4bf7-941e-8d81612cb71e&revisionId=3d63a545-e260-46a1-a743-298902fb2818&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c4d4ec8b-1e43-4bf7-941e-8d81612cb71e&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c4d4ec8b-1e43-4bf7-941e-8d81612cb71e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "398", + "name": "ObjectDetectionPipeline", + "description": "

                                        This is a simple pipeline wrapping the object detection model. The underlying object detection model in this pipeline is a public image.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cd303086-6599-41cf-b89b-66f31f7c4f44&revisionId=0d4d73db-e069-447f-949f-2eb1bc9e98e5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=cd303086-6599-41cf-b89b-66f31f7c4f44&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/cd303086-6599-41cf-b89b-66f31f7c4f44/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "399", + "name": "Hubeau_Hydro_Observations", + "description": "

                                        Example of solution to retrieve French hydrology observations data using the \"Grpc hydro hubeau\" component.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d95fa687-97d9-45b4-bda6-cadddebb6343&revisionId=1ee16b73-9874-413d-ba66-33502c2bb689&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d95fa687-97d9-45b4-bda6-cadddebb6343&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d95fa687-97d9-45b4-bda6-cadddebb6343/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "400", + "name": "audio-pipeline", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&revisionId=8b7d0433-56c8-48bf-8654-3ac87eb630e9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "4.1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&version=4.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/df1f1286-0071-4df8-afd7-fe5dd20f9cd4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "401", + "name": "audio-pipeline", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&revisionId=8b7d0433-56c8-48bf-8654-3ac87eb630e9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "2.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&version=2.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/df1f1286-0071-4df8-afd7-fe5dd20f9cd4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "402", + "name": "audio-pipeline", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&revisionId=8b7d0433-56c8-48bf-8654-3ac87eb630e9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "3.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&version=3.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/df1f1286-0071-4df8-afd7-fe5dd20f9cd4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "403", + "name": "audio-pipeline", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&revisionId=8b7d0433-56c8-48bf-8654-3ac87eb630e9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "5.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&version=5.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/df1f1286-0071-4df8-afd7-fe5dd20f9cd4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "404", + "name": "audio-pipeline", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&revisionId=8b7d0433-56c8-48bf-8654-3ac87eb630e9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "4.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&version=4.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/df1f1286-0071-4df8-afd7-fe5dd20f9cd4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "405", + "name": "MusicDetectionPL", + "description": "

                                        This pipeline is designed to use the MusicDetection model for the analysis of single audio files that are provided by file upload. Results of the MusicDetection are provided via WebUI.

                                        Since MusicDetection model is not publicly accessible, for the deployment of this pipeline it is necessary to acquire access credentials from the provider of the MusicDetection model. NB: Access can not be provided from the publisher of this pipeline.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eea265e1-f1b8-4f5d-8694-299b37fc3d0d&revisionId=a44f39bb-56b2-4d5e-b72c-f36cd24a9992&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eea265e1-f1b8-4f5d-8694-299b37fc3d0d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/eea265e1-f1b8-4f5d-8694-299b37fc3d0d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "406", + "name": "Hubeau_Hydro_Stations", + "description": "

                                        Example of solution to retrieve French hydrology stations data using the \"Grpc hydro hubeau\" component.

                                        This service makes it possible to query the stations in the French hydrometric reference system. A station can carry height and / or flow observations (directly measured or calculated from a rating curve).



                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f476f311-e38c-4c60-a550-605a8b7c5af0&revisionId=4ae0dfe8-95c8-47ae-877d-b9247a249e77&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f476f311-e38c-4c60-a550-605a8b7c5af0&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f476f311-e38c-4c60-a550-605a8b7c5af0/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "407", + "name": "ucrsuite-ed-pip", + "description": "

                                        Overview

                                        The UCR Suite ED pipeline ucrsuite-ed-pip implements fast nearest-neighbor retrieval under the Euclidean Distance (ED)

                                        ucrsuite-config data broker is a starting point for the pipeline that process files and parameters to perform subsequence search in time series. ucrsuite-ed calculates the nearest neighbor of a times series in a larger time series expressed as location and distance, using the UCR suite ED algorithm.


                                        Usage

                                        To use the ucrsuite-ed-pip solution, you can either download it from the Marketplace or run it on the Playground. Once the solution is deployed in the Playground, open the Web-UI of the ucrsuite-config model and enter the following information:

                                        * Data file: The path to the file containing the long time series.

                                        * Query file: The path to the file containing the query time series.

                                        Then, Run the solution. The distance calculation will start in the background.

                                        The result of calculation, expressed as location and distance, will be stored in the shared folder as a `ed_distance.txt` file.

                                        Detailed result also available in the logs of the ucrsuite-ed model in the following format:

                                        ----------------------------------------------------

                                        Location : 347236

                                        Distance : 7.03705

                                        Data Scanned : 1000000

                                        Total Execution Time : 1.029 sec

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f87058b4-a1f1-4e0e-a944-ece53adcf8b3&revisionId=20402b92-1b2e-4547-b1e0-e2866c439645&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f87058b4-a1f1-4e0e-a944-ece53adcf8b3&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f87058b4-a1f1-4e0e-a944-ece53adcf8b3/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "408", + "name": "RecognaizePipeline", + "description": "

                                        The RecognAIze pipeline coverts images to text including layout detection and table handling and consists of our microservices:

                                        Databroker with UI, Preprocessing, Segmentation and OCR.


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fca70f4f-d6b7-4fed-a98a-8800b7831ef8&revisionId=c7b3cfaf-7960-472b-91e3-03b930dca96a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.1.1", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fca70f4f-d6b7-4fed-a98a-8800b7831ef8&version=1.1.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/fca70f4f-d6b7-4fed-a98a-8800b7831ef8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + }, + { + "platform": "ai4experiments", + "platform_identifier": "409", + "name": "ai4eu-sec-pilot", + "description": "

                                        This simulation can detect threads in network traffic. To train the model connect the model with the training data interface from the databroker container. The train data are made with benign traffic and does not contain any fraud because the model should lern how benign traffic looks like.

                                        To predict traffic connect the prediction data output from the databroker container with the prediction interface. The traffic to predict includes benign and fraud traffic. The output will be a number between 0 and 1. You can set the threshold according to your data. The best threshold cna be found in the model validation folder insider the model container.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ff236ff3-f08e-40d1-9b76-a42f7e792b96&revisionId=bd6920a5-6998-470b-a4d0-cb0ed9ea73ec&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "0.0.2", + "pid": "", + "experimental_workflow": "", + "execution_settings": "", + "reproducibility_explanation": "", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [], + "application_area": [], + "badge": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ff236ff3-f08e-40d1-9b76-a42f7e792b96&version=0.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ff236ff3-f08e-40d1-9b76-a42f7e792b96/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "research_area": [], + "scientific_domain": [] + } ] \ No newline at end of file diff --git a/src/connectors/example/resources/resource/ml_models.json b/src/connectors/example/resources/resource/ml_models.json index 13ab2829..dcc88d91 100644 --- a/src/connectors/example/resources/resource/ml_models.json +++ b/src/connectors/example/resources/resource/ml_models.json @@ -1,90 +1,8086 @@ [ - { - "platform": "example", - "platform_identifier": "1", - "name": "The name of this resource", - "description": "A description.", - "same_as": "https://www.example.com/resource/this_resource", - "date_published": "2022-01-01T15:15:00.000", - "version": "1.1.0", - "pid": "https://doi.org/10.1000/182", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [ - "alias 1", - "alias 2" - ], - "application_area": [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" - ], - "citation": [], - "contact": [], - "creator": [], - "distribution": [ - { - "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "checksum_algorithm": "sha256", - "copyright": "2010-2020 Example Company. All rights reserved.", - "content_url": "https://www.example.com/model.zip", - "content_size_kb": 10000, - "date_published": "2022-01-01T15:15:00.000", - "description": "Description of this file.", - "encoding_format": "application/zip", - "name": "Name of this file.", - "technology_readiness_level": 1, - "installation_script": "./install.sh", - "installation": "Build the Dockerfile", - "installation_time_milliseconds": 100, - "deployment_script": "./run.sh", - "deployment": "You can run the run.py file using python3. See README.md for required arguments.", - "deployment_time_milliseconds": 100, - "os_requirement": "Windows 11.", - "dependency": "Python packages as listed in requirements.txt.", - "hardware_requirement": "4GB RAM; 100MB storage; 1GHz processor with 8 cores." - } - ], - "has_part": [], - "industrial_sector": [ - "Finance", - "eCommerce", - "Healthcare" - ], - "is_part_of": [], - "keyword": [ - "keyword1", - "keyword2" - ], - "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", - "media": [ - { - "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "checksum_algorithm": "sha256", - "copyright": "2010-2020 Example Company. All rights reserved.", - "content_url": "https://www.example.com/image.jpeg", - "content_size_kb": 10000, - "date_published": "2022-01-01T15:15:00.000", - "description": "Description of this file.", - "encoding_format": "image/jpeg", - "name": "Name of this file." - } - ], - "note": [ - "A brief record of points or ideas about this AI resource." - ], - "related_experiment": [], - "research_area": [ - "Explainable AI", - "Physical AI" - ], - "scientific_domain": [ - "Anomaly Detection", - "Voice Recognition", - "Computer Vision." - ], - "type": "Large Language Model" - } + { + "platform": "ai4experiments", + "platform_identifier": "1", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AIM4PS", + "description": "

                                        AIM4PS employs state-of-the-art AI methodologies for intaking and processing public procurement data, taking as a reference the specific production- and product-related information collected from manufacturing EISs.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0146cfdb-7853-48aa-b4b2-76183a3f3c14&revisionId=7c089fc1-a981-4c93-9137-dfef1bc19bd8&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0146cfdb-7853-48aa-b4b2-76183a3f3c14&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0146cfdb-7853-48aa-b4b2-76183a3f3c14/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "2", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AudioSpeechToTextGerman", + "description": "

                                        This model converts an audio segment to German text.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=015a16fd-8fea-495a-ae94-1fc92384d2b3&revisionId=0e5ad85f-29df-4d60-9b7d-178c1382abe0&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=015a16fd-8fea-495a-ae94-1fc92384d2b3&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/015a16fd-8fea-495a-ae94-1fc92384d2b3/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "3", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Doc2Answer", + "description": "

                                        The model implements 2 main tasks of the AI4EU call. It is able to parse and extract information from 2 type of INPS documents: \"O7\" and \"SocialCard\".

                                        The first type it locates cells and extract the content as text (i.e. numbers, dates).

                                        The second type locates stamps and classify them.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=01742dd8-cc32-4332-93ca-a181be3853e7&revisionId=d5cab0b1-4827-4b75-b270-8b11a2e08b99&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=01742dd8-cc32-4332-93ca-a181be3853e7&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/01742dd8-cc32-4332-93ca-a181be3853e7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "4", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "CODE_V2", + "description": "

                                        The main objective of the challenge is to develop an method for automatic classification of clinical narratives to ICD-10 codes.

                                        Our approach for semantic text classification has three core components: (1) Formalization of domain knowledge of medical information and techniques of semantic data fusion; (2) Multilingual NLP techniques for document preprocessing including all or some of: data cleaning, data normalization, data augmentation, transitive connections analysis, data balancing, expert\u2019s heuristics. For medical data based on our expertise on DOID, ICD-O, ICD-9, ICD-10, MESH, MONDO, UMLS, Orphanet, SNOMED classification, data augmentation including typos simulation and synonym replacement will be used; (3) Multilingual deep learning methods for supervised classification of disease into its corresponding class from the ICD-10. We are fine tuning pretrained BERT family models (bioBERT, clinicalBERT, MultilingualBERT, PubMedBERT, etc.) with domain specific terminology for the target language. Additional corpora generated from public documents and linked open data is used for fine-tuning of the deep learning classification model for the specific ICD-10 classification.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=01d95f4f-3bb4-4807-b6af-eb2d35d352cf&revisionId=2dc164ec-b92a-4413-a78e-70efc6643bc5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=01d95f4f-3bb4-4807-b6af-eb2d35d352cf&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/01d95f4f-3bb4-4807-b6af-eb2d35d352cf/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "10", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "i-nergy-load-forecasting-nbeats", + "description": "

                                        This is a time series forecasting service for predicting of the Portuguese aggregated electricity load series (15-min resolution, 24hr forecasting horizon). This service is based on an NBEATS model trained in the context of I-NERGY project. The model has been trained on the Portuguese timeseries from 2013 to 2019 validated on year 2020 and tested on 2021 with Mean Absolute Percentage Error (MAPE) = 2.35%. No time covariates or external variables have been included in the model. The lookback window of the model is 10 days. The model can be used to produce forecasts for periods from 2022 and later for Portugal. Other transmission system operators may use it as well, however expecting lower performance in general. No external variables have been considered. Please keep in mind that the effects of the pandemic on national loads can negatively affect the model\u2019s performance. For more information please go to ReadME.md in the Documents section.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0440778a-74e8-4d7f-950f-e6e1ce6bc29e&revisionId=3622c8ba-999d-4ce3-b711-b2bf4b43fa88&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0440778a-74e8-4d7f-950f-e6e1ce6bc29e&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0440778a-74e8-4d7f-950f-e6e1ce6bc29e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "14", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "road-damage-detector", + "description": "

                                        # AI4EU Pluto two-stage detector


                                        The model is a two stage detector based on [YOLOv5](https://github.com/ultralytics/yolov5).


                                        The object detector will detect objects of the following classes:

                                         - Rutting

                                         - Pothole

                                         - Manhole

                                         - Gully

                                         - EdgeDeterioration

                                         - Cracking


                                        The second stage classifier will, for `Potholes`, also classify the depth as 1 of 4 discrete values:


                                         - lt2

                                         - 2to5

                                         - 5to10

                                         - gt10



                                        # Example client


                                        ```python

                                        import os

                                        import grpc

                                        import model_pb2

                                        import model_pb2_grpc


                                        ## Setup

                                        port_addr = 'localhost:8061'


                                        # open a gRPC channel

                                        channel_opt = [('grpc.max_send_message_length', 512 * 1024 * 1024),

                                                ('grpc.max_receive_message_length', 512 * 1024 * 1024)]

                                        channel = grpc.insecure_channel(port_addr, options = channel_opt)

                                        stub = model_pb2_grpc.PredictStub(channel)



                                        ## Make prediction

                                        filepath = \"assets/test.png\"


                                        with open(filepath, 'rb') as f:

                                          content = f.read()


                                        responsePrediction = stub.make_prediction(requestPrediction)



                                        ## Interpret result

                                        for annotation in responsePrediction.annotations:

                                         print(f\"Detections: {annotation}\")


                                        ```


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=075252b1-3ff7-424d-ab6d-19ca2d90f0f0&revisionId=8297b2b4-2260-42ec-bb89-072918b7c843&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=075252b1-3ff7-424d-ab6d-19ca2d90f0f0&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/075252b1-3ff7-424d-ab6d-19ca2d90f0f0/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "18", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "iOCR", + "description": "


                                        iOCR can easily convert scanned or photographed documents into digital text using its underlying Deep Learning technologies in order to automatically localize and recognize the text inside of these images.

                                        With our innovative product you will reduce the amount of effort required to digitize your data as iOCR ensures the data is not lost and correctly digitized. The need for specialized scanners or high manual effort will decrease as iOCR aims to improve and scale with your business returning the costs required for this kind of effort back to you, offering you more opportunities to extend your company.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=08be83e3-f261-428d-846a-99f2fb0d46fb&revisionId=e74c2c19-130d-451f-a095-86c01e6739a6&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=08be83e3-f261-428d-846a-99f2fb0d46fb&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/08be83e3-f261-428d-846a-99f2fb0d46fb/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "19", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Text2ImageSearch", + "description": "

                                        This model implements a text-to-image search engine: it searches images in a publicly available database (MIRFlickr100K) using natural language sentences as a query.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=090281fe-4884-4ff8-80e1-fb87a41aa327&revisionId=cbe08f0a-9266-498a-a4ca-ab4f1edf5462&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=090281fe-4884-4ff8-80e1-fb87a41aa327&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/090281fe-4884-4ff8-80e1-fb87a41aa327/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "28", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AWDrugsModel", + "description": "






                                        The first draft of the drug decision support system (ANN model) determines a status of candidate drug molecules as approved or withdrawn categories by means of molecular descriptors.  The dataset has 44 features for analyzing the drugs and contains 220 drugs having 110 approved and 110 withdrawn drugs. We calculated molecular descriptors (760 descriptors) for all molecules in the drug datasets and selected the most effective attributes (44 features) to reduce the dimensionality of data on the drug dataset.




                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fd660e7-7a8c-4616-98af-75a866065b40&revisionId=1c0d6691-fc28-4fd4-bb27-8ad6c3b69bf6&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0fd660e7-7a8c-4616-98af-75a866065b40&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/0fd660e7-7a8c-4616-98af-75a866065b40/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "32", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "FH_SWF_SAT", + "description": "

                                        Documentation of the concept for Reinforcement learning based machine tool control

                                        The following description is part of the submitted solution approach of the AI4EU Challenge and explains the interrelationships of the submitted documents. As part of the challenge, sample data was generated, which are similar to the described input and output data. Some of the approaches developed contained detailed explanations and implementations as well as secret solutions that were re-elaborated as pseudo-code. If our solution is among the finalists, the secret solutions will be explained further.

                                        Structuring the system solution as Docker container

                                        An important aspect of the challenge is modularity and flexibility. For this reason, the developed solution approach is implemented as Docker container. The developed solution is connected via port 8061 with 8 inputs (float - machine parameter) and generates 1 output (float - threshold). The designed solution based on an artificial intelligence reinforcement learner. The developed solution is a reinforcement agent. These generates on the basis of the trained knowledge an action (threshold) which is given as parameter to the environment (rib and surface machine). From the environment the current reward (KPI) and state (8 Inputs) are feedback to the agent (developed solution).

                                        Included documents in the Docker container

                                        For the realisation of the solution approach different python files and data protocols are realised. An overview of the generated files can be seen in the following listing.

                                        actor.pth - data.csv - network.py - README.md - define_threshold.py - license.jason - model.proto - model_pb2.py - model_pb2_grpc.py - requirements.txt - threshold_genera", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=108c1bd1-a0f7-4ada-8d39-a72b1b56fe2e&revisionId=fc31a182-5bfd-48fc-b5ea-a55034a70c41&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=108c1bd1-a0f7-4ada-8d39-a72b1b56fe2e&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/108c1bd1-a0f7-4ada-8d39-a72b1b56fe2e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "35", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "DSS4TB-IMECH", + "description": "

                                        A modification on the module \"AI REGIO DSS4TB\" an intelligent troubleshooting system that able to identify the component that is most probably faulty after a series of closed-ended questions answered by the operator.

                                        The system works on a probabilistic model that selects the most suitable question to ask the operator on the basis of:

                                        1. Information matrix established by an expert
                                        2. Previous answers
                                        3. Description given by the user (interpreted by the NLP-IMECH module)

                                        Operator knowledge is made available to the algorithm in the form of csv files that contain dynamic information matrices that are updated after every troubleshooting session. The use of these files means the system can quickly be adapted to a different contexts by simply that switching out the information matrix.

                                        Responding to the questions asked with YES, NO or DON'T KNOW the operator can quickly arrive at the identification of the fault. The system demonstrates a level of resilience in its ability to arrive at the correct diagnosis despite a some errors and uncertainty in the answers given.

                                        The module is intended for use in conjunction with the following AI4EU modules:

                                        1. NLP-IMECH
                                        2. AudioFileBroker
                                        3. ConvertAudioToTextEng
                                        4. FileViewer
                                        5. SharedFolderProvider
                                        6. 4 x FileUploadDataBroker

                                        Overview:

                                        The NewsTrainer module facilitates the training process by specifying the classifier node with the required hyperparameters. The number of epochs, batch size, validation ratio and model filename are the different parameters available in the web-UI.

                                        Repository link:

                                        Please refer the following link for the code that represents the trainer module in the Eclipse Graphene platform - https://gitlab.eclipse.org/eclipse/graphene/tutorials/-/tree/main/news_training/trainer

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=13416c8e-ae15-488a-b1f3-db33b799eb1a&revisionId=cda82f21-469f-4101-a82f-d1c34b819b74&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=13416c8e-ae15-488a-b1f3-db33b799eb1a&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/13416c8e-ae15-488a-b1f3-db33b799eb1a/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "37", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Fraunhofer-uncertainty-metrics-for-classification-tasks", + "description": "

                                        Uncertainty Metric for Classification tasks


                                        Implements uncertainty estimation metrics for classification tasks.

                                        Input

                                        The input to the metric computation module is a prediction from multiple forward passes of Monte Carlo Dropout or the models in an ensemble. The prediction is expected as a single data point, so the shape is N x C where N is the number of forward passes, and C is the number of classes.

                                        Metrics

                                        The metrics used to quantify uncertainty in the predictions are entropy, mutual information and variance.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=13f5a196-0775-4730-88a0-a62f911ddb3a&revisionId=a549ad83-c0b9-48cb-a43e-0c5be7f4f9fd&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=13f5a196-0775-4730-88a0-a62f911ddb3a&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/13f5a196-0775-4730-88a0-a62f911ddb3a/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "43", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "tensorflow-iris-model", + "description": "

                                        Classify Iris Blossoms with a tensorflow model

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=15a3f457-401e-466e-9b85-1e25d8ae0b69&revisionId=42f38ede-7feb-4ebe-ba7c-2a6912aad332&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=15a3f457-401e-466e-9b85-1e25d8ae0b69&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/15a3f457-401e-466e-9b85-1e25d8ae0b69/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "45", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "INERGY_Cold_Decision", + "description": "

                                        This service is based on a decision support system (DSS) implemented in context of I-NERGY project. The overall vision of I-NERGY is to promote AI in the energy sector by delivering:

                                        • An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.
                                        • Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.

                                        This is a DSS service for for help in the decision on which energy source (for cold generation) use in a Spanish Hospital in hourly basis. The data was provided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital complex have a district heating network. The layout of this district heating network is a ring system composed by two independent rings for heating and cooling. This ring just provides energy for heating and Domestic Hot Water (DHW).

                                        Apart from being a district heating network, this system is complex due to the different production sources used for heating and cooling. In this facility heat, cold and steam are produced by using different sources.

                                        For more information on how to use the service, please see Documents section.

                                        The project leading to this service has received funding from the European Union\u2019s Horizon 2020 research and innovation programme under grant agreement No 101016508

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=169c308d-3451-4bb9-9fe1-84316863c18b&revisionId=68550ad2-0036-4e2d-a29c-99dc940cb235&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=169c308d-3451-4bb9-9fe1-84316863c18b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/169c308d-3451-4bb9-9fe1-84316863c18b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "46", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "i-nergy-load-forecasting-ren-hourly-lstm-2018-2019", + "description": "

                                        This is a forecasting service for predicting the aggregated hourly net electrical load of the Portuguese transmission system operator (REN). The core of the service is a totally recurrent LSTM deep neural network. The model has been trained on the REN load time series for the years 2018 and 2019 (except December 2019). The service is served as a docker container and a client script is also provided to help the user form their inference requests. The model is totally configurable in terms of:

                                        1. Provided ground truth data points: The client can update the existing model with the desired length of new data points that have been observed. The provided input should follow the format of the csv file history_sample.csv.
                                        2. Forecast horizons: The client can request a forecast horizon of their preference. It should be noted that large forecast horizons lead to worse results due to the error propagation caused by the LSTM recurrence.

                                        This model has been developed within I-NERGY EU project.


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=16d39167-1650-487a-ab25-29eee8eb838f&revisionId=b2c1b964-aab1-4002-bbe7-d4d5ae438e61&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=16d39167-1650-487a-ab25-29eee8eb838f&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/16d39167-1650-487a-ab25-29eee8eb838f/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "48", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AI4agriNDVI", + "description": "

                                        AI4AGRI model for correcting NDVI information from satellite images

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=178e0fdf-05ec-42ad-9e0a-da5f147de7fd&revisionId=af75387e-635b-46d1-a442-a47b993b061b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=178e0fdf-05ec-42ad-9e0a-da5f147de7fd&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/178e0fdf-05ec-42ad-9e0a-da5f147de7fd/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "49", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "SAPI_wheat_model_v0", + "description": "

                                        SAPI machine learning regression model based on satellite productivity maps is a powerful tool for predicting crop yields in agriculture. By utilizing advanced algorithms, this model analyzes data from satellite imagery to estimate the expected yield of wheat. The output from the model is predicted yield for particular parcel. The model learns from past data to establish patterns and relationships between the satellite imagery and crop yields. It then applies this knowledge to make predictions for the test parcel. This regression model provides a non-invasive and cost-effective method for yield prediction, as it eliminates the need for manual data collection or extensive field visits.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=195181e4-090f-45e9-91cc-5919718ad0d9&revisionId=ac253be9-81ee-43f2-8a24-79369b10a45c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=195181e4-090f-45e9-91cc-5919718ad0d9&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/195181e4-090f-45e9-91cc-5919718ad0d9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "51", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ObjectDetection", + "description": "

                                        Detection of physical objects in still images or videos


                                        The object detection mining service allows to detect one or more physical objects to be found in images and videos. 


                                        Input: Image file or video file. You can specify which frames are to be processed for a video.


                                        Output: A set of detected objects will be returned for the image or each processed frame. For each detected object an axially parallel bounding box, an object category and a rating are returned. The rating indicates the certainty of the model regarding the category of the identified object within a bounding box.

                                        In addition, an automatically generated ID is assigned to each detected object to allow the unambiguous identification of all detected objects in one media file. This ID has no relation to the category of the detected Object.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&revisionId=6efaddee-cb74-4995-a8c3-9bc8e3f9c29b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/1c97e098-d7c7-4fb5-83ca-a5202efc5e90/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "52", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ObjectDetection", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&revisionId=85536789-c619-4003-87c2-868e8971a597&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/1c97e098-d7c7-4fb5-83ca-a5202efc5e90/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "53", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ObjectDetection", + "description": "

                                        Detection of physical objects in still images or videos


                                        The object detection mining service allows to detect one or more physical objects to be found in images and videos. 


                                        Input: Image file or video file. You can specify which frames are to be processed for a video.


                                        Output: A set of detected objects will be returned for the image or each processed frame. For each detected object an axially parallel bounding box, an object category and a rating are returned. The rating indicates the certainty of the model regarding the category of the identified object within a bounding box.

                                        In addition, an automatically generated ID is assigned to each detected object to allow the unambiguous identification of all detected objects in one media file. This ID has no relation to the category of the detected Object.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&revisionId=f85ede77-a094-46e4-9147-fb9e595f2b91&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/1c97e098-d7c7-4fb5-83ca-a5202efc5e90/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "54", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "QRUL", + "description": "

                                        The model processes quality test data results and estimate the Remaining Useful Life (RUL) of a produced pump from the Pfeiffer company. The provided solution offers 2 classification techniques estimating whether a pump is going to fail in the first year of operation or not, or estimating the time range that the pump will fail.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1ee174ca-e7c4-405e-8137-27611cb0b6bc&revisionId=6dc27e5f-72b7-406e-a5fb-6db99737b816&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1ee174ca-e7c4-405e-8137-27611cb0b6bc&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/1ee174ca-e7c4-405e-8137-27611cb0b6bc/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "56", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "VideoSegmentation", + "description": "

                                        The Video Segmentation model splits the incoming video into scene segments

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=21459f4b-ed64-455b-93ae-5e345f046148&revisionId=9113a839-bfa1-470a-b4c2-7714be30a03c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=21459f4b-ed64-455b-93ae-5e345f046148&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/21459f4b-ed64-455b-93ae-5e345f046148/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "57", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "SAPI_maize_model_v0", + "description": "

                                        SAPI machine learning regression model based on satellite productivity maps is a powerful tool for predicting crop yields in agriculture. By utilizing advanced algorithms, this model analyzes data from satellite imagery to estimate the expected yield of maize. The output from the model is predicted yield for particular parcel. The model learns from past data to establish patterns and relationships between the satellite imagery and crop yields. It then applies this knowledge to make predictions for the test parcel. This regression model provides a non-invasive and cost-effective method for yield prediction, as it eliminates the need for manual data collection or extensive field visits.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=21a28a9d-bc8b-490e-85e5-e1452ad74e3e&revisionId=b11fdff7-5654-48de-bd4e-70d3f1131703&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=21a28a9d-bc8b-490e-85e5-e1452ad74e3e&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/21a28a9d-bc8b-490e-85e5-e1452ad74e3e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "60", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "polaris_mep_ai", + "description": "

                                        Polaris MEP AI module is an addon for Polaris MEP, an execution planner to optimize production plannings using restrictions-based solvers. The new AI module adds features to predict and forecast the inputs of the planner. So production demand and resource availability can be predicted with AI and optimized with OR. Regression methods Linear Regressi\u00f3n, Lasso, Gradient Boosting, Random Forest, and K-NN are included. Autoregressive methods ARIMA, SARIMA, VARMA, LSTM, and Fuzzy NN are included.



                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=24f4722f-9c82-489c-b9b0-359976eb792f&revisionId=76dbff09-04b5-4ec6-af32-8a3e82b60ded&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=24f4722f-9c82-489c-b9b0-359976eb792f&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/24f4722f-9c82-489c-b9b0-359976eb792f/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "61", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "dummy-environment-clarspy", + "description": "

                                        Dummy model for 1st Call for Solutions

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2532264a-b2aa-4cf4-8a90-8eb5f0546b9f&revisionId=558d248e-bd5d-4e53-a360-8bdc95dc8cc0&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2532264a-b2aa-4cf4-8a90-8eb5f0546b9f&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2532264a-b2aa-4cf4-8a90-8eb5f0546b9f/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "63", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AI_REGIO_CUSUM_RLS_filter", + "description": "

                                        CUSUM RLS filter contains a change detection algorithm for multiple sensors, using the Recursive Least Squares (RLS) and Cumulative Sum (CUSUM) methods [F. Gustafsson. Adaptive Filtering and Change Detection. John Willey & Sons, LTD 2000].

                                        As an AI resource the \u201cCUSUMRLSfilter\" asset is currently implemented as Open Source Solution whose main aim is to detect abrupt changes on the measurements recorded by a set of sensors.The asset was implemented as part of one of the experiment of the AI REGIO project, and subsequently adapted for general use.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=259afff9-66a4-47e7-b55c-4f19b2d75b8d&revisionId=f3b61e6d-904c-48ab-9930-72eedd3eb62c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=259afff9-66a4-47e7-b55c-4f19b2d75b8d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/259afff9-66a4-47e7-b55c-4f19b2d75b8d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "65", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ner-model", + "description": "

                                        This is the ner-model component of the ner-pipeline.


                                        Through the Web UI of the ner-model, you can access the results of the entity recognition task on a given text. The most recent result will show on top of the results. An entity is defined within \"|\", followed by its type and confidence score in round brackets.

                                        Make sure to run ner-pipeline, instead of ner-model as a standalone component. As ner-pipeline is successfully deployed, first submit the text via ner-databroker, then RUN the pipeline and go to the Web UI of the ner-model. You will see a list of processed texts, with the most recent provided text on top of the list.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=27e777bc-2968-427c-9df5-9f5593613475&revisionId=77f58af9-73d4-48b8-9237-7c6e1d3cdb97&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=27e777bc-2968-427c-9df5-9f5593613475&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/27e777bc-2968-427c-9df5-9f5593613475/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "68", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "cnext_decision_intelligence", + "description": "

                                        The published model is a result of the AI4EU challenge \u201cDecision Intelligence for Healthcare\u201d, and is focused on delivering data-driven decision support on the question \u201cwhat is the next step in handling patient test/diagnoses related to suspected COVID infection.   

                                        As part of this challenge, we needed to validate a Machine Learning Model \u2013 published on the AI4EU marketplace \u2013 using GRPC (protobuf) as inference endpoint and docker container image as packaging model could act as a decision brick and as such be plugged in into our Decision Intelligence Platform.


                                        More information about the solution can be found in the accompanying AI4EU_Cnext.pdf file.




                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2b3f75d9-a480-4589-9992-457b0863b7b5&revisionId=cb074874-ee6b-458c-a825-e5d129ca4635&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.6", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2b3f75d9-a480-4589-9992-457b0863b7b5&version=1.0.6", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2b3f75d9-a480-4589-9992-457b0863b7b5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "72", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Vibration_analysis", + "description": "

                                        This model allows the analysis of vibration of rotating machines. It is based on vibration measurements in the three spatial directions, on strategic measurement points: MDE (Motor driven end) and MNDE (Motor non driven end).  It allows to detect if a machine presents a faulty behaviour and to establish the cause of this problem and to evaluate its intensity on a scale from 1 to 3.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4&revisionId=42a20377-3b6f-41c5-88b2-76b07993aa0b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "73", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Vibration_analysis", + "description": "

                                        This model allows from acceleration mesurements in the three directions on the measurement points mde (motor driven end) and mnde (motor non driven end), to detect a machine malfunction and to establish its nature. The type of failure detected in this version are unbalance issue and bearing issue . Other types of failure will be supported in the next versions, stay tuned.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4&revisionId=533fbe3c-2b51-48ef-89bd-fe9ee96cf13a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "74", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "INERGY_Cold_Demand_Prediction", + "description": "

                                        This service is based on a Random Forest model implemented in context of I-NERGY project. The overall vision of I-NERGY is to promote AI in the energy sector by delivering:

                                        • An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.
                                        • Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.

                                        This is a forecasting service for predicting thermal load (cold energy) of a Spanish Hospital in hourly basis. The data was provided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital complex have a district heating network. The layout of this district heating network is a ring system composed by two independent rings for heating and cooling. This ring just provides energy for heating and Domestic Hot Water (DHW).

                                        Apart from being a district heating network, this system is complex due to the different production sources used for heating and cooling. In this facility heat, cold and steam are produced by using different sources.

                                        For more information on how to use the service, please see Documents section.

                                        The project leading to this service has received funding from the European Union\u2019s Horizon 2020 research and innovation programme under grant agreement No 101016508

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2ef3e3fb-afe9-422a-b037-88168d219a80&revisionId=8fc73f14-3456-4eda-af0a-68af28faada0&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2ef3e3fb-afe9-422a-b037-88168d219a80&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/2ef3e3fb-afe9-422a-b037-88168d219a80/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "81", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "O7_information_extractor", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&revisionId=00a8cf50-c886-440f-8326-2381b54f7778&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.4", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&version=1.0.4", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/31b07091-8a96-4caf-8149-5d8316c3b314/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "82", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "O7_information_extractor", + "description": "

                                        This model is implemented to extract O7 information from Italian social workers' cards.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&revisionId=b4d4ea0c-c723-4dca-9066-5af00f2d9133&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.5", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&version=1.0.5", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/31b07091-8a96-4caf-8149-5d8316c3b314/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "83", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "O7_information_extractor", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&revisionId=e5afb24c-c035-4853-9ede-7b4b6b5ef5c8&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/31b07091-8a96-4caf-8149-5d8316c3b314/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "84", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "advice-yolo", + "description": "

                                        advice-yolo is the implementation of YOLOv4 deep learning model. The model is already trained for detecting road defects

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3332868c-0248-4f2c-8401-1464faf56166&revisionId=3cc90b52-2567-4432-b6bb-6368ab68ad6f&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=3332868c-0248-4f2c-8401-1464faf56166&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/3332868c-0248-4f2c-8401-1464faf56166/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "85", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "advice-yolo", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3332868c-0248-4f2c-8401-1464faf56166&revisionId=ca4b9849-5e73-45d6-8e47-c512183f55cd&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=3332868c-0248-4f2c-8401-1464faf56166&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/3332868c-0248-4f2c-8401-1464faf56166/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "87", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Drug-Attrition-Oracle", + "description": "

                                        Drug Attrition Oracle is a deep neural network model, based on the chemical structure of the compounds, which can predict the probability of withdrawal from the market for compounds that have passed initial trials. The model provides an interpretable layer which can find chemical substructures that are most influential for making the prediction as well as additional drug and molecular properties which can influence the probability of withdrawal. The model takes as an input only the SMILES string of the molecule and outputs a conformal prediction whether the molecule is approved or withdrawn along with a confidence score. The explanation for a prediction is given using the GNN Explainer. To improve the GCN model predictions we trained additional graph neural network models for predicting molecular properties: Bioavailability, Clearance Hepatocyte, CYP2C9 Substrate and Toxicity (nr-ppar-gamma). These predictions are used with the base GCN model for predicting the withdrawal in an XGBoost model which uses SHAP values for interpretation.\ufeff

                                        Code is available on", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=33de7b45-cc1e-4ff4-b01a-7eb08c5859e9&revisionId=b8f10760-6b7d-4b6c-aea9-74a7851e2027&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=33de7b45-cc1e-4ff4-b01a-7eb08c5859e9&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/33de7b45-cc1e-4ff4-b01a-7eb08c5859e9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "91", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "innerpageanalysis", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3664c82c-39e1-4fd8-bf0a-ee7c7e745068&revisionId=288d9558-641d-4101-8a6c-548ce3acc69f&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=3664c82c-39e1-4fd8-bf0a-ee7c7e745068&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/3664c82c-39e1-4fd8-bf0a-ee7c7e745068/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "92", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "innerpageanalysis", + "description": "

                                        Advanced deep learning models are trained separately and applied for each type of information, and then put together in **Inner Page Analysis** pipeline. The pipeline extracts the information from historical data from Italian workers' social security cards.

                                        Analysis of stamps data and extraction of their key informations is the main goal of this project.

                                        input and output of this project will be like below:

                                        1. input is a full page of stamps in both raw scanned files or ordinary images in .png or .jpg format. file name will be like 11831_2b.

                                        2. output will be a .csv file that contains below information for each stamp as columns:

                                          * filename,ID,xb,stamp_id,stamp_class,price,face,color




                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3664c82c-39e1-4fd8-bf0a-ee7c7e745068&revisionId=b29ec7cf-9cdc-4cc3-9864-d2c607bab121&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=3664c82c-39e1-4fd8-bf0a-ee7c7e745068&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/3664c82c-39e1-4fd8-bf0a-ee7c7e745068/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "93", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "OHDSI_PLP_PILOT", + "description": "

                                        Pilot for Patient level Prediction for the AI4EU challenge.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=367469d8-cbd1-42c9-b3e9-ecd670e95ce8&revisionId=c2da9001-caf3-4594-9fe9-cccd84aa4181&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=367469d8-cbd1-42c9-b3e9-ecd670e95ce8&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/367469d8-cbd1-42c9-b3e9-ecd670e95ce8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "94", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "OpenWIDE", + "description": "

                                        OpenWIDE


                                        Trustworthy Detection of mouthes for automated swab robot.


                                        The service finds a mouth in the image securely by cascading 3 detectors, person->face->mouth and evaluate how open the mouth is.It will only give one mouth per image, which is the dominant mouth.The result is given as a DICT where the most relevant information is:


                                        1. mouthbox: \tbbox of mouth in format x1,y1,x2,y2
                                        2. facebox: \t\tbbox of face in format x1,y1,x2,y2
                                        3. personbox:\tbbox of person in format x1,y1,x2,y2
                                        4. Score: \t\t\tCollective score of the three models
                                        5. Open: \t\t\tA measure of openness. >0.8 tends to be WIDE open.
                                        6. H: \t\t\t\tHow centered is the mouth horizontally. ~0 = looking straight into the camera.
                                        7. V: \t\t\t\tHow centered is the mouth vertically. ~-.3 = looking straight into the camera.


                                        Cloud host


                                        It is hosted as a RPC service in Azure

                                        * openwide.northeurope.azurecontainer.io:8061


                                        Dockerhub


                                        * dtivisionboxcloud/openwide:v1.1


                                        Test

                                        Included is a test image and a test script. 


                                        Just run :


                                        * python testRPCService.py


                                        and you should receive a dict with information about the mouth.


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=36ae858b-6486-46ae-8e8c-01d644b93d4d&revisionId=515a1a44-4ad1-4b29-b4f4-efadfa665dee&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=36ae858b-6486-46ae-8e8c-01d644b93d4d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/36ae858b-6486-46ae-8e8c-01d644b93d4d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "95", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "divis_pump_lifetime_classification", + "description": "

                                        The image provides a model for the classifiction on vacuum pumps into the categories \"short living\" (less than one year) and \"long living\". The data needed is specific to the format of a challenge owner of the AI4EU project.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=36e5b789-fdb8-4016-84d6-829423b58ffc&revisionId=ca6c26a5-9252-4fa0-81c3-aea31d26dca8&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=36e5b789-fdb8-4016-84d6-829423b58ffc&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/36e5b789-fdb8-4016-84d6-829423b58ffc/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "99", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "edm_aad_agent_node_cl", + "description": "

                                        EDM RL Controller predictions (Solution Provider: Artificialy SA)


                                        Reinforcement learning applied to Electrical discharge machining (EDM) control for the AI4EU project with Agie Charmilles SA. For in Depth instructions of how to use this model, please follow the README.pdf which is placed in the Documents tab.







                                        The solution consists of two nodes: `data_node` server which streams a DataFrame of observations (EDM machine states) read from the path provided by the client (`infile`); and an `agent_node` server which predicts control actions based on the agent / controller specified by the client. Output predictions are stored inside the `./data_predictions/` folder of the `agent_node` Docker container.


                                        To use this solution, please use the Docker container and the additional files (which are in the Documents tap of the model in the marketplace) from both the `data_node` and `agent_node`. They are both in the AI4EU platform market place named as `edm_aad_agent_node_cl` and `edm_aad_data_node_cl`:

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=414791ed-55f9-457d-b377-f790161e2cd6&revisionId=7622a8e4-d52f-4288-9bc6-88d64da6f7f6&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=414791ed-55f9-457d-b377-f790161e2cd6&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/414791ed-55f9-457d-b377-f790161e2cd6/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "101", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ICD-10-CM-classifier", + "description": "

                                        ICD-10-CM classifier


                                        The ICD-10-CM classifier is docker image containing two neural classifier models contained within a gRPC server that allows for classification of medical texts in Spanish or English.

                                        Fine-tuned on the CodiEsp dataset, the models for both languages are built upon the Bert architecture. The Spanish model achieves a 0.5980 MAP score across the test set of the CodiEsp-Diagnostic dataset, whereas the English version achieves a 0.5249 MAP score.

                                        This module may provide help for researchers or other data-science enthusiasts that are looking into building tools to automatically diagnose medical descriptions.


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4356534c-aec1-4271-8eda-f125cb08909b&revisionId=ee4f05c5-b86d-423c-b1d6-21b24b14be4d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4356534c-aec1-4271-8eda-f125cb08909b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/4356534c-aec1-4271-8eda-f125cb08909b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "102", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AudioSegmentation", + "description": "

                                        This model splits an audio file into segments like one speaker and removes silence.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4366dce4-cd87-4a51-bd39-2dbfe5fd5b6c&revisionId=4a4c3771-6c63-46b6-aad6-d5cf78e1a03f&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4366dce4-cd87-4a51-bd39-2dbfe5fd5b6c&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/4366dce4-cd87-4a51-bd39-2dbfe5fd5b6c/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "105", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "CODE", + "description": "

                                        The main objective of the challenge is to develop an method for automatic classification of clinical narratives to ICD-10 codes.

                                        Our approach for semantic text classification has three core components: (1) Formalization of domain knowledge of medical information and techniques of semantic data fusion; (2) Multilingual NLP techniques for document preprocessing including all or some of: data cleaning, data normalization, data augmentation, transitive connections analysis, data balancing, expert\u2019s heuristics. For medical data based on our expertise on DOID, ICD-O, ICD-9, ICD-10, MESH, MONDO, UMLS, Orphanet, SNOMED classification, data augmentation including typos simulation and synonym replacement will be used; (3) Multilingual deep learning methods for supervised classification of disease into its corresponding class from the ICD-10. We are fine tuning pretrained BERT family models (bioBERT, clinicalBERT, MultilingualBERT, PubMedBERT, etc.) with domain specific terminology for the target language. Additional corpora generated from public documents and linked open data is used for fine-tuning of the deep learning classification model for the specific ICD-10 classification.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=47920b57-7ab9-4abe-9881-f77d57144944&revisionId=6fdf671b-38d8-4995-b924-30ef638df116&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=47920b57-7ab9-4abe-9881-f77d57144944&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/47920b57-7ab9-4abe-9881-f77d57144944/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "106", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "aquila-ai-service", + "description": "

                                        The experiment aims to compare the design of an electronic product, represented by a CAD file, with the picture of a real artifact of the product.

                                        The proposed solution consists of two main phases. First, the system establishes a machine learning flow that utilizes a neural architecture to address the issue of component recognition (Object Detection) in panel images. Second, the system exploits Answer Set Programming (ASP) to compare the reconstructed scheme from the image with the original patterns to detect any misalignments or errors.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=48053444-5100-4476-a8c3-53db3108dcdb&revisionId=94d411e7-3383-47e5-a923-581e7a6f5a1f&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=48053444-5100-4476-a8c3-53db3108dcdb&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/48053444-5100-4476-a8c3-53db3108dcdb/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "108", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "CDSICD10", + "description": "

                                        1st Call for Solutions, ICD10 classification using medical trained BERT and QA. \u201cOur solution combines two different approaches: one to identify the relevant disease (ICD-10 category) and the other one to determine the subcategory (the digits after the period). 

                                        The \u201ccategory-classifier\u201d is based on Spanish BERT (BETO) fine-tuned on Spanish clinical text (CodiEsp corpus). 

                                        In order to determine the subcategories of each ICD-10 category, we will use a question-answering approach based on a structured version of the ICD-10 dictionary created be NER.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4af0b85d-6d3e-4678-a991-865366ce4152&revisionId=b7ed24a9-c8fa-42cf-8f72-58acbb6f9435&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4af0b85d-6d3e-4678-a991-865366ce4152&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/4af0b85d-6d3e-4678-a991-865366ce4152/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "110", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "aipanel_repurposing", + "description": "

                                        Goal:

                                        To design a model that allows repurposing of already approved drugs i.e., the model predicts if a drug can be used to fight another disease or protein target that relates to the disease.


                                        Approach:

                                        To achieve this goal, another Deep Convolutional Neural Network (D-CNN) has been implemented on molecular descriptors obtained for the drugs and Protein Descriptors obtained for targets, to develop a prediction model which predicts the IC50 value where IC50 refers to Half-maximal inhibitory concentration, the most widely used and informative measure of a drug's efficacy.


                                        To prepare the dataset, following drugs, targets and their combined activities were obtained from specific databases:

                                        1. 1651 Approved Drugs from CHEMBL Database with IC50 Bio-Activities
                                        2. 1975 Targets from CHEMBL Database


                                        Approx. 40000 activities were obtained for above mentioned drugs and targets, where the activities belonged to phase 4 studies. Phase 4 refers to the Stage where a drug is accepted since it shows desired results towards a specific Target. Around 53% of activities consis", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4c32c784-dd97-466c-b533-e4e8e541b80a&revisionId=fd42128d-cd93-4b30-89b7-4c1f756da6b2&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4c32c784-dd97-466c-b533-e4e8e541b80a&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/4c32c784-dd97-466c-b533-e4e8e541b80a/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "111", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Tag-my-outfit", + "description": "

                                        The Tag My Outfit service predicts the category and the attributes of a piece of clothing viewed in a given image. The prediction model is the Visual Semantic Attention Model (VSAM), and is supervised by automatic pose extraction creating a discriminative feature space. This particular classifier was trained with the open source DeepFashion dataset. For further detail see http://physicalai.isr.tecnico.ulisboa.pt/tagmyoutfit.html


                                        The model accepts an image as input and outputs the labels corresponding to category (e.g. dress), subcategory (Evening Dress) and attributes ( short, long sleeve, round neckline)

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4c8cf4f7-670c-4ee8-9c20-f0fff3dc2b1d&revisionId=bb44d189-da04-4eea-9d55-7d2b5518a3e3&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4c8cf4f7-670c-4ee8-9c20-f0fff3dc2b1d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/4c8cf4f7-670c-4ee8-9c20-f0fff3dc2b1d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "112", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Urban4Cast", + "description": "

                                        Docker Image for Parking Predictions. It allows you to obtain parking predictions, with various levels of spacial granularity. It uses gRPC and protobuf as interfaces to the developed model. Please see the README of the project in order to understand how to use it.


                                        The inputs of the model define the spacial granularity (None, Neighborhood, Street, Sensor). Apart from that, you can define the temporal granularity (15 minutes, 1 hour, 1 day) and how many steps in the future you want to predict. The results are the predictions for these steps, including the upper and lower bounds of the prediciton.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4d22b7a8-240f-4e3b-a359-018819d779b3&revisionId=09c477af-508f-4cdc-806e-ce0462ae07cd&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4d22b7a8-240f-4e3b-a359-018819d779b3&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/4d22b7a8-240f-4e3b-a359-018819d779b3/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "113", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AudioSpeakerRecognition", + "description": "

                                        This model add speaker recognition to audio mining pipelines.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4f57c704-10c2-43ec-93ae-d2183b3180f1&revisionId=374b55ac-3579-4ee1-8f7b-c1f6f5779e7e&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4f57c704-10c2-43ec-93ae-d2183b3180f1&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/4f57c704-10c2-43ec-93ae-d2183b3180f1/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "114", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Generic-CODE", + "description": "

                                        The proposed solution is based on fine-tuned with Spanish medical texts of the pre-trained BERT family language models (transformers clinicalBERT and multilingualBERT). The designed text-based classification service predicts ICD-10 codes for clinical text in Spanish for the Colorectal cancer (CRC) and associated diagnoses. The service output contains the ICD-10 \u201csubclassification\u201d (4 sign) codes that gives additional information about manifestation, severity and location of the injury or disease for a wider range of disease (4227) ICD-10 codes. The prediction models for ICD-10 codes are with high accuracy: clinicalBERT: 0.949 AUC ROC score and  multilingualBERT: 0.950 AUC ROC score. The service allows the user to switch between two models (clinicalBERT and multilingualBERT) and to set the parameter N for top N diagnoses according to the specific needs.

                                        This module implements fast nearest-neighbor retrieval of a times series in a larger time series expressed as location and distance using the UCR suite Euclidean Distance (ED) algorithm.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=50ebce0a-f91f-46eb-be32-b36574a1e068&revisionId=7b642559-fd32-41d5-ae18-753d03f5014a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=50ebce0a-f91f-46eb-be32-b36574a1e068&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/50ebce0a-f91f-46eb-be32-b36574a1e068/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "117", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "SmartRiver", + "description": "

                                        The Digital Twin solution for AI-driven hydropower energy forecasting

                                         

                                         

                                         

                                         

                                        River discharge rules energy production for Hydropower plants.

                                        Prediction of water resources for the next day, month, season, challenges every energy producer and trader.

                                        Such knowledge supports optimal energy production, avoiding wastes (underestimation) or empty reservoirs (overestimation). 


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=52471527-6ec1-4233-8c8e-e8d412b300b7&revisionId=7391c733-e008-4467-9965-c905c536ffba&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=52471527-6ec1-4233-8c8e-e8d412b300b7&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/52471527-6ec1-4233-8c8e-e8d412b300b7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "118", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Aquila", + "description": "

                                        The experiment aims to compare the design of an electronic product, represented by a CAD file, with the picture of a real artifact of the product.

                                        The proposed solution consists of two main phases. First, the system establishes a machine learning flow that utilizes a neural architecture to address the issue of component recognition (Object Detection) in panel images. Second, the system exploits Answer Set Programming (ASP) to compare the reconstructed scheme from the image with the original patterns to detect any misalignments or errors.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5357697a-114b-4649-8065-3c2108652ab3&revisionId=66f1c27a-797a-458e-9da2-c837e9e0402d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5357697a-114b-4649-8065-3c2108652ab3&version=1.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5357697a-114b-4649-8065-3c2108652ab3/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "120", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4opti", + "description": "

                                        This model is for production line prediction. More specifically based on the historical data the model is able to predict if the production will be late or on time.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=54c201d7-caf2-4803-8321-6d5ab1ecf2ea&revisionId=10aface4-cf1c-4123-84dc-f91746ef6232&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=54c201d7-caf2-4803-8321-6d5ab1ecf2ea&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/54c201d7-caf2-4803-8321-6d5ab1ecf2ea/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "121", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "aquila-webapp", + "description": "

                                        The experiment aims to compare the design of an electronic product, represented by a CAD file, with the picture of a real artifact of the product.

                                        The proposed solution consists of two main phases. First, the system establishes a machine learning flow that utilizes a neural architecture to address the issue of component recognition (Object Detection) in panel images. Second, the system exploits Answer Set Programming (ASP) to compare the reconstructed scheme from the image with the original patterns to detect any misalignments or errors.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5613118f-b66c-4cd7-b925-ea537d5a9c6c&revisionId=985597a7-a6e9-4a3f-a0b6-5fc0f90065c2&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5613118f-b66c-4cd7-b925-ea537d5a9c6c&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5613118f-b66c-4cd7-b925-ea537d5a9c6c/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "122", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "VideoShotDetection", + "description": "

                                        The shot detection system will detect the boundaries between video shots by detecting a change between visual scenes. 

                                        • Input: A video file. For a more accurate result, all frames need to be assessed. 
                                        • Output: Detection result will be a file where each row contains the start and the end frames of each shot in the video

                                        Model: The underlying model for the shot detection is a deep learning-based model called TransNetV2. This model has been trained on datasets with combination of real (15%) and synthetic (85%) shot transitions (cuts) created from two datasets IACC.3 and ClipShots.

                                        Evaluation: This model achieves the F1 score of 0.898 on TRECVID 2007 dataset. Annotations are provided by TRECVID and downloaded from their website. It appears that the ground truth annotations differ about 2 frames from the actual cuts. As a result, a tolerance of 2 frames is considered when applying the evaluation.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=56258e93-1bdf-4640-93f5-b3786e591acc&revisionId=91d5c71f-e984-4bb0-9c2b-aa2b15bea5e5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=56258e93-1bdf-4640-93f5-b3786e591acc&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/56258e93-1bdf-4640-93f5-b3786e591acc/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "125", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Idiap_BEAT_Face_Recognition_-_FaceNET", + "description": "

                                        A face recognition algorithm to compare one probe image against a set of template images.

                                        The images must be gray-scale and should contain the face region only.Internally, the images are resized to 160x160 pixels.

                                        This algorithm expects the pre-trained FaceNet model to be provided as input as well.

                                        The model can be downloaded from https://drive.google.com/file/d/0B5MzpY9kBtDVZ2RpVDYwWmxoSUkwhich was made available in https://github.com/davidsandberg/facenet/tree/b95c9c3290455cabc425dc3f9435650679a74c50

                                        Reference experiment on the BEAT platform is amohammadi/amohammadi/atnt_eigenfaces/1/atnt1.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5889ec5c-8f7b-44b0-bb6b-164a8fa98fd9&revisionId=09d2cbe8-7eeb-4214-8826-b4665f4ebb8c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5889ec5c-8f7b-44b0-bb6b-164a8fa98fd9&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5889ec5c-8f7b-44b0-bb6b-164a8fa98fd9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "127", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "RoadDefectsDetection", + "description": "

                                        The model detects common road defects as well as gullies and manhole covers. It is trained on image from the UK.

                                        Furthermore, it exposes a classfification model for pothole depths.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5a5ab3be-eddf-4956-829c-acb1934b7ead&revisionId=2a788999-6aec-4e2e-b1b6-30c9d1b39d78&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5a5ab3be-eddf-4956-829c-acb1934b7ead&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5a5ab3be-eddf-4956-829c-acb1934b7ead/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "129", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "cso", + "description": "

                                        AI-service to optimize stock management of components based on forecasting models and historical data analysis

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5c2fbf7d-4417-49da-8714-7e37b925d81b&revisionId=a8e9a9ea-aa80-40e7-91b3-fb2a0fdc1504&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.6", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5c2fbf7d-4417-49da-8714-7e37b925d81b&version=1.0.6", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/5c2fbf7d-4417-49da-8714-7e37b925d81b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "134", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "lexatexer-ai4hydro-proxy", + "description": "

                                        LexaTexer provides an Enterprise AI platform to support the energy value chain with prebuilt, configurable AI applications addressing CAPEX intense hydro assets like Pelton and Francis turbines and pumps. In this project we combine our Enterprise AI platform and existing operational data to model the remaining useful life (RUL) of Pelton turbines based on real-world operational and environmental data. Thus, increasing RUL, efficiency and availability significantly. AI4Hydro plans to extent the remaining useful life of hydro turbines by up to 30%.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=600e7b34-68eb-4cff-892a-42b77eb71fbb&revisionId=8abc36f4-23a4-44bf-9d79-ad18f2d65dc9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=600e7b34-68eb-4cff-892a-42b77eb71fbb&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/600e7b34-68eb-4cff-892a-42b77eb71fbb/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "135", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "mytestmodel", + "description": "

                                        Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=61134a6f-511f-4144-ba26-1ae017bffa36&revisionId=6c316365-742b-43d9-96e4-54d4aa962d48&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=61134a6f-511f-4144-ba26-1ae017bffa36&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/61134a6f-511f-4144-ba26-1ae017bffa36/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "137", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "coverpageanalysis", + "description": "
                                        Key information extraction from document images is of paramount importance in office automation. \nEach cover card includes many words that are not required to be extracted. To extract the crucial key information, this repository works in three-folds:\n\n1. Text detection with YOLOv5 \n2. Text recognition with TRBA \n3. Text recognition enhancement with natural language processing\n\nFor more information, feel free to contact info@cogniteye.com\n




                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6297165d-d2f9-4617-90c5-d6586d34c84a&revisionId=b301cf36-fb4e-46cf-9425-a6dd1495d58c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6297165d-d2f9-4617-90c5-d6586d34c84a&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6297165d-d2f9-4617-90c5-d6586d34c84a/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "139", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "flask-model", + "description": "

                                        The initial model

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=63a30a14-770e-43d1-a929-1e1f1759af69&revisionId=ddc8368d-6dda-42c6-985a-66b7551e970b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=63a30a14-770e-43d1-a929-1e1f1759af69&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/63a30a14-770e-43d1-a929-1e1f1759af69/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "140", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "pumplife-prediction", + "description": "

                                        This repository contains the implementation of a service that performs a prediction on the expected running time of a pump. The prediction is made using a series of parameters recorded during the pump's testing, that happens before the pump is sent to the customer.

                                        Model description

                                        A series of different models have been tested and evaluated during the model selection phase. A Random Forest resulted to be the best performing model across the validation set, and was thus implemented in the API in this repository.

                                        The input data is the csv file output of the test bench performed on the pumps. The csv should contain a specific set of parameters, that are listed in the Readme in this repository.

                                        The model classifies the expected running time of the pump into 5 classes:

                                        • [min,180] ~ \"< 6 months\",
                                        • (180,365] ~ \"6 months ~ 1 year\",
                                        • (365,730] ~ \"1 year ~ 2 years\",
                                        • (730,1e+03] ~ \"2 years ~ 3 years\",
                                        • (1e+03,max] ~ \"> 3 years\".

                                        The prediction output of the Random Forest is than binarized to obtain the classification between the two classes [< 1 year, > 1 year]. The final output of the model is one of this two classes.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=63bfe768-8f18-4265-89fc-18b77b10b4e5&revisionId=9358a7a6-141a-4b36-aabf-8e8ec6f3d6e9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=63bfe768-8f18-4265-89fc-18b77b10b4e5&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/63bfe768-8f18-4265-89fc-18b77b10b4e5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "141", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AI4EU-AgriCounting", + "description": "

                                        This model is part of the AI4EU Agriculture Pilot, where academia researchers, IT partners and smart agriculture companies showcase the opportunities of the AI4EU environment for unlikely stakeholders, like rural partners.

                                        Collectively, this consortium has produced a set of tools that exploit satellite image, UAVs technologies, robotics and the latest trends in IA to help manage and predict the quality and productivity of vineyards.

                                        This models deal with detection of cluster of grapes of a minimum quality and maturation in an image, informing of the visual metrics of the detected regions.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6436a5d2-81d6-440d-9703-25eeede9ca73&revisionId=650ef51a-7c3b-404f-98e5-c85f7c2e1a30&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6436a5d2-81d6-440d-9703-25eeede9ca73&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6436a5d2-81d6-440d-9703-25eeede9ca73/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "143", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "advice-road-crop", + "description": "

                                        advice-road-crop is a semantic segmentation model that detects the region of interest (ROI) of the image and crops this area to speed up the inference process. In the context of this project, the region of interest consists of the road 

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=64d59631-44f5-4179-9b2f-9b6b4fce0fff&revisionId=848cb306-75ee-4a5c-98c7-c9857b5f2afd&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=64d59631-44f5-4179-9b2f-9b6b4fce0fff&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/64d59631-44f5-4179-9b2f-9b6b4fce0fff/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "144", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "INERGY_Heat_Demand_Prediction", + "description": "

                                        This service is based on a Random Forest model implemented in context of I-NERGY project. The overall vision of I-NERGY is to promote AI in the energy sector by delivering:

                                        • An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.
                                        • Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.

                                        This is a forecasting service for predicting thermal load (heat demand) of a Spanish Hospital in hourly basis. The data was provided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital complex have a district heating network. The layout of this district heating network is a ring system composed by two independent rings for heating and cooling. This ring just provides energy for heating and Domestic Hot Water (DHW).

                                        Apart from being a district heating network, this system is complex due to the different production sources used for heating and cooling. In this facility heat, cold and steam are produced by using different sources.

                                        For more information on how to use the service, please see Documents section.

                                        The project leading to this service has received funding from the European Union\u2019s Horizon 2020 research and innovation programme under grant agreement No 101016508

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=64d9f84f-bd62-4da3-8571-756c79f9451e&revisionId=33554300-4673-481f-8203-3c37ec015440&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=64d9f84f-bd62-4da3-8571-756c79f9451e&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/64d9f84f-bd62-4da3-8571-756c79f9451e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "145", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "SpeechRecognition", + "description": "

                                        Speech recognition reliably translates spoken information into digital text.

                                        Main characteristics:

                                        • highly reliable speech recognition
                                        • robust against noise, e.g. in an industrial setting
                                        • can be combined with automatic speaker recognition
                                        • language models available for German and English
                                        • word and phoneme output to subsequent systems
                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=65f43abe-ea13-45d1-9078-ce7fbbcb0d07&revisionId=3057c3ee-99e6-42f8-b398-05290d643917&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=65f43abe-ea13-45d1-9078-ce7fbbcb0d07&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/65f43abe-ea13-45d1-9078-ce7fbbcb0d07/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "146", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "SpeechRecognition", + "description": "

                                        Speech recognition reliably translates spoken information into digital text.

                                        Main characteristics:

                                        • highly reliable speech recognition
                                        • robust against noise, e.g. in an industrial setting
                                        • can be combined with automatic speaker recognition
                                        • language models available for German and English
                                        • word and phoneme output to subsequent systems


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=65f43abe-ea13-45d1-9078-ce7fbbcb0d07&revisionId=9d6dadf1-ee95-4b9c-8f7b-ade96563bd64&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=65f43abe-ea13-45d1-9078-ce7fbbcb0d07&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/65f43abe-ea13-45d1-9078-ce7fbbcb0d07/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "147", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "rebase-model", + "description": "

                                        This is a LightGBM time-series forecasting model. LightGBM is a gradient boosting decision tree framework developed by Microsoft. It works by recursively partitioning the feature-space into hyperrectangles and utilising the mean (or median) of the target in the specific hyperrectangle as prediction. Every one step recursion is made to reduce the prediction errors of the previous model iteration. One of the advantages with LightGBM over other gradient boosting decision tree frameworks is its efficiency and the ability to predict quantile distributions. 

                                        The asset provides a user interface where you can upload a train set and a set to predict on. The prediction is then displayed in a chart and can be downloaded from the user-interface. It also exposes the rpc Predict() to be able to be called from another service. Here is a video demonstration. Please refer to this readme for more information about how to use and install.

                                        This project has received funding from the European Union's Horizon 2020 research and innovation programme within the framework of the I-NE", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6662fc35-2e6c-4f48-8e26-f7b677acbb62&revisionId=97313833-7e70-47b1-8524-139c2dc26a78&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6662fc35-2e6c-4f48-8e26-f7b677acbb62&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6662fc35-2e6c-4f48-8e26-f7b677acbb62/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "149", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "i-nergy-load-forecasting-lightgbm", + "description": "

                                        This is a forecasting service for predicting of the Portuguese aggregated electricity load time series (15-min resolution, 24hr forecasting horizon). This service is based on a LightGBM model implemented in the context of I-NERGY project. For more information on how to use the solution, please see README.pdf in the Documents section.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=693e5d71-2141-4078-9bf8-0b8b0a9d28fd&revisionId=dccbd07e-3522-4aca-a479-62581058c352&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=693e5d71-2141-4078-9bf8-0b8b0a9d28fd&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/693e5d71-2141-4078-9bf8-0b8b0a9d28fd/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "150", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "SSC-Demo", + "description": "

                                        Model for finding stamps in the image and determining their value.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=69e1bd04-c689-44e0-8cb6-e7c45ba4d5c6&revisionId=2fbe123c-09ac-4fdb-9af7-c610a541d709&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=69e1bd04-c689-44e0-8cb6-e7c45ba4d5c6&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/69e1bd04-c689-44e0-8cb6-e7c45ba4d5c6/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "154", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "covid_predict", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=0ea72cd0-290e-49ad-9800-16fd365980a7&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.5", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.5", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "155", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "covid_predict", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=1fa906b5-1a75-4834-9cda-35120d2aa458&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "156", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "covid_predict", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=4d52b360-8cbc-48d3-9741-f921efea9963&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "157", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "covid_predict", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=5b01c8e7-44df-4103-8348-e64133b1377e&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.3", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "158", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "covid_predict", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=63e5e93b-1e72-4ea3-8f8a-b375f9748e3f&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.4", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.4", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "159", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "covid_predict", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=6c56ef46-c70e-4ab7-ab49-c0e7ea856a60&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.7", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.7", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "160", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "covid_predict", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=79d17341-4421-4ad0-bc08-62d349621182&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.6", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.6", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "161", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "covid_predict", + "description": "

                                        Two trained Convolutional networks with capabilities to determine automatically if a patient has pneumonia based on computer tomography (CT) scans or x-ray images. The raining phase is hidden to end users. It is a constant process based on gathering open or anonymized clinical images.

                                        The end users will be supplied with a docker. The communication with which is based on grpc proto buffer. End users will supply a link to X-ray or CT image and will obtain diagnosis and it\u2019s probability.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=ba5f9197-f3dd-469c-ae3f-0fec081ac81a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.8", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.8", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "162", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "covid_predict", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=db665028-c6b1-4e4f-beef-bfcbd14597ec&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "165", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "rp4pl-classification", + "description": "

                                        rp4pl-classification (Reliable Prediction for Pump Lifetime) is a classification model used to predict pump failures within a year of installation. The model input is the final quality test data from the pump manufacturing process and the output is the failure prediction (whether the pump is predicted to fail within a year installation - yes - or to not fail within a year of installation - no). The model pipeline included data transformation and feature inference. Additionally, it includes a feature selection step to select the most relevant features from the input data.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6f0368f1-77c2-4bfe-b632-98ecd9c87bd9&revisionId=151771e8-422b-4a7b-9d87-8edbadfa6def&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6f0368f1-77c2-4bfe-b632-98ecd9c87bd9&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6f0368f1-77c2-4bfe-b632-98ecd9c87bd9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "166", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "rp4pl-classification", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6f0368f1-77c2-4bfe-b632-98ecd9c87bd9&revisionId=7352557e-d807-4ece-af4d-de5f3faa3956&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6f0368f1-77c2-4bfe-b632-98ecd9c87bd9&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6f0368f1-77c2-4bfe-b632-98ecd9c87bd9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "167", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "adios-apply", + "description": "

                                        I-NERGY - TTP1 - ADIOS APPLY MODEL


                                        Applies anomaly detection for electric power grids model to a full dataset. In this phase, we use the previously trained models to label the unknown alarms. Scikit-learn allows to save trained models to binary files on disk, so in this phase we first load our pretrained model and then we load also the one-hot encoder in case we are willing to use categorical data, or the text processing module if we want to use the text-based classification. Once the pre-trained model is loaded, it can be used to predict the labels of unknown alarms.

                                        AIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-apply-model

                                        Attribution

                                        This project has received funding from the European Union's Horizon 2020 research and innovation programme within the framework of the I-NERGY Project, funded under grant agreement No 101016508


                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6f212625-4d1c-4f13-9f0b-fcfcd6bca65c&revisionId=4888be04-de9c-48b3-b9b4-3e45102956f1&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6f212625-4d1c-4f13-9f0b-fcfcd6bca65c&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/6f212625-4d1c-4f13-9f0b-fcfcd6bca65c/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "169", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ML_Assistant_for_Vibration_Monitoring", + "description": "

                                        The models deployed to AI4EU experiment platform are based on the data provided by HAT Analytics as part of the AI4EU challenge entitled \"ML assistant for vibration monitoring\".


                                        Three models have been developed corresponding to three different asset types:

                                        1. Direct fans
                                        2. Feet-mounted fans
                                        3. Flange-mounted fans


                                        The measurements are gathered from different measurement points namely

                                        1. FAN: Fan casing
                                        2. MDE: Motor-Drive End
                                        3. MNDE: Motor-Non-Drive End

                                        Note that: Not all asset types provide data from all 3 measurement points.


                                        Measurements from each measurement point can be provided from three axes Axial (A), vertical (V), and Horizontal (H)

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7264d5a0-ee24-497a-853d-acdf6b8bdd51&revisionId=23318740-fcef-4e42-8f59-c56ab7b8e72f&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7264d5a0-ee24-497a-853d-acdf6b8bdd51&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7264d5a0-ee24-497a-853d-acdf6b8bdd51/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "170", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "aipanel_approvedvswithdrawn", + "description": "

                                        Goal:

                                        To design a model that is able to predict whether a drug compound is approved or has potential tendencies to be withdrawn.

                                        Approach:

                                        To achieve this goal, a Deep Convolutional Neural Network (D-CNN) has been implemented on molecular descriptors obtained for the drugs, to develop a 2-class predictive model where the classes are 0: Approved, 1: Withdrawn. 

                                        To prepare the dataset, following drugs were obtained from specific databases:

                                        1. 270 Withdrawn Drugs from Charite Database
                                        2. 2800 Approved Drugs from CHEMBL Database

                                        Due to the imbalanced ratio of withdrawn and approved drugs, certain steps were taken during data preparation to help the model learn a better representation from the dataset. These steps are discussed in the later slides.

                                        For the obtained Drugs, their SMILES were extracted from CHEMBL. SMILES are notations for describing the structure of chemical species using short ASCII strings. The SMILES were further used to extract 881 PUBCHEM Molecular Descriptors using PaDEL, a software to cal", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=72bbafe5-031c-4a8c-ad21-42d1388b00fd&revisionId=8b6967d7-fd07-4a8d-b6e6-f66ed2a360ad&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=72bbafe5-031c-4a8c-ad21-42d1388b00fd&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/72bbafe5-031c-4a8c-ad21-42d1388b00fd/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "171", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "TEK_THOR_SIMULATION", + "description": "

                                        AI4EU - THOR THOR solution consists in a Hybrid optimization solution to make the right decision on the amount spare parts in stock, considering past sales and forecasts. The purchase decision considers as input information current stock status, production needs, production forecast, sales forecast, variability Price of stock material and several restriction parameters.

                                        Cash-Flow Simulation. A probabilistic Monte Carlo simulator of cash-flow, taking into account existing datasets and forecasts.

                                        ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=731f99e5-8aef-4375-832f-8d5ababf21b3&revisionId=999f0664-c19c-4492-8520-cf467abc4b14&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=731f99e5-8aef-4375-832f-8d5ababf21b3&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/731f99e5-8aef-4375-832f-8d5ababf21b3/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "174", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "pddl-planners-ffi", + "description": "

                                        An ACUMOS component which, acting as a gRPC server, is able to call a number of PDDL action planners (ff, fd, popf and optic for now).

                                        Asset produced by the AIPlan4EU project.




                                        This project contains an ACUMOS component which, acting as a gRPC server, is able to call a number of PDDL action planners (ff, fd, popf and optic for now).

                                        This is more a proof of concept on how to integrate PDDL Planner within a docker made available for ACUMOS Hybrid Pipelines.

                                        If you want to run the server locally, each of these planners needs to be installed separately and have to be available in your PATH. Otherwise, you can use the Dockerize version (see Docker version on this page which contains all of them), still you will need the client.


                                        The supported planners for now are:


                                        • ff is pretty straighforward to install FF homepage
                                        • fd fast downward is easy to install too Fast Downward homepage
                                        • popf, I would not know, I grabbed the binary from the ROSPlan distribution (bad bad\u2026\u200b), but here is the POPF homepage
                                        • optic is a pain to install, the Cmake files are broken\u2026\u200b Check OPTIC homepage, you may find th", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=73a6170b-47a0-4f99-bf95-af01798f693b&revisionId=e72ada49-fffb-45d3-9ef9-9e2b749cbd19&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=73a6170b-47a0-4f99-bf95-af01798f693b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/73a6170b-47a0-4f99-bf95-af01798f693b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "178", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "trondheim-rl-agent", + "description": "

                                          SUMO/RL implements a pipeline with a traffic simulator of the city of Trondheim, Norway, and a reinforcement learning autonomous agent that learns and implements traffic control policies with the goal of minimizing the number of pollution peaks above a given threshold. Each component can be ran stand alone.

                                          This resource contains a trained Reinforcement Learning agent to interact with the 'trondheim-simulator' traffic simulator with the goal of reducing pollution peaks.

                                          For a more detailed description check the github repository of the resource: https://github.com/tsveiga/AI4EU-RL-Trondheim

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=78591f43-c83a-45bb-b5fe-1d79d15cfdde&revisionId=bf5bcfff-4c70-4ca3-bf20-0c6d88f352f7&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=78591f43-c83a-45bb-b5fe-1d79d15cfdde&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/78591f43-c83a-45bb-b5fe-1d79d15cfdde/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "179", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Molecule-Trainer", + "description": "

                                          Molecule Trainer is a modelling pipeline for optimization, training and deployment of models for molecular single prediction tasks. Molecule Trainer optimizes and trains a graph neural network based on Efficient Graph Convolution with fully connected layers at the end, which can produce accurate models with lower memory consumption and latency. As input it requires only a SMILES string of the molecules along with a binary or continuous target variable. The pipeline automatically checks if the task is classification or regression and optimizes the classification or regression metrics accordingly. Molecule Trainer offers methods for optimization, training and prediction. The description of these methods is given in the user guide.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7a343bda-ecb5-4c6d-8a17-88c8d9139f50&revisionId=1626f215-66ff-4dbe-b4a1-17e3f74b64c5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7a343bda-ecb5-4c6d-8a17-88c8d9139f50&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7a343bda-ecb5-4c6d-8a17-88c8d9139f50/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "182", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ucrsuite-dtw", + "description": "

                                          This module implements fast nearest-neighbor retrieval of a times series in a larger time series expressed as location and distance using the UCR suite Dynamic Time Wrapping (DTW) algorithm.


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7cc68464-54e3-4a57-9e36-afdd04af7b74&revisionId=aeafd55f-59f5-4191-a34a-16ad0f7433d6&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7cc68464-54e3-4a57-9e36-afdd04af7b74&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7cc68464-54e3-4a57-9e36-afdd04af7b74/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "185", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "atranscribe", + "description": "

                                          ATransCribe is a speech to text service. It uses whisper model for transcription. Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification.Also using its underlying deep learning technology it process soundclips and removes background noises etc. for better results.The app is developed and used for the H2020 project AI-PROFICIENT.








                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7ed5c850-a7a4-4f71-bf97-c07be436424f&revisionId=b5057270-26f1-49da-b650-610d88fd6df1&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7ed5c850-a7a4-4f71-bf97-c07be436424f&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/7ed5c850-a7a4-4f71-bf97-c07be436424f/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "190", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "entity_extractor", + "description": "

                                          Extracts personally identifiable information from documents of different formats. Entities detected include names, addresses, or faces.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=86b14065-b351-4e37-a394-a401a997c542&revisionId=fd34ef22-937c-4bec-9a02-f4af848e0c3b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=86b14065-b351-4e37-a394-a401a997c542&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/86b14065-b351-4e37-a394-a401a997c542/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "191", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "critical-part-classifier", + "description": "

                                          This is a composite pipeline consisting of the Tensorflow model created for critical part prediction along with a generic data broken block that is used to match the contents of the CSV input file to the expected input features of the model. Given a set of features that describe the production line characteristics or factory conditions, the model we have built predicts whether a particular component part is critical or not to the supply chain. The end goal is the optimization of the stock management.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=88e79675-8008-4b48-bbac-67e7b5c519ed&revisionId=f6e7ad03-637f-490e-babb-36eb7544cf59&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=88e79675-8008-4b48-bbac-67e7b5c519ed&version=0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/88e79675-8008-4b48-bbac-67e7b5c519ed/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "192", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4iot-calibration", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=047d727b-a7a2-43b3-bfca-c93cc1400095&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8949117e-d8a2-49a6-8bd8-359b3d5f1436/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "193", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4iot-calibration", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=14302825-5469-4de8-a0d1-105ff5b66388&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.3", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&version=1.0.3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8949117e-d8a2-49a6-8bd8-359b3d5f1436/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "194", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4iot-calibration", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=4738bd06-fe95-4a25-9a68-825f107ffa4d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8949117e-d8a2-49a6-8bd8-359b3d5f1436/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "195", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4iot-calibration", + "description": "

                                          The Calibration component is part of the AI4IoT Calibration pipeline. It includes a machine learning model that predicts the calibrated values of raw data coming from low-cost sensors, such that the output is as close as possible to reference values. The component is deployed with a pre-trained model and outputs the calibrated values for PM2.5 and PM10 measurements. Inputs are PM measurements from the sensor and meteorological data.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=4afa4cfa-ee5d-4ffa-b114-1f9f093a2ac6&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.4", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&version=1.0.4", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8949117e-d8a2-49a6-8bd8-359b3d5f1436/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "196", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4iot-calibration", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=672734a5-ce48-47fc-81d6-b06b923fa3eb&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8949117e-d8a2-49a6-8bd8-359b3d5f1436/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "201", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "SmartProc", + "description": "

                                          Based on the given and read-in data of a time series, the algorithm calculates a forecast of how the data will develop further in a freely definable time horizon. Trends are recognised and taken into account in the forecast, as are seasonalities and similar dependencies that are contained in the input data and are recognised by the algorithm. The algorithm can be used for all types of data where a forecast makes sense, such as sales figures for a product or parts requirements for purchasing from a supplier. It must be said, however, that extraordinary events such as corona or disasters cannot be predicted by any AI-based algorithm - and it is true that it is only a prediction that does not necessarily reflect reality. The readme.txt file contains an example of a client script that addresses the algorithm and displays the result of the algorithm in a browser.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8c424961-1218-492f-b041-2653a84817a4&revisionId=e4572dcc-8e52-4207-91f3-897f17cd7861&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8c424961-1218-492f-b041-2653a84817a4&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/8c424961-1218-492f-b041-2653a84817a4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "208", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "NLP-IMECH", + "description": "

                                          The module uses Natural Language Processing (cosine difference) to compare input text with a list of sentences contained in a csv file and returns the most similar description from the csv file along with its index in the csv file.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=93301148-af5f-4647-bd0c-51180d6d3688&revisionId=23be4e3a-e8e5-4066-b668-5590f78e5f20&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=93301148-af5f-4647-bd0c-51180d6d3688&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/93301148-af5f-4647-bd0c-51180d6d3688/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "211", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "IoTConceptExtraction", + "description": "

                                          We developed an AI-based tool that automatically extracts knowledge from IoT ontologies to support the construction of a unified ontology for Web of Things. The following technologies are used: W3C semantic web technologies (such as RDF, OWL, SPARQL, SKOS), Deep learning model (Word2vec) and unsupervised clustering algorithms (K-means).

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9629027c-1030-446d-80ad-dec86ddeadeb&revisionId=8daafca8-0c5d-4266-a25b-6c0aa4af0a79&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9629027c-1030-446d-80ad-dec86ddeadeb&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/9629027c-1030-446d-80ad-dec86ddeadeb/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "212", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "traffic-scene-segmentation-deeplab-xception65-cityscapes", + "description": "

                                          This module provides a semantic segmentation using the `xception65_cityscapes_trainfine` model from the tensorflow model zoo1.

                                          Here is the table of the cityscapes train classes with their id and their RGB color values used by the model and output of the module.

                                          | class name    |   ID |    R |    G |    B |\n| ------------- | ---: | ---: | ---: | ---: |\n| ROAD          |    0 |  128 |   64 |  128 |\n| SIDEWALK      |    1 |  244 |   35 |  232 |\n| BUILDING      |    2 |   70 |   70 |   70 |\n| WALL          |    3 |  102 |  102 |  156 |\n| FENCE         |    4 |  190 |  153 |  153 |\n| POLE          |    5 |  153 |  153 |  153 |\n| TRAFFIC LIGHT |    6 |  250 |  170 |   30 |\n| TRAFFIC SIGN  |    7 |  220 |  220 |    0 |\n| VEGETATION    |    8 |  107 |  142 |   35 |\n| TERRAIN       |    9 |  152 |  251 |  152 |\n| SKY           |   10 |   70 |  130 |  180 |\n| PERSON        |   11 |  220 |   20 |   60 |\n| RIDE",
                                          +    "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=98febe4e-ce6d-4f33-90b1-7a87c6c1638b&revisionId=7a8c8f00-2f8d-47dc-91e6-7f02536c2498&parentUrl=marketplace#md-model-detail-template",
                                          +    "date_published": "2023-09-01T15:15:00.000",
                                          +    "version": "1.0.0",
                                          +    "pid": "",
                                          +    "alternate_name": [],
                                          +    "application_area": [],
                                          +    "citation": [],
                                          +    "contact": [],
                                          +    "creator": [],
                                          +    "distribution": [],
                                          +    "has_part": [],
                                          +    "industrial_sector": [],
                                          +    "is_part_of": [],
                                          +    "keyword": [],
                                          +    "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=98febe4e-ce6d-4f33-90b1-7a87c6c1638b&version=1.0.0",
                                          +    "media": [
                                          +      {
                                          +        "checksum": "",
                                          +        "checksum_algorithm": "",
                                          +        "copyright": "",
                                          +        "content_url": "https://aiexp.ai4europe.eu/api/solutions/98febe4e-ce6d-4f33-90b1-7a87c6c1638b/picture",
                                          +        "content_size_kb": 0,
                                          +        "date_published": "2023-09-01T15:15:00.000",
                                          +        "description": "",
                                          +        "encoding_format": "",
                                          +        "name": ""
                                          +      }
                                          +    ],
                                          +    "note": [],
                                          +    "related_experiment": [],
                                          +    "research_area": [],
                                          +    "scientific_domain": [],
                                          +    "type": ""
                                          +  },
                                          +  {
                                          +    "platform": "ai4experiments",
                                          +    "platform_identifier": "216",
                                          +    "aiod_entry": {
                                          +      "editor": [],
                                          +      "status": "draft"
                                          +    },
                                          +    "name": "advice-converter-pipeline",
                                          +    "description": "

                                          In this pipeline, the label format converter node reads the annotations from the shared folder and converts from one standard format to another

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9c590181-fcdd-4f08-afdb-d00cc8ae094c&revisionId=e37153fb-c912-4fe8-a95c-8dbcd52b94e5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "st3", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9c590181-fcdd-4f08-afdb-d00cc8ae094c&version=st3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/9c590181-fcdd-4f08-afdb-d00cc8ae094c/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "218", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ithermai-quality-check-service", + "description": "

                                          This is an AI model for classification of normal and faulty products of the injection molding process. It uses RGBT camera frames as input and labels them as faulty and normal products.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9ddecdf3-9be6-4b4e-a74b-eccfe1c1a6e8&revisionId=21f1a7b0-3e82-492f-95bc-7b3e78d7cf36&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9ddecdf3-9be6-4b4e-a74b-eccfe1c1a6e8&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/9ddecdf3-9be6-4b4e-a74b-eccfe1c1a6e8/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "219", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "i-nergy-load-forecasting", + "description": "

                                          This is a forecasting service for predicting electrical load of a boiler room in a large District Heating Network in hourly basis.

                                          This service is based on a Seasonal ARIMA model implemented in context of I-NERGY project.

                                          For more information on how to use the solution, please see README.pdf in Documents section.







                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9fc0357c-2b50-4733-8225-44f78a9d5421&revisionId=ae6bd423-aa37-411f-a8f1-40aeb6b0bd4d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9fc0357c-2b50-4733-8225-44f78a9d5421&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/9fc0357c-2b50-4733-8225-44f78a9d5421/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "220", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "house-prices-databroker", + "description": "

                                          Databroker of the House Price Prediction Pipeline.

                                           The databroker is responsible for the transfer of house-price dataset to the model. The features are selected based on higher correlation coeffecient. It has a WebUI that can be used to feed new/ unseen input to the model that predicts the sales price of a house.

                                           Repository:

                                          Please refer the following link for the houseprice-prediction code in the Eclipse Graphene platform - https://gitlab.eclipse.org/eclipse/graphene/tutorials/-/tree/main/House_Price_Prediction


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a03e571c-634f-4da5-83cd-1cd069e304e0&revisionId=b577c72a-0f61-4d72-b04c-823ed54f4fa8&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a03e571c-634f-4da5-83cd-1cd069e304e0&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a03e571c-634f-4da5-83cd-1cd069e304e0/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "222", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "VideoObjectRecognition", + "description": "

                                          The video object recognition model detects and classifies objects in a video segment

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a2dd4a73-eae7-4c03-9e10-d07de158d040&revisionId=e2e04665-c00e-4363-9d29-837af49a370d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a2dd4a73-eae7-4c03-9e10-d07de158d040&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a2dd4a73-eae7-4c03-9e10-d07de158d040/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "223", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "keras-iris-model", + "description": "

                                          Classify Iris blossoms with a keras model

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a48fdedd-0ba3-49a4-befe-046467110a6e&revisionId=988e80a4-0629-48d4-8805-ce3cc7f71429&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a48fdedd-0ba3-49a4-befe-046467110a6e&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a48fdedd-0ba3-49a4-befe-046467110a6e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "228", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "predictive-maintenance", + "description": "

                                          Neural network trained in a Federated Learning way for predicting the failure of motors based on a set of features. The network is trained in an experiment in the DIH4AI project in a collaboration of the South Netherlands DIH and Fortiss. The federated learning process was executed on an International Data Spaces architecture with the whole process being recorded by the Evidencia plugin, of which the factsheet is uploaded as document.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a90b4145-51ec-4345-be5f-21d2c8e9a214&revisionId=c4624a34-affb-417b-b004-d30809697b49&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a90b4145-51ec-4345-be5f-21d2c8e9a214&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/a90b4145-51ec-4345-be5f-21d2c8e9a214/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "230", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Healthymity", + "description": "

                                          Complex natural language processing model based on cognitive linguistics and semi-supervised learning using neural networks. The model is used to predict ICD code through medical notes text. 

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=acb05f2a-d6ed-491d-9d70-bea6b8092ca9&revisionId=73b36c23-5849-4ac1-95f1-753070175bd3&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=acb05f2a-d6ed-491d-9d70-bea6b8092ca9&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/acb05f2a-d6ed-491d-9d70-bea6b8092ca9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "232", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "forWoT", + "description": "


                                          The model is built to preprocess digitized worker cards. The model crops the workercard in the image and performs morphological transformations to remove occulusions.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0&revisionId=88630572-3464-444b-9ed5-86bf4dde7c56&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "237", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "WorkerCard_Preprocessing", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0&revisionId=d0d8cf7e-7696-4d64-b064-744f26ba9f33&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "240", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "adios-train", + "description": "


                                          I-NERGY - TTP1 - ADIOS TRAIN MODEL

                                          Trains model for anomaly detection in power grid SCADA output. Given the alarm labelled set, which can be extended using the labelling system described above, we train a machine learning model to predict its category. The available alarms are randomly split in half, and the first part is used as a training set and the latter as a test set, on which we evaluate the performance.

                                          AIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-training-model

                                          Attribution

                                          This project has received funding from the European Union's Horizon 2020 research and innovation programme within the framework of the I-NERGY Project, funded under grant agreement No 101016508


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b5664ace-53a0-4739-bf3d-8f549091f871&revisionId=0010242a-25ea-4ba2-b3fd-46f938004671&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b5664ace-53a0-4739-bf3d-8f549091f871&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b5664ace-53a0-4739-bf3d-8f549091f871/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "241", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "house-prices-model", + "description": "

                                          Prediction model of the House Price Prediction Pipeline. 

                                          The houseprice-prediction model trains with the dataset from the databroker. Once trained, the model can then predict the sales price of houses for new unseen input data. It has a WebUI that displays the predicted sale price of the house for corresponding inputs from the user. 

                                          Repository link: 

                                          Please refer the following link for the houseprice-prediction code in the Eclipse Graphene platform - https://gitlab.eclipse.org/eclipse/graphene/tutorials/-/tree/main/House_Price_Prediction


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b59939e2-76ef-4d82-b869-e96b89e6e175&revisionId=ae1f9926-f865-4467-8d56-b5e9a33fb193&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b59939e2-76ef-4d82-b869-e96b89e6e175&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b59939e2-76ef-4d82-b869-e96b89e6e175/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "244", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "sentiment-analysis-databroker", + "description": "

                                          The model is the databroker for the Sentiment Analysis pipeline.

                                          It has a user interface(UI) that takes the query text from the user and connects to the prediction model. The results can then be viewed on the Prediction model's UI.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b6adb7b2-d8f6-47c6-9702-d8a16338a8e1&revisionId=86d03e8a-619f-4f79-8759-10566671f01d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b6adb7b2-d8f6-47c6-9702-d8a16338a8e1&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b6adb7b2-d8f6-47c6-9702-d8a16338a8e1/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "246", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "SwabAI", + "description": "

                                          Adaptive optimization model for Electric Discharge Machining.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b810ae05-a50e-4dd6-80ff-02384d56ca04&revisionId=257af2ec-9e0f-405d-852e-a6c7b8f73532&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b810ae05-a50e-4dd6-80ff-02384d56ca04&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b810ae05-a50e-4dd6-80ff-02384d56ca04/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "251", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "adios-label-extend", + "description": "

                                          I-NERGY - TTP1 - ADIOS LABEL EXTEND MODEL

                                          This model extends train model dataset labels using one hot encoding and closest distance matching. 

                                          The label extension mechanism uses similarity between alarms to associate each unknown alarm with its most similar known one. We pick a reduced portion of the overall dataset (50k alarms) to extend the training set. The features of the dataset are mainly string fields, except for the Priority file, which is numerical. The similarity between each two alarms is measured in terms of the number of different features that they present. 

                                          AIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-label-extend

                                          Attribution

                                          This project has received funding from the European Union's Horizon 2020 research and innovation programme within the framework of the I-NERGY Project, funded under grant agreement No 101016508

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b9748aa3-9340-4f27-a7b9-59cea5d80d3c&revisionId=4613434d-2ef5-4e60-9fb9-26382dafb97c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b9748aa3-9340-4f27-a7b9-59cea5d80d3c&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b9748aa3-9340-4f27-a7b9-59cea5d80d3c/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "252", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Multimodal_AI", + "description": "

                                          The proposed model is a draft solution for the challenge titled \"Enhancing Clinical AI workflow\". The model is based on multi-modality which takes in multi modal data features after translating, co-aligning and fusion. The main objective is to integrate the model into the clinical decision support system .

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b97a16cd-e475-4f5f-83e5-f1d042a3772a&revisionId=34816a52-7ba9-4890-8203-c0a6dd5fe270&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b97a16cd-e475-4f5f-83e5-f1d042a3772a&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/b97a16cd-e475-4f5f-83e5-f1d042a3772a/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "257", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "rp4pl-rul", + "description": "

                                          This model was developed part of the AI4EU program. RP4PL - RUL (Reliable Prediction for Pump Lifetime - Remaining useful lifetime) is used to predict the remaining useful lifetime for manufactured pumps. It takes as input final quality test data from the pump manufacturing process and outputs a lifetime prediction. The model pipeline contains data transformation and feature inference. It is constructed using a random forest regression algorithm, along with a feature selection step to reduce the set of features to a smaller subset.


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb45c963-205b-4d3b-aad4-9968dce77ee5&revisionId=cd27d33d-3a04-4cb1-be7c-b36522d0f8e1&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bb45c963-205b-4d3b-aad4-9968dce77ee5&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/bb45c963-205b-4d3b-aad4-9968dce77ee5/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "259", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "SWE_predictor", + "description": "

                                          A regression ML model that provides a Snow Water Equivalent indicator from Earth Observation data and data from climate models (ERA5) in river watersheds in the Alpine area. This model has been developed by Amigo srl (https://amigoclimate.com) for SnowPower, an innovative Software-as-a-Service to assist hydropower operators that is part of the I-NERGY 1st Open call. In particular, this model is at the core of the Snow module of SnowPower.Details about data input requirements and model performance are provided in the related entry in the AIOD Catalog (HERE).






                                          Image: flaticon.com

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=be8d1f46-c311-4578-a6cd-69dc8d3fa33b&revisionId=c310c554-9bfa-4146-9e21-6fff647f5abe&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=be8d1f46-c311-4578-a6cd-69dc8d3fa33b&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/be8d1f46-c311-4578-a6cd-69dc8d3fa33b/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "261", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4eu-kewot", + "description": "

                                          The main objective of this work is to deal with the semantic interoperability challenge, where several entities exist in cross-domain ontologies describing the same concept. Our main contributions can be summarized as follow:

                                          \u00b7      A thorough analysis of several ontologies belonging to two domains (city and mobility) was conducted. The ontological entities were enriched with Google embeddings and plotted in 2-dimensions, revealing concepts of high similarity, not only in terms of semantic but also of syntactic similarity.  

                                          \u00b7      An AI approach was followed in order to automatically extract the topics existing in ontologies of different domains. A detailed evaluation of the AI method was performed, showing qualitative and promising results. A visualization tool was deployed for easier exploration and contrast of the topics.

                                          \u00b7      A Search Mechanism was prepared which takes as input the detected (or any other provided) topics T and an ontology O and returns as output a concept o \\in O which is the most similar to a topic t \\in T

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c4cfb4bb-4ac6-4303-acd2-8eb3664c4138&revisionId=8676b4dc-21c3-4d65-b13b-8089ecbb33fc&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c4cfb4bb-4ac6-4303-acd2-8eb3664c4138&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c4cfb4bb-4ac6-4303-acd2-8eb3664c4138/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "262", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4eu-kewot", + "description": "

                                          The main objective of this work is to deal with the semantic interoperability challenge, where several entities exist in cross-domain ontologies describing the same concept. Our main contributions can be summarized as follow:

                                          \u00b7      A thorough analysis of several ontologies belonging to two domains (city and mobility) was conducted. The ontological entities were enriched with Google embeddings and plotted in 2-dimensions, revealing concepts of high similarity, not only in terms of semantic but also of syntactic similarity.  

                                          \u00b7      An AI approach was followed in order to automatically extract the topics existing in ontologies of different domains. A detailed evaluation of the AI method was performed, showing qualitative and promising results. A visualization tool was deployed for easier exploration and contrast of the topics.

                                          \u00b7      A Search Mechanism was prepared which takes as input the detected (or any other provided) topics T and an ontology O and returns as output a concept o \\in O which is the most similar to a topic t \\in T

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c4cfb4bb-4ac6-4303-acd2-8eb3664c4138&revisionId=d97c57a2-7f37-40ec-8ffd-b45f2f69c297&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c4cfb4bb-4ac6-4303-acd2-8eb3664c4138&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c4cfb4bb-4ac6-4303-acd2-8eb3664c4138/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "263", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "DAISY", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e&revisionId=004cf7bb-105e-48fb-9bf6-781fce08919c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "264", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "DAISY", + "description": "

                                          Combination of our expertise in vibration analysis with AI models that will contribute to the diagnosis of rotating machinery

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e&revisionId=ee78edad-6fa5-456f-8bd1-6cc82fcffb33&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "265", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "CODE-CRC", + "description": "

                                          The proposed solution is based on fine-tuned with Spanish medical texts of the pre-trained BERT family language models (transformers clinicalBERT and multilingualBERT). The designed text-based classification service predicts ICD-10 codes for clinical text in Spanish for the Colorectal cancer (CRC) and associated diagnoses. The service output contains the ICD-10 \u201ccategory\u201d (3 sign) codes that describe the basic manifestations of injury or sickness for 158 types of diseases related to CRC. The prediction models for ICD-10 codes are with high accuracy: clinicalBERT: 0.794 AUC ROC score and  multilingualBERT: 0.806 AUC ROC score. The service allows the user to switch between two models (clinicalBERT and multilingualBERT) and to set the parameter N for top N diagnoses according to the specific needs.

                                          This model extracts the topics from an audio segment.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c8b8a888-ae48-41d4-8476-f2ca6851daa7&revisionId=5eacd881-de83-42f4-bf5a-6ca728f4f082&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c8b8a888-ae48-41d4-8476-f2ca6851daa7&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c8b8a888-ae48-41d4-8476-f2ca6851daa7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "267", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "FaceAI", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=2624aea0-1d51-48c8-9043-e64a928267a5&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.3", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "268", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "FaceAI", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=6ede87b0-4f0c-4711-9aeb-9125806f7d7f&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "269", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "FaceAI", + "description": "

                                          This model provides a solution for swab robot finding the position of the mouth, considering the MDR safety regulations. The position finding alogithm is based on deep learning and AI.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=93e8df6b-6226-4674-9963-6d0aa6ddcc3c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "270", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "FaceAI", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=b3116c34-ce39-46c3-9cc7-119c13a85ebf&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "271", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "FaceAI", + "description": "






                                          This model provides a solution for swab robot finding the position of the mouth, considering the MDR safety regulations. The position finding alogithm is based on deep learning and AI.

                                          The docker image is based on Python 3.9 slim buster. Scikit-learn and pandas are installed.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=e0e410f4-12d8-4fec-9113-3b01be44ad62&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.4", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.4", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "274", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "iSolutions", + "description": "

                                          The proposed model is used detection of the teeth and the lips to identification position the mouth detects. The model is including a decision-making process for robots in a medical context.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cb4a33c5-a9a6-4432-bd49-10336956b6b0&revisionId=8e9f567a-8231-4f59-9aef-bfa8e6b79fc0&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=cb4a33c5-a9a6-4432-bd49-10336956b6b0&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/cb4a33c5-a9a6-4432-bd49-10336956b6b0/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "275", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "MusicDetection", + "description": "

                                          With the software tool for automatic detection of music in combination with detection of speech sequences, Fraunhofer IDMT offers a highly effective solution to determine the exact amount of music and speech in radio and TV programs. The tools can be used to optimize broadcasting programs or provide accurate accounting for copyright agencies.

                                          Less work: Using Fraunhofer IDMT\u2019s software tools, the amount of music and speech in radio and TV programs no longer needs to be determined by means of tedious manual work (typically personnel reading through audio content lists). The tool is able to detect and measure general audio categories (music, speech, music and speech combined, other content) both in live streams and in stored digital audio files.

                                          Easy integration: The tools are scalable and can easily be integrated with standard workflows and components. It can be used in production and live streaming environments, both online and offline.

                                          Easy data export: The tools easily integrate with content management systems. For data output, users may choose between XML files, cue sheets, or other standard data export formats.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cd6d1c1b-896e-4c62-9312-14416d5d411f&revisionId=b836fd7f-e5bf-4879-8d1a-c4ff5df393a9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=cd6d1c1b-896e-4c62-9312-14416d5d411f&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/cd6d1c1b-896e-4c62-9312-14416d5d411f/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "276", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Idiap_BEAT_Handwritten_Digit_Recognition_-_Multiclass_Logistic_Regressor_trained_on_M-NIST", + "description": "

                                          This algorithm contains a logistic regression model trained on the MNIST database.It takes as input images of digits and outputs the classification label of images.

                                          To test drive it, the MNIST data broker can be used.This model does not require any configuration and thus can be used as is.

                                          The reference experiment on the BEAT platform is amohammadi/amohammadi/mnist_simple/1/mnist1

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ce1b6792-889d-46cf-9529-3215802f729c&revisionId=eb3669aa-0889-42e9-a89f-7dab1b12baf1&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ce1b6792-889d-46cf-9529-3215802f729c&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ce1b6792-889d-46cf-9529-3215802f729c/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "280", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "news-classifier", + "description": "

                                          Overview:

                                          The classifier module is the core of the entire news training pipeline. It is responsible for the following activities,

                                          1.Training process: Upon receiving the training parameters from the trainer node, the classifier node starts the training process.

                                          2.Saving the trained models: Upon successful training, the models are saved in both the h5 and onnx format available in the shared folder.

                                          3.Classifying the results: The Reuters dataset newswires are labeled over 46 topics. The test sequences are thereupon classified based on these topics.

                                          Repository link:

                                          Please refer the following link for the code that represents the trainer module in the Eclipse Graphene platform - https://gitlab.eclipse.org/eclipse/graphene/tutorials/-/tree/main/news_training/classifier

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d05e44a5-d3e6-4730-b8ab-dd7a23fd52d4&revisionId=28719994-c987-4ce9-b88f-4f9d5e4129fc&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d05e44a5-d3e6-4730-b8ab-dd7a23fd52d4&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d05e44a5-d3e6-4730-b8ab-dd7a23fd52d4/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "281", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "viume-pic2text", + "description": "

                                          The proposed model will address the detection of stamps and named entities from the images. To do that, the structure is split into two main modules. 1)Extractor 2)Analyzer. As Extractor, different models based on convolutional and recurrent neural networks will be trained to detect the stamps, signatures, and text. As Analyzer,

                                          The trained NLP model will crop the document and use a custom trained model to extract the relevant information and all the relations inside the document. The extracted information from the document will be assigned with a unique ID and the corresponding columns will be filled with the extracted data.





                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d0882778-0ca2-4028-b90c-6c91da657817&revisionId=c2fe1abd-af13-4e90-a176-a44fdc5e4912&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d0882778-0ca2-4028-b90c-6c91da657817&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d0882778-0ca2-4028-b90c-6c91da657817/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "283", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AI_REGIO_DSS4TB", + "description": "

                                          This module is an intelligent troubleshooting system that is able to identify the component that is most probably damaged after a series of closed-ended questions answered by the operator. Such systems are built upon a given knowledge, which is an information matrix that represents the relationship between the possible symptoms and the failure components. The probability evolution is mainly based on the Bayes theorem which can elaborate the conditional probability. It consists of computing the likelihood of a general event occurrence based on the prior probability and the new information provided by each answer. More specifically, each answer allows updating the probability associated with each failure, based on which the next question will be selected.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d1ce6215-8102-4b46-b495-5907bea57ba1&revisionId=d43ac2fe-3d60-4198-a664-7eed1ef2d152&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d1ce6215-8102-4b46-b495-5907bea57ba1&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d1ce6215-8102-4b46-b495-5907bea57ba1/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "285", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "sentiment-analysis-model", + "description": "

                                          The model is part of the Sentiment Analysis pipeline.

                                          It analysis the sentiment of the query text sent by the databroker and returns the prediction. This prediction can also be viewed on the user interface.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d2cfc140-0d61-41fb-86ef-fbe2f192c4d2&revisionId=cfec1423-8627-4669-92a1-ca5497743b70&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d2cfc140-0d61-41fb-86ef-fbe2f192c4d2&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d2cfc140-0d61-41fb-86ef-fbe2f192c4d2/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "286", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "INERGY_Heat_Decision", + "description": "

                                          This service is based on a decision support system (DSS) implemented in context of I-NERGY project. The overall vision of I-NERGY is to promote AI in the energy sector by delivering:

                                          • An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.
                                          • Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.

                                          This is a DSS service for for help in the decision on which energy source (for heat generation) use in a Spanish Hospital in hourly basis. The data was provided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital complex have a district heating network. The layout of this district heating network is a ring system composed by two independent rings for heating and cooling. This ring just provides energy for heating and Domestic Hot Water (DHW).

                                          Apart from being a district heating network, this system is complex due to the different production sources used for heating and cooling. In this facility heat, cold and steam are produced by using different sources.

                                          For more information on how to use the service, please see Documents section.

                                          The project leading to this service has received funding from the European Union\u2019s Horizon 2020 research and innovation programme under grant agreement No 101016508

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d30263f9-9902-407d-b0c8-f389b541e98d&revisionId=97e3b739-a584-4b83-a25b-43e6a0bfaf39&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d30263f9-9902-407d-b0c8-f389b541e98d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d30263f9-9902-407d-b0c8-f389b541e98d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "288", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AudioSpeechToTextEnglish", + "description": "

                                          This model converts an audio segment to english text.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5418d91-8eda-42ff-9348-570e5ba0a110&revisionId=ef9a485f-d31d-4f1b-be03-205d112a6b59&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5418d91-8eda-42ff-9348-570e5ba0a110&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5418d91-8eda-42ff-9348-570e5ba0a110/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "293", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "PII_Detector", + "description": "

                                          PII Detector automatically detects personally identifiable information in unstructured files (documents or images).The face detection model analyzes an image file to find faces. The method returns a list of items, each of which contains the coordinates of a face that was detected in the file.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ffc67f-b5ef-42c6-a97b-238546af935a&revisionId=b4adbc99-9aec-4ec1-bb58-abbd40f5b75b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5ffc67f-b5ef-42c6-a97b-238546af935a&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5ffc67f-b5ef-42c6-a97b-238546af935a/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "295", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "openpose", + "description": "

                                          Openpose is a Real-time multi-person keypoint detection model for body, face, hands, and foot estimation, originally developed by CMU but includes several updates.

                                          Openpose-AI4EU is a component that uses an improved version (Mobilenet v2) and can be included in pipelines built with AI4EU experiments or can run standalone as a dockerized grpc service. For that we include test scripts. The component input is one image and outputs the parameters of all body keypoints detected (index of the skeleton keypoin, x and y coordinates in the image and the confidence score).

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d7e0ffc8-afcd-42a3-8d8a-01ea395d1303&revisionId=2beda89e-c87e-416c-980e-fe4908f8c87d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d7e0ffc8-afcd-42a3-8d8a-01ea395d1303&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d7e0ffc8-afcd-42a3-8d8a-01ea395d1303/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "296", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "TEK_THOR_OPTIMIZATION", + "description": "

                                          AI4EU - THOR THOR solution consists in a Hybrid optimization solution to make the right decision on the amount spare parts in stock, considering past sales and forecasts. The purchase decision considers as input information current stock status, production needs, production forecast, sales forecast, variability Price of stock material and several restriction parameters.

                                          Optimization model. EDA and Genetic search have been implemented to minimizing the total cost of spare parts procurement as well as covering cash-flow restrictions and production needs. This optimization provides as a result the procurement plan

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d8745d72-f4c0-49f6-8d20-514e8ad74f86&revisionId=644482dc-abd6-4805-b46a-4cd98192ae1c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d8745d72-f4c0-49f6-8d20-514e8ad74f86&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d8745d72-f4c0-49f6-8d20-514e8ad74f86/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "297", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Daisy_software", + "description": "

                                          Even though maintenance of rotating machines, such as motors, pumps and fans through Vibration Monitoring (VM) has been a proven process, it requires an experienced 3rd party service engineer to attend the vessel onboard for vibration data collection and onshore vibration analysis and machinery condition reporting and that attendance onboard in many cases is not feasible.

                                          To give a response to this problem, Daisy allows to apply AI to the large amount of mechanical vibration data of different assets, in order to build computational models that help in the classification and early detection of the faults that the rotating machinery of ships could have.

                                          With this software, the user can load vibration data, apply signal processing techniques and train machine learning (ML) models with no prior programming experience on artificial intelligence (AI) and signal processing knowledge.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d89b25d2-fcf8-48ae-9858-3f32cf047d8d&revisionId=ec81a5a2-0f51-4254-94a7-b80e92c6560a&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d89b25d2-fcf8-48ae-9858-3f32cf047d8d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/d89b25d2-fcf8-48ae-9858-3f32cf047d8d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "299", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Time-prediction-for-example-manufacturing", + "description": "

                                          This module provides a manufacturing time prediction for an example manufacturing process.

                                          The example manufacturing process is a conveyor belt machine, that takes a number of inputs to process. There are 2 processing stations and depending on the input item only either or both of these stations can be used to process the item.

                                          The model was trained on simulated data.

                                          Input

                                          The input is a list of products characterized by a type (3 different colors) and the time of entering the process.

                                          Model

                                          For the prediction a stacked LSTM model with spatial dropout is used.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&revisionId=6385bf2f-ef0a-4481-a13c-35ef3859a82e&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "300", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Time-prediction-for-example-manufacturing", + "description": "

                                          This module provides a manufacturing time prediction for an example manufacturing process.

                                          The example manufacturing process is a conveyor belt machine, that takes a number of inputs to process. There are 2 processing stations and depending on the input item only either or both of these stations can be used to process the item.

                                          The model was trained on simulated data.

                                          Input

                                          The input is a list of products characterized by a type (3 different colors) and the time of entering the process.

                                          Model

                                          For the prediction a stacked LSTM model with spatial dropout is used.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&revisionId=6faffcf2-e451-4973-b768-cfa4bf01469b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "301", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Time-prediction-for-example-manufacturing", + "description": "

                                          This module provides a manufacturing time prediction for an example manufacturing process.

                                          The example manufacturing process is a conveyor belt machine, that takes a number of inputs to process. There are 2 processing stations and depending on the input item only either or both of these stations can be used to process the item.

                                          The model was trained on simulated data.

                                          Input

                                          The input is a list of products characterized by a type (3 different colors) and the time of entering the process.

                                          Model

                                          For the prediction a stacked LSTM model with spatial dropout is used.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&revisionId=d42e8f33-0bb7-407b-b72d-9fde9a276bd7&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "303", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "lane-detector", + "description": "

                                          # AI4EU Pluto lane-detector

                                          The model runs a lane detector over an image.


                                          The image can be send as bytes, and the result will be the corresponding bytes for the keypoints detected along with the shape of the keypoints array. This helps reconstruct the multidimensional array from the bytes returned. The same for the `results` which is the original image overlayed with the detected keypoints, the `results_shape` provides the shape to reconstruct the array.


                                          ### Example


                                          ```python


                                          import grpc

                                          from PIL import Image

                                          import numpy as np


                                          start_ch = timer()

                                          port_addr = 'localhost:8061'


                                          # open a gRPC channel

                                          channel = grpc.insecure_channel(port_addr)


                                          filepath = \"assets/test.png\"


                                          with open(filepath, 'rb') as f:

                                            content = f.read()


                                          requestPrediction = model_pb2.Features(img=content)


                                          responsePrediction = stub.make_prediction(requestPrediction)


                                          print('The prediction is :', responsePrediction.results)



                                          # Recreate image:


                                          img_shape = [*responsePrediction.results_shape]

                                          np_img = np.frombuffer(responsePrediction.results, dtype=np.uint8).reshape(img_shape)


                                          image = Image.fromarray(np_img).convert('RGB'))

                                          ```


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dc34b9b5-3990-41fb-93b7-1a56cf1016cc&revisionId=23c1693f-08f7-4175-9b72-f2d999b24a98&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=dc34b9b5-3990-41fb-93b7-1a56cf1016cc&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/dc34b9b5-3990-41fb-93b7-1a56cf1016cc/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "305", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4eu-competences", + "description": "

                                          This tool computes the match between text and concepts from ESCO based on the ESCO model itself and the FastText computation model. Trustworthy is ensured in part by these models and their developers. Given a free text description, and the weight parameters, the service produces a set of matches that represent the corresponding ESCO competence (text and URI) and the similarity measure.


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dc67374a-0a1c-4477-86b2-9db8f0a1faed&revisionId=977872e8-b343-4fa4-b5fe-31afc77c9e05&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=dc67374a-0a1c-4477-86b2-9db8f0a1faed&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/dc67374a-0a1c-4477-86b2-9db8f0a1faed/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "306", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "medical_notes_classification", + "description": "

                                          Our solution is a NLP classification model fine-tuned on Spanish free text (medical notes) to predict IDC-10 codes. We will start from a transformer model trained on a Spanish corpus, such as BETO, and fine-tune it on general Spanish medical corpus (research paper or anonymized data delivered by Amadix and its partners), with pre-training tasks such as Masked Language Modeling. We will then fine-tune it on free-text data provided by AMADIX (medical notes) in order to predict the target ICD-10 codes.

                                          We will also create a prediction explanation module to our product, in order for the end user to be able to understand the model prediction by visualizing the words in the input free text that push the model toward the predicted ICD-10 code. In order to do that, we will use SHAP values, which have demonstrated their performance for such tasks.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dd0d9853-2060-44d3-94c7-208a0423609d&revisionId=19a88d46-b9df-47e2-bb53-38ac4fe02eec&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=dd0d9853-2060-44d3-94c7-208a0423609d&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/dd0d9853-2060-44d3-94c7-208a0423609d/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "307", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AudioPunctuationEnglish", + "description": "

                                          This model add english punctuation to an audio segment.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ddf8de5f-5391-48be-a457-bce86757f8ba&revisionId=1846bb25-f697-4091-ba13-79f0ebb3147c&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ddf8de5f-5391-48be-a457-bce86757f8ba&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ddf8de5f-5391-48be-a457-bce86757f8ba/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "308", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AI4IMS", + "description": "

                                          AI-based Inventory Management System, AI4IMS, integrates simulation, optimization and search algorithms in an advanced inventory management approach for an adaptive and dynamic response.

                                          Firstly, we acquire and cleanse the required data to obtain a reliable dataset including product prices and demand forecasting. As a result of the forecasting, the uncertainty associated with material resources prices and demand is also characterized.

                                          Secondly, we capture the production plant and procurement system in a simulation environment.

                                          Thirdly, a direct randomized sampling method generates alternative scenarios for handling the uncertainty characterized during the forecasting step.

                                          Next, a simulation-based optimization system finds an improved procurement policy within the solution space.

                                          Finally, a variability analysis generates alternative solutions, which are provided for decision-maker support.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df50bc0b-e499-4249-b468-b94c0a1cf9fc&revisionId=ee9a1418-2876-414f-982f-84960e811a6d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df50bc0b-e499-4249-b468-b94c0a1cf9fc&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/df50bc0b-e499-4249-b468-b94c0a1cf9fc/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "311", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AI-Panel", + "description": "

                                          The following notebook shows a sample model highlighting a preliminary step taken towards supporting pharmaceutical & nutraceutical drug discovery based on qualitative compound properties and customer requirements. The goal is to create a sophisticated predictive model capable of providing suggestions/predictions regarding compounds that have specific therapeutic advantages as well as their interaction with other compounds. The current model utilizes an exemplary dataset that contains for each substance/compound, a set of quantitative features describing the compound's efficacy. It is envisaged that the dataset will comprise multi-modal features such as physiochemical parameters, drug status, regulatory & safety data, and company-internal data. This numeric, textual, and image data is extracted and consolidated from open access chemical dataspaces/databases. This diversity of data will facilitate the design of a predictive model that filters drugs and related compounds based on product development and customer needs.

                                          This is a Tensorflow model created for critical part prediction. Given a set of features that describe the production line characteristics or factory conditions, the model we have built predicts whether a particular component part is critical or not to the supply chain. The end goal is the optimization of the stock management.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e1a73166-03a3-4b93-a785-28d0591d7271&revisionId=3ef45d82-30f5-4f98-b9c8-44afe80b44a9&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e1a73166-03a3-4b93-a785-28d0591d7271&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e1a73166-03a3-4b93-a785-28d0591d7271/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "314", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "EntityRecognizer", + "description": "

                                          The entity recognizer is a deep learning-based solution that takes a text document as input and returns a list of instances of pre-defined entities (Person, Location, Organization, Miscellaneous). 


                                          It uses bidirectional LSTM networks to generate informative word representations that capture the contextual dependencies between words in a sentence. Additionally, a CRF layer is added on top for a higher tagging accuracy. The models have been built using FlairNLP, a PyTorch-based NLP framework. 


                                          This tool includes a multilingual NER model supporting English, German and Dutch.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&revisionId=41df686d-9fa3-4104-996f-fa926332adbb&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e3794e16-0225-4bf1-a99c-b99638a22232/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "315", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "EntityRecognizer", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&revisionId=7220ac2a-a908-46df-a58d-bad87bbbad23&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e3794e16-0225-4bf1-a99c-b99638a22232/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "316", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "EntityRecognizer", + "description": "

                                          The entity recognizer is a deep learning-based solution that takes a text document as input and returns a list of instances of pre-defined entities (Person, Location, Organization, Miscellaneous). 


                                          It uses bidirectional LSTM networks to generate informative word representations that capture the contextual dependencies between words in a sentence. Additionally, a CRF layer is added on top for a higher tagging accuracy. The models have been built using FlairNLP, a PyTorch-based NLP framework. 


                                          This tool includes a multilingual NER model supporting English, German and Dutch. 

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&revisionId=f7447500-0c8d-4ca7-be7e-24ce3fefd144&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e3794e16-0225-4bf1-a99c-b99638a22232/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "317", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Explanations4AdultClassification", + "description": "

                                          This tool provides predictions and explanations for the classification of instances of Adult Census dataset. The explanation method is called LionForests, while the prediction is based on a random forests model. The corresponding paper supporting this technique can be found here: http://ceur-ws.org/Vol-2659/mollas.pdf in Proceedings of the First International Workshop on New Foundations for Human-Centered AI (NeHuAI) co-located with 24th European Conference on Artificial Intelligence (ECAI 2020).

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4208fe5-3b5c-4fe0-9cff-c28b828db530&revisionId=5d31e250-36f3-4033-9ab9-17a9213f96ae&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e4208fe5-3b5c-4fe0-9cff-c28b828db530&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e4208fe5-3b5c-4fe0-9cff-c28b828db530/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "322", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "edm-agent", + "description": "

                                          EDM agent component is an RL based agent that controls the EDM environment (AI4EU component edm-env) based on the observed voltage and frequencies. It is based on the PPO algorithm and was trained using the `train.py` script that is available in the github repository of the component: https://github.com/threethirds/edm.

                                          This component has a user interface via 8062 port which can be used to run a small demo control scenario. It also has a protobuf API via 8061 port in order to connect to the EDM environment.


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&revisionId=05ea3f80-92b1-4ffc-b1ab-1b3bb38cee7b&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e50055a9-56ad-478a-9f57-c6553202f2a9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "323", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "edm-agent", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&revisionId=459c7f89-264e-4321-8b5c-c9ab7e9dab3d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e50055a9-56ad-478a-9f57-c6553202f2a9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "324", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "edm-agent", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&revisionId=731df380-b367-4945-a59f-5b145e8c6de1&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e50055a9-56ad-478a-9f57-c6553202f2a9/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "333", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4eu-security-pilot-model", + "description": "

                                          This container provides a model for Thread Prediction in Network Traffic.

                                          Therefore, this container can detect malicious traffic.


                                          This container can be trained with the training interface and predict traffic with the prediction interface.

                                          This container provides two inputs and one output.

                                          The training input is to provide training data. You can connect this input with the ai4eu-security-databroker training output. After starting the training the data will be transfered to train the model.

                                          The second input is the prediction input. You can connect this input with the ai4eu-security-databroker prediction output. After starting the model you can see the prediction results in the prediction output. There, you get a number between 0 and 1. According to your data you have to set a threshold to specify if the data are fraud or benign. The threshold can be found in the evaluation container of the model.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e8c82055-1afc-444c-9c21-3d64ea601b28&revisionId=0b99f79d-5e7c-4b0f-850f-bae2b6e710ce&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e8c82055-1afc-444c-9c21-3d64ea601b28&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/e8c82055-1afc-444c-9c21-3d64ea601b28/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "336", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "TransformersGraphAlgorithmsAgainstColonCancer", + "description": "

                                          Automatic classification of ICD-10 codes from free text and medical records based on BERT model. The application of NLP (textual information extraction) tasks in combination with other numerical biomarkers will involve that the model will improve in accuracy and a greater number of cancer patients will be detected earlier, improving the future healthcare system.

                                          Moreover, the automatic identification or classification in ICD-10 codes from free text not only helps to improve the predictive model but also avoids the manual assigning codes that is expensive, time consuming and error prone.


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=0859679b-e1ba-4d89-8093-2313212216af&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.1", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.1", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "337", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "TransformersGraphAlgorithmsAgainstColonCancer", + "description": "

                                          Automatic classification of ICD-10 codes from medical records based on Transformers. The application of NLP tasks in combination with other numerical biomarkers will involve that the model will improve in accuracy and a greater number of cancer patients will be detected earlier, improving the future healthcare system.

                                          Moreover, the automatic identification of ICD-10 codes from free text not only helps to improve the predictive model but also avoids the manual assigning codes that is expensive, time consuming and error prone.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=50c071a6-aba1-4f90-97a3-2ab1108a0d22&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.5", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.5", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "338", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "TransformersGraphAlgorithmsAgainstColonCancer", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=6bef6d39-cb13-4a6d-b1c3-dc6df07cff05&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "339", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "TransformersGraphAlgorithmsAgainstColonCancer", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=7fce648b-ae3c-4514-84f1-d599a2b9e54d&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.3", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.3", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "340", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "TransformersGraphAlgorithmsAgainstColonCancer", + "description": "

                                          Automatic classification of ICD-10 codes from free text and medical records based on BERT model. The application of NLP (textual information extraction) tasks in combination with other numerical biomarkers will involve that the model will improve in accuracy and a greater number of cancer patients will be detected earlier, improving the future healthcare system.

                                          Moreover, the automatic identification or classification in ICD-10 codes from free text not only helps to improve the predictive model but also avoids the manual assigning codes that is expensive, time consuming and error prone.


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=94e9caf4-0568-4125-b2e2-03872507d1d0&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "341", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "TransformersGraphAlgorithmsAgainstColonCancer", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=f85a7339-8bf4-442f-82fa-36eee8214057&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.4", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.4", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "344", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "TEK_THOR_FORECAST", + "description": "

                                          AI4EU - THOR THOR solution consists in a Hybrid optimization solution to make the right decision on the amount spare parts in stock, considering past sales and forecasts. The purchase decision considers as input information current stock status, production needs, production forecast, sales forecast, variability Price of stock material and several restriction parameters.

                                          Forecast. An auto-adjustable predictive model forecasts the short-term expected sales of end products as well as the expected price evolution of spared parts.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ebcb6fba-d8f4-4010-a6b2-8386040c9030&revisionId=afc31a74-dcad-4b4e-a691-b31750478365&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.4", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ebcb6fba-d8f4-4010-a6b2-8386040c9030&version=1.0.4", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ebcb6fba-d8f4-4010-a6b2-8386040c9030/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "345", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "Idiap_BEAT_Face_Recognition_-_Eigenfaces_trained_on_ATNT", + "description": "

                                          A biometrics algorithm that compares a probe image to a set of template images and outputs a comparison score.

                                          This algorithm was trained on the ATNT database and reproduces the EigenFaces face recognition baseline.

                                          The input images must be gray-scale and of the size of 92x92. The training data comes from the BOB atnt database package.

                                          Reference experiment on the BEAT platform is amohammadi/amohammadi/atnt_eigenfaces/1/atnt1.




                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ec640afb-9d7a-499d-977c-bceb435acff7&revisionId=2043f0e1-b332-499d-8472-c946faccd8c2&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ec640afb-9d7a-499d-977c-bceb435acff7&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ec640afb-9d7a-499d-977c-bceb435acff7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "346", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AudioPunctuationGerman", + "description": "

                                          This model adds German punctuation to an audio mining pipeline.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ed1a8947-f102-4786-9dbb-412568317a3f&revisionId=ace9dada-2a60-4530-b264-f4edb8511ca8&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ed1a8947-f102-4786-9dbb-412568317a3f&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/ed1a8947-f102-4786-9dbb-412568317a3f/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "350", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "atcrecognize", + "description": "

                                          Atcrecognize extracts text from images that contain label tags. Using its underlying deep learning technology, atcrecognize enhances the image, removes the unnecessary parts of the image, and feeds into the ocr model that extracts the text with more precision. The app is developed and used for the H2020 project AI-PROFICIENT.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f24a9c17-5f4f-4bec-b0f5-5fd20e4669a7&revisionId=af42ba9b-ec9e-4f37-8a46-e581c9f3d811&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f24a9c17-5f4f-4bec-b0f5-5fd20e4669a7&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f24a9c17-5f4f-4bec-b0f5-5fd20e4669a7/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "353", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "pomdp-ir", + "description": "

                                          Partially Observable Markov Decision Processes with Information Rewards (POMDP-IR) is a framework to compute policies for autonomous agents with the goal of gathering information about particular features on the environment. SymbolicPerseus-IR extends one of the most knowns POMDP solvers to include Information Rewards. It lets you compute and test policies for a given input environment model.


                                          Check the github repository of the resource for a more detailed overview: https://github.com/tsveiga/ai4eu-pomdp-ir

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f7fc6fdd-3bc0-4443-a28d-dc08109d0ffd&revisionId=d5743c46-5b96-4d8a-90be-fdeb5e248f45&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f7fc6fdd-3bc0-4443-a28d-dc08109d0ffd&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/f7fc6fdd-3bc0-4443-a28d-dc08109d0ffd/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "361", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AI4Agri-qualitypredictor", + "description": "", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fb06bc01-1ac9-4a7b-bcdc-cae78e970796&revisionId=6f5ef2c7-3b29-46e9-881b-7adf1191df62&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.2", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fb06bc01-1ac9-4a7b-bcdc-cae78e970796&version=1.0.2", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/fb06bc01-1ac9-4a7b-bcdc-cae78e970796/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "362", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "AI4Agri-qualitypredictor", + "description": "

                                          This component generates a set of models in order to predict one grape yield and three different grape quality indicators related to the AI4EU agriculture pilot.




                                          To do that, the current component connects to the AI4EU agriculture pilot Knowledge graph and retrieves all the required data (according to the dates and parcel information provided in the prediction request and the target variable requested) to generate different models that will be evaluated and used to provide the best prediction possible.


                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fb06bc01-1ac9-4a7b-bcdc-cae78e970796&revisionId=d34ae15a-d648-4c34-ae31-7f5ca2abc7a2&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fb06bc01-1ac9-4a7b-bcdc-cae78e970796&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/fb06bc01-1ac9-4a7b-bcdc-cae78e970796/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "364", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "ai4eu-lexatexer-pump-rul", + "description": "

                                          Provides access to a REST API which consumes a pumps quality assurance data and delivers failure probabilities and MTTF densities.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fbbe4dff-5eaa-4171-b15a-d8035a79a035&revisionId=7bcfcec0-10e7-4c4f-af17-be65b435c5b3&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fbbe4dff-5eaa-4171-b15a-d8035a79a035&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/fbbe4dff-5eaa-4171-b15a-d8035a79a035/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + }, + { + "platform": "ai4experiments", + "platform_identifier": "365", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "name": "pira-analyzer", + "description": "

                                          This component leverages AI technologies for information extraction to identify semantically-relevant structured information from semi-/un-structured documents. This information is classified as personally identifiable information (PII) entities or not by leveraging named entity recognition. Identified PII entities are further classified into different categories depending on their nature.

                                          ", + "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fe6bca3a-9583-4f6c-993a-ec104226a679&revisionId=353595da-df92-4d10-8690-d6e1665040af&parentUrl=marketplace#md-model-detail-template", + "date_published": "2023-09-01T15:15:00.000", + "version": "1.0.0", + "pid": "", + "alternate_name": [], + "application_area": [], + "citation": [], + "contact": [], + "creator": [], + "distribution": [], + "has_part": [], + "industrial_sector": [], + "is_part_of": [], + "keyword": [], + "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fe6bca3a-9583-4f6c-993a-ec104226a679&version=1.0.0", + "media": [ + { + "checksum": "", + "checksum_algorithm": "", + "copyright": "", + "content_url": "https://aiexp.ai4europe.eu/api/solutions/fe6bca3a-9583-4f6c-993a-ec104226a679/picture", + "content_size_kb": 0, + "date_published": "2023-09-01T15:15:00.000", + "description": "", + "encoding_format": "", + "name": "" + } + ], + "note": [], + "related_experiment": [], + "research_area": [], + "scientific_domain": [], + "type": "" + } ] \ No newline at end of file From 84686ea97fa2c6ab0657d2a1d07df768da49da4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 6 Sep 2023 16:45:01 +0200 Subject: [PATCH 28/79] Descriptions of the ai4experiment data improved --- .../example/resources/resource/datasets.json | 246 +++++++------- .../resources/resource/experiments.json | 60 ++-- .../example/resources/resource/ml_models.json | 316 +++++++++--------- 3 files changed, 311 insertions(+), 311 deletions(-) diff --git a/src/connectors/example/resources/resource/datasets.json b/src/connectors/example/resources/resource/datasets.json index b225f03b..ac1b0f13 100644 --- a/src/connectors/example/resources/resource/datasets.json +++ b/src/connectors/example/resources/resource/datasets.json @@ -3,7 +3,7 @@ "platform": "ai4experiments", "platform_identifier": "5", "name": "autoUniv-au1-1000", - "description": "https://openml.org

                                          Author: Ray. J. Hickey

                                          Source: UCI

                                          Please cite:



                                          • Dataset Title:


                                          AutoUniv Dataset

                                          data problem: autoUniv-au1-1000



                                          • Abstract:


                                          AutoUniv is an advanced data generator for classifications tasks. The aim is to reflect the nuances and heterogeneity of real data. Data can be generated in .csv, ARFF or C4.5 formats.



                                          • Source:


                                          AutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com
                                          AutoUniv web-site: http://sites.google.com/site/autouniv/.



                                          • Data Set Information:


                                          The user first creates a classification model and then generates classified examples from it. To create a model, the following are specified: the number of attributes (up to 1000) and their type (discrete or continuous), the number of classes (up to 10), the complexity of the underlying rules and the noise level. AutoUniv then produces a model through a process of constrained randomised search to satisfy the user's requirements. A model can have up to 3000 rules. Rare class models can be designed. A sequence of models can be designed to reflect concept and/or population drift.


                                          AutoUniv creates three text files for a model: a Prolog specification of the model used to generate examples (.aupl); a user-friendly statement of the classification rules in an 'if ... then' format (.aurules); a statistical summary of the main properties of the model, including its Bayes rate (.auprops).



                                          • Attribute Information:


                                          Attributes may be discrete with up to 10 values or continuous. A discrete attribute ", + "description": "https://openml.org \n\n**Author** : Ray. J. Hickey \n \n**Source** : UCI \n \n **Please cite** :\n\n \n\n \n\n * Dataset Title: \n \n\n \n\nAutoUniv Dataset \n \ndata problem: autoUniv-au1-1000\n\n \n\n \n\n * Abstract: \n \n\n \n\nAutoUniv is an advanced data generator for classifications tasks. The aim is\nto reflect the nuances and heterogeneity of real data. Data can be generated\nin .csv, ARFF or C4.5 formats.\n\n \n\n \n\n * Source: \n \n\n \n\nAutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com \nAutoUniv web-site: http://sites.google.com/site/autouniv/.\n\n \n\n \n\n * Data Set Information:\n \n\n \n\nThe user first creates a classification model and then generates classified\nexamples from it. To create a model, the following are specified: the number\nof attributes (up to 1000) and their type (discrete or continuous), the number\nof classes (up to 10), the complexity of the underlying rules and the noise\nlevel. AutoUniv then produces a model through a process of constrained\nrandomised search to satisfy the user's requirements. A model can have up to\n3000 rules. Rare class models can be designed. A sequence of models can be\ndesigned to reflect concept and/or population drift.\n\n \n\nAutoUniv creates three text files for a model: a Prolog specification of the\nmodel used to generate examples (.aupl); a user-friendly statement of the\nclassification rules in an 'if ... then' format (.aurules); a statistical\nsummary of the main properties of the model, including its Bayes rate\n(.auprops).\n\n \n\n \n\n * Attribute Information: \n \n\n \n\nAttributes may be discrete with up to 10 values or continuous. A discrete\nattribute can be nominal with values v1, v2, v3 ... or integer with values 0,\n1, 2 , ... .\n\n \n\n \n\n * Relevant Papers:\n \n\n \n\nMarrs, G, Hickey, RJ and Black, MM (2010)", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=01dd6e8a-f4b1-4c5e-9206-0e40c8031be6&revisionId=39245fff-57fa-45c5-9f6a-49abf71e99b6&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -49,7 +49,7 @@ "platform": "ai4experiments", "platform_identifier": "6", "name": "schizo", - "description": "https://openml.org

                                          Author:

                                          Source: Unknown - Date unknown

                                          Please cite:


                                          Schizophrenic Eye-Tracking Data in Rubin and Wu (1997)
                                          Biometrics. Yingnian Wu (wu@hustat.harvard.edu) [14/Oct/97]


                                          Information about the dataset
                                          CLASSTYPE: nominal
                                          CLASSINDEX: last

                                          ", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\nSchizophrenic Eye-Tracking Data in Rubin and Wu (1997) \nBiometrics. Yingnian Wu (wu@hustat.harvard.edu) [14/Oct/97]\n\n \n\nInformation about the dataset \nCLASSTYPE: nominal \nCLASSINDEX: last\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0247b1c5-2161-4367-96ea-4aa9370b8bb6&revisionId=9c637a1f-22db-49df-ab7d-d0763058e9e9&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -95,7 +95,7 @@ "platform": "ai4experiments", "platform_identifier": "7", "name": "calendarDOW", - "description": "https://openml.org

                                          calendarDOW-pmlb

                                          ", + "description": "https://openml.org \n\ncalendarDOW-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=026c2720-4045-48c9-87ec-9791c120bb85&revisionId=b2df4780-1459-41ed-abbb-fb38f2697a04&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -141,7 +141,7 @@ "platform": "ai4experiments", "platform_identifier": "8", "name": "GesturePhaseSegmentationProcessed", - "description": "https://openml.org

                                          Author: Renata Cristina Barros Madeo (Madeo\",\"R. C. B.) Priscilla Koch Wagner (Wagner\",\"P. K.) Sarajane Marques Peres (Peres\",\"S. M.) {renata.si\",\"priscilla.wagner\",\"sarajane} at usp.br http://each.uspnet.usp.br/sarajane/

                                          Source: UCI

                                          Please cite: Please refer to the Machine Learning Repository's citation policy. Additionally, the authors require a citation to one or more publications from those cited as relevant papers.


                                          Creators:
                                          Renata Cristina Barros Madeo (Madeo, R. C. B.)
                                          Priscilla Koch Wagner (Wagner, P. K.)
                                          Sarajane Marques Peres (Peres, S. M.)
                                          {renata.si, priscilla.wagner, sarajane} at usp.br
                                          http://each.uspnet.usp.br/sarajane/


                                          Donor:
                                          University of Sao Paulo - Brazil


                                          Data Set Information:


                                          The dataset is composed by features extracted from 7 videos with people gesticulating, aiming at studying Gesture Phase Segmentation.
                                          Each video is represented by two files: a raw file, which contains the position of hands, wrists, head and spine of the user in each frame; and a processed file, which contains velocity and acceleration of hands and wrists. See the data set description for more information on the dataset.


                                          Attribute Information:


                                          Raw files: 18 numeric attributes (double), a timestamp and a class attribute (nominal).
                                          Processed files: 32 numeric attributes (double) and a class attribute (nominal).
                                          A feature vector with up to 50 numeric attributes can be generated with the two files mentioned above.


                                          This is the processe", + "description": "https://openml.org \n\n**Author** : Renata Cristina Barros Madeo (Madeo\",\"R. C. B.) Priscilla Koch\nWagner (Wagner\",\"P. K.) Sarajane Marques Peres (Peres\",\"S. M.)\n{renata.si\",\"priscilla.wagner\",\"sarajane} at usp.br\nhttp://each.uspnet.usp.br/sarajane/ \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/gesture+phase+segmentation) \n \n **Please cite** : Please refer to the [Machine Learning Repository's citation\npolicy](https://archive.ics.uci.edu/ml/citation_policy.html). Additionally,\nthe authors require a citation to one or more publications from those cited as\nrelevant papers.\n\n \n\nCreators: \nRenata Cristina Barros Madeo (Madeo, R. C. B.) \nPriscilla Koch Wagner (Wagner, P. K.) \nSarajane Marques Peres (Peres, S. M.) \n{renata.si, priscilla.wagner, sarajane} at usp.br \nhttp://each.uspnet.usp.br/sarajane/\n\n \n\nDonor: \nUniversity of Sao Paulo - Brazil\n\n \n\nData Set Information:\n\n \n\nThe dataset is composed by features extracted from 7 videos with people\ngesticulating, aiming at studying Gesture Phase Segmentation. \nEach video is represented by two files: a raw file, which contains the\nposition of hands, wrists, head and spine of the user in each frame; and a\nprocessed file, which contains velocity and acceleration of hands and wrists.\nSee the data set description for more information on the dataset.\n\n \n\nAttribute Information:\n\n \n\nRaw files: 18 numeric attributes (double), a timestamp and a class attribute\n(nominal). \nProcessed files: 32 numeric attributes (double) and a class attribute\n(nominal). \nA feature vector with up to 50 numeric attributes can be generated with the\ntwo files mentioned above.\n\n \n\nThis is the processed data set with the following feature description:\n\n \n\nProcessed files:\n\n \n\n \n\n 1. Vectorial velocity of left hand (x coordi", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=03a938ee-181d-4409-a806-199034e5172b&revisionId=73ee8559-e5f5-45c4-b15b-56392843644f&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -187,7 +187,7 @@ "platform": "ai4experiments", "platform_identifier": "9", "name": "haberman", - "description": "https://openml.org

                                          Author:

                                          Source: Unknown -

                                          Please cite:




                                          1. Title: Haberman's Survival Data




                                          2. Sources:
                                            (a) Donor: Tjen-Sien Lim (limt@stat.wisc.edu)
                                            (b) Date: March 4, 1999




                                          3. Past Usage:



                                            1. Haberman, S. J. (1976). Generalized Residuals for Log-Linear
                                              Models, Proceedings of the 9th International Biometrics
                                              Conference, Boston, pp. 104-122.

                                            2. Landwehr, J. M., Pregibon, D., and Shoemaker, A. C. (1984),
                                              Graphical Models for Assessing Logistic Regression Models (with
                                              discussion), Journal of the American Statistical Association 79:
                                              61-83.

                                            3. Lo, W.-D. (1993). Logistic Regression Trees, PhD thesis,
                                              Department of Statistics, University of Wisconsin, Madison, WI.




                                          4. Relevant Information:
                                            The dataset contains cases from a study that was conducted between
                                            1958 and 1970 at the University of Chicago's Billings Hospital on
                                            the survival of patients who had undergone surgery for breast
                                            cancer.




                                          5. Number of Instances: 306




                                          6. Number of Attributes: 4 (including the class attribute)




                                          7. Attribute Information:



                                            1. Age of patient at time of operation (numerical)

                                            2. Patient's year of operation (year - 1900, numerical)

                                            3. Number of positive axillary nodes detected (numerical)

                                            4. Survival status (class attribute)
                                              1 = the patient survived 5 years or longer
                                              2 = the patient died within 5 year




                                          8. Missing Attribute Values: None


                                            <", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - \n \n**Please cite** :\n\n \n\n \n\n 1. \n\nTitle: Haberman's Survival Data\n\n \n\n \n\n 2. \n\nSources: \n(a) Donor: Tjen-Sien Lim (limt@stat.wisc.edu) \n(b) Date: March 4, 1999\n\n \n\n \n\n 3. \n\nPast Usage:\n\n \n \n\n 1. Haberman, S. J. (1976). Generalized Residuals for Log-Linear \nModels, Proceedings of the 9th International Biometrics \nConference, Boston, pp. 104-122.\n\n \n\n 2. Landwehr, J. M., Pregibon, D., and Shoemaker, A. C. (1984), \nGraphical Models for Assessing Logistic Regression Models (with \ndiscussion), Journal of the American Statistical Association 79: \n61-83.\n\n \n\n 3. Lo, W.-D. (1993). Logistic Regression Trees, PhD thesis, \nDepartment of Statistics, University of Wisconsin, Madison, WI.\n\n \n \n\n \n\n 4. \n\nRelevant Information: \nThe dataset contains cases from a study that was conducted between \n1958 and 1970 at the University of Chicago's Billings Hospital on \nthe survival of patients who had undergone surgery for breast \ncancer.\n\n \n\n \n\n 5. \n\nNumber of Instances: 306\n\n \n\n \n\n 6. \n\nNumber of Attributes: 4 (including the class attribute)\n\n \n\n \n\n 7. \n\nAttribute Information:\n\n \n \n\n 1. Age of patient at time of operation (numerical)\n \n\n 2. Patient's year of operation (year - 1900, numerical)\n \n\n 3. Number of positive axillary nodes detected (numerical)\n \n\n 4. Survival status (class attribute) \n1 = the patient survived 5 years or longer \n2 = the patient died within 5 year\n\n \n \n\n \n\n 8. \n\nMissing Attribute Values: None\n\n \n\n \n\n \n\nInformation about the dataset \nCLASSTYPE: nominal \nCLASSINDEX: last\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=03cc6248-6087-45e0-a732-6d34e299934e&revisionId=e36bfd43-b146-47b4-ad8b-f1cca7ef09c0&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -233,7 +233,7 @@ "platform": "ai4experiments", "platform_identifier": "13", "name": "sudoku-tutorial-gui-stream", - "description": "



                                            This is the streaming version User Interface component of the AI4EU Experiments Sudoku Hello World!


                                            For more details, see the corresponding entry in the AI4EU Asset Catalog: https://www.ai4europe.eu/research/ai-catalog/sudoku-design-assistant-gui










                                            ", + "description": " \n\n \n\nThis is the **streaming** version **User Interface** component of the AI4EU\nExperiments **Sudoku Hello World**!\n\n \n\nFor more details, see the corresponding entry in the AI4EU Asset Catalog:\n\n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=06c6909b-7c7d-4a09-8199-e3d647ba144d&revisionId=edc4ecbd-8189-4021-83ca-44e046f41127&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -279,7 +279,7 @@ "platform": "ai4experiments", "platform_identifier": "17", "name": "GAMETES_Epistasis_2-Way_20atts_0.1H_EDM-1_1", - "description": "https://openml.org

                                            GAMETES_Epistasis_2-Way_20atts_0.1H_EDM-1_1-pmlb

                                            ", + "description": "https://openml.org \n\nGAMETES_Epistasis_2-Way_20atts_0.1H_EDM-1_1-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=08819c99-9458-48de-84e1-83290b73caa7&revisionId=a718624c-d501-459f-8ad6-7628dbcf60a9&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -325,7 +325,7 @@ "platform": "ai4experiments", "platform_identifier": "20", "name": "mushroom", - "description": "https://openml.org

                                            Author: Jeff Schlimmer

                                            Source: UCI - 1981

                                            Please cite: The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf


                                            Description


                                            This dataset describes mushrooms in terms of their physical characteristics. They are classified into: poisonous or edible.


                                            Source


                                            ```
                                            (a) Origin:
                                            Mushroom records are drawn from The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf


                                            (b) Donor:
                                            Jeff Schlimmer (Jeffrey.Schlimmer '@' a.gp.cs.cmu.edu)
                                            ```


                                            Dataset description


                                            This dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family. Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like ``leaflets three, let it be'' for Poisonous Oak and Ivy.


                                            Attributes Information


                                            1. cap-shape: bell=b,conical=c,convex=x,flat=f, knobbed=k,sunken=s
                                            2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s
                                            3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r, pink=p,purple=u,red=e,white=w,yellow=y
                                            4. bruises?: bruises=t,no=f
                                            5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f, musty=m,none=n,pungent=p,spicy=s
                                            6. gill-attachment: attached=a,descending=d,", + "description": "https://openml.org \n\n**Author** : [Jeff Schlimmer](Jeffrey.Schlimmer@a.gp.cs.cmu.edu) \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/mushroom) \\- 1981 \n \n**Please cite** : The Audubon Society Field Guide to North American Mushrooms\n(1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf\n\n \n\n### Description\n\n \n\nThis dataset describes mushrooms in terms of their physical characteristics.\nThey are classified into: poisonous or edible.\n\n \n\n### Source\n\n \n\n``` \n(a) Origin: \nMushroom records are drawn from The Audubon Society Field Guide to North\nAmerican Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf\n\n \n\n(b) Donor: \nJeff Schlimmer (Jeffrey.Schlimmer '@' a.gp.cs.cmu.edu) \n```\n\n \n\n### Dataset description\n\n \n\nThis dataset includes descriptions of hypothetical samples corresponding to 23\nspecies of gilled mushrooms in the Agaricus and Lepiota Family. Each species\nis identified as definitely edible, definitely poisonous, or of unknown\nedibility and not recommended. This latter class was combined with the\npoisonous one. The Guide clearly states that there is no simple rule for\ndetermining the edibility of a mushroom; no rule like ``leaflets three, let it\nbe'' for Poisonous Oak and Ivy.\n\n \n\n### Attributes Information\n\n \n\n`1. cap-shape: bell=b,conical=c,convex=x,flat=f, knobbed=k,sunken=s \n2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s \n3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r, pink=p,purple=u,red=e,white=w,yellow=y \n4. bruises?: bruises=t,no=f \n5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f, musty=m,none=n,pungent=p,spicy=s \n6. gill-attachment: attached=a,descending=d,free=f,notched=n \n7. gill-spacing: close=c,crowded=w,distant=d \n8. gill-size: broad=b,narrow=n \n9. gill-color: black=k,bro", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0a6e6072-441e-4274-bf2a-6216def228bd&revisionId=d6acfed4-6030-4b57-ac62-277a78f4592d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -371,7 +371,7 @@ "platform": "ai4experiments", "platform_identifier": "21", "name": "ecoli", - "description": "https://openml.org

                                            ecoli-pmlb

                                            ", + "description": "https://openml.org \n\necoli-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0c052358-19dd-4904-a209-f58f7457623e&revisionId=bbd47da9-e81c-43da-97ba-490c32c80089&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -417,7 +417,7 @@ "platform": "ai4experiments", "platform_identifier": "22", "name": "AudioFileBroker", - "description": "

                                            This model is used for the beginning of an audio mining pipeline and dispachtes the audio files.

                                            ", + "description": "This model is used for the beginning of an audio mining pipeline and\ndispachtes the audio files.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0c4d6ad9-c9df-4054-a030-e8d22613afc5&revisionId=b3a2910a-0c19-47e8-9521-8482c203b49f&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -555,7 +555,7 @@ "platform": "ai4experiments", "platform_identifier": "25", "name": "ai4iot-data-source", - "description": "

                                            The Data Source component serves as an aggregator of data incoming from different services, and which is useful for the AI4IoT pipeline. In particular, it connects to external APIs and provides data in an unified (and standardized through protobuf message definition) way. The AI4IoT tackles air quality in the city of Trondheim, Norway. Therefore, the current version of this component fetches data for this city. The structure can, however, be replicated to any other place by extending the scripts with the given API calls for the place of interest. Currently, available data through this component is pollution measurements both from a network of low-cost sensors, a (much smaller) network of industrial sensors and meteorological data.

                                            ", + "description": "The Data Source component serves as an aggregator of data incoming from\ndifferent services, and which is useful for the AI4IoT pipeline. In\nparticular, it connects to external APIs and provides data in an unified (and\nstandardized through protobuf message definition) way. The AI4IoT tackles air\nquality in the city of Trondheim, Norway. Therefore, the current version of\nthis component fetches data for this city. The structure can, however, be\nreplicated to any other place by extending the scripts with the given API\ncalls for the place of interest. Currently, available data through this\ncomponent is pollution measurements both from a network of low-cost sensors, a\n(much smaller) network of industrial sensors and meteorological data.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&revisionId=a68fc42c-2e73-4328-97dc-34424eec75c5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.3", @@ -647,7 +647,7 @@ "platform": "ai4experiments", "platform_identifier": "27", "name": "wdbc", - "description": "https://openml.org

                                            Author: William H. Wolberg, W. Nick Street, Olvi L. Mangasarian

                                            Source: UCI, University of Wisconsin - 1995

                                            Please cite: UCI


                                            Breast Cancer Wisconsin (Diagnostic) Data Set (WDBC). Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. The target feature records the prognosis (benign (1) or malignant (2)). Original data available here


                                            Current dataset was adapted to ARFF format from the UCI version. Sample code ID's were removed.


                                            ! Note that there is also a related Breast Cancer Wisconsin (Original) Data Set with a different set of features, better known as breast-w.


                                            Feature description


                                            Ten real-valued features are computed for each of 3 cell nuclei, yielding a total of 30 descriptive features. See the papers below for more details on how they were computed. The 10 features (in order) are:


                                            a) radius (mean of distances from center to points on the perimeter)

                                            b) texture (standard deviation of gray-scale values)

                                            c) perimeter

                                            d) area

                                            e) smoothness (local variation in radius lengths)

                                            f) compactness (perimeter^2 / area - 1.0)

                                            g) concavity (severity of concave portions of the contour)

                                            h) conca", + "description": "https://openml.org \n\n**Author** : William H. Wolberg, W. Nick Street, Olvi L. Mangasarian \n \n**Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+\\(original\\)),\n[University of Wisconsin](http://pages.cs.wisc.edu/~olvi/uwmp/cancer.html) \\-\n1995 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Breast Cancer Wisconsin (Diagnostic) Data Set (WDBC).** Features are\ncomputed from a digitized image of a fine needle aspirate (FNA) of a breast\nmass. They describe characteristics of the cell nuclei present in the image.\nThe target feature records the prognosis (benign (1) or malignant (2)).\n[Original data available here](ftp://ftp.cs.wisc.edu/math-prog/cpo-\ndataset/machine-learn/cancer/)\n\n \n\nCurrent dataset was adapted to ARFF format from the UCI version. Sample code\nID's were removed.\n\n \n\n! Note that there is also a related Breast Cancer Wisconsin (Original) Data\nSet with a different set of features, better known as\n[breast-w](https://www.openml.org/d/15).\n\n \n\n### Feature description\n\n \n\nTen real-valued features are computed for each of 3 cell nuclei, yielding a\ntotal of 30 descriptive features. See the papers below for more details on how\nthey were computed. The 10 features (in order) are:\n\n \n\na) radius (mean of distances from center to points on the perimeter) \n \nb) texture (standard deviation of gray-scale values) \n \nc) perimeter \n \nd) area \n \ne) smoothness (local variation in radius lengths) \n \nf) compactness (perimeter^2 / area - 1.0) \n \ng) concavity (severity of concave portions of the contour) \n \nh) concave points (number of concave portions of the contour) \n \ni) symmetry \n \nj) fractal dimension (\"coastline approximation\" - 1)\n\n \n\n### Relevant Papers\n\n \n\nW.N. Street, W.H. Wo", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0f467230-f8cf-4e8e-8ef0-1428d5147b29&revisionId=7b0db765-cd12-44cc-b22a-b9b92b31bdf4&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -739,7 +739,7 @@ "platform": "ai4experiments", "platform_identifier": "30", "name": "liver-disorders", - "description": "https://openml.org

                                            Author: BUPA Medical Research Ltd. Donor: Richard S. Forsyth

                                            Source: UCI - 5/15/1990

                                            Please cite:


                                            BUPA liver disorders


                                            The first 5 variables are all blood tests which are thought to be sensitive to liver disorders that might arise from excessive alcohol consumption. Each line in the dataset constitutes the record of a single male individual.


                                            Important note: The 7th field (selector) has been widely misinterpreted in the past as a dependent variable representing presence or absence of a liver disorder. This is incorrect [1]. The 7th field was created by BUPA researchers as a train/test selector. It is not suitable as a dependent variable for classification. The dataset does not contain any variable representing presence or absence of a liver disorder. Researchers who wish to use this dataset as a classification benchmark should follow the method used in experiments by the donor (Forsyth & Rada, 1986, Machine learning: applications in expert systems and information retrieval) and others (e.g. Turney, 1995, Cost-sensitive classification: Empirical evaluation of a hybrid genetic decision tree induction algorithm), who used the 6th field (drinks), after dichotomising, as a dependent variable for classification. Because of widespread misinterpretation in the past, researchers should take care to state their method clearly.


                                            Attribute information

                                            1. mcv mean corpuscular volume

                                            2. alkphos alkaline phosphotase

                                            3. sgpt alanine aminotransferase

                                            4. sgot aspartate aminotra", + "description": "https://openml.org \n\n**Author** : BUPA Medical Research Ltd. Donor: Richard S. Forsyth \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Liver+Disorders) \\-\n5/15/1990 \n \n **Please cite** :\n\n \n\n**BUPA liver disorders**\n\n \n\nThe first 5 variables are all blood tests which are thought to be sensitive to\nliver disorders that might arise from excessive alcohol consumption. Each line\nin the dataset constitutes the record of a single male individual.\n\n \n\n**Important note:** The 7th field (selector) has been widely misinterpreted in\nthe past as a dependent variable representing presence or absence of a liver\ndisorder. This is incorrect [1]. The 7th field was created by BUPA researchers\nas a train/test selector. It is not suitable as a dependent variable for\nclassification. The dataset does not contain any variable representing\npresence or absence of a liver disorder. Researchers who wish to use this\ndataset as a classification benchmark should follow the method used in\nexperiments by the donor (Forsyth & Rada, 1986, Machine learning: applications\nin expert systems and information retrieval) and others (e.g. Turney, 1995,\nCost-sensitive classification: Empirical evaluation of a hybrid genetic\ndecision tree induction algorithm), who used the 6th field (drinks), after\ndichotomising, as a dependent variable for classification. Because of\nwidespread misinterpretation in the past, researchers should take care to\nstate their method clearly.\n\n \n\n **Attribute information** \n \n1\\. mcv mean corpuscular volume \n \n2\\. alkphos alkaline phosphotase \n \n3\\. sgpt alanine aminotransferase \n \n4\\. sgot aspartate aminotransferase \n \n5\\. gammagt gamma-glutamyl transpeptidase \n \n6\\. drinks number of half-pint equivalents of alcoholic beverages drunk per\nday \n \n7\\. se", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1061beb3-646c-458a-bb10-6bea01fce9d7&revisionId=94f838e4-944a-401d-84a7-49b5582a540b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -785,7 +785,7 @@ "platform": "ai4experiments", "platform_identifier": "34", "name": "grpc_hydro_hubeau", - "description": "

                                            Connector to get French hydrology data.


                                            The API makes it possible to interrogate the French hydrometric reference system (sites and observation stations of the French measurement network) as well as the observations of water level (H) and flow (Q) called \"real time\".


                                            The API is updated every 2 minutes over 24 hours deep and maintains a one month history.


                                            The data disseminated is the raw data measured in the field, without expertise or improvements made by hydrometers.


                                            Observations are expressed in the following units:


                                            • mm for water heights (divide by 1000 to convert to meters);
                                            • l / s for flow rates (divide by 1000 to convert to m3 / s).

                                            Dates are expressed in Coordinated Universal Time (UTC) in ISO 8601 format.


                                            In metropolitan France, add 1 hour to UTC time during winter time, and 2 hours during summer time. In Guadeloupe and Martinique, subtract 4 hours from UTC time; In Guyana subtract 3 hours from UTC time; In Mayotte add 3 hours to UTC time; In Reunion, add 4 hours to UTC time.

                                            ", + "description": "**Connector** to get French hydrology data.\n\n \n\nThe API makes it possible to interrogate the French hydrometric reference\nsystem (sites and observation stations of the French measurement network) as\nwell as the observations of water level (H) and flow (Q) called \"real time\".\n\n \n\nThe API is updated every 2 minutes over 24 hours deep and maintains a one\nmonth history.\n\n \n\nThe data disseminated is the raw data measured in the field, without expertise\nor improvements made by hydrometers.\n\n \n\nObservations are expressed in the following units:\n\n \n\n * mm for water heights (divide by 1000 to convert to meters);\n * l / s for flow rates (divide by 1000 to convert to m3 / s).\n\nDates are expressed in Coordinated Universal Time (UTC) in ISO 8601 format.\n\n \n\nIn metropolitan France, add 1 hour to UTC time during winter time, and 2 hours\nduring summer time. In Guadeloupe and Martinique, subtract 4 hours from UTC\ntime; In Guyana subtract 3 hours from UTC time; In Mayotte add 3 hours to UTC\ntime; In Reunion, add 4 hours to UTC time.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=11b6681c-d8df-49c0-ba38-480b3ee2f63c&revisionId=ad13c11d-9d68-4101-a325-e9da62142ce0&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.3", @@ -831,7 +831,7 @@ "platform": "ai4experiments", "platform_identifier": "38", "name": "wilt", - "description": "https://openml.org

                                            Author: Brian Johnson

                                            Source: [UCI] (https://archive.ics.uci.edu/ml/datasets/Wilt)

                                            Please cite: Johnson, B., Tateishi, R., Hoan, N., 2013. A hybrid pansharpening approach and multiscale object-based image analysis for mapping diseased pine and oak trees. International Journal of Remote Sensing, 34 (20), 6969-6982.


                                            Changes w.r.t. version 1: renamed variables such that they match description.


                                            Dataset:


                                            Wilt Data Set


                                            Abstract:


                                            High-resolution Remote Sensing data set (Quickbird). Small number of training samples of diseased trees, large number for other land cover. Testing data set from stratified random sample of image.


                                            Source:


                                            Brian Johnson;
                                            Institute for Global Environmental Strategies;
                                            2108-11 Kamiyamaguchi, Hayama, Kanagawa,240-0115 Japan;
                                            Email: Johnson '@' iges.or.jp


                                            Data Set Information:


                                            This data set contains some training and testing data from a remote sensing study by Johnson et al. (2013) that involved detecting diseased trees in Quickbird imagery. There are few training samples for the 'diseased trees' class (74) and many for 'other land cover' class (4265).


                                            The data set consists of image segments, generated by segmenting the pansharpened image. The segments contain spectral information from the Quickbird multispectral image bands and texture information from the panchromatic (Pan) image band. The testing data set is for the row with \u00e2\u20ac\u0153Segmentation scale 15\u00e2\u20ac\u009d segments and \u00e2\u20ac\u0153original multi-spectral image\u00e2\u20ac\u009d Spectral information in Table 2 of the reference (i.e. row 5). Please see the reference below for more information on", + "description": "https://openml.org \n\n**Author** : Brian Johnson \n \n**Source** : [UCI] (https://archive.ics.uci.edu/ml/datasets/Wilt) \n \n **Please cite** : Johnson, B., Tateishi, R., Hoan, N., 2013. A hybrid\npansharpening approach and multiscale object-based image analysis for mapping\ndiseased pine and oak trees. International Journal of Remote Sensing, 34 (20),\n6969-6982.\n\n \n\n**Changes w.r.t. version 1: renamed variables such that they match\ndescription.**\n\n \n\n### Dataset:\n\n \n\nWilt Data Set\n\n \n\n### Abstract:\n\n \n\nHigh-resolution Remote Sensing data set (Quickbird). Small number of training\nsamples of diseased trees, large number for other land cover. Testing data set\nfrom stratified random sample of image.\n\n \n\n### Source:\n\n \n\nBrian Johnson; \nInstitute for Global Environmental Strategies; \n2108-11 Kamiyamaguchi, Hayama, Kanagawa,240-0115 Japan; \nEmail: Johnson '@' iges.or.jp\n\n \n\n### Data Set Information:\n\n \n\nThis data set contains some training and testing data from a remote sensing\nstudy by Johnson et al. (2013) that involved detecting diseased trees in\nQuickbird imagery. There are few training samples for the 'diseased trees'\nclass (74) and many for 'other land cover' class (4265).\n\n \n\nThe data set consists of image segments, generated by segmenting the\npansharpened image. The segments contain spectral information from the\nQuickbird multispectral image bands and texture information from the\npanchromatic (Pan) image band. The testing data set is for the row with\n\u00e2\u20ac\u0153Segmentation scale 15\u00e2\u20ac\u009d segments and \u00e2\u20ac\u0153original multi-spectral image\u00e2\u20ac\u009d\nSpectral information in Table 2 of the reference (i.e. row 5). Please see the\nreference below for more information on the data set, and please cite the\nreference if you use this data set. Enjoy!\n\n \n\n### Attribute Information:\n\n \n\ncl", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1413584f-738b-4241-9b60-80228e509fb7&revisionId=0bc075c6-0a74-48a0-97fb-b1dd62870920&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -877,7 +877,7 @@ "platform": "ai4experiments", "platform_identifier": "39", "name": "cjs", - "description": "https://openml.org

                                            Author: Dr. Fernando Camacho

                                            Source: Unknown - 1995

                                            Please cite: Camacho, F. and Arron, G. (1995) Effects of the regulators paclobutrazol and flurprimidol on the growth of terminal sprouts formed on trimmed silver maple trees. Canadian Journal of Statistics 3(23).


                                            Data on tree growth used in the Case Study published in the September, 1995 issue of the Canadian Journal of Statistics. This data set was been provided by Dr. Fernando Camacho, Ontario Hydro Technologies, 800 Kipling Ave, Toronto Canada M3Z 5S4. It forms the basis of the Case Study in Data Analysis published in the Canadian Journal of Statistics, September 1995. It can be freely used for noncommercial purposes, as long as proper acknowledgement to the source and to the Canadian Journal of Statistics is made.


                                            Description


                                            The effects of the Growth Regulators Paclobutrazol (PP 333)
                                            and Flurprimidol (EL-500) on the Number and Length of Internodes
                                            in Terminal Sprouts Formed on Trimmed Silver Maple Trees.


                                            Introduction:


                                            The trimming of trees under distribution lines on city streets and
                                            in rural areas is a major problem and expense for electrical
                                            utilities. Such operations are routinely performed at intervals of
                                            one to eight years depending upon the individual species growth rate
                                            and the amount of clearance required. Ontario Hydro trims about
                                            500,000 trees per year at a cost of about $25 per tree.


                                            Much effort has been spent in developing chemicals for the horticultural
                                            industry to retard the growth of woody and herbaceous plants. Recently,
                                            a group of new growth regulators was introduced which was shown to be
                                            effective in controlli", + "description": "https://openml.org \n\n**Author** : Dr. Fernando Camacho \n \n **Source** : Unknown - 1995 \n \n **Please cite** : Camacho, F. and Arron, G. (1995) Effects of the regulators\npaclobutrazol and flurprimidol on the growth of terminal sprouts formed on\ntrimmed silver maple trees. Canadian Journal of Statistics 3(23).\n\n \n\nData on tree growth used in the Case Study published in the September, 1995\nissue of the Canadian Journal of Statistics. This data set was been provided\nby Dr. Fernando Camacho, Ontario Hydro Technologies, 800 Kipling Ave, Toronto\nCanada M3Z 5S4. It forms the basis of the Case Study in Data Analysis\npublished in the Canadian Journal of Statistics, September 1995. It can be\nfreely used for noncommercial purposes, as long as proper acknowledgement to\nthe source and to the Canadian Journal of Statistics is made.\n\n \n\nDescription\n\n \n\nThe effects of the Growth Regulators Paclobutrazol (PP 333) \nand Flurprimidol (EL-500) on the Number and Length of Internodes \nin Terminal Sprouts Formed on Trimmed Silver Maple Trees.\n\n \n\nIntroduction:\n\n \n\nThe trimming of trees under distribution lines on city streets and \nin rural areas is a major problem and expense for electrical \nutilities. Such operations are routinely performed at intervals of \none to eight years depending upon the individual species growth rate \nand the amount of clearance required. Ontario Hydro trims about \n500,000 trees per year at a cost of about $25 per tree.\n\n \n\nMuch effort has been spent in developing chemicals for the horticultural \nindustry to retard the growth of woody and herbaceous plants. Recently, \na group of new growth regulators was introduced which was shown to be \neffective in controlling the growth of trees without producing \nnoticeable injury symptoms. In this group are P", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=141de15b-91a7-4dcb-9eb3-4297e217c3de&revisionId=62ae0822-9a5e-4003-afb5-3fef610694cd&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -923,7 +923,7 @@ "platform": "ai4experiments", "platform_identifier": "40", "name": "credit-g", - "description": "https://openml.org

                                            Author: Dr. Hans Hofmann

                                            Source: UCI - 1994

                                            Please cite: UCI


                                            German Credit data

                                            This dataset classifies people described by a set of attributes as good or bad credit risks.


                                            This dataset comes with a cost matrix:
                                            Good Bad (predicted)
                                            Good 0 1 (actual)
                                            Bad 5 0


                                            It is worse to class a customer as good when they are bad (5), than it is to class a customer as bad when they are good (1).


                                            Attribute description



                                            1. Status of existing checking account, in Deutsche Mark.

                                            2. Duration in months

                                            3. Credit history (credits taken, paid back duly, delays, critical accounts)

                                            4. Purpose of the credit (car, television,...)

                                            5. Credit amount

                                            6. Status of savings account/bonds, in Deutsche Mark.

                                            7. Present employment, in number of years.

                                            8. Installment rate in percentage of disposable income

                                            9. Personal status (married, single,...) and sex

                                            10. Other debtors / guarantors

                                            11. Present residence since X years

                                            12. Property (e.g. real estate)

                                            13. Age in years

                                            14. Other installment plans (banks, stores)

                                            15. Housing (rent, own,...)

                                            16. Number of existing credits at this bank

                                            17. Job

                                            18. Number of people being liable to provide maintenance for

                                            19. Telephone (yes,no)

                                            20. Foreign worker (yes,no)

                                            ", + "description": "https://openml.org \n\n**Author** : Dr. Hans Hofmann \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/statlog+\\(german+credit+data\\))\n\\- 1994 \n \n**Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n **German Credit data** \n \nThis dataset classifies people described by a set of attributes as good or bad\ncredit risks.\n\n \n\nThis dataset comes with a cost matrix: \n`Good Bad (predicted) \nGood 0 1 (actual) \nBad 5 0`\n\n \n\nIt is worse to class a customer as good when they are bad (5), than it is to\nclass a customer as bad when they are good (1).\n\n \n\n### Attribute description\n\n \n\n \n\n 1. Status of existing checking account, in Deutsche Mark. \n \n\n 2. Duration in months \n \n\n 3. Credit history (credits taken, paid back duly, delays, critical accounts) \n \n\n 4. Purpose of the credit (car, television,...) \n \n\n 5. Credit amount \n \n\n 6. Status of savings account/bonds, in Deutsche Mark. \n \n\n 7. Present employment, in number of years. \n \n\n 8. Installment rate in percentage of disposable income \n \n\n 9. Personal status (married, single,...) and sex \n \n\n 10. Other debtors / guarantors \n \n\n 11. Present residence since X years \n \n\n 12. Property (e.g. real estate) \n \n\n 13. Age in years \n \n\n 14. Other installment plans (banks, stores) \n \n\n 15. Housing (rent, own,...) \n \n\n 16. Number of existing credits at this bank \n \n\n 17. Job \n \n\n 18. Number of people being liable to provide maintenance for \n \n\n 19. Telephone (yes,no) \n \n\n 20. Foreign worker (yes,no)\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=14578085-5f08-4275-a790-5a9cfbefb412&revisionId=ce377185-b3f0-4f39-8910-d6296ddef03b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -969,7 +969,7 @@ "platform": "ai4experiments", "platform_identifier": "41", "name": "GAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_75_EDM-2_001", - "description": "https://openml.org

                                            GAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_75_EDM-2_001-pmlb

                                            ", + "description": "https://openml.org \n\nGAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_75_EDM-2_001-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=14f91a0e-6262-454d-8edf-90e68eb8de15&revisionId=7b852968-64e8-417f-947b-487a4b0ffca8&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1015,7 +1015,7 @@ "platform": "ai4experiments", "platform_identifier": "44", "name": "FileUploadDataBroker", - "description": "

                                            This is a simple file upload data broker. It can be used as a starting point for pipelines which process files. It offers a web interface with a simple file upload dialog. The uploaded files are saved on a shared volume, then the corresponding paths are sent to the next model in the pipeline. For example, this data broker can be used in connection with the following models: MusicDetection, SpeechDection, MusicAnnotation, and ObjectDetection. In the current version, only single files are supported.













                                            ", + "description": "This is a simple file upload data broker. It can be used as a starting point\nfor pipelines which process files. It offers a web interface with a simple\nfile upload dialog. The uploaded files are saved on a shared volume, then the\ncorresponding paths are sent to the next model in the pipeline. For example,\nthis data broker can be used in connection with the following models:\nMusicDetection, SpeechDection, MusicAnnotation, and ObjectDetection. In the\ncurrent version, only single files are supported.\n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1681c927-ae2c-41f6-9ee4-51ece5e80806&revisionId=f5f3b0cc-2486-45ac-8928-8769b89c8825&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1061,7 +1061,7 @@ "platform": "ai4experiments", "platform_identifier": "50", "name": "led7", - "description": "https://openml.org

                                            led7-pmlb

                                            ", + "description": "https://openml.org \n\nled7-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1bb76aa6-45df-4944-b2bf-6c6de92df1cc&revisionId=d6a09a23-a730-4298-93cb-76a00cc4d1ea&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1107,7 +1107,7 @@ "platform": "ai4experiments", "platform_identifier": "55", "name": "vehicle", - "description": "https://openml.org

                                            Author: Dr. Pete Mowforth and Dr. Barry Shepherd

                                            Source: UCI
                                            Please cite: Siebert,JP. Turing Institute Research Memorandum TIRM-87-018 \"Vehicle Recognition Using Rule Based Methods\" (March 1987)


                                            NAME
                                            vehicle silhouettes


                                            PURPOSE
                                            to classify a given silhouette as one of four types of vehicle,
                                            using a set of features extracted from the silhouette. The
                                            vehicle may be viewed from one of many different angles.


                                            PROBLEM TYPE
                                            classification


                                            SOURCE
                                            Drs.Pete Mowforth and Barry Shepherd
                                            Turing Institute
                                            George House
                                            36 North Hanover St.
                                            Glasgow
                                            G1 2AD


                                            CONTACT
                                            Alistair Sutherland
                                            Statistics Dept.
                                            Strathclyde University
                                            Livingstone Tower
                                            26 Richmond St.
                                            GLASGOW G1 1XH
                                            Great Britain


                                                 Tel: 041 552 4400 x3033

                                            Fax: 041 552 4711

                                            e-mail: alistair@uk.ac.strathclyde.stams

                                            HISTORY
                                            This data was originally gathered at the TI in 1986-87 by
                                            JP Siebert. It was partially financed by Barr and Stroud Ltd.
                                            The original purpose was to find a method of distinguishing
                                            3D objects within a 2D image by application of an ensemble of
                                            shape feature extractors to the 2D silhouettes of the objects.
                                            Measures of shape features extracted from example silhouettes
                                            of objects to be discriminated were used to generate a class-
                                            ", + "description": "https://openml.org \n\n**Author** : Dr. Pete Mowforth and Dr. Barry Shepherd \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/Statlog+\\(Vehicle+Silhouettes\\)) \n **Please cite** : Siebert,JP. Turing Institute Research Memorandum\nTIRM-87-018 \"Vehicle Recognition Using Rule Based Methods\" (March 1987)\n\n \n\nNAME \nvehicle silhouettes\n\n \n\nPURPOSE \nto classify a given silhouette as one of four types of vehicle, \nusing a set of features extracted from the silhouette. The \nvehicle may be viewed from one of many different angles.\n\n \n\nPROBLEM TYPE \nclassification\n\n \n\nSOURCE \nDrs.Pete Mowforth and Barry Shepherd \nTuring Institute \nGeorge House \n36 North Hanover St. \nGlasgow \nG1 2AD\n\n \n\nCONTACT \nAlistair Sutherland \nStatistics Dept. \nStrathclyde University \nLivingstone Tower \n26 Richmond St. \nGLASGOW G1 1XH \nGreat Britain\n\n \n\n \n \n Tel: 041 552 4400 x3033 \n \n Fax: 041 552 4711 \n \n e-mail: alistair@uk.ac.strathclyde.stams \n \n\n \n\nHISTORY \nThis data was originally gathered at the TI in 1986-87 by \nJP Siebert. It was partially financed by Barr and Stroud Ltd. \nThe original purpose was to find a method of distinguishing \n3D objects within a 2D image by application of an ensemble of \nshape feature extractors to the 2D silhouettes of the objects. \nMeasures of shape features extracted from example silhouettes \nof objects to be discriminated were used to generate a class- \nification rule tree by means of computer induction. \nThis object recognition strategy was successfully used to \ndiscriminate between silhouettes of model cars, vans and buses \nviewed from constrained elevation but all angles of rotation. \nThe rule tree classification performance compared favourably \nto MDC (Minimum Distance C", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=201367ca-d077-4a98-be44-bff9bee718b6&revisionId=d36b7554-5acf-4f6f-a3c1-702b540faf51&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1153,7 +1153,7 @@ "platform": "ai4experiments", "platform_identifier": "58", "name": "audio-file-broker", - "description": "

                                            Audio File Broker is a Python component that exposes an endpoint to receive an audio file (i.e., wav) through a POST endpoint reachable using the command:

                                            minikube service \\--url audio-file-broker1webui\n

                                            The output is an audio file with a static ID that can be used for further elaboration.


                                            Details and source code can be found here: https://github.com/Engineering-Research-and-Development/audio-file-broker

                                            ", + "description": "Audio File Broker is a Python component that exposes an endpoint to receive an\naudio file (i.e., wav) through a POST endpoint reachable using the command:\n\n \n \n minikube service \\--url audio-file-broker1webui\n \n\nThe output is an audio file with a static ID that can be used for further\nelaboration.\n\n \n\nDetails and source code can be found here: \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=228e7550-ddc8-4774-89c8-e2b9638b72fa&revisionId=0fb523a2-61ea-4348-9b66-1ea7a9c28056&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1199,7 +1199,7 @@ "platform": "ai4experiments", "platform_identifier": "62", "name": "analcatdata_dmft", - "description": "https://openml.org

                                            Author: Unknown

                                            Source: Jeffrey S. Simonoff - 2003

                                            Please cite: Jeffrey S. Simonoff, Analyzing Categorical Data, Springer-Verlag, 2003


                                            One of the datasets used in the book \"Analyzing Categorical Data,\"
                                            by Jeffrey S. Simonoff. It contains data on the DMFT Index (Decayed, Missing, and Filled Teeth) before and after different prevention strategies. The prevention strategy is commonly used as the (categorical) target.


                                            Attribute information



                                            • DMFT.Begin and DMFT.End: DMFT index before and after the prevention strategy

                                            • Gender of the individual

                                            • Ethnicity of the individual

                                            ", + "description": "https://openml.org \n\n**Author** : Unknown \n \n**Source** : [Jeffrey S.\nSimonoff](http://people.stern.nyu.edu/jsimonof/AnalCatData/Data/) \\- 2003 \n \n**Please cite** : Jeffrey S. Simonoff, Analyzing Categorical Data, Springer-\nVerlag, 2003\n\n \n\nOne of the datasets used in the book \"Analyzing Categorical Data,\" \nby Jeffrey S. Simonoff. It contains data on the DMFT Index (Decayed, Missing,\nand Filled Teeth) before and after different prevention strategies. The\nprevention strategy is commonly used as the (categorical) target.\n\n \n\n### Attribute information\n\n \n\n \n\n * DMFT.Begin and DMFT.End: DMFT index before and after the prevention strategy\n \n\n * Gender of the individual\n \n\n * Ethnicity of the individual\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2580db21-2cd8-405b-8912-e9881ada1454&revisionId=49ed507f-ad20-469b-a293-43628d39546c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1245,7 +1245,7 @@ "platform": "ai4experiments", "platform_identifier": "64", "name": "ai4eu-robotics-pump-6144-fft-broker", - "description": "



                                            The robotic pump demonstrator represents a hydraulic pump that can be mounted on an industrial robot, for example, to pump liquid paint for spray painting. On this pump, one accelerometer is mounted for vibration monitoring and recording.

                                            The pump can be controlled in terms of speed (rotations per minute, rpm), affecting the throughput of paint and the pressure in and out of the pump.

                                            The dataset consists of 380 million measurements of several sensor data of the pump system in 1-second intervals over two months in 2020.





                                            The complete dataset & documentation is available on Zenodo.

                                            ", + "description": " \n\n \n\nThe robotic pump demonstrator represents a hydraulic pump that can be mounted\non an industrial robot, for example, to pump liquid paint for spray painting.\nOn this pump, one accelerometer is mounted for vibration monitoring and\nrecording.\n\nThe pump can be controlled in terms of speed (rotations per minute, rpm),\naffecting the throughput of paint and the pressure in and out of the pump.\n\nThe dataset consists of 380 million measurements of several sensor data of the\npump system in 1-second intervals over two months in 2020.\n\n \n\n \n\n \n\n \n\n[The complete dataset & documentation is available on\nZenodo.](https://zenodo.org/record/5729187)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2764acc6-f82f-4b9c-ada8-fcc4edffa180&revisionId=822f9bd2-a5f7-42f7-b39d-01161ad2af1c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1291,7 +1291,7 @@ "platform": "ai4experiments", "platform_identifier": "66", "name": "parity5_plus_5", - "description": "https://openml.org

                                            parity5_plus_5-pmlb

                                            ", + "description": "https://openml.org \n\nparity5_plus_5-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2863f408-3bf5-46e5-a5e8-2c1d49547a73&revisionId=0ce18abf-1767-4bb5-b7fe-351aeaa74102&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1337,7 +1337,7 @@ "platform": "ai4experiments", "platform_identifier": "67", "name": "profb", - "description": "https://openml.org

                                            Author: Hal Stern, Robin Lock

                                            Source: StatLib

                                            Please cite:


                                            PRO FOOTBALL SCORES (raw data appears after the description below)


                                            How well do the oddsmakers of Las Vegas predict the outcome of
                                            professional football games? Is there really a home field advantage - if
                                            so how large is it? Are teams that play the Monday Night game at a
                                            disadvantage when they play again the following Sunday? Do teams benefit
                                            from having a \"bye\" week off in the current schedule? These questions and
                                            a host of others can be investigated using this data set.


                                            Hal Stern from the Statistics Department at Harvard University has
                                            made available his compilation of scores for all National Football League
                                            games from the 1989, 1990, and 1991 seasons. Dr. Stern used these data as
                                            part of his presentation \"Who's Number One?\" in the special \"Best of
                                            Boston\" session at the 1992 Joint Statistics Meetings.


                                            Several variables in the data are keyed to the oddsmakers \"point
                                            spread\" for each game. The point spread is a value assigned before each
                                            game to serve as a handicap for whichever is perceived to be the better
                                            team. Thus, to win against the point spread, the \"favorite\" team must beat
                                            the \"underdog\" team by more points than the spread. The underdog \"wins\"
                                            against the spread if it wins the game outright or manages to lose by fewer
                                            points than the spread. In theory, the point spread should represent the
                                            \"expert\" prediction as to the game's outcome. In practice, it more usually
                                            denotes a point at which an equal amount of money will be wagered both for
                                            and against th", + "description": "https://openml.org \n\n**Author** : Hal Stern, Robin Lock \n \n **Source** : [StatLib](http://lib.stat.cmu.edu/datasets/profb) \n \n**Please cite** :\n\n \n\nPRO FOOTBALL SCORES (raw data appears after the description below)\n\n \n\nHow well do the oddsmakers of Las Vegas predict the outcome of \nprofessional football games? Is there really a home field advantage - if \nso how large is it? Are teams that play the Monday Night game at a \ndisadvantage when they play again the following Sunday? Do teams benefit \nfrom having a \"bye\" week off in the current schedule? These questions and \na host of others can be investigated using this data set.\n\n \n\nHal Stern from the Statistics Department at Harvard University has \nmade available his compilation of scores for all National Football League \ngames from the 1989, 1990, and 1991 seasons. Dr. Stern used these data as \npart of his presentation \"Who's Number One?\" in the special \"Best of \nBoston\" session at the 1992 Joint Statistics Meetings.\n\n \n\nSeveral variables in the data are keyed to the oddsmakers \"point \nspread\" for each game. The point spread is a value assigned before each \ngame to serve as a handicap for whichever is perceived to be the better \nteam. Thus, to win against the point spread, the \"favorite\" team must beat \nthe \"underdog\" team by more points than the spread. The underdog \"wins\" \nagainst the spread if it wins the game outright or manages to lose by fewer \npoints than the spread. In theory, the point spread should represent the \n\"expert\" prediction as to the game's outcome. In practice, it more usually \ndenotes a point at which an equal amount of money will be wagered both for \nand against the favored team.\n\n \n\nRaw data below contains 672 cases (all 224 regular season games in \neach season and infor", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2b1bb308-9b5f-4ba9-afa0-0ac42878bf1b&revisionId=5f66eea5-684f-451a-902e-f8a85d3cac02&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1383,7 +1383,7 @@ "platform": "ai4experiments", "platform_identifier": "75", "name": "PersistentVolumeProvider", - "description": "

                                            The Persistent Volume Provider can be used to provide a common file storage for elements of a pipeline. The name of the node should be the absolute directory path.

                                            ", + "description": "The Persistent Volume Provider can be used to provide a common file storage\nfor elements of a pipeline. The name of the node should be the absolute\ndirectory path.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2f20e0ad-bc67-4629-9c8b-89f40a8c12d6&revisionId=4a8e7107-be77-4fd3-b1ec-c00afea2b4e6&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1429,7 +1429,7 @@ "platform": "ai4experiments", "platform_identifier": "88", "name": "threeOf9", - "description": "https://openml.org

                                            threeOf9-pmlb

                                            ", + "description": "https://openml.org \n\nthreeOf9-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=35271ff6-47b3-488c-9021-b0c5f893abd0&revisionId=c4c2d7bd-c07f-44e0-be8e-9711db0fb44a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1475,7 +1475,7 @@ "platform": "ai4experiments", "platform_identifier": "89", "name": "monks-problems-2", - "description": "https://openml.org

                                            Author: Sebastian Thrun (Carnegie Mellon University)

                                            Source: UCI - October 1992

                                            Please cite: UCI


                                            The Monk's Problems: Problem 2

                                            Once upon a time, in July 1991, the monks of Corsendonk Priory were faced with a school held in their priory, namely the 2nd European Summer School on Machine Learning. After listening more than one week to a wide variety of learning algorithms, they felt rather confused: Which algorithm would be optimal? And which one to avoid? As a consequence of this dilemma, they created a simple task on which all learning algorithms ought to be compared: the three MONK's problems.


                                            The target concept associated with the 2nd Monk's problem is the binary outcome of the logical formula:

                                            MONK-2: EXACTLY TWO of {a1 = 1, a2 = 1, a3 = 1, a4 = 1, a5 = 1, a6 = 1}


                                            In this dataset, the original train and test sets were merged to allow other sampling procedures. However, the original train-test splits can be found as one of the OpenML tasks.


                                            Attribute information:



                                            • attr1: 1, 2, 3

                                            • attr2: 1, 2, 3

                                            • attr3: 1, 2

                                            • attr4: 1, 2, 3

                                            • attr5: 1, 2, 3, 4

                                            • attr6: 1, 2


                                            Relevant papers


                                            The MONK's Problems - A Performance Comparison of Different Learning Algorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J. Cheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K. Kaufman, S. Keller, I. Kononenko, J. Kreuziger, R.S. Michalski, T. Mitch", + "description": "https://openml.org \n\n**Author** : Sebastian Thrun (Carnegie Mellon University) \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/MONK's+Problems)\n\\- October 1992 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**The Monk's Problems: Problem 2** \n \nOnce upon a time, in July 1991, the monks of Corsendonk Priory were faced with\na school held in their priory, namely the 2nd European Summer School on\nMachine Learning. After listening more than one week to a wide variety of\nlearning algorithms, they felt rather confused: Which algorithm would be\noptimal? And which one to avoid? As a consequence of this dilemma, they\ncreated a simple task on which all learning algorithms ought to be compared:\nthe three MONK's problems.\n\n \n\nThe target concept associated with the 2nd Monk's problem is the binary\noutcome of the logical formula: \n \nMONK-2: EXACTLY TWO of {a1 = 1, a2 = 1, a3 = 1, a4 = 1, a5 = 1, a6 = 1}\n\n \n\nIn this dataset, the original train and test sets were merged to allow other\nsampling procedures. However, the original train-test splits can be found as\none of the OpenML tasks.\n\n \n\n### Attribute information:\n\n \n\n \n\n * attr1: 1, 2, 3 \n \n\n * attr2: 1, 2, 3 \n \n\n * attr3: 1, 2 \n \n\n * attr4: 1, 2, 3 \n \n\n * attr5: 1, 2, 3, 4 \n \n\n * attr6: 1, 2 \n \n\n \n\n### Relevant papers\n\n \n\nThe MONK's Problems - A Performance Comparison of Different Learning\nAlgorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J.\nCheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K.\nKaufman, S. Keller, I. Kononenko, J. Kreuziger, R.S. Michalski, T. Mitchell,\nP. Pachowicz, Y. Reich H. Vafaie, W. Van de Welde, W. Wenzel, J. Wnek, and J.\nZhang. Technical Report CS-CMU-91-197, Carnegie Mellon University, D", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=35915a44-ff16-4bdb-a6d6-fa88df61bf26&revisionId=549f1574-d126-42c0-8197-64ec12cbc567&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1521,7 +1521,7 @@ "platform": "ai4experiments", "platform_identifier": "90", "name": "AI4Agri-frontend", - "description": "

                                            GUI and back-end logic for the AI4Agri models

                                            ", + "description": "GUI and back-end logic for the AI4Agri models\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=35d9681b-c182-466b-9edf-1a9c962d0888&revisionId=6f92c9f4-b497-411d-8a4b-38e2b32251be&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1567,7 +1567,7 @@ "platform": "ai4experiments", "platform_identifier": "96", "name": "mofn-3-7-10", - "description": "https://openml.org

                                            Author: Unknown

                                            Source: PMLB Supposedly from UCI originally, but can't find it there.

                                            Please cite


                                            The origin is not clear, but presumably this is an artificial problem representing M-of-N rules. The target is 1 if a certain M 'bits' are '1'? (Joaquin Vanschoren)

                                            ", + "description": "https://openml.org \n\n**Author** : Unknown \n \n **Source** : [PMLB](https://github.com/EpistasisLab/penn-ml-\nbenchmarks/tree/master/datasets/classification) Supposedly from UCI\noriginally, but can't find it there. \n \n **Please cite**\n\n \n\nThe origin is not clear, but presumably this is an artificial problem\nrepresenting M-of-N rules. The target is 1 if a certain M 'bits' are '1'?\n(Joaquin Vanschoren)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=39846e3e-27c7-47c4-a613-55469ec5bd39&revisionId=9a0ab46a-219c-43c2-9f7d-464f8fb1da02&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1613,7 +1613,7 @@ "platform": "ai4experiments", "platform_identifier": "97", "name": "monks-problems-3", - "description": "https://openml.org

                                            Author: Sebastian Thrun (Carnegie Mellon University)

                                            Source: UCI - October 1992

                                            Please cite: UCI


                                            The Monk's Problems: Problem 3

                                            Once upon a time, in July 1991, the monks of Corsendonk Priory were faced with a school held in their priory, namely the 2nd European Summer School on Machine Learning. After listening more than one week to a wide variety of learning algorithms, they felt rather confused: Which algorithm would be optimal? And which one to avoid? As a consequence of this dilemma, they created a simple task on which all learning algorithms ought to be compared: the three MONK's problems.


                                            The target concept associated with the 3rd Monk's problem is the binary outcome of the logical formula:

                                            MONK-3: (a5 = 3 and a4 = 1) or (a5 /= 4 and a2 /= 3)

                                            In addition, 5% class noise was added to the training set


                                            In this dataset, the original train and test sets were merged to allow other sampling procedures. However, the original train-test splits can be found as one of the OpenML tasks.


                                            Attribute information:



                                            • attr1: 1, 2, 3

                                            • attr2: 1, 2, 3

                                            • attr3: 1, 2

                                            • attr4: 1, 2, 3

                                            • attr5: 1, 2, 3, 4

                                            • attr6: 1, 2


                                            Relevant papers


                                            The MONK's Problems - A Performance Comparison of Different Learning Algorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J. Cheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K. Kaufman, S. Keller, I. K", + "description": "https://openml.org \n\n**Author** : Sebastian Thrun (Carnegie Mellon University) \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/MONK's+Problems)\n\\- October 1992 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**The Monk's Problems: Problem 3** \n \nOnce upon a time, in July 1991, the monks of Corsendonk Priory were faced with\na school held in their priory, namely the 2nd European Summer School on\nMachine Learning. After listening more than one week to a wide variety of\nlearning algorithms, they felt rather confused: Which algorithm would be\noptimal? And which one to avoid? As a consequence of this dilemma, they\ncreated a simple task on which all learning algorithms ought to be compared:\nthe three MONK's problems.\n\n \n\nThe target concept associated with the 3rd Monk's problem is the binary\noutcome of the logical formula: \n \nMONK-3: (a5 = 3 and a4 = 1) or (a5 /= 4 and a2 /= 3) \n \nIn addition, 5% class noise was added to the training set\n\n \n\nIn this dataset, the original train and test sets were merged to allow other\nsampling procedures. However, the original train-test splits can be found as\none of the OpenML tasks.\n\n \n\n### Attribute information:\n\n \n\n \n\n * attr1: 1, 2, 3 \n \n\n * attr2: 1, 2, 3 \n \n\n * attr3: 1, 2 \n \n\n * attr4: 1, 2, 3 \n \n\n * attr5: 1, 2, 3, 4 \n \n\n * attr6: 1, 2 \n \n\n \n\n### Relevant papers\n\n \n\nThe MONK's Problems - A Performance Comparison of Different Learning\nAlgorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J.\nCheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K.\nKaufman, S. Keller, I. Kononenko, J. Kreuziger, R.S. Michalski, T. Mitchell,\nP. Pachowicz, Y. Reich H. Vafaie, W. Van de Welde, W. Wenzel, J. Wnek, and J.\nZhang. Technical Report ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3c42202a-5c1f-4ebf-954f-b54ad0fb03e5&revisionId=dd7fa7d1-b185-460a-999e-8e792943ca7e&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1659,7 +1659,7 @@ "platform": "ai4experiments", "platform_identifier": "98", "name": "zoo", - "description": "https://openml.org

                                            Author: Richard S. Forsyth

                                            Source: UCI - 5/15/1990

                                            Please cite:


                                            Zoo database

                                            A simple database containing 17 Boolean-valued attributes describing animals. The \"type\" attribute appears to be the class attribute.


                                            Notes:

                                            * I find it unusual that there are 2 instances of \"frog\" and one of \"girl\"!
                                            * feature 'animal' is an identifier (though not unique) and should be ignored when modeling

                                            ", + "description": "https://openml.org \n\n**Author** : Richard S. Forsyth \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Zoo) \\- 5/15/1990 \n \n**Please cite** :\n\n \n\n**Zoo database** \n \nA simple database containing 17 Boolean-valued attributes describing animals.\nThe \"type\" attribute appears to be the class attribute.\n\n \n\nNotes: \n \n* I find it unusual that there are 2 instances of \"frog\" and one of \"girl\"! \n* feature 'animal' is an identifier (though not unique) and should be ignored when modeling\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=41098614-571a-4c70-b45d-6a7fbcabdcf8&revisionId=eccea8f4-cc22-4962-934f-1dbf3da9f983&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1705,7 +1705,7 @@ "platform": "ai4experiments", "platform_identifier": "100", "name": "breast-w", - "description": "https://openml.org

                                            Author: Dr. William H. Wolberg, University of Wisconsin

                                            Source: UCI, University of Wisconsin - 1995

                                            Please cite: See below, plus UCI


                                            Breast Cancer Wisconsin (Original) Data Set. Features are computed from a digitized image of a fine needle aspirate (FNA) of a breast mass. They describe characteristics of the cell nuclei present in the image. The target feature records the prognosis (malignant or benign). Original data available here


                                            Current dataset was adapted to ARFF format from the UCI version. Sample code ID's were removed.


                                            ! Note that there is also a related Breast Cancer Wisconsin (Diagnosis) Data Set with a different set of features, better known as wdbc.


                                            Relevant Papers


                                            W.N. Street, W.H. Wolberg and O.L. Mangasarian. Nuclear feature extraction for breast tumor diagnosis. IS&T/SPIE 1993 International Symposium on Electronic Imaging: Science and Technology, volume 1905, pages 861-870, San Jose, CA, 1993.


                                            O.L. Mangasarian, W.N. Street and W.H. Wolberg. Breast cancer diagnosis and prognosis via linear programming. Operations Research, 43(4), pages 570-577, July-August 1995.


                                            Citation request


                                            This breast cancer database was obtained from the University of Wisconsin Hospitals, Madison from Dr. William H. Wolberg. If you publish ", + "description": "https://openml.org \n\n**Author** : Dr. William H. Wolberg, University of Wisconsin \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+\\(original\\)),\n[University of Wisconsin](http://pages.cs.wisc.edu/~olvi/uwmp/cancer.html) \\-\n1995 \n \n **Please cite** : See below, plus\n[UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Breast Cancer Wisconsin (Original) Data Set.** Features are computed from a\ndigitized image of a fine needle aspirate (FNA) of a breast mass. They\ndescribe characteristics of the cell nuclei present in the image. The target\nfeature records the prognosis (malignant or benign). [Original data available\nhere](ftp://ftp.cs.wisc.edu/math-prog/cpo-dataset/machine-learn/cancer/)\n\n \n\nCurrent dataset was adapted to ARFF format from the UCI version. Sample code\nID's were removed.\n\n \n\n! Note that there is also a related Breast Cancer Wisconsin (Diagnosis) Data\nSet with a different set of features, better known as\n[wdbc](https://www.openml.org/d/1510).\n\n \n\n### Relevant Papers\n\n \n\nW.N. Street, W.H. Wolberg and O.L. Mangasarian. Nuclear feature extraction for\nbreast tumor diagnosis. IS&T/SPIE 1993 International Symposium on Electronic\nImaging: Science and Technology, volume 1905, pages 861-870, San Jose, CA,\n1993.\n\n \n\nO.L. Mangasarian, W.N. Street and W.H. Wolberg. Breast cancer diagnosis and\nprognosis via linear programming. Operations Research, 43(4), pages 570-577,\nJuly-August 1995.\n\n \n\n### Citation request\n\n \n\nThis breast cancer database was obtained from the University of Wisconsin\nHospitals, Madison from Dr. William H. Wolberg. If you publish results when\nusing this database, then please include this information in your\nacknowledgments. Also, please cite one or more of:\n\n \n\n \n\n 1. \n\nO. L. Mangasa", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=42cec034-786e-4b26-b299-c28e428c7b40&revisionId=3a85905c-0034-4a87-b284-b7eac431cf28&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1751,7 +1751,7 @@ "platform": "ai4experiments", "platform_identifier": "103", "name": "mux6", - "description": "https://openml.org

                                            mux6-pmlb

                                            ", + "description": "https://openml.org \n\nmux6-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=45e6dcba-4163-4613-8443-2333d958b9a5&revisionId=aa8d762f-b679-4687-9d96-33b887a3d39c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1797,7 +1797,7 @@ "platform": "ai4experiments", "platform_identifier": "104", "name": "MyIris", - "description": "https://openml.org

                                            Author:

                                            Source: Unknown - Date unknown

                                            Please cite:


                                            MyExampleIris

                                            ", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\nMyExampleIris\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4787776f-fd13-43cd-afab-eb863338f6e5&revisionId=9c95ba8a-2f03-41a4-8499-6421229acc9a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1843,7 +1843,7 @@ "platform": "ai4experiments", "platform_identifier": "123", "name": "steel-plates-fault", - "description": "https://openml.org

                                            Author: Semeion, Research Center of Sciences of Communication, Rome, Italy.

                                            Source: UCI

                                            Please cite: Dataset provided by Semeion, Research Center of Sciences of Communication, Via Sersale 117, 00128, Rome, Italy.


                                            Steel Plates Faults Data Set

                                            A dataset of steel plates' faults, classified into 7 different types. The goal was to train machine learning for automatic pattern recognition.


                                            The dataset consists of 27 features describing each fault (location, size, ...) and 7 binary features indicating the type of fault (on of 7: Pastry, Z_Scratch, K_Scatch, Stains, Dirtiness, Bumps, Other_Faults). The latter is commonly used as a binary classification target ('common' or 'other' fault.)


                                            Attribute Information



                                            • V1: X_Minimum

                                            • V2: X_Maximum

                                            • V3: Y_Minimum

                                            • V4: Y_Maximum

                                            • V5: Pixels_Areas

                                            • V6: X_Perimeter

                                            • V7: Y_Perimeter

                                            • V8: Sum_of_Luminosity

                                            • V9: Minimum_of_Luminosity

                                            • V10: Maximum_of_Luminosity

                                            • V11: Length_of_Conveyer

                                            • V12: TypeOfSteel_A300

                                            • V13: TypeOfSteel_A400

                                            • V14: Steel_Plate_Thickness

                                            • V15: Edges_Index

                                            • V16: Empty_Index

                                            • V17: Square_Index

                                            • V18: Outside_X_Index

                                            • V19: Edges_X_Index

                                            • V20: Edges_Y_Index

                                            • V21: Outside_Global_Index

                                            • V22: LogOfAreas

                                            • V23: Log_X_Index

                                            • V24: Log_Y_Index

                                            • V25: Orientation_Index

                                            • V", + "description": "https://openml.org \n\n**Author** : Semeion, Research Center of Sciences of Communication, Rome,\nItaly. \n \n**Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/steel+plates+faults) \n \n**Please cite** : Dataset provided by Semeion, Research Center of Sciences of\nCommunication, Via Sersale 117, 00128, Rome, Italy.\n\n \n\n**Steel Plates Faults Data Set** \n \nA dataset of steel plates' faults, classified into 7 different types. The goal\nwas to train machine learning for automatic pattern recognition.\n\n \n\nThe dataset consists of 27 features describing each fault (location, size,\n...) and 7 binary features indicating the type of fault (on of 7: Pastry,\nZ_Scratch, K_Scatch, Stains, Dirtiness, Bumps, Other_Faults). The latter is\ncommonly used as a binary classification target ('common' or 'other' fault.)\n\n \n\n### Attribute Information\n\n \n\n \n\n * V1: X_Minimum \n \n\n * V2: X_Maximum \n \n\n * V3: Y_Minimum \n \n\n * V4: Y_Maximum \n \n\n * V5: Pixels_Areas \n \n\n * V6: X_Perimeter \n \n\n * V7: Y_Perimeter \n \n\n * V8: Sum_of_Luminosity \n \n\n * V9: Minimum_of_Luminosity \n \n\n * V10: Maximum_of_Luminosity \n \n\n * V11: Length_of_Conveyer \n \n\n * V12: TypeOfSteel_A300 \n \n\n * V13: TypeOfSteel_A400 \n \n\n * V14: Steel_Plate_Thickness \n \n\n * V15: Edges_Index \n \n\n * V16: Empty_Index \n \n\n * V17: Square_Index \n \n\n * V18: Outside_X_Index \n \n\n * V19: Edges_X_Index \n \n\n * V20: Edges_Y_Index \n \n\n * V21: Outside_Global_Index \n \n\n * V22: LogOfAreas \n \n\n * V23: Log_X_Index \n \n\n * V24: Log_Y_Index \n \n\n * V25: Orientation_Index \n \n\n * V26: Luminosity_Index \n \n\n * V27: SigmoidOfAreas \n \n\n * V28: Pastry \n \n\n * V29: Z_Scratch \n \n\n * V30: K_Scatch \n \n\n * V31: Stains \n \n\n * V32: Dirtiness \n \n\n * V33: Bumps \n \n\n * Class: Other_Faults \n \n\n \n\n### Rel", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5636ef7b-50d6-49e1-8e78-5b68f24274c5&revisionId=731152cd-a431-4c78-9e65-02f74b6c5c0a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1889,7 +1889,7 @@ "platform": "ai4experiments", "platform_identifier": "124", "name": "ai4eu-robotics-pump-1024-raw-broker", - "description": "

                                              The robotic pump demonstrator represents a hydraulic pump that can be mounted on an industrial robot, for example, to pump liquid paint for spray painting. On this pump, one accelerometer is mounted for vibration monitoring and recording.

                                              The pump can be controlled in terms of speed (rotations per minute, rpm), affecting the throughput of paint and the pressure in and out of the pump.

                                              The dataset consists of 380 million measurements of several sensor data of the pump system in 1-second intervals over two months in 2020.

                                              The complete dataset & documentation is available on Zenodo.

                                              ", + "description": "The robotic pump demonstrator represents a hydraulic pump that can be mounted\non an industrial robot, for example, to pump liquid paint for spray painting.\nOn this pump, one accelerometer is mounted for vibration monitoring and\nrecording.\n\nThe pump can be controlled in terms of speed (rotations per minute, rpm),\naffecting the throughput of paint and the pressure in and out of the pump.\n\nThe dataset consists of 380 million measurements of several sensor data of the\npump system in 1-second intervals over two months in 2020.\n\n[The complete dataset & documentation is available on\nZenodo.](https://zenodo.org/record/5729187)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=57617093-3530-44fc-a72e-b5f6f83630cd&revisionId=a25721c1-88bf-4146-b219-3a4db5c00059&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1935,7 +1935,7 @@ "platform": "ai4experiments", "platform_identifier": "126", "name": "irish", - "description": "https://openml.org

                                              Author: Vincent Greaney, Thomas Kelleghan (St. Patrick's College, Dublin)

                                              Source: StatLib - 1984

                                              Please cite: StatLib


                                              Irish Educational Transitions Data

                                              Data on educational transitions for a sample of 500 Irish schoolchildren aged 11 in 1967. The data were collected by Greaney and Kelleghan (1984), and reanalyzed by Raftery and Hout (1985, 1993).


                                              Attribute information



                                              • Sex: 1=male; 2=female.

                                              • DVRT (Drumcondra Verbal Reasoning Test Score).

                                              • Educational level attained

                                              • Leaving Certificate. 1 if Leaving Certificate not taken; 2 if taken.

                                              • Prestige score for father's occupation (calculated by Raftery and Hout, 1985).

                                              • Type of school: 1=secondary; 2=vocational; 9=primary terminal leaver.


                                              Relevant papers


                                              Greaney, V. and Kelleghan, T. (1984). Equality of Opportunity in Irish
                                              Schools. Dublin: Educational Company.


                                              Kass, R.E. and Raftery, A.E. (1993). Bayes factors and model uncertainty.
                                              Technical Report no. 254, Department of Statistics, University of Washington.
                                              Revised version to appear in Journal of the American Statistical
                                              Association.


                                              Raftery, A.E. (1988). Approximate Bayes factors for generalized linear models.
                                              Technical Report no. 121, Department of Statistics, University of Washington.


                                              Raftery, A.E. and Hout, M. (1985). Does Irish education approach the
                                              meritocratic ideal? A logistic analysis.
                                              Economic and Social Review, 16, 115-140.


                                              Raftery, A.E. and Hout, M. (1", + "description": "https://openml.org \n\n**Author** : Vincent Greaney, Thomas Kelleghan (St. Patrick's College, Dublin) \n \n**Source** : [StatLib](http://lib.stat.cmu.edu/datasets/irish.ed) \\- 1984 \n \n **Please cite** : [StatLib](http://lib.stat.cmu.edu/datasets/)\n\n \n\n **Irish Educational Transitions Data** \n \nData on educational transitions for a sample of 500 Irish schoolchildren aged\n11 in 1967. The data were collected by Greaney and Kelleghan (1984), and\nreanalyzed by Raftery and Hout (1985, 1993).\n\n \n\n### Attribute information\n\n \n\n \n\n * Sex: 1=male; 2=female.\n \n\n * DVRT (Drumcondra Verbal Reasoning Test Score).\n \n\n * Educational level attained\n \n\n * Leaving Certificate. 1 if Leaving Certificate not taken; 2 if taken.\n \n\n * Prestige score for father's occupation (calculated by Raftery and Hout, 1985).\n \n\n * Type of school: 1=secondary; 2=vocational; 9=primary terminal leaver.\n \n\n \n\n### Relevant papers\n\n \n\nGreaney, V. and Kelleghan, T. (1984). Equality of Opportunity in Irish \nSchools. Dublin: Educational Company.\n\n \n\nKass, R.E. and Raftery, A.E. (1993). Bayes factors and model uncertainty. \nTechnical Report no. 254, Department of Statistics, University of Washington. \nRevised version to appear in Journal of the American Statistical \nAssociation.\n\n \n\nRaftery, A.E. (1988). Approximate Bayes factors for generalized linear models. \nTechnical Report no. 121, Department of Statistics, University of Washington.\n\n \n\nRaftery, A.E. and Hout, M. (1985). Does Irish education approach the \nmeritocratic ideal? A logistic analysis. \nEconomic and Social Review, 16, 115-140.\n\n \n\nRaftery, A.E. and Hout, M. (1993). Maximally maintained inequality: \nExpansion, reform and opportunity in Irish schools. \nSociology of Education, 66, 41-62.\n\n \n\n### Ownership Statement\n\n ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5a43bbed-a905-4af3-840b-eec565f2165b&revisionId=920c28eb-e743-4ef1-9606-04b382db90c5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1981,7 +1981,7 @@ "platform": "ai4experiments", "platform_identifier": "128", "name": "meta", - "description": "https://openml.org

                                              Author:

                                              Source: Unknown - Date unknown

                                              Please cite:




                                              1. Title: meta-data




                                              2. Sources:
                                                (a) Creator:
                                                LIACC - University of Porto
                                                R.Campo Alegre 823
                                                4150 PORTO
                                                (b) Donor: P.B.Brazdil or J.Gama Tel.: +351 600 1672
                                                LIACC, University of Porto Fax.: +351 600 3654
                                                Rua Campo Alegre 823 Email: statlog-adm@ncc.up.pt
                                                4150 Porto, Portugal
                                                (c) Date: March, 1996




                                              (d) Acknowlegements:
                                              LIACC wishes to thank Commission of European Communities
                                              for their support. Also, we wish to thank the following partners
                                              for providing the individual test results:



                                              • Dept. of Statistics, University of Strathclyde, Glasgow, UK

                                              • Dept. of Statistics, University of Leeds, UK

                                              • Aston University, Birmingham, UK

                                              • Forschungszentrum Ulm, Daimler-Benz AG, Germany

                                              • Brainware GmbH, Berlin, Germany

                                              • Frauenhofer Gesellschaft IITB-EPO, Berlin, Germany

                                              • Institut fuer Kybernetik, Bochum, Germany

                                              • ISoft, Gif sur Yvette, France


                                              • Dept. of CS and AI, University of Granada, Spain




                                              • Past Usage:




                                              Meta-Data was used in order to give advice about which classification
                                              method is appropriate for a particular dataset.
                                              This work is described in:


                                              -\"Machine Learning, Neural and Statistical Learning\"
                                              Eds. D.Michie,D.J.Spiegelhalter and C.Taylor
                                              Ellis Horwood-1994



                                              • \"Characterizing the Applicability of
                                                Classification Algorithms Using Meta-Level Learning\",
                                                P. Brazdil, J.Gama and B.Hen", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\n \n\n 1. \n\nTitle: meta-data\n\n \n\n \n\n 2. \n\nSources: \n(a) Creator: \nLIACC - University of Porto \nR.Campo Alegre 823 \n4150 PORTO \n(b) Donor: P.B.Brazdil or J.Gama Tel.: +351 600 1672 \nLIACC, University of Porto Fax.: +351 600 3654 \nRua Campo Alegre 823 Email: statlog-adm@ncc.up.pt \n4150 Porto, Portugal \n(c) Date: March, 1996\n\n \n\n \n\n \n\n(d) Acknowlegements: \nLIACC wishes to thank Commission of European Communities \nfor their support. Also, we wish to thank the following partners \nfor providing the individual test results:\n\n \n\n \n\n * Dept. of Statistics, University of Strathclyde, Glasgow, UK\n \n\n * Dept. of Statistics, University of Leeds, UK\n \n\n * Aston University, Birmingham, UK\n \n\n * Forschungszentrum Ulm, Daimler-Benz AG, Germany\n \n\n * Brainware GmbH, Berlin, Germany\n \n\n * Frauenhofer Gesellschaft IITB-EPO, Berlin, Germany\n \n\n * Institut fuer Kybernetik, Bochum, Germany\n \n\n * ISoft, Gif sur Yvette, France\n \n\n * \n\nDept. of CS and AI, University of Granada, Spain\n\n \n\n \n\n * \n\nPast Usage:\n\n \n\n \n\n \n\nMeta-Data was used in order to give advice about which classification \nmethod is appropriate for a particular dataset. \nThis work is described in:\n\n \n\n-\"Machine Learning, Neural and Statistical Learning\" \nEds. D.Michie,D.J.Spiegelhalter and C.Taylor \nEllis Horwood-1994\n\n \n\n \n\n * \"Characterizing the Applicability of \nClassification Algorithms Using Meta-Level Learning\", \nP. Brazdil, J.Gama and B.Henery: \nin Proc. of Machine Learning - ECML-94, \ned. F.Bergadano and L.de Raedt,LNAI Vol.784 Springer-Verlag.\n\n \n\n \n\n-\"Characterization of Classification Algorithms\" \nJ.Gama, P.Brazdil \nin Proc. of EPIA 95, LNAI Vol.990 \n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5bdd6ed1-601e-482b-904e-886921963a2d&revisionId=eb64f31c-2e72-4bd9-a60d-0598b8e83b33&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2027,7 +2027,7 @@ "platform": "ai4experiments", "platform_identifier": "130", "name": "glass", - "description": "https://openml.org

                                                Author:

                                                Source: Unknown -

                                                Please cite:




                                                1. Title: Glass Identification Database




                                                2. Sources:
                                                  (a) Creator: B. German
                                                  -- Central Research Establishment
                                                  Home Office Forensic Science Service
                                                  Aldermaston, Reading, Berkshire RG7 4PN
                                                  (b) Donor: Vina Spiehler, Ph.D., DABFT
                                                  Diagnostic Products Corporation
                                                  (213) 776-0180 (ext 3014)
                                                  (c) Date: September, 1987




                                                3. Past Usage:
                                                  -- Rule Induction in Forensic Science
                                                  -- Ian W. Evett and Ernest J. Spiehler
                                                  -- Central Research Establishment
                                                  Home Office Forensic Science Service
                                                  Aldermaston, Reading, Berkshire RG7 4PN
                                                  -- Unknown technical note number (sorry, not listed here)
                                                  -- General Results: nearest neighbor held its own with respect to the
                                                  rule-based system




                                                4. Relevant Information:n
                                                  Vina conducted a comparison test of her rule-based system, BEAGLE, the
                                                  nearest-neighbor algorithm, and discriminant analysis. BEAGLE is
                                                  a product available through VRS Consulting, Inc.; 4676 Admiralty Way,
                                                  Suite 206; Marina Del Ray, CA 90292 (213) 827-7890 and FAX: -3189.
                                                  In determining whether the glass was a type of \"float\" glass or not,
                                                  the following results were obtained (# incorrect answers):


                                                        Type of Sample                            Beagle   NN    DA
                                                  Windows that were float processed (87) 10 12 21
                                                  Windows that were not: (76) 19 16 ", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - \n \n**Please cite** :\n\n \n\n \n\n 1. \n\nTitle: Glass Identification Database\n\n \n\n \n\n 2. \n\nSources: \n(a) Creator: B. German \n\\-- Central Research Establishment \nHome Office Forensic Science Service \nAldermaston, Reading, Berkshire RG7 4PN \n(b) Donor: Vina Spiehler, Ph.D., DABFT \nDiagnostic Products Corporation \n(213) 776-0180 (ext 3014) \n(c) Date: September, 1987\n\n \n\n \n\n 3. \n\nPast Usage: \n\\-- Rule Induction in Forensic Science \n\\-- Ian W. Evett and Ernest J. Spiehler \n\\-- Central Research Establishment \nHome Office Forensic Science Service \nAldermaston, Reading, Berkshire RG7 4PN \n\\-- Unknown technical note number (sorry, not listed here) \n\\-- General Results: nearest neighbor held its own with respect to the \nrule-based system\n\n \n\n \n\n 4. \n\nRelevant Information:n \nVina conducted a comparison test of her rule-based system, BEAGLE, the \nnearest-neighbor algorithm, and discriminant analysis. BEAGLE is \na product available through VRS Consulting, Inc.; 4676 Admiralty Way, \nSuite 206; Marina Del Ray, CA 90292 (213) 827-7890 and FAX: -3189. \nIn determining whether the glass was a type of \"float\" glass or not, \nthe following results were obtained (# incorrect answers):\n\n \n\n \n Type of Sample Beagle NN DA \n Windows that were float processed (87) 10 12 21 \n Windows that were not: (76) 19 16 22 \n \n\n \n\nThe study of classification of types of glass was motivated by \ncriminological investigation. At the scene of the crime, the glass left \ncan be used as evidence...if it is correctly identified!\n\n \n\n \n\n 5. \n\nNumber of Instances: 214\n\n \n\n \n\n 6. \n\nNumber of Attributes:", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5c5599f7-73c7-4874-ace1-4c6e312409c4&revisionId=64523754-bb18-406c-827d-4fe090d0e5e6&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2073,7 +2073,7 @@ "platform": "ai4experiments", "platform_identifier": "131", "name": "wall-robot-navigation", - "description": "https://openml.org

                                                  Author: Ananda Freire, Marcus Veloso and Guilherme Barreto

                                                  Source: UCI - 2010

                                                  Please cite: UCI


                                                  Wall-Following Robot Navigation Data Data Set

                                                  The data were collected as the SCITOS G5 robot navigates through the room following the wall in a clockwise direction, for 4 rounds, using 24 ultrasound sensors arranged circularly around its 'waist'.


                                                  The data consists of raw values of the measurements of all 24 ultrasound sensors and the corresponding class label. Sensor readings are sampled at a rate of 9 samples per second.


                                                  The class labels are:

                                                  1. Move-Forward,

                                                  2. Slight-Right-Turn,

                                                  3. Sharp-Right-Turn,

                                                  4. Slight-Left-Turn


                                                  It is worth mentioning that the 24 ultrasound readings and the simplified distances were collected at the same time step, so each file has the same number of rows (one for each sampling time step).


                                                  The wall-following task and data gathering were designed to test the hypothesis that this apparently simple navigation task is indeed a non-linearly separable classification task. Thus, linear classifiers, such as the Perceptron network, are not able to learn the task and command the robot around the room without collisions. Nonlinear neural classifiers, such as the MLP network, are able to learn the task and command the robot successfully without collisions.


                                                  Attribute Information:



                                                  1. US1: ultrasound sensor at the front of the robot (reference angle: 180\u00b0)

                                                  2. US2", + "description": "https://openml.org \n\n**Author** : Ananda Freire, Marcus Veloso and Guilherme Barreto \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Wall-\nFollowing+Robot+Navigation+Data) \\- 2010 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n **Wall-Following Robot Navigation Data Data Set** \n \nThe data were collected as the SCITOS G5 robot navigates through the room\nfollowing the wall in a clockwise direction, for 4 rounds, using 24 ultrasound\nsensors arranged circularly around its 'waist'.\n\n \n\nThe data consists of raw values of the measurements of all 24 ultrasound\nsensors and the corresponding class label. Sensor readings are sampled at a\nrate of 9 samples per second.\n\n \n\nThe class labels are: \n \n1\\. Move-Forward, \n \n2\\. Slight-Right-Turn, \n \n3\\. Sharp-Right-Turn, \n \n4\\. Slight-Left-Turn\n\n \n\nIt is worth mentioning that the 24 ultrasound readings and the simplified\ndistances were collected at the same time step, so each file has the same\nnumber of rows (one for each sampling time step).\n\n \n\nThe wall-following task and data gathering were designed to test the\nhypothesis that this apparently simple navigation task is indeed a non-\nlinearly separable classification task. Thus, linear classifiers, such as the\nPerceptron network, are not able to learn the task and command the robot\naround the room without collisions. Nonlinear neural classifiers, such as the\nMLP network, are able to learn the task and command the robot successfully\nwithout collisions.\n\n \n\n### Attribute Information:\n\n \n\n \n\n 1. US1: ultrasound sensor at the front of the robot (reference angle: 180\u00b0) \n \n\n 2. US2: ultrasound reading (reference angle: -165\u00b0)\n \n\n 3. US3: ultrasound reading (reference angle: -150\u00b0)\n \n\n 4. US4: ultrasound reading ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5d6161e5-1cbb-46fc-a005-85607fd7caea&revisionId=7df9f5eb-70a2-4480-901f-7a2f2783520a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2119,7 +2119,7 @@ "platform": "ai4experiments", "platform_identifier": "132", "name": "cleve", - "description": "https://openml.org

                                                    cleve-pmlb

                                                    ", + "description": "https://openml.org \n\ncleve-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5d938cc4-8cff-4e09-80cf-d8b08461d9c4&revisionId=ef0a6892-61d1-4ef8-9d98-3f29b71c15bf&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2165,7 +2165,7 @@ "platform": "ai4experiments", "platform_identifier": "133", "name": "GAMETES_Epistasis_3-Way_20atts_0.2H_EDM-1_1", - "description": "https://openml.org

                                                    GAMETES_Epistasis_3-Way_20atts_0.2H_EDM-1_1-pmlb

                                                    ", + "description": "https://openml.org \n\nGAMETES_Epistasis_3-Way_20atts_0.2H_EDM-1_1-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5e840f29-a572-47c2-acdf-c1b8c0b4b8b7&revisionId=3e1cfa13-826c-4672-852e-438ec491a045&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2211,7 +2211,7 @@ "platform": "ai4experiments", "platform_identifier": "136", "name": "monks-problems-1", - "description": "https://openml.org

                                                    Author: Sebastian Thrun (Carnegie Mellon University)

                                                    Source: UCI - October 1992

                                                    Please cite: UCI


                                                    The Monk's Problems: Problem 1

                                                    Once upon a time, in July 1991, the monks of Corsendonk Priory were faced with a school held in their priory, namely the 2nd European Summer School on Machine Learning. After listening more than one week to a wide variety of learning algorithms, they felt rather confused: Which algorithm would be optimal? And which one to avoid? As a consequence of this dilemma, they created a simple task on which all learning algorithms ought to be compared: the three MONK's problems.


                                                    The target concept associated with the 1st Monk's problem is the binary outcome of the logical formula:

                                                    MONK-1: (a1 == a2) or (a5 == 1)


                                                    In this dataset, the original train and test sets were merged to allow other sampling procedures. However, the original train-test splits can be found as one of the OpenML tasks.


                                                    Attribute information:



                                                    • attr1: 1, 2, 3

                                                    • attr2: 1, 2, 3

                                                    • attr3: 1, 2

                                                    • attr4: 1, 2, 3

                                                    • attr5: 1, 2, 3, 4

                                                    • attr6: 1, 2


                                                    Relevant papers


                                                    The MONK's Problems - A Performance Comparison of Different Learning Algorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J. Cheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K. Kaufman, S. Keller, I. Kononenko, J. Kreuziger, R.S. Michalski, T. Mitchell, P. Pachowicz, Y. Reich H. Vafaie, W", + "description": "https://openml.org \n\n**Author** : Sebastian Thrun (Carnegie Mellon University) \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/MONK's+Problems)\n\\- October 1992 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**The Monk's Problems: Problem 1** \n \nOnce upon a time, in July 1991, the monks of Corsendonk Priory were faced with\na school held in their priory, namely the 2nd European Summer School on\nMachine Learning. After listening more than one week to a wide variety of\nlearning algorithms, they felt rather confused: Which algorithm would be\noptimal? And which one to avoid? As a consequence of this dilemma, they\ncreated a simple task on which all learning algorithms ought to be compared:\nthe three MONK's problems.\n\n \n\nThe target concept associated with the 1st Monk's problem is the binary\noutcome of the logical formula: \n \nMONK-1: (a1 == a2) or (a5 == 1)\n\n \n\nIn this dataset, the original train and test sets were merged to allow other\nsampling procedures. However, the original train-test splits can be found as\none of the OpenML tasks.\n\n \n\n### Attribute information:\n\n \n\n \n\n * attr1: 1, 2, 3 \n \n\n * attr2: 1, 2, 3 \n \n\n * attr3: 1, 2 \n \n\n * attr4: 1, 2, 3 \n \n\n * attr5: 1, 2, 3, 4 \n \n\n * attr6: 1, 2 \n \n\n \n\n### Relevant papers\n\n \n\nThe MONK's Problems - A Performance Comparison of Different Learning\nAlgorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J.\nCheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K.\nKaufman, S. Keller, I. Kononenko, J. Kreuziger, R.S. Michalski, T. Mitchell,\nP. Pachowicz, Y. Reich H. Vafaie, W. Van de Welde, W. Wenzel, J. Wnek, and J.\nZhang. Technical Report CS-CMU-91-197, Carnegie Mellon University, Dec. 1991.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6265676d-b001-4bd6-949c-05b7db6affae&revisionId=1375bd8f-18ca-4971-9a7b-c7dcb7a27c0c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2257,7 +2257,7 @@ "platform": "ai4experiments", "platform_identifier": "138", "name": "hayes-roth", - "description": "https://openml.org

                                                    Author: Barbara and Frederick Hayes-Roth


                                                    Source: original -

                                                    Please cite:


                                                    Hayes-Roth Database


                                                    This is a merged version of the separate train and test set which are usually distributed. On OpenML this train-test split can be found as one of the possible tasks.


                                                    Source Information:
                                                    (a) Creators: Barbara and Frederick Hayes-Roth
                                                    (b) Donor: David W. Aha (aha@ics.uci.edu) (714) 856-8779

                                                    (c) Date: March, 1989


                                                    Attribute Information:
                                                    -- 1. name: distinct for each instance and represented numerically
                                                    -- 2. hobby: nominal values ranging between 1 and 3
                                                    -- 3. age: nominal values ranging between 1 and 4
                                                    -- 4. educational level: nominal values ranging between 1 and 4
                                                    -- 5. marital status: nominal values ranging between 1 and 4
                                                    -- 6. class: nominal value between 1 and 3


                                                    Detailed description of the experiment:
                                                    1. 3 categories (1, 2, and neither -- which I call 3)
                                                    -- some of the instances could be classified in either class 1 or 2, and they have been evenly distributed between the two classes
                                                    2. 5 Attributes
                                                    -- A. name (a randomly-generated number between 1 and 132)
                                                    -- B. hobby (a randomly-generated number between 1 and 3)
                                                    -- C. age (a number between 1 and 4)
                                                    -- D. education level (a number between 1 and 4)
                                                    -- E. marital status (a number between 1 and 4)
                                                    3. Classification:

                                                    -- only attributes C-E are diagnostic; values for A and B are ignored
                                                    -- Class Neither: if a 4 occurs for any attribute C-E
                                                    -- Class 1: Otherwise, if (# of 1's)>(# of 2's) for attributes C-E
                                                    -- Class 2", + "description": "https://openml.org \n\n**Author** : Barbara and Frederick Hayes-Roth\n\n \n\n**Source** : [original](https://archive.ics.uci.edu/ml/datasets/Hayes-Roth) \\- \n \n**Please cite** :\n\n \n\nHayes-Roth Database\n\n \n\nThis is a merged version of the separate train and test set which are usually\ndistributed. On OpenML this train-test split can be found as one of the\npossible tasks.\n\n \n\nSource Information: \n(a) Creators: Barbara and Frederick Hayes-Roth \n(b) Donor: David W. Aha (aha@ics.uci.edu) (714) 856-8779 \n \n(c) Date: March, 1989\n\n \n\nAttribute Information: \n\\-- 1. name: distinct for each instance and represented numerically \n\\-- 2. hobby: nominal values ranging between 1 and 3 \n\\-- 3. age: nominal values ranging between 1 and 4 \n\\-- 4. educational level: nominal values ranging between 1 and 4 \n\\-- 5. marital status: nominal values ranging between 1 and 4 \n\\-- 6. class: nominal value between 1 and 3\n\n \n\nDetailed description of the experiment: \n1\\. 3 categories (1, 2, and neither -- which I call 3) \n\\-- some of the instances could be classified in either class 1 or 2, and they\nhave been evenly distributed between the two classes \n2\\. 5 Attributes \n\\-- A. name (a randomly-generated number between 1 and 132) \n\\-- B. hobby (a randomly-generated number between 1 and 3) \n\\-- C. age (a number between 1 and 4) \n\\-- D. education level (a number between 1 and 4) \n\\-- E. marital status (a number between 1 and 4) \n3\\. Classification: \n \n\\-- only attributes C-E are diagnostic; values for A and B are ignored \n\\-- Class Neither: if a 4 occurs for any attribute C-E \n\\-- Class 1: Otherwise, if (# of 1's)>(# of 2's) for attributes C-E \n\\-- Class 2: Otherwise, if (# of 2's)>(# of 1's) for attributes C-E \n\\-- Either 1 or 2: Otherwise, if (# of 2's)=(# of 1's) for attribut", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=62f47eea-85d2-44a6-a3b2-cd3ac3d2ff32&revisionId=6df90024-afec-494b-b59e-724b350d5eab&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2303,7 +2303,7 @@ "platform": "ai4experiments", "platform_identifier": "148", "name": "ai4eu-robotics-wrist-1024-raw-broker", - "description": "

                                                    The robotic wrist demonstrator represents a mechanical wrist with three axes that can hold tools, e.g. for spray painting in combination with a pump. On this robotic wrist, two accelerometers are mounted for vibration monitoring and recording: one in the movable front part of the wrist and one in the shaft. The wrist can be controlled through the torque or the designated position of each axis\u2019 motor.

                                                    The dataset consists of 1.8 billion measurements of several sensor data of the robotic wrist in 1-second intervals over six months in 2020.

                                                    The complete dataset & description is available on Zenodo

                                                    ", + "description": "The robotic wrist demonstrator represents a mechanical wrist with three axes\nthat can hold tools, e.g. for spray painting in combination with a pump. On\nthis robotic wrist, two accelerometers are mounted for vibration monitoring\nand recording: one in the movable front part of the wrist and one in the\nshaft. The wrist can be controlled through the torque or the designated\nposition of each axis\u2019 motor.\n\nThe dataset consists of 1.8 billion measurements of several sensor data of the\nrobotic wrist in 1-second intervals over six months in 2020.\n\n[The complete dataset & description is available on\nZenodo](https://zenodo.org/record/5729818)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6819ff36-f57d-459d-a5f7-11e1e8e096fe&revisionId=6400e2d0-ed8f-48fd-8aab-50504461c72b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2349,7 +2349,7 @@ "platform": "ai4experiments", "platform_identifier": "152", "name": "allbp", - "description": "https://openml.org

                                                    allbp-pmlb

                                                    ", + "description": "https://openml.org \n\nallbp-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6a6f5d38-3775-485d-a6d6-1b90952daee9&revisionId=35d8f990-459e-41b0-918c-07895c554e3d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2395,7 +2395,7 @@ "platform": "ai4experiments", "platform_identifier": "153", "name": "xd6", - "description": "https://openml.org

                                                    Author: Unknown

                                                    Source: PMLB - Supposedly originates from UCI, but can't find it there anymore.

                                                    Please cite:


                                                    XD6 Dataset
                                                    Dataset used by Buntine and Niblett (1992). Composed of 10 features, one of which is irrelevant. The target is a disjunctive normal form formula over the nine other attributes, with additional classification noise.


                                                    More info.

                                                    ", + "description": "https://openml.org \n\n**Author** : Unknown \n \n **Source** : [PMLB](https://github.com/EpistasisLab/penn-ml-\nbenchmarks/tree/master/datasets/classification) \\- Supposedly originates from\nUCI, but can't find it there anymore. \n \n **Please cite:**\n\n \n\n**XD6 Dataset** \nDataset used by Buntine and Niblett (1992). Composed of 10 features, one of\nwhich is irrelevant. The target is a disjunctive normal form formula over the\nnine other attributes, with additional classification noise.\n\n \n\n[More\ninfo](https://books.google.be/books?id=W2bmBwAAQBAJ&pg=PA313&lpg=PA313&dq=dataset+xd6&source=bl&ots=6hYPdz8_Nl&sig=TR1ieOg9D1pCrvNyeKbb-3eKmd8&hl=en&sa=X&ved=0ahUKEwj_tZ_MxozZAhVHa1AKHZVEBBsQ6AEIQjAF#v=onepage&q=dataset\nxd6&f=false).\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6af5c9cf-73bf-406b-a250-5bbf7d0e5e47&revisionId=c3c334c0-d744-4b9a-96aa-d4333c5d3e8a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2441,7 +2441,7 @@ "platform": "ai4experiments", "platform_identifier": "163", "name": "ucrsuite-config", - "description": "

                                                    Data broker for subsequence search in time series

                                                    This data broker offers a web interface for uploading files and setting search parameters. It saves the uploaded files on a shared volume and sends the corresponding paths to the next model in the pipeline. It was created to be used with ucrsuite-dtw and ucrsuite-ed models, and supports data and query files in txt format.

                                                    ", + "description": "Data broker for subsequence search in time series\n\nThis data broker offers a web interface for uploading files and setting search\nparameters. It saves the uploaded files on a shared volume and sends the\ncorresponding paths to the next model in the pipeline. It was created to be\nused with ucrsuite-dtw and ucrsuite-ed models, and supports data and query\nfiles in txt format.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6e64762b-97e0-4278-8dad-c9d1513fabb4&revisionId=e41459ef-3143-4ead-a1c0-907b136f6e9a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2487,7 +2487,7 @@ "platform": "ai4experiments", "platform_identifier": "164", "name": "iris", - "description": "https://openml.org

                                                    Author: R.A. Fisher

                                                    Source: UCI - 1936 - Donated by Michael Marshall

                                                    Please cite:


                                                    Iris Plants Database

                                                    This is perhaps the best known database to be found in the pattern recognition literature. Fisher's paper is a classic in the field and is referenced frequently to this day. (See Duda & Hart, for example.) The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. One class is linearly separable from the other 2; the latter are NOT linearly separable from each other.


                                                    Predicted attribute: class of iris plant.

                                                    This is an exceedingly simple domain.


                                                    Attribute Information:


                                                    1. sepal length in cm
                                                    2. sepal width in cm
                                                    3. petal length in cm
                                                    4. petal width in cm
                                                    5. class:
                                                    -- Iris Setosa
                                                    -- Iris Versicolour
                                                    -- Iris Virginica
                                                    ", + "description": "https://openml.org \n\n**Author** : R.A. Fisher \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Iris) \\- 1936 -\nDonated by Michael Marshall \n \n **Please cite** :\n\n \n\n**Iris Plants Database** \n \nThis is perhaps the best known database to be found in the pattern recognition\nliterature. Fisher's paper is a classic in the field and is referenced\nfrequently to this day. (See Duda & Hart, for example.) The data set contains\n3 classes of 50 instances each, where each class refers to a type of iris\nplant. One class is linearly separable from the other 2; the latter are NOT\nlinearly separable from each other.\n\n \n\nPredicted attribute: class of iris plant. \n \nThis is an exceedingly simple domain.\n\n \n\n### Attribute Information:\n\n \n\n \n \n 1. sepal length in cm \n 2. sepal width in cm \n 3. petal length in cm \n 4. petal width in cm \n 5. class: \n -- Iris Setosa \n -- Iris Versicolour \n -- Iris Virginica \n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6e9c598d-8928-437b-9013-d698f3321a37&revisionId=d3cee283-9ba0-40c2-b502-aa7ab4871ecf&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2533,7 +2533,7 @@ "platform": "ai4experiments", "platform_identifier": "168", "name": "cars1", - "description": "https://openml.org

                                                    cars1-pmlb

                                                    ", + "description": "https://openml.org \n\ncars1-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6feea5d8-c6fb-40d9-99bb-c8ad4ad4d242&revisionId=2699c172-24e4-4d32-aca3-2f74eb6dc968&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2579,7 +2579,7 @@ "platform": "ai4experiments", "platform_identifier": "172", "name": "ner-databroker", - "description": "

                                                    This is the databroker component of the NER pipeline.

                                                    Through the Web UI of the ner-databroker, you can provide the text to be received as an input for the entity recognizer. The language of the text should be German, since the NER model is trained on German data. More than one sentence can be given as input.

                                                    Make sure to run ner-pipeline, instead of ner-databroker as a standalone component. As ner-pipeline is successfully deployed, open the WEB UI and follow the instructions to submit the text.

                                                    ", + "description": "This is the databroker component of the NER pipeline.\n\nThrough the Web UI of the ner-databroker, you can provide the text to be\nreceived as an input for the entity recognizer. The language of the text\nshould be German, since the NER model is trained on German data. More than one\nsentence can be given as input.\n\nMake sure to run ner-pipeline, instead of ner-databroker as a standalone\ncomponent. As ner-pipeline is successfully deployed, open the WEB UI and\nfollow the instructions to submit the text.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=73244125-66e5-4087-9fe8-8229a39944c2&revisionId=e586beb7-322e-4a3e-82a7-b96bbbf49464&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2625,7 +2625,7 @@ "platform": "ai4experiments", "platform_identifier": "173", "name": "corral", - "description": "https://openml.org

                                                    corral-pmlb

                                                    ", + "description": "https://openml.org \n\ncorral-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7337b1db-a3e8-4e24-8ab1-130d86f032c8&revisionId=a9a6ebfb-485b-4678-8f3a-00b27877c492&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2671,7 +2671,7 @@ "platform": "ai4experiments", "platform_identifier": "175", "name": "autoUniv-au7-1100", - "description": "https://openml.org

                                                    Author: Ray. J. Hickey

                                                    Source: UCI

                                                    Please cite:



                                                    • Dataset Title:


                                                    AutoUniv Dataset

                                                    data problem: autoUniv-au7-300-drift-au7-cpd1-800



                                                    • Abstract:


                                                    AutoUniv is an advanced data generator for classifications tasks. The aim is to reflect the nuances and heterogeneity of real data. Data can be generated in .csv, ARFF or C4.5 formats.



                                                    • Source:


                                                    AutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com
                                                    AutoUniv web-site: http://sites.google.com/site/autouniv/.



                                                    • Data Set Information:


                                                    The user first creates a classification model and then generates classified examples from it. To create a model, the following are specified: the number of attributes (up to 1000) and their type (discrete or continuous), the number of classes (up to 10), the complexity of the underlying rules and the noise level. AutoUniv then produces a model through a process of constrained randomised search to satisfy the user's requirements. A model can have up to 3000 rules. Rare class models can be designed. A sequence of models can be designed to reflect concept and/or population drift.


                                                    AutoUniv creates three text files for a model: a Prolog specification of the model used to generate examples (.aupl); a user-friendly statement of the classification rules in an 'if ... then' format (.aurules); a statistical summary of the main properties of the model, including its Bayes rate (.auprops).



                                                    • Attribute Information:


                                                    Attributes may be discrete with up to 10 values or continuous. A dis", + "description": "https://openml.org \n\n**Author** : Ray. J. Hickey \n \n**Source** : UCI \n \n **Please cite** :\n\n \n\n \n\n * Dataset Title: \n \n\n \n\nAutoUniv Dataset \n \ndata problem: autoUniv-au7-300-drift-au7-cpd1-800\n\n \n\n \n\n * Abstract: \n \n\n \n\nAutoUniv is an advanced data generator for classifications tasks. The aim is\nto reflect the nuances and heterogeneity of real data. Data can be generated\nin .csv, ARFF or C4.5 formats.\n\n \n\n \n\n * Source: \n \n\n \n\nAutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com \nAutoUniv web-site: http://sites.google.com/site/autouniv/.\n\n \n\n \n\n * Data Set Information:\n \n\n \n\nThe user first creates a classification model and then generates classified\nexamples from it. To create a model, the following are specified: the number\nof attributes (up to 1000) and their type (discrete or continuous), the number\nof classes (up to 10), the complexity of the underlying rules and the noise\nlevel. AutoUniv then produces a model through a process of constrained\nrandomised search to satisfy the user's requirements. A model can have up to\n3000 rules. Rare class models can be designed. A sequence of models can be\ndesigned to reflect concept and/or population drift.\n\n \n\nAutoUniv creates three text files for a model: a Prolog specification of the\nmodel used to generate examples (.aupl); a user-friendly statement of the\nclassification rules in an 'if ... then' format (.aurules); a statistical\nsummary of the main properties of the model, including its Bayes rate\n(.auprops).\n\n \n\n \n\n * Attribute Information: \n \n\n \n\nAttributes may be discrete with up to 10 values or continuous. A discrete\nattribute can be nominal with values v1, v2, v3 ... or integer with values 0,\n1, 2 , ... .\n\n \n\n \n\n * Relevant Papers:\n \n\n \n\nMarrs, G, Hickey, RJ an", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7422c0f9-0fda-41ab-8bc0-91233a3455e1&revisionId=739ac852-a2b4-45fc-84ca-f93ca4c4d17f&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2717,7 +2717,7 @@ "platform": "ai4experiments", "platform_identifier": "176", "name": "ai4eu-robotics-wrist-1024-fft-broker", - "description": "

                                                    The robotic wrist demonstrator represents a mechanical wrist with three axes that can hold tools, e.g. for spray painting in combination with a pump. On this robotic wrist, two accelerometers are mounted for vibration monitoring and recording: one in the movable front part of the wrist and one in the shaft. The wrist can be controlled through the torque or the designated position of each axis\u2019 motor.

                                                    The dataset consists of 1.8 billion measurements of several sensor data of the robotic wrist in 1-second intervals over six months in 2020.

                                                    The complete dataset & description is available on Zenodo

                                                    ", + "description": "The robotic wrist demonstrator represents a mechanical wrist with three axes\nthat can hold tools, e.g. for spray painting in combination with a pump. On\nthis robotic wrist, two accelerometers are mounted for vibration monitoring\nand recording: one in the movable front part of the wrist and one in the\nshaft. The wrist can be controlled through the torque or the designated\nposition of each axis\u2019 motor.\n\nThe dataset consists of 1.8 billion measurements of several sensor data of the\nrobotic wrist in 1-second intervals over six months in 2020.\n\n[The complete dataset & description is available on\nZenodo](https://zenodo.org/record/5729818)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=74b69064-462f-4176-a8ce-7719638f237a&revisionId=1933cb96-3d47-4700-a73a-09692385ad69&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2763,7 +2763,7 @@ "platform": "ai4experiments", "platform_identifier": "177", "name": "grpc_piezo_hubeau", - "description": "

                                                    Data from the \"Piezometry\" API come from the ADES portal (national portal for access to French groundwater data). They relate to piezometric measurements (water level in groundwater tables), throughout France, from all the partners of the water information system (see metadata).


                                                    The updates are integrated daily into the API.


                                                    Data is expressed

                                                    • in NGF meters for levels (or ratings);
                                                    • in meters in relation to the measurement mark for the depths.


                                                    ", + "description": "Data from the \"Piezometry\" API come from the ADES portal (national portal for\naccess to French groundwater data). They relate to piezometric measurements\n(water level in groundwater tables), throughout France, from all the partners\nof the water information system (see metadata).\n\n \n\nThe updates are integrated daily into the API.\n\n \n\nData is expressed\n\n * in NGF meters for levels (or ratings);\n * in meters in relation to the measurement mark for the depths.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=76fda708-9907-4241-9d35-4d18a406eb35&revisionId=e3ff0320-a93a-4358-b13d-949df627c0b0&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2809,7 +2809,7 @@ "platform": "ai4experiments", "platform_identifier": "180", "name": "teachingAssistant", - "description": "https://openml.org

                                                    Author:

                                                    Source: Unknown - Date unknown

                                                    Please cite:


                                                    Dataset from the MLRR repository: http://axon.cs.byu.edu:5000/

                                                    ", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\nDataset from the MLRR repository: http://axon.cs.byu.edu:5000/\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7bc5051d-f852-4547-a317-e1c510f66332&revisionId=5f2ac1b6-7a8f-4762-9c64-82a14dea66b1&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2855,7 +2855,7 @@ "platform": "ai4experiments", "platform_identifier": "181", "name": "wine-quality-red", - "description": "https://openml.org

                                                    wine-quality-red-pmlb

                                                    ", + "description": "https://openml.org \n\nwine-quality-red-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7c5315b5-ca3c-488c-b235-f7f4d0534b16&revisionId=cecbcde7-4870-4ed3-9bb4-af01655e0c27&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2901,7 +2901,7 @@ "platform": "ai4experiments", "platform_identifier": "183", "name": "diabetes", - "description": "https://openml.org

                                                    Author: Vincent Sigillito


                                                    Source: Obtained from UCI


                                                    Please cite: UCI citation policy




                                                    1. Title: Pima Indians Diabetes Database




                                                    2. Sources:
                                                      (a) Original owners: National Institute of Diabetes and Digestive and
                                                      Kidney Diseases
                                                      (b) Donor of database: Vincent Sigillito (vgs@aplcen.apl.jhu.edu)
                                                      Research Center, RMI Group Leader
                                                      Applied Physics Laboratory
                                                      The Johns Hopkins University
                                                      Johns Hopkins Road
                                                      Laurel, MD 20707
                                                      (301) 953-6231
                                                      (c) Date received: 9 May 1990




                                                    3. Past Usage:




                                                      1. Smith,~J.~W., Everhart,~J.~E., Dickson,~W.~C., Knowler,~W.~C., &
                                                        Johannes,~R.~S. (1988). Using the ADAP learning algorithm to forecast
                                                        the onset of diabetes mellitus. In {it Proceedings of the Symposium
                                                        on Computer Applications and Medical Care} (pp. 261--265). IEEE
                                                        Computer Society Press.


                                                        The diagnostic, binary-valued variable investigated is whether the
                                                        patient shows signs of diabetes according to World Health Organization
                                                        criteria (i.e., if the 2 hour post-load plasma glucose was at least
                                                        200 mg/dl at any survey examination or if found during routine medical
                                                        care). The population lives near Phoenix, Arizona, USA.


                                                        Results: Their ADAP al", + "description": "https://openml.org \n\n**Author** : [Vincent Sigillito](vgs@aplcen.apl.jhu.edu)\n\n \n\n**Source** : [Obtained from\nUCI](https://archive.ics.uci.edu/ml/datasets/pima+indians+diabetes)\n\n \n\n**Please cite** : [UCI citation\npolicy](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n \n\n 1. \n\nTitle: Pima Indians Diabetes Database\n\n \n\n \n\n 2. \n\nSources: \n(a) Original owners: National Institute of Diabetes and Digestive and \nKidney Diseases \n(b) Donor of database: Vincent Sigillito (vgs@aplcen.apl.jhu.edu) \nResearch Center, RMI Group Leader \nApplied Physics Laboratory \nThe Johns Hopkins University \nJohns Hopkins Road \nLaurel, MD 20707 \n(301) 953-6231 \n(c) Date received: 9 May 1990\n\n \n\n \n\n 3. \n\nPast Usage:\n\n \n \n\n 1. \n\nSmith,~J.~W., Everhart,~J.~E., Dickson,~W.~C., Knowler,~W.~C., & \nJohannes,~R.~S. (1988). Using the ADAP learning algorithm to forecast \nthe onset of diabetes mellitus. In {it Proceedings of the Symposium \non Computer Applications and Medical Care} (pp. 261--265). IEEE \nComputer Society Press.\n\n \n\nThe diagnostic, binary-valued variable investigated is whether the \npatient shows signs of diabetes according to World Health Organization \ncriteria (i.e., if the 2 hour post-load plasma glucose was at least \n200 mg/dl at any survey examination or if found during routine medical \ncare). The population lives near Phoenix, Arizona, USA.\n\n \n\nResults: Their ADAP algorithm makes a real-valued prediction between \n0 and 1. This was transformed into a binary decision using a cutoff of \n0.448. Using 576 training instances, the sensitivity and specificity \nof their algorithm was 76% on the remaining 192 instances.\n\n \n\n \n \n\n \n\n 4. \n\nRelevant Information: \nSeveral constraints were placed on the selection of these instances from ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7d5de8fb-5f22-4b99-9ce3-9ae00f6c86db&revisionId=72ecabe9-fd16-4c78-954a-c7e86585d15c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2947,7 +2947,7 @@ "platform": "ai4experiments", "platform_identifier": "184", "name": "dis", - "description": "https://openml.org

                                                        dis-pmlb

                                                        ", + "description": "https://openml.org \n\ndis-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7d5f6906-f781-4b68-93cc-95e733010b75&revisionId=6b69f0c0-9e4f-437c-8563-55b3b177ef2a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2993,7 +2993,7 @@ "platform": "ai4experiments", "platform_identifier": "186", "name": "lymph", - "description": "https://openml.org

                                                        Author:

                                                        Source: Unknown -

                                                        Please cite:


                                                        Citation Request:
                                                        This lymphography domain was obtained from the University Medical Centre,
                                                        Institute of Oncology, Ljubljana, Yugoslavia. Thanks go to M. Zwitter and
                                                        M. Soklic for providing the data. Please include this citation if you plan
                                                        to use this database.




                                                        1. Title: Lymphography Domain




                                                        2. Sources:
                                                          (a) See Above.
                                                          (b) Donors: Igor Kononenko,
                                                          University E.Kardelj
                                                          Faculty for electrical engineering
                                                          Trzaska 25
                                                          61000 Ljubljana (tel.: (38)(+61) 265-161


                                                                   Bojan Cestnik
                                                          Jozef Stefan Institute
                                                          Jamova 39
                                                          61000 Ljubljana
                                                          Yugoslavia (tel.: (38)(+61) 214-399 ext.287)

                                                          (c) Date: November 1988




                                                        3. Past Usage: (sveral)



                                                          1. Cestnik,G., Konenenko,I, & Bratko,I. (1987). Assistant-86: A
                                                            Knowledge-Elicitation Tool for Sophisticated Users. In I.Bratko
                                                            & N.Lavrac (Eds.) Progress in Machine Learning, 31-45, Sigma Press.
                                                            -- Assistant-86: 76% accuracy

                                                          2. Clark,P. & Niblett,T. (1987). Induction in Noisy Domains. In
                                                            I.Bratko & N.Lavrac (Eds.) Progress in Machine Learning, 11-30,
                                                            Sigma Press.
                                                            -- Simple Bayes: 83% accuracy
                                                            -- CN2 (99% threshold): 82%

                                                          3. Michalski,R., Mozetic,I. Hong,J., & Lavrac,N. (1986). The Multi-Purpose
                                                            Incremental Learning System AQ15 and its Testing Applications to Three
                                                            Medical Domains. In Proceedings of the Fifth Nat", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - \n \n**Please cite** :\n\n \n\nCitation Request: \nThis lymphography domain was obtained from the University Medical Centre, \nInstitute of Oncology, Ljubljana, Yugoslavia. Thanks go to M. Zwitter and \nM. Soklic for providing the data. Please include this citation if you plan \nto use this database.\n\n \n\n \n\n 1. \n\nTitle: Lymphography Domain\n\n \n\n \n\n 2. \n\nSources: \n(a) See Above. \n(b) Donors: Igor Kononenko, \nUniversity E.Kardelj \nFaculty for electrical engineering \nTrzaska 25 \n61000 Ljubljana (tel.: (38)(+61) 265-161\n\n \n\n \n Bojan Cestnik \n Jozef Stefan Institute \n Jamova 39 \n 61000 Ljubljana \n Yugoslavia (tel.: (38)(+61) 214-399 ext.287) \n \n\n \n\n(c) Date: November 1988\n\n \n\n \n\n 3. \n\nPast Usage: (sveral)\n\n \n \n\n 1. Cestnik,G., Konenenko,I, & Bratko,I. (1987). Assistant-86: A \nKnowledge-Elicitation Tool for Sophisticated Users. In I.Bratko \n& N.Lavrac (Eds.) Progress in Machine Learning, 31-45, Sigma Press. \n\\-- Assistant-86: 76% accuracy\n\n \n\n 2. Clark,P. & Niblett,T. (1987). Induction in Noisy Domains. In \nI.Bratko & N.Lavrac (Eds.) Progress in Machine Learning, 11-30, \nSigma Press. \n\\-- Simple Bayes: 83% accuracy \n\\-- CN2 (99% threshold): 82%\n\n \n\n 3. Michalski,R., Mozetic,I. Hong,J., & Lavrac,N. (1986). The Multi-Purpose \nIncremental Learning System AQ15 and its Testing Applications to Three \nMedical Domains. In Proceedings of the Fifth National Conference on \nArtificial Intelligence, 1041-1045. Philadelphia, PA: Morgan Kaufmann. \n\\-- Experts: 85% accuracy (estimate) \n\\-- AQ15: 80-82%\n\n \n \n\n \n\n 4. \n\nRelevant Information: \nThis is one of three domains provided by the Oncology Institute \n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7f5388ed-8ec4-4f00-8230-e5624404ed95&revisionId=306ff0fb-0cee-48f7-ba80-d9567d62f039&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3039,7 +3039,7 @@ "platform": "ai4experiments", "platform_identifier": "187", "name": "allrep", - "description": "https://openml.org

                                                            allrep-pmlb

                                                            ", + "description": "https://openml.org \n\nallrep-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7f976866-58a9-41a2-a2c4-b66ee2ebb502&revisionId=dd968b72-c353-4de1-9da6-bbaaa6083b6d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3085,7 +3085,7 @@ "platform": "ai4experiments", "platform_identifier": "189", "name": "ai4eu-robotics-wrist-6144-raw-broker", - "description": "

                                                            The robotic wrist demonstrator represents a mechanical wrist with three axes that can hold tools, e.g. for spray painting in combination with a pump. On this robotic wrist, two accelerometers are mounted for vibration monitoring and recording: one in the movable front part of the wrist and one in the shaft. The wrist can be controlled through the torque or the designated position of each axis\u2019 motor.

                                                            The dataset consists of 1.8 billion measurements of several sensor data of the robotic wrist in 1-second intervals over six months in 2020.

                                                            The complete dataset & description is available on Zenodo

                                                            ", + "description": "The robotic wrist demonstrator represents a mechanical wrist with three axes\nthat can hold tools, e.g. for spray painting in combination with a pump. On\nthis robotic wrist, two accelerometers are mounted for vibration monitoring\nand recording: one in the movable front part of the wrist and one in the\nshaft. The wrist can be controlled through the torque or the designated\nposition of each axis\u2019 motor.\n\nThe dataset consists of 1.8 billion measurements of several sensor data of the\nrobotic wrist in 1-second intervals over six months in 2020.\n\n[The complete dataset & description is available on\nZenodo](https://zenodo.org/record/5729818)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8521c122-91e5-4748-aacd-c99e0cc7549e&revisionId=de99b386-a460-4eb9-96f0-7d53f01e3801&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3131,7 +3131,7 @@ "platform": "ai4experiments", "platform_identifier": "198", "name": "JapaneseVowels", - "description": "https://openml.org

                                                            Author: Mineichi Kudo, Jun Toyama, Masaru Shimbo

                                                            Source: UCI

                                                            Please cite:


                                                            Japanese vowels

                                                            This dataset records 640 time series of 12 LPC cepstrum coefficients taken from nine male speakers.


                                                            The data was collected for examining our newly developed classifier for multidimensional curves (multidimensional time series). Nine male speakers uttered two Japanese vowels /ae/ successively. For each utterance, with the analysis parameters described below, we applied 12-degree linear prediction analysis to it to obtain a discrete-time series with 12 LPC cepstrum coefficients. This means that one utterance by a speaker forms a time series whose length is in the range 7-29 and each point of a time series is of 12 features (12 coefficients).


                                                            Similar data are available for different utterances /ei/, /iu/, /uo/, /oa/ in addition to /ae/. Please contact the donor if you are interested in using this data.


                                                            The number of the time series is 640 in total. We used one set of 270 time series for training and the other set of 370 time series for testing.


                                                            Analysis parameters:

                                                            * Sampling rate : 10kHz
                                                            * Frame length : 25.6 ms
                                                            * Shift length : 6.4ms
                                                            * Degree of LPC coefficients : 12


                                                            Each line represents 12 LPC coefficients in the increasing order separated by spaces. This corresponds to one analysis
                                                            frame. Lines are organized into blocks, which are a set of 7-29 lines separated by blank lines and corresponds to a single speech utterance of /ae/ with 7-29 frames.


                                                            Each speaker is a set of consecutive blocks. In ae.t", + "description": "https://openml.org \n\n**Author** : Mineichi Kudo, Jun Toyama, Masaru Shimbo \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Japanese+Vowels) \n \n**Please cite** :\n\n \n\n**Japanese vowels** \n \nThis dataset records 640 time series of 12 LPC cepstrum coefficients taken\nfrom nine male speakers.\n\n \n\nThe data was collected for examining our newly developed classifier for\nmultidimensional curves (multidimensional time series). Nine male speakers\nuttered two Japanese vowels /ae/ successively. For each utterance, with the\nanalysis parameters described below, we applied 12-degree linear prediction\nanalysis to it to obtain a discrete-time series with 12 LPC cepstrum\ncoefficients. This means that one utterance by a speaker forms a time series\nwhose length is in the range 7-29 and each point of a time series is of 12\nfeatures (12 coefficients).\n\n \n\nSimilar data are available for different utterances /ei/, /iu/, /uo/, /oa/ in\naddition to /ae/. Please contact the donor if you are interested in using this\ndata.\n\n \n\nThe number of the time series is 640 in total. We used one set of 270 time\nseries for training and the other set of 370 time series for testing.\n\n \n\nAnalysis parameters: \n \n* Sampling rate : 10kHz \n* Frame length : 25.6 ms \n* Shift length : 6.4ms \n* Degree of LPC coefficients : 12\n\n \n\nEach line represents 12 LPC coefficients in the increasing order separated by\nspaces. This corresponds to one analysis \nframe. Lines are organized into blocks, which are a set of 7-29 lines\nseparated by blank lines and corresponds to a single speech utterance of /ae/\nwith 7-29 frames.\n\n \n\nEach speaker is a set of consecutive blocks. In ae.train there are 30 blocks\nfor each speaker. Blocks 1-30 represent speaker 1, blocks 31-60 represent\nspeaker 2, and so on up to ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=898883b9-a6b7-47a1-ae2c-cdf9012ceaaf&revisionId=e5a5e2dc-1c77-4853-91a8-f559a2c8346a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3177,7 +3177,7 @@ "platform": "ai4experiments", "platform_identifier": "199", "name": "badges2", - "description": "https://openml.org

                                                            Author:

                                                            Source: Unknown - Date unknown

                                                            Please cite:


                                                            Dataset from the MLRR repository: http://axon.cs.byu.edu:5000/

                                                            ", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\nDataset from the MLRR repository: http://axon.cs.byu.edu:5000/\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8abffb54-85d2-40d6-9428-dbd62ffa345d&revisionId=49191518-c230-4f13-81b5-b64ba49d0621&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3223,7 +3223,7 @@ "platform": "ai4experiments", "platform_identifier": "200", "name": "Augmented_data_registry", - "description": "

                                                            Description of the solution

                                                            The most important requirement for machine learning based tools is the presence of a robust and reliable data pipeline.  A data pipeline is a series of (possibly automated) data transformations needed before such data can be used by any machine learning model. 


                                                            Figure 1: Example of a typical machine learning data pipeline


                                                            As Figure 1 shows the main steps to prepare data are 1) Data preparation which ensures that the raw data collected via different streams is properly cleaned and associated with a certain quality. 2) Data processing which transforms cleaned data into a format compatible with standard machine learning algorithms.

                                                            The presence of an automated pipeline of this kind makes sure that the same data transformation process can be repeated in time, for example while using the model in real life or when re-training the same model. Data pipelines should be reproducible and reliable and should therefore be properly include", + "description": "# Description of the solution\n\nThe most important requirement for machine learning based tools is the\npresence of a robust and reliable data pipeline. A data pipeline is a series\nof (possibly automated) data transformations needed before such data can be\nused by any machine learning model.\n\n \n\n![](https://lh4.googleusercontent.com/Cp7fIU1RFvkjFQEecn-\nxwDbmJnc_MKdYyVPHwsGLY_enP84iGr-YaKaG8rvv4OKa3d4tGlTfokOut7NM92sPOOLFYkBBLe-\npIMsXln2lw_qKgYJniZJLiRehA3VsWLw73TSAQAB2)\n\nFigure 1: Example of a typical machine learning data pipeline\n\n \n\nAs Figure 1 shows the main steps to prepare data are 1) Data preparation which\nensures that the raw data collected via different streams is properly cleaned\nand associated with a certain quality. 2) Data processing which transforms\ncleaned data into a format compatible with standard machine learning\nalgorithms.\n\nThe presence of an automated pipeline of this kind makes sure that the same\ndata transformation process can be repeated in time, for example while using\nthe model in real life or when re-training the same model. Data pipelines\nshould be reproducible and reliable and should therefore be properly included\ninside a version control system.\n\nSeveral tools and libraries are being currently developed to improve version\ncontrol in data pipelines. Data Version Control ([DVC](https://dvc.org/)) is\nbecoming one of the most popular solutions as it can be seamlessly integrated\nwith Git based versioning solutions.\n\nWithin the scope of this project we decided to deliver an augmented data\nregistry built on top of DVC. The idea, shown in Figure 2, is to provide data\nengineers and data scientists with a way to automatically generate data\nquality reports and processing pipelines every time a new data entity is\npushed to a given DVC repository.\n\n ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8b133ef7-6353-480e-82e4-5d66dad7ced8&revisionId=fa47a809-eaaf-44ee-9f21-636290983357&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3269,7 +3269,7 @@ "platform": "ai4experiments", "platform_identifier": "202", "name": "VideoFileBroker", - "description": "

                                                            The Video file broker feeds video files to video models, typically starting with segmentation.

                                                            ", + "description": "The Video file broker feeds video files to video models, typically starting\nwith segmentation.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8eaa811e-46ff-4577-a88d-b203f7757338&revisionId=b102e42d-5a16-4e96-9fa6-fba8dab9616b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3315,7 +3315,7 @@ "platform": "ai4experiments", "platform_identifier": "203", "name": "autoUniv-au7-500", - "description": "https://openml.org

                                                            Author: Ray. J. Hickey

                                                            Source: UCI

                                                            Please cite:



                                                            • Dataset Title:


                                                            AutoUniv Dataset

                                                            data problem: autoUniv-au7-cpd1-500



                                                            • Abstract:


                                                            AutoUniv is an advanced data generator for classifications tasks. The aim is to reflect the nuances and heterogeneity of real data. Data can be generated in .csv, ARFF or C4.5 formats.



                                                            • Source:


                                                            AutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com
                                                            AutoUniv web-site: http://sites.google.com/site/autouniv/.



                                                            • Data Set Information:


                                                            The user first creates a classification model and then generates classified examples from it. To create a model, the following are specified: the number of attributes (up to 1000) and their type (discrete or continuous), the number of classes (up to 10), the complexity of the underlying rules and the noise level. AutoUniv then produces a model through a process of constrained randomised search to satisfy the user's requirements. A model can have up to 3000 rules. Rare class models can be designed. A sequence of models can be designed to reflect concept and/or population drift.


                                                            AutoUniv creates three text files for a model: a Prolog specification of the model used to generate examples (.aupl); a user-friendly statement of the classification rules in an 'if ... then' format (.aurules); a statistical summary of the main properties of the model, including its Bayes rate (.auprops).



                                                            • Attribute Information:


                                                            Attributes may be discrete with up to 10 values or continuous. A discrete attri", + "description": "https://openml.org \n\n**Author** : Ray. J. Hickey \n \n**Source** : UCI \n \n **Please cite** :\n\n \n\n \n\n * Dataset Title: \n \n\n \n\nAutoUniv Dataset \n \ndata problem: autoUniv-au7-cpd1-500\n\n \n\n \n\n * Abstract: \n \n\n \n\nAutoUniv is an advanced data generator for classifications tasks. The aim is\nto reflect the nuances and heterogeneity of real data. Data can be generated\nin .csv, ARFF or C4.5 formats.\n\n \n\n \n\n * Source: \n \n\n \n\nAutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com \nAutoUniv web-site: http://sites.google.com/site/autouniv/.\n\n \n\n \n\n * Data Set Information:\n \n\n \n\nThe user first creates a classification model and then generates classified\nexamples from it. To create a model, the following are specified: the number\nof attributes (up to 1000) and their type (discrete or continuous), the number\nof classes (up to 10), the complexity of the underlying rules and the noise\nlevel. AutoUniv then produces a model through a process of constrained\nrandomised search to satisfy the user's requirements. A model can have up to\n3000 rules. Rare class models can be designed. A sequence of models can be\ndesigned to reflect concept and/or population drift.\n\n \n\nAutoUniv creates three text files for a model: a Prolog specification of the\nmodel used to generate examples (.aupl); a user-friendly statement of the\nclassification rules in an 'if ... then' format (.aurules); a statistical\nsummary of the main properties of the model, including its Bayes rate\n(.auprops).\n\n \n\n \n\n * Attribute Information: \n \n\n \n\nAttributes may be discrete with up to 10 values or continuous. A discrete\nattribute can be nominal with values v1, v2, v3 ... or integer with values 0,\n1, 2 , ... .\n\n \n\n \n\n * Relevant Papers:\n \n\n \n\nMarrs, G, Hickey, RJ and Black, MM (2", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8ef7f983-b1d2-4891-b76c-6f4ee2202248&revisionId=66cc456d-3bb0-476f-976a-e96562a3545b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3361,7 +3361,7 @@ "platform": "ai4experiments", "platform_identifier": "204", "name": "IoTxKG_TEST", - "description": "

                                                            IoTxKG Ontology Analysis Model

                                                            identify main concepts based on clustering


                                                            The Internet of Things (IoT) primary objective is to make a hyper-connected world for various application domains. However, IoT suffers from a lack of interoperability leading to a substantial threat to the predicted economic value. Schema.org provides semantic interoperability to structure heterogeneous data on the Web. An extension of this vocabulary for the IoT domain (iot.schema.org) is an ongoing research effort to address semantic interoperability for the Web of Things (WoT). To design this vocabulary, a central challenge is to identify the main topics (concepts and properties) automatically from existing knowledge in IoT applications. IoTxKG automatically identifies the most important topics from ontologies of the 4 KE4WoT challenge domains \u2013 smart building, mobility, energy and weather \u2013 based on suitable language models.


                                                            The following technologies are employed in IoTxKG

                                                            • W3C Semantic Web Technologies (e.g. RDF, OWL, SPARQL, SKOS)
                                                            • Deep Learning Models (language models)
                                                            • Clustering Algorithms (e.g. k-means clustering)



                                                            ", + "description": "# **IoTxKG Ontology Analysis Model**\n\n **identify main concepts based on clustering**\n\n \n\nThe Internet of Things (IoT) primary objective is to make a hyper-connected\nworld for various application domains. However, IoT suffers from a lack of\ninteroperability leading to a substantial threat to the predicted economic\nvalue. Schema.org provides semantic interoperability to structure\nheterogeneous data on the Web. An extension of this vocabulary for the IoT\ndomain (iot.schema.org) is an ongoing research effort to address semantic\ninteroperability for the Web of Things (WoT). To design this vocabulary, a\ncentral challenge is to identify the main topics (concepts and properties)\nautomatically from existing knowledge in IoT applications. IoTxKG\nautomatically identifies the most important topics from ontologies of the 4\nKE4WoT challenge domains \u2013 smart building, mobility, energy and weather \u2013\nbased on suitable language models.\n\n \n\nThe following technologies are employed in IoTxKG\n\n * W3C Semantic Web Technologies (e.g. RDF, OWL, SPARQL, SKOS)\n * Deep Learning Models (language models)\n * Clustering Algorithms (e.g. k-means clustering)\n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=900e0378-1f94-4727-b3ba-2907f7cdd818&revisionId=8d0f6c80-b67e-43db-ab6f-3646ed2f57b1&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -3407,7 +3407,7 @@ "platform": "ai4experiments", "platform_identifier": "206", "name": "dsc-text", - "description": "

                                                            This is a simple integration of an IDS Data Connector into a AI4EU Model.

                                                            The source code is available in the tutorials repository on Github: https://github.com/ai4eu/tutorials/tree/master/DSC_Data_Exchange



                                                            To configure the what data the Model should download from an DSC one can use the providet REST-Api accessable through the path /api/v1/ of the webui container.




                                                            The following Endpoints are provided:


                                                            recipient (address of the DSC that provides the Data), resourceId, artifactId, contract, customDSC (address of the DSC that should download the Data)



                                                            ", + "description": "This is a simple integration of an IDS Data Connector into a AI4EU Model.\n\nThe source code is available in the tutorials repository on Github:\nhttps://github.com/ai4eu/tutorials/tree/master/DSC_Data_Exchange\n\n \n\n \n\nTo configure the what data the Model should download from an DSC one can use\nthe providet REST-Api accessable through the path /api/v1/ of the webui\ncontainer.\n\n \n\n \n\n \n\nThe following Endpoints are provided:\n\n \n\nrecipient (address of the DSC that provides the Data), resourceId, artifactId,\ncontract, customDSC (address of the DSC that should download the Data)\n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=926bd2eb-51b6-4e64-8a76-b6544cce5162&revisionId=d764d260-491b-4e55-8476-29b2a2598aa5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3453,7 +3453,7 @@ "platform": "ai4experiments", "platform_identifier": "209", "name": "GAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1", - "description": "https://openml.org

                                                            GAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1-pmlb

                                                            ", + "description": "https://openml.org \n\nGAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=93f29f2f-1fd0-4d24-a057-544397af20bf&revisionId=216e926a-76c7-4c6f-aee9-7c005eb2d6a1&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3499,7 +3499,7 @@ "platform": "ai4experiments", "platform_identifier": "210", "name": "ai4eu-security-pilot-databroker", - "description": "

                                                            This container provides data for Thread Prediction in Network Traffic.

                                                            Therefore, this container can deliver test and training data.


                                                            You can connect the training data output of the ai4eu-security-pilot-databroker container with the training data input of the ai4eu-security-pilot.model container. This data will be used to train the model. It only contains benign traffic. To test your model you can connect the prediction data output of the ai4eu-security-pilot-databroker container with the prediction data input of the ai4eu-security-pilot.model container. This data will be used to test the model. It contains benign and fraud traffic.

                                                            ", + "description": "This container provides data for Thread Prediction in Network Traffic.\n\nTherefore, this container can deliver test and training data.\n\n \n\nYou can connect the training data output of the ai4eu-security-pilot-\ndatabroker container with the training data input of the ai4eu-security-\npilot.model container. This data will be used to train the model. It only\ncontains benign traffic. To test your model you can connect the prediction\ndata output of the ai4eu-security-pilot-databroker container with the\nprediction data input of the ai4eu-security-pilot.model container. This data\nwill be used to test the model. It contains benign and fraud traffic.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=95c624d4-05ed-40c0-ad1d-a833e35da282&revisionId=653b3402-027c-4fac-96ce-ce8fa0969bce&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3545,7 +3545,7 @@ "platform": "ai4experiments", "platform_identifier": "214", "name": "advice-img-databroker", - "description": "

                                                            advice-img-databroker collects the user's images placed on the shared folder and releases them into the pipeline

                                                            ", + "description": "advice-img-databroker collects the user's images placed on the shared folder\nand releases them into the pipeline\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9a0944ce-a5d3-4e01-8da0-d44be9b42814&revisionId=c754d039-d083-4997-abb2-6d67b1d6f3f5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3591,7 +3591,7 @@ "platform": "ai4experiments", "platform_identifier": "215", "name": "moon_rl", - "description": "

                                                            This document contains information regarding the developments done within the MOON project. Such project took place within the AI4EU Open Call for the Alph -D challenge, addressing machining control optimization through Reinforcement Learning. The content of the document can be summarized with the following points that hold all the information and are ordered in a logical way, going from the problem presentation to the solution proposed to face it. The last point contains comments related to problems found within the project, how MOON has adapted to such scenario and possible future steps. See README file for details on notebook.


                                                            ", + "description": "This document contains information regarding the developments done within the\nMOON project. Such project took place within the AI4EU Open Call for the Alph\n-D challenge, addressing machining control optimization through Reinforcement\nLearning. The content of the document can be summarized with the following\npoints that hold all the information and are ordered in a logical way, going\nfrom the problem presentation to the solution proposed to face it. The last\npoint contains comments related to problems found within the project, how MOON\nhas adapted to such scenario and possible future steps. See README file for\ndetails on notebook.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9c0ed8db-b9e3-4a8f-8c63-b6350d951337&revisionId=9e63d89f-6525-48dc-8aba-36f6a6b04f81&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3637,7 +3637,7 @@ "platform": "ai4experiments", "platform_identifier": "217", "name": "vowel", - "description": "https://openml.org

                                                            Author: Peter Turney (peter@ai.iit.nrc.ca)

                                                            Source: UCI - date unknown

                                                            Please cite: UCI citation policy


                                                            Vowel Recognition (Deterding data)
                                                            Speaker independent recognition of the eleven steady state vowels of British English using a specified training set of lpc derived log area ratios.
                                                            Collected by David Deterding (data and non-connectionist analysis), Mahesan Niranjan (first connectionist analysis), Tony Robinson (description, program, data, and results)


                                                            A very comprehensive description including comments by the authors can be found here


                                                            The problem is specified by the accompanying data file, \"vowel.data\". This
                                                            consists of a three dimensional array: voweldata [speaker, vowel, input].
                                                            The speakers are indexed by integers 0-89. (Actually, there are fifteen
                                                            individual speakers, each saying each vowel six times.) The vowels are
                                                            indexed by integers 0-10. For each utterance, there are ten floating-point
                                                            input values, with array indices 0-9.


                                                            The problem is to train the network as well as possible using only on data
                                                            from \"speakers\" 0-47, and then to test the network on speakers 48-89,
                                                            reporting the number of correct classifications in the test set.


                                                            For a more detailed explanation of the problem, see the excerpt from Tony
                                                            Robinson's Ph.D. thesis in the COMMENTS section. In Robinson's opin", + "description": "https://openml.org \n\n**Author** : Peter Turney (peter@ai.iit.nrc.ca) \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/machine-learning-\ndatabases/undocumented/connectionist-bench/vowel/) \\- date unknown \n \n **Please cite** : [UCI citation\npolicy](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n **Vowel Recognition (Deterding data)** \nSpeaker independent recognition of the eleven steady state vowels of British\nEnglish using a specified training set of lpc derived log area ratios. \nCollected by David Deterding (data and non-connectionist analysis), Mahesan\nNiranjan (first connectionist analysis), Tony Robinson (description, program,\ndata, and results)\n\n \n\nA very comprehensive description including comments by the authors can be\nfound [here](https://archive.ics.uci.edu/ml/machine-learning-\ndatabases/undocumented/connectionist-bench/vowel/vowel.names)\n\n \n\nThe problem is specified by the accompanying data file, \"vowel.data\". This \nconsists of a three dimensional array: voweldata [speaker, vowel, input]. \nThe speakers are indexed by integers 0-89. (Actually, there are fifteen \nindividual speakers, each saying each vowel six times.) The vowels are \nindexed by integers 0-10. For each utterance, there are ten floating-point \ninput values, with array indices 0-9.\n\n \n\nThe problem is to train the network as well as possible using only on data \nfrom \"speakers\" 0-47, and then to test the network on speakers 48-89, \nreporting the number of correct classifications in the test set.\n\n \n\nFor a more detailed explanation of the problem, see the excerpt from Tony \nRobinson's Ph.D. thesis in the COMMENTS section. In Robinson's opinion, \nconnectionist problems fall into two classes, the possible and the \nimpossible. He is interested in the latter, by which h", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9d05f3f0-d155-4dc4-84a7-b7551bcba3e2&revisionId=7295e950-aa23-4a8e-bd1d-075622985ae5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3683,7 +3683,7 @@ "platform": "ai4experiments", "platform_identifier": "221", "name": "NewsDatabroker", - "description": "

                                                            Overview:

                                                            Provides textual data to the news-classifier


                                                            ", + "description": "Overview:\n\nProvides textual data to the news-classifier\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a0588b74-603d-4c6d-bed7-fef41bdaa8eb&revisionId=0cd9b307-60c3-48f4-9308-07108854cf09&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3729,7 +3729,7 @@ "platform": "ai4experiments", "platform_identifier": "224", "name": "rebase-data-broker", - "description": "

                                                            This data broker can load open datasets from https://www.rebase.energy/datasets. This will enable access to all upcoming open datasets in the Rebase Platform. The goal of this broker is to make it easy for anyone on the AIOD platform to access additional open energy datasets. 

                                                            The broker provides a user interface to download train and validation sets in a unified way that can quickly be used to evaluate your model. It also exposes LoadData rpc method to get data. A demonstration video can be found here. Please refer to this readme to understand more about how to use and install.

                                                            This project has received funding from the European Union's Horizon 2020 research and innovation programme within the framework of the I-NERGY Project, funded under grant agreement No 101016508.\nThis will enable access to all upcoming open datasets in the Rebase Platform.\nThe goal of this broker is to make it easy for anyone on the AIOD platform to\naccess additional open energy datasets.\n\nThe broker provides a user interface to download train and validation sets in\na unified way that can quickly be used to evaluate your model. It also exposes\nLoadData rpc method to get data. A demonstration video can be found\n[here](https://drive.google.com/file/d/1xYYv1rZRrQSZT1-A73suNSvji122-GhY/view?usp=sharing).\nPlease refer to this[ ](https://github.com/rebase-energy/ai4eu-\nexperiment/tree/master/dataset)[readme](https://github.com/rebase-\nenergy/ai4eu-experiment/tree/master/dataset) to understand more about how to\nuse and install.\n\nThis project has received funding from the European Union's Horizon 2020\nresearch and innovation programme within the framework of the I-NERGY Project,\nfunded under grant agreement No 101016508\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a514218c-d37f-4c38-a06d-c60a267eda42&revisionId=8ad34ae9-6fd3-4815-b890-99d6f22bf929&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3775,7 +3775,7 @@ "platform": "ai4experiments", "platform_identifier": "225", "name": "biomed", - "description": "https://openml.org

                                                            Author:

                                                            Source: Unknown - Date unknown

                                                            Please cite:


                                                            February 23, 1982


                                                            The 1982 annual meetings of the American Statistical Association (ASA)
                                                            will be held August 16-19, 1982 in Cincinnati. At that meeting, the ASA
                                                            Committee on Statistical Graphics plans to sponsor an \"Exposition of
                                                            Statistical Graphics Technology.\" The purpose of this activity is to
                                                            more fully inform the ASA membership about the capabilities and uses of
                                                            computer graphcis in statistical work. This letter is to invite you to
                                                            participate in the Exposition.


                                                            Attached is a set of biomedical data containing 209 observations (134
                                                            for \"normals\" and 75 for \"carriers\"). Each vendor of provider of
                                                            statistical graphics software participating in the Exposition is to
                                                            analyze these data using their software and to prepare tabular, graphical
                                                            and text output illustrating the use of graphics in these analyses and
                                                            summarizing their conclusions. The tabular and graphical materials must be
                                                            direct computer output from the statistical graphics software; the
                                                            textual descriptions and summaries need not be. The total display space
                                                            available to each participant at the meeting will be a standard poster-
                                                            board (approximately 4' x 2 1/2'). All entries will be displayed in one
                                                            location at the meetings, together with brief written commentary by
                                                            the committee summarizing the results of this activity.


                                                            Reference


                                                            Exposition of Statistical Graphics Technology,
                                                            L. H. Cox, M. M. Johnson, K. Kafadar,
                                                            ASA Proc Stat. Comp Section, 1982, pp 55-56.
                                                            Enclosures


                                                            THE DATA


                                                            The following data arose in a study to develop ", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\nFebruary 23, 1982\n\n \n\nThe 1982 annual meetings of the American Statistical Association (ASA) \nwill be held August 16-19, 1982 in Cincinnati. At that meeting, the ASA \nCommittee on Statistical Graphics plans to sponsor an \"Exposition of \nStatistical Graphics Technology.\" The purpose of this activity is to \nmore fully inform the ASA membership about the capabilities and uses of \ncomputer graphcis in statistical work. This letter is to invite you to \nparticipate in the Exposition.\n\n \n\nAttached is a set of biomedical data containing 209 observations (134 \nfor \"normals\" and 75 for \"carriers\"). Each vendor of provider of \nstatistical graphics software participating in the Exposition is to \nanalyze these data using their software and to prepare tabular, graphical \nand text output illustrating the use of graphics in these analyses and \nsummarizing their conclusions. The tabular and graphical materials must be \ndirect computer output from the statistical graphics software; the \ntextual descriptions and summaries need not be. The total display space \navailable to each participant at the meeting will be a standard poster- \nboard (approximately 4' x 2 1/2'). All entries will be displayed in one \nlocation at the meetings, together with brief written commentary by \nthe committee summarizing the results of this activity.\n\n \n\nReference\n\n \n\nExposition of Statistical Graphics Technology, \nL. H. Cox, M. M. Johnson, K. Kafadar, \nASA Proc Stat. Comp Section, 1982, pp 55-56. \nEnclosures\n\n \n\nTHE DATA\n\n \n\nThe following data arose in a study to develop screening methods to \nidentify carriers of a rare genetic disorder. Four measurements m1, \nm2, m3, m4 were made ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a6b3cc75-5ff7-4293-b1b7-36731c797020&revisionId=d1323bad-7098-462e-b402-6b6c6f77cfce&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3821,7 +3821,7 @@ "platform": "ai4experiments", "platform_identifier": "226", "name": "kc2", - "description": "https://openml.org

                                                            Author: Mike Chapman, NASA

                                                            Source: tera-PROMISE - 2004

                                                            Please cite: Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE Repository of Software Engineering Databases. School of Information Technology and Engineering, University of Ottawa, Canada.


                                                            KC2 Software defect prediction

                                                            One of the NASA Metrics Data Program defect data sets. Data from software for science data processing. Data comes from McCabe and Halstead features extractors of source code. These features were defined in the 70s in an attempt to objectively characterize code features that are associated with software quality.


                                                            Attribute Information



                                                            1. loc : numeric % McCabe's line count of code

                                                            2. v(g) : numeric % McCabe \"cyclomatic complexity\"

                                                            3. ev(g) : numeric % McCabe \"essential complexity\"

                                                            4. iv(g) : numeric % McCabe \"design complexity\"

                                                            5. n : numeric % Halstead total operators + operands

                                                            6. v : numeric % Halstead \"volume\"

                                                            7. l : numeric % Halstead \"program length\"

                                                            8. d : numeric % Halstead \"difficulty\"

                                                            9. i : numeric % Halstead \"intelligence\"

                                                            10. e : numeric % Halstead \"effort\"

                                                            11. b : numeric % Halstead

                                                            12. t : numeric % Halstead's time estimator

                                                            13. lOCode : numeric % Halstead's line count

                                                            14. lOComment : numeric % Halstead's count of lines of comments

                                                            15. lOBlank : numeric % Ha", + "description": "https://openml.org \n\n**Author** : Mike Chapman, NASA \n \n **Source** : [tera-\nPROMISE](http://openscience.us/repo/defect/mccabehalsted/kc2.html) \\- 2004 \n \n **Please cite** : Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE\nRepository of Software Engineering Databases. School of Information Technology\nand Engineering, University of Ottawa, Canada.\n\n \n\n**KC2 Software defect prediction** \n \nOne of the NASA Metrics Data Program defect data sets. Data from software for\nscience data processing. Data comes from McCabe and Halstead features\nextractors of source code. These features were defined in the 70s in an\nattempt to objectively characterize code features that are associated with\nsoftware quality.\n\n \n\n### Attribute Information\n\n \n\n \n\n 1. loc : numeric % McCabe's line count of code\n \n\n 2. v(g) : numeric % McCabe \"cyclomatic complexity\"\n \n\n 3. ev(g) : numeric % McCabe \"essential complexity\"\n \n\n 4. iv(g) : numeric % McCabe \"design complexity\"\n \n\n 5. n : numeric % Halstead total operators + operands\n \n\n 6. v : numeric % Halstead \"volume\"\n \n\n 7. l : numeric % Halstead \"program length\"\n \n\n 8. d : numeric % Halstead \"difficulty\"\n \n\n 9. i : numeric % Halstead \"intelligence\"\n \n\n 10. e : numeric % Halstead \"effort\"\n \n\n 11. b : numeric % Halstead \n \n\n 12. t : numeric % Halstead's time estimator\n \n\n 13. lOCode : numeric % Halstead's line count\n \n\n 14. lOComment : numeric % Halstead's count of lines of comments\n \n\n 15. lOBlank : numeric % Halstead's count of blank lines\n \n\n 16. lOCodeAndComment: numeric\n \n\n 17. uniq_Op : numeric % unique operators\n \n\n 18. uniq_Opnd : numeric % unique operands\n \n\n 19. total_Op : numeric % total operators\n \n\n 20. total_Opnd : numeric % total operands\n \n\n 21. branchCount : numeric % of the flow ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a80a6f2d-b129-4ae0-bfce-22f7631801fe&revisionId=066db903-f64c-4bf9-9118-28ed77006e9a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3867,7 +3867,7 @@ "platform": "ai4experiments", "platform_identifier": "227", "name": "autoUniv-au7-700", - "description": "https://openml.org

                                                              Author: Ray. J. Hickey

                                                              Source: UCI

                                                              Please cite:



                                                              • Dataset Title:


                                                              AutoUniv Dataset

                                                              data problem: autoUniv-au7-700



                                                              • Abstract:


                                                              AutoUniv is an advanced data generator for classifications tasks. The aim is to reflect the nuances and heterogeneity of real data. Data can be generated in .csv, ARFF or C4.5 formats.



                                                              • Source:


                                                              AutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com
                                                              AutoUniv web-site: http://sites.google.com/site/autouniv/.



                                                              • Data Set Information:


                                                              The user first creates a classification model and then generates classified examples from it. To create a model, the following are specified: the number of attributes (up to 1000) and their type (discrete or continuous), the number of classes (up to 10), the complexity of the underlying rules and the noise level. AutoUniv then produces a model through a process of constrained randomised search to satisfy the user's requirements. A model can have up to 3000 rules. Rare class models can be designed. A sequence of models can be designed to reflect concept and/or population drift.


                                                              AutoUniv creates three text files for a model: a Prolog specification of the model used to generate examples (.aupl); a user-friendly statement of the classification rules in an 'if ... then' format (.aurules); a statistical summary of the main properties of the model, including its Bayes rate (.auprops).



                                                              • Attribute Information:


                                                              Attributes may be discrete with up to 10 values or continuous. A discrete attribute", + "description": "https://openml.org \n\n**Author** : Ray. J. Hickey \n \n**Source** : UCI \n \n **Please cite** :\n\n \n\n \n\n * Dataset Title: \n \n\n \n\nAutoUniv Dataset \n \ndata problem: autoUniv-au7-700\n\n \n\n \n\n * Abstract: \n \n\n \n\nAutoUniv is an advanced data generator for classifications tasks. The aim is\nto reflect the nuances and heterogeneity of real data. Data can be generated\nin .csv, ARFF or C4.5 formats.\n\n \n\n \n\n * Source: \n \n\n \n\nAutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com \nAutoUniv web-site: http://sites.google.com/site/autouniv/.\n\n \n\n \n\n * Data Set Information:\n \n\n \n\nThe user first creates a classification model and then generates classified\nexamples from it. To create a model, the following are specified: the number\nof attributes (up to 1000) and their type (discrete or continuous), the number\nof classes (up to 10), the complexity of the underlying rules and the noise\nlevel. AutoUniv then produces a model through a process of constrained\nrandomised search to satisfy the user's requirements. A model can have up to\n3000 rules. Rare class models can be designed. A sequence of models can be\ndesigned to reflect concept and/or population drift.\n\n \n\nAutoUniv creates three text files for a model: a Prolog specification of the\nmodel used to generate examples (.aupl); a user-friendly statement of the\nclassification rules in an 'if ... then' format (.aurules); a statistical\nsummary of the main properties of the model, including its Bayes rate\n(.auprops).\n\n \n\n \n\n * Attribute Information: \n \n\n \n\nAttributes may be discrete with up to 10 values or continuous. A discrete\nattribute can be nominal with values v1, v2, v3 ... or integer with values 0,\n1, 2 , ... .\n\n \n\n \n\n * Relevant Papers:\n \n\n \n\nMarrs, G, Hickey, RJ and Black, MM (2010) ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a853cebc-f57d-4e28-afa8-88b8f7a27e9f&revisionId=45d90a0e-8de7-44a8-b04f-c05c0ec44b32&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3913,7 +3913,7 @@ "platform": "ai4experiments", "platform_identifier": "231", "name": "cleveland-nominal", - "description": "https://openml.org

                                                              cleveland-nominal-pmlb

                                                              ", + "description": "https://openml.org \n\ncleveland-nominal-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ae60613f-f66e-4250-b9ee-92784a85ed89&revisionId=6e9c6eea-42b0-4bd1-8d7d-ecc7c170af17&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3959,7 +3959,7 @@ "platform": "ai4experiments", "platform_identifier": "238", "name": "i-nergy-load-forecasting-databroker", - "description": "

                                                              This is a Databroker service used for Timeseries. This service is implemented in context of the I-NERGY project. A User Interface is included where the users can upload their Timeseries in a csv format. For more information on how to use the solution, please see README.pdf in the Documents section. 

                                                              ", + "description": "This is a Databroker service used for Timeseries. This service is implemented\nin context of the [I-NERGY](https://www.i-nergy.eu/) project. A User Interface\nis included where the users can upload their Timeseries in a csv format. For\nmore information on how to use the solution, please see README.pdf in the\nDocuments section.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b303991f-d5cf-40b0-a941-1d0c0292f4f9&revisionId=fa3adc1a-1cee-40df-aea7-628a4942b01b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4005,7 +4005,7 @@ "platform": "ai4experiments", "platform_identifier": "239", "name": "ai4eu-robotics-pump-1024-fft-broker", - "description": "

                                                              The robotic pump demonstrator represents a hydraulic pump that can be mounted on an industrial robot, for example, to pump liquid paint for spray painting. On this pump, one accelerometer is mounted for vibration monitoring and recording.

                                                              The pump can be controlled in terms of speed (rotations per minute, rpm), affecting the throughput of paint and the pressure in and out of the pump.

                                                              The dataset consists of 380 million measurements of several sensor data of the pump system in 1-second intervals over two months in 2020.

                                                              The complete dataset & documentation is available on Zenodo.

                                                              ", + "description": "The robotic pump demonstrator represents a hydraulic pump that can be mounted\non an industrial robot, for example, to pump liquid paint for spray painting.\nOn this pump, one accelerometer is mounted for vibration monitoring and\nrecording.\n\nThe pump can be controlled in terms of speed (rotations per minute, rpm),\naffecting the throughput of paint and the pressure in and out of the pump.\n\nThe dataset consists of 380 million measurements of several sensor data of the\npump system in 1-second intervals over two months in 2020.\n\n[The complete dataset & documentation is available on\nZenodo.](https://zenodo.org/record/5729187)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b3bef42d-b521-4d63-866b-26b6a4b1e053&revisionId=191d8798-2b8b-4ebb-9c4b-9e58caf91bdc&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4051,7 +4051,7 @@ "platform": "ai4experiments", "platform_identifier": "245", "name": "car-evaluation", - "description": "https://openml.org

                                                              car-evaluation-pmlb

                                                              ", + "description": "https://openml.org \n\ncar-evaluation-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b70b266d-8c03-4f01-b809-668eb6ad4d89&revisionId=61420377-785c-4b22-8344-f04eeda911b7&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4097,7 +4097,7 @@ "platform": "ai4experiments", "platform_identifier": "248", "name": "phoneme", - "description": "https://openml.org

                                                              Author: Dominique Van Cappel, THOMSON-SINTRA

                                                              Source: KEEL, ELENA - 1993

                                                              Please cite: None


                                                              The aim of this dataset is to distinguish between nasal (class 0) and oral sounds (class 1). Five different attributes were chosen to characterize each vowel: they are the amplitudes of the five first harmonics AHi, normalised by the total energy Ene (integrated on all the frequencies): AHi/Ene. The phonemes are transcribed as follows: sh as in she, dcl as in dark, iy as the vowel in she, aa as the vowel in dark, and ao as the first vowel in water.


                                                              Source


                                                              The current dataset was formatted by the KEEL repository, but originally hosted by the ELENA Project. The dataset originates from the European ESPRIT 5516 project: ROARS. The aim of this project was the development and the implementation of a real time analytical system for French and Spanish speech recognition.


                                                              Relevant information


                                                              Most of the already existing speech recognition systems are global systems (typically Hidden Markov Models and Time Delay Neural Networks) which recognizes signals and do not really use the speech
                                                              specificities. On the contrary, analytical systems take into account the articulatory process leading to the different phonemes of a given language, the idea being to deduce the presence of each of the
                                                              phonetic features from the acoustic observation.


                                                              The main difficulty of analytical sy", + "description": "https://openml.org \n\n**Author** : Dominique Van Cappel, THOMSON-SINTRA \n \n **Source** : [KEEL](http://sci2s.ugr.es/keel/dataset.php?cod=105#sub2),\n[ELENA](https://www.elen.ucl.ac.be/neural-\nnets/Research/Projects/ELENA/databases/REAL/phoneme/) \\- 1993 \n \n **Please cite** : None\n\n \n\nThe aim of this dataset is to distinguish between nasal (class 0) and oral\nsounds (class 1). Five different attributes were chosen to characterize each\nvowel: they are the amplitudes of the five first harmonics AHi, normalised by\nthe total energy Ene (integrated on all the frequencies): AHi/Ene. The\nphonemes are transcribed as follows: sh as in she, dcl as in dark, iy as the\nvowel in she, aa as the vowel in dark, and ao as the first vowel in water.\n\n \n\n### Source\n\n \n\nThe current dataset was formatted by the KEEL repository, but originally\nhosted by the [ELENA Project](https://www.elen.ucl.ac.be/neural-\nnets/Research/Projects/ELENA/elena.htm#stuff). The dataset originates from the\nEuropean ESPRIT 5516 project: ROARS. The aim of this project was the\ndevelopment and the implementation of a real time analytical system for French\nand Spanish speech recognition.\n\n \n\n### Relevant information\n\n \n\nMost of the already existing speech recognition systems are global systems\n(typically Hidden Markov Models and Time Delay Neural Networks) which\nrecognizes signals and do not really use the speech \nspecificities. On the contrary, analytical systems take into account the\narticulatory process leading to the different phonemes of a given language,\nthe idea being to deduce the presence of each of the \nphonetic features from the acoustic observation.\n\n \n\nThe main difficulty of analytical systems is to obtain acoustical parameters\nsufficiantly reliable. These acoustical measurements must :\n\n \n\n \n\n ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b824ceef-6647-4286-999c-6e175cebc886&revisionId=4517efb8-1b0a-485f-9603-1667a3738dc4&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4143,7 +4143,7 @@ "platform": "ai4experiments", "platform_identifier": "249", "name": "tic-tac-toe", - "description": "https://openml.org

                                                              Author: David W. Aha

                                                              Source: UCI - 1991

                                                              Please cite: UCI


                                                              Tic-Tac-Toe Endgame database

                                                              This database encodes the complete set of possible board configurations at the end of tic-tac-toe games, where \"x\" is assumed to have played first. The target concept is \"win for x\" (i.e., true when \"x\" has one of 8 possible ways to create a \"three-in-a-row\").


                                                              Attribute Information


                                                               (x=player x has taken, o=player o has taken, b=blank)
                                                              1. top-left-square: {x,o,b}
                                                              2. top-middle-square: {x,o,b}
                                                              3. top-right-square: {x,o,b}
                                                              4. middle-left-square: {x,o,b}
                                                              5. middle-middle-square: {x,o,b}
                                                              6. middle-right-square: {x,o,b}
                                                              7. bottom-left-square: {x,o,b}
                                                              8. bottom-middle-square: {x,o,b}
                                                              9. bottom-right-square: {x,o,b}
                                                              10. Class: {positive,negative}
                                                              ", + "description": "https://openml.org \n\n**Author** : David W. Aha \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Tic-Tac-\nToe+Endgame) \\- 1991 \n \n**Please cite** : [UCI](http://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n **Tic-Tac-Toe Endgame database** \n \nThis database encodes the complete set of possible board configurations at the\nend of tic-tac-toe games, where \"x\" is assumed to have played first. The\ntarget concept is \"win for x\" (i.e., true when \"x\" has one of 8 possible ways\nto create a \"three-in-a-row\").\n\n \n\n### Attribute Information\n\n \n\n \n \n (x=player x has taken, o=player o has taken, b=blank) \n 1. top-left-square: {x,o,b} \n 2. top-middle-square: {x,o,b} \n 3. top-right-square: {x,o,b} \n 4. middle-left-square: {x,o,b} \n 5. middle-middle-square: {x,o,b} \n 6. middle-right-square: {x,o,b} \n 7. bottom-left-square: {x,o,b} \n 8. bottom-middle-square: {x,o,b} \n 9. bottom-right-square: {x,o,b} \n 10. Class: {positive,negative} \n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b8a104fa-968e-4492-aca6-7ea4b6de9a2d&revisionId=ebb899ed-1abb-4f88-9d7a-f85922b29557&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4189,7 +4189,7 @@ "platform": "ai4experiments", "platform_identifier": "250", "name": "wine", - "description": "https://openml.org

                                                              Author:

                                                              Source: Unknown -

                                                              Please cite:




                                                              1. Title of Database: Wine recognition data
                                                                Updated Sept 21, 1998 by C.Blake : Added attribute information




                                                              2. Sources:
                                                                (a) Forina, M. et al, PARVUS - An Extendible Package for Data
                                                                Exploration, Classification and Correlation. Institute of Pharmaceutical
                                                                and Food Analysis and Technologies, Via Brigata Salerno,
                                                                16147 Genoa, Italy.


                                                                (b) Stefan Aeberhard, email: stefan@coral.cs.jcu.edu.au
                                                                (c) July 1991
                                                                3. Past Usage:


                                                                (1)
                                                                S. Aeberhard, D. Coomans and O. de Vel,
                                                                Comparison of Classifiers in High Dimensional Settings,
                                                                Tech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of
                                                                Mathematics and Statistics, James Cook University of North Queensland.
                                                                (Also submitted to Technometrics).


                                                                The data was used with many others for comparing various
                                                                classifiers. The classes are separable, though only RDA
                                                                has achieved 100% correct classification.
                                                                (RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data))
                                                                (All results using the leave-one-out technique)


                                                                In a classification context, this is a well posed problem
                                                                with \"well behaved\" class structures. A good data set
                                                                for first testing of a new classifier, but not very
                                                                challenging.


                                                                (2)
                                                                S. Aeberhard, D. Coomans and O. de Vel,
                                                                \"THE CLASSIFICATION PERFORMANCE OF RDA\"
                                                                Tech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of
                                                                Mathematics and Statistics, James Cook University of North Queensland.
                                                                (Also submitted to Journal of Chemometrics).


                                                                Here, the data was used to illustr", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - \n \n**Please cite** :\n\n \n\n \n\n 1. \n\nTitle of Database: Wine recognition data \nUpdated Sept 21, 1998 by C.Blake : Added attribute information\n\n \n\n \n\n 2. \n\nSources: \n(a) Forina, M. et al, PARVUS - An Extendible Package for Data \nExploration, Classification and Correlation. Institute of Pharmaceutical \nand Food Analysis and Technologies, Via Brigata Salerno, \n16147 Genoa, Italy.\n\n \n\n(b) Stefan Aeberhard, email: stefan@coral.cs.jcu.edu.au \n(c) July 1991 \n3\\. Past Usage:\n\n \n\n(1) \nS. Aeberhard, D. Coomans and O. de Vel, \nComparison of Classifiers in High Dimensional Settings, \nTech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of \nMathematics and Statistics, James Cook University of North Queensland. \n(Also submitted to Technometrics).\n\n \n\nThe data was used with many others for comparing various \nclassifiers. The classes are separable, though only RDA \nhas achieved 100% correct classification. \n(RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data)) \n(All results using the leave-one-out technique)\n\n \n\nIn a classification context, this is a well posed problem \nwith \"well behaved\" class structures. A good data set \nfor first testing of a new classifier, but not very \nchallenging.\n\n \n\n(2) \nS. Aeberhard, D. Coomans and O. de Vel, \n\"THE CLASSIFICATION PERFORMANCE OF RDA\" \nTech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of \nMathematics and Statistics, James Cook University of North Queensland. \n(Also submitted to Journal of Chemometrics).\n\n \n\nHere, the data was used to illustrate the superior performance of \nthe use of a new appreciation function with RDA.\n\n \n\n \n\n 3. \n\nRelevant Information:\n\n \n\n\\-- These data are the results of a", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b937a775-61e8-4522-8511-09597c6b40c9&revisionId=9adb25dd-4ded-4104-a593-f5aaad1ff3c2&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4281,7 +4281,7 @@ "platform": "ai4experiments", "platform_identifier": "254", "name": "file-viewer", - "description": "

                                                                A simple file viewer that lists provided files with download links.

                                                                To connect with other components a link to SharedFolderProvider is needed. The viewer will show a list with recent files and their download link. The content of last file will be presented if its text or an image.


                                                                ", + "description": "A simple file viewer that lists provided files with download links.\n\nTo connect with other components a link to SharedFolderProvider is needed. The\nviewer will show a list with recent files and their download link. The content\nof last file will be presented if its text or an image.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=811faf16-86aa-41a0-8720-4e4dcc352074&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.3", @@ -4327,7 +4327,7 @@ "platform": "ai4experiments", "platform_identifier": "255", "name": "file-viewer", - "description": "

                                                                A simple file viewer that lists provided files with download links.

                                                                ", + "description": "A simple file viewer that lists provided files with download links.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=aedc2371-cf0d-433a-8878-8b5ab4aec112&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.2", @@ -4373,7 +4373,7 @@ "platform": "ai4experiments", "platform_identifier": "256", "name": "file-viewer", - "description": "

                                                                A simple file viewer that lists provided files with download links.

                                                                ", + "description": "A simple file viewer that lists provided files with download links.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=f8389c64-a5e0-4ce4-b97d-ef63de60db19&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.2", @@ -4419,7 +4419,7 @@ "platform": "ai4experiments", "platform_identifier": "258", "name": "recognaize-ui", - "description": "

                                                                Recognaize UI

                                                                ", + "description": "Recognaize UI\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bc867fa6-eb1d-4905-bb76-2ebe413c2e91&revisionId=c7add00b-b4b4-46ee-8594-bd0e067f5665&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4465,7 +4465,7 @@ "platform": "ai4experiments", "platform_identifier": "260", "name": "autos", - "description": "https://openml.org

                                                                Author: Jeffrey C. Schlimmer (Jeffrey.Schlimmer@a.gp.cs.cmu.edu)

                                                                Source: UCI - 1987

                                                                Please cite:


                                                                1985 Auto Imports Database

                                                                This data set consists of three types of entities: (a) the specification of an auto in terms of various characteristics, (b) its assigned insurance risk rating, (c) its normalized losses in use as compared to other cars. The second rating corresponds to the degree to which the auto is more risky than its price indicates. Cars are initially assigned a risk factor symbol associated with its price. Then, if it is more risky (or less), this symbol is adjusted by moving it up (or down) the scale. Actuarians call this process \"symboling\". A value of +3 indicates that the auto is risky, -3 that it is probably pretty safe.


                                                                The third factor is the relative average loss payment per insured vehicle year. This value is normalized for all autos within a particular size classification (two-door small, station wagons, sports/speciality, etc...), and represents the average loss per car per year.


                                                                Several of the attributes in the database could be used as a \"class\" attribute.


                                                                Sources:

                                                                1) 1985 Model Import Car and Truck Specifications, 1985 Ward's Automotive Yearbook.
                                                                2) Personal Auto Manuals, Insurance Services Office, 160 Water Street, New York, NY 10038
                                                                3) Insurance Collision Report, Insurance Institute for Highway Safety, Watergate 600, Washington, DC 20037


                                                                Past Usage:

                                                                Kibler,~D., Aha,~D.~W., & Albert,~M. (1989). Instance-based prediction of real-valued attributes. {it Computational Intelli", + "description": "https://openml.org \n\n**Author** : Jeffrey C. Schlimmer (Jeffrey.Schlimmer@a.gp.cs.cmu.edu) \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Automobile) \\- 1987 \n \n **Please cite** :\n\n \n\n**1985 Auto Imports Database** \n \nThis data set consists of three types of entities: (a) the specification of an\nauto in terms of various characteristics, (b) its assigned insurance risk\nrating, (c) its normalized losses in use as compared to other cars. The second\nrating corresponds to the degree to which the auto is more risky than its\nprice indicates. Cars are initially assigned a risk factor symbol associated\nwith its price. Then, if it is more risky (or less), this symbol is adjusted\nby moving it up (or down) the scale. Actuarians call this process \"symboling\".\nA value of +3 indicates that the auto is risky, -3 that it is probably pretty\nsafe.\n\n \n\nThe third factor is the relative average loss payment per insured vehicle\nyear. This value is normalized for all autos within a particular size\nclassification (two-door small, station wagons, sports/speciality, etc...),\nand represents the average loss per car per year.\n\n \n\nSeveral of the attributes in the database could be used as a \"class\"\nattribute.\n\n \n\nSources: \n \n1) 1985 Model Import Car and Truck Specifications, 1985 Ward's Automotive\nYearbook. \n2) Personal Auto Manuals, Insurance Services Office, 160 Water Street, New\nYork, NY 10038 \n3) Insurance Collision Report, Insurance Institute for Highway Safety,\nWatergate 600, Washington, DC 20037\n\n \n\nPast Usage: \n \nKibler,~D., Aha,~D.~W., & Albert,~M. (1989). Instance-based prediction of\nreal-valued attributes. {it Computational Intelligence}, {it 5}, 51--57.\n\n \n\nAttribute Information:\n\n \n\n> \n>\n>\n> \n>\n> 1. symboling: -3, -2, -1, 0, 1, 2, 3.\n> \n>\n> 2. ", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c3822188-8928-4e20-b604-4a274ff34503&revisionId=d1574d67-64d0-4b00-8dfa-7b35d810ddb1&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4511,7 +4511,7 @@ "platform": "ai4experiments", "platform_identifier": "277", "name": "seismic-bumps", - "description": "https://openml.org

                                                                Author: Sikora M., Wrobel L.

                                                                Source: UCI

                                                                Please cite: Sikora M., Wrobel L.: Application of rule induction algorithms for analysis of data collected by seismic hazard monitoring systems in coal mines. Archives of Mining Sciences, 55(1), 2010, 91-114.



                                                                • Title:


                                                                seismic-bumps Data Set



                                                                • Abstract:


                                                                The data describe the problem of high energy (higher than 10^4 J) seismic bumps forecasting in a coal mine. Data come from two of longwalls located in a Polish coal mine.



                                                                • Source:


                                                                Marek Sikora^{1,2} (marek.sikora '@' polsl.pl), Lukasz Wrobel^{1} (lukasz.wrobel '@' polsl.pl)
                                                                (1) Institute of Computer Science, Silesian University of Technology, 44-100 Gliwice, Poland
                                                                (2) Institute of Innovative Technologies EMAG, 40-189 Katowice, Poland



                                                                • Data Set Information:


                                                                Mining activity was and is always connected with the occurrence of dangers which are commonly called mining hazards. A special case of such threat is a seismic hazard which frequently occurs in many underground mines. Seismic hazard is the hardest detectable and predictable of natural hazards and in this respect it is comparable to an earthquake. More and more advanced seismic and seismoacoustic monitoring systems allow a better understanding rock mass processes and definition of seismic hazard
                                                                prediction methods. Accuracy of so far created methods is however far from perfect. Complexity of seismic processes and big disproportion between the number of low-energy seismic events and the number of high-energy phenomena (e.g. > 10^4J) causes the statistical technique", + "description": "https://openml.org \n\n**Author** : Sikora M., Wrobel L. \n \n**Source** : UCI \n \n**Please cite** : Sikora M., Wrobel L.: Application of rule induction\nalgorithms for analysis of data collected by seismic hazard monitoring systems\nin coal mines. Archives of Mining Sciences, 55(1), 2010, 91-114.\n\n \n\n \n\n * Title: \n \n\n \n\nseismic-bumps Data Set\n\n \n\n \n\n * Abstract: \n \n\n \n\nThe data describe the problem of high energy (higher than 10^4 J) seismic\nbumps forecasting in a coal mine. Data come from two of longwalls located in a\nPolish coal mine.\n\n \n\n \n\n * Source:\n \n\n \n\nMarek Sikora^{1,2} (marek.sikora '@' polsl.pl), Lukasz Wrobel^{1}\n(lukasz.wrobel '@' polsl.pl) \n(1) Institute of Computer Science, Silesian University of Technology, 44-100\nGliwice, Poland \n(2) Institute of Innovative Technologies EMAG, 40-189 Katowice, Poland\n\n \n\n \n\n * Data Set Information:\n \n\n \n\nMining activity was and is always connected with the occurrence of dangers\nwhich are commonly called mining hazards. A special case of such threat is a\nseismic hazard which frequently occurs in many underground mines. Seismic\nhazard is the hardest detectable and predictable of natural hazards and in\nthis respect it is comparable to an earthquake. More and more advanced seismic\nand seismoacoustic monitoring systems allow a better understanding rock mass\nprocesses and definition of seismic hazard \nprediction methods. Accuracy of so far created methods is however far from\nperfect. Complexity of seismic processes and big disproportion between the\nnumber of low-energy seismic events and the number of high-energy phenomena\n(e.g. > 10^4J) causes the statistical techniques to be insufficient to predict\nseismic hazard. Therefore, it is essential to search for new opportunities of\nbetter hazard prediction, a", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ce2033a8-a389-435d-a64c-90a173e6775f&revisionId=97be56b0-b72d-41cd-8821-99a6a38e7285&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4557,7 +4557,7 @@ "platform": "ai4experiments", "platform_identifier": "278", "name": "car", - "description": "https://openml.org

                                                                Author: Marko Bohanec, Blaz Zupan

                                                                Source: UCI - 1997

                                                                Please cite: UCI


                                                                Car Evaluation Database

                                                                This database was derived from a simple hierarchical decision model originally developed for the demonstration of DEX (M. Bohanec, V. Rajkovic: Expert system for decision making. Sistemica 1(1), pp. 145-157, 1990.).


                                                                The model evaluates cars according to the following concept structure:


                                                                CAR                      car acceptability
                                                                . PRICE overall price
                                                                . . buying buying price
                                                                . . maint price of the maintenance
                                                                . TECH technical characteristics
                                                                . . COMFORT comfort
                                                                . . . doors number of doors
                                                                . . . persons capacity in terms of persons to carry
                                                                . . . lug_boot the size of luggage boot
                                                                . . safety estimated safety of the car

                                                                Input attributes are printed in lowercase. Besides the target concept (CAR), the model includes three intermediate concepts: PRICE, TECH, COMFORT. Every concept is in the original model related to its lower level descendants by a set of examples (for
                                                                these examples sets see http://www-ai.ijs.si/BlazZupan/car.html).


                                                                The Car Evaluation Database contains examples with the structural information removed, i.e., directly relates CAR to the six input attributes: buying, maint, doors, persons, lug_boot, safety. Because of known underlying concept structure, this database may be particularly useful f", + "description": "https://openml.org \n\n**Author** : Marko Bohanec, Blaz Zupan \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/car+evaluation) \\-\n1997 \n \n**Please cite** : [UCI](http://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Car Evaluation Database** \n \nThis database was derived from a simple hierarchical decision model originally\ndeveloped for the demonstration of DEX (M. Bohanec, V. Rajkovic: Expert system\nfor decision making. Sistemica 1(1), pp. 145-157, 1990.).\n\n \n\nThe model evaluates cars according to the following concept structure:\n\n \n\n \n \n CAR car acceptability \n . PRICE overall price \n . . buying buying price \n . . maint price of the maintenance \n . TECH technical characteristics \n . . COMFORT comfort \n . . . doors number of doors \n . . . persons capacity in terms of persons to carry \n . . . lug_boot the size of luggage boot \n . . safety estimated safety of the car \n \n\n \n\nInput attributes are printed in lowercase. Besides the target concept (CAR),\nthe model includes three intermediate concepts: PRICE, TECH, COMFORT. Every\nconcept is in the original model related to its lower level descendants by a\nset of examples (for \nthese examples sets see http://www-ai.ijs.si/BlazZupan/car.html).\n\n \n\nThe Car Evaluation Database contains examples with the structural information\nremoved, i.e., directly relates CAR to the six input attributes: buying,\nmaint, doors, persons, lug_boot, safety. Because of known underlying concept\nstructure, this database may be particularly useful for testing constructive\ninduction and structure discovery methods.\n\n \n\n### Changes wit", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cf25ba45-37d5-4548-b3d1-103c5cbbf24c&revisionId=105d6390-095f-4d54-bb6d-5e5c24cc5d88&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4603,7 +4603,7 @@ "platform": "ai4experiments", "platform_identifier": "279", "name": "banknote-authentication", - "description": "https://openml.org

                                                                Author: Volker Lohweg (University of Applied Sciences, Ostwestfalen-Lippe)

                                                                Source: UCI - 2012

                                                                Please cite: UCI


                                                                Dataset about distinguishing genuine and forged banknotes. Data were extracted from images that were taken from genuine and forged banknote-like specimens. For digitization, an industrial camera usually used for print inspection was used. The final images have 400x 400 pixels. Due to the object lens and distance to the investigated object gray-scale pictures with a resolution of about 660 dpi were gained. A Wavelet Transform tool was used to extract features from these images.


                                                                Attribute Information


                                                                V1. variance of Wavelet Transformed image (continuous)

                                                                V2. skewness of Wavelet Transformed image (continuous)

                                                                V3. curtosis of Wavelet Transformed image (continuous)

                                                                V4. entropy of image (continuous)


                                                                Class (target). Presumably 1 for genuine and 2 for forged

                                                                ", + "description": "https://openml.org \n\nAuthor: Volker Lohweg (University of Applied Sciences, Ostwestfalen-Lippe) \n \nSource: [UCI](https://archive.ics.uci.edu/ml/datasets/banknote+authentication)\n\\- 2012 \n \nPlease cite: [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\nDataset about distinguishing genuine and forged banknotes. Data were extracted\nfrom images that were taken from genuine and forged banknote-like specimens.\nFor digitization, an industrial camera usually used for print inspection was\nused. The final images have 400x 400 pixels. Due to the object lens and\ndistance to the investigated object gray-scale pictures with a resolution of\nabout 660 dpi were gained. A Wavelet Transform tool was used to extract\nfeatures from these images.\n\n \n\n### Attribute Information\n\n \n\nV1. variance of Wavelet Transformed image (continuous) \n \nV2. skewness of Wavelet Transformed image (continuous) \n \nV3. curtosis of Wavelet Transformed image (continuous) \n \nV4. entropy of image (continuous)\n\n \n\nClass (target). Presumably 1 for genuine and 2 for forged\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cfd561b4-1973-40a1-a572-b70ffdf4d4a0&revisionId=d507733b-9e93-4bef-9161-01dbd46a505a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4649,7 +4649,7 @@ "platform": "ai4experiments", "platform_identifier": "282", "name": "Idiap_BEAT_Databroker_-_M-NIST", - "description": "

                                                                This data broker provides the public MNIST database as a series of image.The data broker itself is standalone as it relies on the BOB mnist database package to provide the data.

                                                                There is no need for any configuration to be done in order to use it.

                                                                It can be used as input to benchmark other Acumos models.There is not image processing done in it. The output is a two dimensional numpy array that is stored as a binary type in order to avoid complex type creation as there's no notion of array size with protobuf.

                                                                The corresponding BEAT experiment can be found on the BEAT platform



                                                                .

                                                                ", + "description": "This data broker provides the public MNIST database as a series of image.The\ndata broker itself is standalone as it relies on the [BOB mnist database\npackage\n](https://www.idiap.ch/software/bob/docs/bob/bob.db.mnist/master/index.html)to\nprovide the data.\n\nThere is no need for any configuration to be done in order to use it.\n\nIt can be used as input to benchmark other Acumos models.There is not image\nprocessing done in it. The output is a two dimensional numpy array that is\nstored as a binary type in order to avoid complex type creation as there's no\nnotion of array size with protobuf.\n\nThe corresponding BEAT experiment can be found on the [BEAT\nplatform](https://www.idiap.ch/software/beat/platform/experiments/amohammadi/amohammadi/mnist_simple/1/mnist1/)\n\n \n\n \n\n.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d1bb94b1-9a27-47d2-a36a-7fceb57a8a9d&revisionId=082d9988-6731-48a9-aa03-22e8ca420541&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4695,7 +4695,7 @@ "platform": "ai4experiments", "platform_identifier": "284", "name": "kc1", - "description": "https://openml.org

                                                                Author: Mike Chapman, NASA

                                                                Source: tera-PROMISE - 2004

                                                                Please cite: Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE Repository of Software Engineering Databases. School of Information Technology and Engineering, University of Ottawa, Canada.


                                                                KC1 Software defect prediction

                                                                One of the NASA Metrics Data Program defect data sets. Data from software for storage management for receiving and processing ground data. Data comes from McCabe and Halstead features extractors of source code. These features were defined in the 70s in an attempt to objectively characterize code features that are associated with software quality.


                                                                Attribute Information



                                                                1. loc : numeric % McCabe's line count of code

                                                                2. v(g) : numeric % McCabe \"cyclomatic complexity\"

                                                                3. ev(g) : numeric % McCabe \"essential complexity\"

                                                                4. iv(g) : numeric % McCabe \"design complexity\"

                                                                5. n : numeric % Halstead total operators + operands

                                                                6. v : numeric % Halstead \"volume\"

                                                                7. l : numeric % Halstead \"program length\"

                                                                8. d : numeric % Halstead \"difficulty\"

                                                                9. i : numeric % Halstead \"intelligence\"

                                                                10. e : numeric % Halstead \"effort\"

                                                                11. b : numeric % Halstead

                                                                12. t : numeric % Halstead's time estimator

                                                                13. lOCode : numeric % Halstead's line count

                                                                14. lOComment : numeric % Halstead's count of lines of comments
                                                                15. SUMO/RL implements a pipeline with a traffic simulator of the city of Trondheim, Norway, and a reinforcement learning autonomous agent that learns and implements traffic control policies with the goal of minimizing the number of pollution peaks above a given threshold. Each component can be ran stand alone.

                                                                  The simulator is a wrapper of the Sumo simulator, that provides more functionality. The simulator is directly targeted to Trondheim city, with the goal to study the traffic related emissions.

                                                                  For a more detailed description check the github repository of the resouce: https://github.com/tsveiga/AI4EU-RL-Trondheim

                                                                  ", + "description": "SUMO/RL implements a pipeline with a traffic simulator of the city of\nTrondheim, Norway, and a reinforcement learning autonomous agent that learns\nand implements traffic control policies with the goal of minimizing the number\nof pollution peaks above a given threshold. Each component can be ran stand\nalone.\n\nThe simulator is a wrapper of the Sumo simulator, that provides more\nfunctionality. The simulator is directly targeted to Trondheim city, with the\ngoal to study the traffic related emissions.\n\nFor a more detailed description check the github repository of the resouce:\nhttps://github.com/tsveiga/AI4EU-RL-Trondheim\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d40fdc2b-fe34-4de3-979d-507b55e96a0f&revisionId=a7ca617c-c274-4500-aff0-3bff21a24298&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -4833,7 +4833,7 @@ "platform": "ai4experiments", "platform_identifier": "290", "name": "edm-env", - "description": "

                                                                  EDM env component is a numpy based EDM environment that follows the gym API. It emulates the rib machining pattern, with 4 available actions for z-axis control: 0 (stay), 1 (lower by 10\u03bcm), 2 (raise by 10\u03bcm), 3 (flush). Environment returns observed average voltage of the sparks, and the frequency of sparking (both are normalized)

                                                                  This component exposes a protobuf based control API via 8061 port. Using this API it can be controlled by the demo EDM agent (edm-agent component in AI4EU platform). For instructions to run the agent and the enviroment together see the component repository at https://github.com/threethirds/edm


                                                                  ", + "description": "EDM env component is a numpy based EDM environment that follows the gym API.\nIt emulates the rib machining pattern, with 4 available actions for z-axis\ncontrol: 0 (stay), 1 (lower by 10\u03bcm), 2 (raise by 10\u03bcm), 3 (flush).\nEnvironment returns observed average voltage of the sparks, and the frequency\nof sparking (both are normalized)\n\nThis component exposes a protobuf based control API via 8061 port. Using this\nAPI it can be controlled by the demo EDM agent (edm-agent component in AI4EU\nplatform). For instructions to run the agent and the enviroment together see\nthe component repository at https://github.com/threethirds/edm\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&revisionId=6fe2ae46-9234-4ce6-843b-adbf4e963c63&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.3", @@ -4971,7 +4971,7 @@ "platform": "ai4experiments", "platform_identifier": "294", "name": "ionosphere", - "description": "https://openml.org

                                                                  Author: Space Physics Group, Applied Physics Laboratory, Johns Hopkins University. Donated by Vince Sigillito.

                                                                  Source: UCI Machine Learning Repository

                                                                  Please cite: UCI


                                                                  Johns Hopkins University Ionosphere database

                                                                  This radar data was collected by a system in Goose Bay, Labrador. This system consists of a phased array of 16 high-frequency antennas with a total transmitted power on the order of 6.4 kilowatts. See the paper for more details.


                                                                  Attribute information


                                                                  Received signals were processed using an autocorrelation function whose arguments are the time of a pulse and the pulse number. There were 17 pulse numbers for the Goose Bay system. Instances in this database are described by 2 attributes per pulse number, corresponding to the complex values returned by the function resulting from the complex electromagnetic signal.


                                                                  The targets were free electrons in the ionosphere. \"Good\" (g) radar returns are those showing evidence of some type of structure in the ionosphere. \"Bad\" (b) returns are those that do not; their signals pass through the ionosphere.


                                                                  Relevant papers


                                                                  Sigillito, V. G., Wing, S. P., Hutton, L. V., & Baker, K. B. (1989). Classification of radar returns from the ionosphere using neural networks. Johns Hopkins APL Technical Digest, 10, 262-266.

                                                                  ", + "description": "https://openml.org \n\n**Author** : Space Physics Group, Applied Physics Laboratory, Johns Hopkins\nUniversity. Donated by Vince Sigillito. \n \n **Source** : [UCI Machine Learning\nRepository](https://archive.ics.uci.edu/ml/datasets/ionosphere) \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Johns Hopkins University Ionosphere database** \n \nThis radar data was collected by a system in Goose Bay, Labrador. This system\nconsists of a phased array of 16 high-frequency antennas with a total\ntransmitted power on the order of 6.4 kilowatts. See the paper for more\ndetails.\n\n \n\n### Attribute information\n\n \n\nReceived signals were processed using an autocorrelation function whose\narguments are the time of a pulse and the pulse number. There were 17 pulse\nnumbers for the Goose Bay system. Instances in this database are described by\n2 attributes per pulse number, corresponding to the complex values returned by\nthe function resulting from the complex electromagnetic signal.\n\n \n\nThe targets were free electrons in the ionosphere. \"Good\" (g) radar returns\nare those showing evidence of some type of structure in the ionosphere. \"Bad\"\n(b) returns are those that do not; their signals pass through the ionosphere.\n\n \n\n### Relevant papers\n\n \n\nSigillito, V. G., Wing, S. P., Hutton, L. V., & Baker, K. B. (1989).\nClassification of radar returns from the ionosphere using neural networks.\nJohns Hopkins APL Technical Digest, 10, 262-266.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d6b959e6-59c4-4311-a0b2-550b9a1bd407&revisionId=48b02822-24ca-4e2e-9e05-f606db3b6be2&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5017,7 +5017,7 @@ "platform": "ai4experiments", "platform_identifier": "304", "name": "credit-approval", - "description": "https://openml.org

                                                                  Author: Confidential - Donated by Ross Quinlan

                                                                  Source: UCI - 1987

                                                                  Please cite: UCI


                                                                  Credit Approval
                                                                  This file concerns credit card applications. All attribute names and values have been changed to meaningless symbols to protect the confidentiality of the data.


                                                                  This dataset is interesting because there is a good mix of attributes -- continuous, nominal with small numbers of values, and nominal with larger numbers of values. There are also a few missing values.

                                                                  ", + "description": "https://openml.org \n\n**Author** : Confidential - Donated by Ross Quinlan \n \n**Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/credit+approval) \\-\n1987 \n \n **Please cite** : [UCI](http://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Credit Approval** \nThis file concerns credit card applications. All attribute names and values\nhave been changed to meaningless symbols to protect the confidentiality of the\ndata.\n\n \n\nThis dataset is interesting because there is a good mix of attributes --\ncontinuous, nominal with small numbers of values, and nominal with larger\nnumbers of values. There are also a few missing values.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dc36f026-d89b-4017-943e-560012105d3d&revisionId=9238fdfe-0824-45cf-933d-d51cb54deb54&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5063,7 +5063,7 @@ "platform": "ai4experiments", "platform_identifier": "309", "name": "segment", - "description": "https://openml.org

                                                                  Author: University of Massachusetts Vision Group, Carla Brodley

                                                                  Source: UCI - 1990

                                                                  Please cite: UCI


                                                                  Image Segmentation Data Set
                                                                  The instances were drawn randomly from a database of 7 outdoor images. The images were hand-segmented to create a classification for every pixel. Each instance is a 3x3 region.


                                                                  Attribute Information



                                                                  1. region-centroid-col: the column of the center pixel of the region.

                                                                  2. region-centroid-row: the row of the center pixel of the region.

                                                                  3. region-pixel-count: the number of pixels in a region = 9.

                                                                  4. short-line-density-5: the results of a line extractoin algorithm that
                                                                    counts how many lines of length 5 (any orientation) with
                                                                    low contrast, less than or equal to 5, go through the region.

                                                                  5. short-line-density-2: same as short-line-density-5 but counts lines
                                                                    of high contrast, greater than 5.

                                                                  6. vedge-mean: measure the contrast of horizontally
                                                                    adjacent pixels in the region. There are 6, the mean and
                                                                    standard deviation are given. This attribute is used as
                                                                    a vertical edge detector.

                                                                  7. vegde-sd: (see 6)

                                                                  8. hedge-mean: measures the contrast of vertically adjacent
                                                                    pixels. Used for horizontal line detection.

                                                                  9. hedge-sd: (see 8).

                                                                  10. intensity-mean: the average over the region of (R + G + B)/3

                                                                  11. rawred-mean: the average over the region of the R value.

                                                                  12. rawblue-mean: th", + "description": "https://openml.org \n\n**Author** : University of Massachusetts Vision Group, Carla Brodley \n \n **Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/image+segmentation)\n\\- 1990 \n \n **Please cite** : [UCI](http://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Image Segmentation Data Set** \nThe instances were drawn randomly from a database of 7 outdoor images. The\nimages were hand-segmented to create a classification for every pixel. Each\ninstance is a 3x3 region.\n\n \n\n### Attribute Information\n\n \n\n \n\n 1. region-centroid-col: the column of the center pixel of the region.\n \n\n 2. region-centroid-row: the row of the center pixel of the region.\n \n\n 3. region-pixel-count: the number of pixels in a region = 9.\n \n\n 4. short-line-density-5: the results of a line extractoin algorithm that \ncounts how many lines of length 5 (any orientation) with \nlow contrast, less than or equal to 5, go through the region.\n\n \n\n 5. short-line-density-2: same as short-line-density-5 but counts lines \nof high contrast, greater than 5.\n\n \n\n 6. vedge-mean: measure the contrast of horizontally \nadjacent pixels in the region. There are 6, the mean and \nstandard deviation are given. This attribute is used as \na vertical edge detector.\n\n \n\n 7. vegde-sd: (see 6)\n \n\n 8. hedge-mean: measures the contrast of vertically adjacent \npixels. Used for horizontal line detection.\n\n \n\n 9. hedge-sd: (see 8).\n \n\n 10. intensity-mean: the average over the region of (R + G + B)/3\n \n\n 11. rawred-mean: the average over the region of the R value.\n \n\n 12. rawblue-mean: the average over the region of the B value.\n \n\n 13. rawgreen-mean: the average over the region of the G value.\n \n\n 14. exred-mean: measure the excess red: (2R - (G + B))\n \n\n 15. exblue-mean: measure the exces", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df660739-9aee-423a-a44e-df9b637cfe1b&revisionId=6f1d61b0-1028-44ee-ac03-ce7b562550c3&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5109,7 +5109,7 @@ "platform": "ai4experiments", "platform_identifier": "310", "name": "segment", - "description": "https://openml.org

                                                                    Author: University of Massachusetts Vision Group, Carla Brodley

                                                                    Source: UCI - 1990

                                                                    Please cite: UCI


                                                                    Image Segmentation Data Set
                                                                    The instances were drawn randomly from a database of 7 outdoor images. The images were hand-segmented to create a classification for every pixel. Each instance is a 3x3 region.


                                                                    Major changes w.r.t. version 2: ignored first two variables as they do not fit the classification task (they reflect the location of the sample in the original image). The 3rd is constant, so should also be ignored.


                                                                    Attribute Information



                                                                    1. short-line-density-5: the results of a line extractoin algorithm that
                                                                      counts how many lines of length 5 (any orientation) with
                                                                      low contrast, less than or equal to 5, go through the region.

                                                                    2. short-line-density-2: same as short-line-density-5 but counts lines
                                                                      of high contrast, greater than 5.

                                                                    3. vedge-mean: measure the contrast of horizontally
                                                                      adjacent pixels in the region. There are 6, the mean and
                                                                      standard deviation are given. This attribute is used as
                                                                      a vertical edge detector.

                                                                    4. vegde-sd: (see 6)

                                                                    5. hedge-mean: measures the contrast of vertically adjacent
                                                                      pixels. Used for horizontal line detection.

                                                                    6. hedge-sd: (see 8).

                                                                    7. intensity-mean: the average over the region of (R + G + B)/3

                                                                    8. rawred-mean: the average over the region of the R value.

                                                                    9. r", + "description": "https://openml.org \n\n**Author** : University of Massachusetts Vision Group, Carla Brodley \n \n **Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/image+segmentation)\n\\- 1990 \n \n **Please cite** : [UCI](http://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Image Segmentation Data Set** \nThe instances were drawn randomly from a database of 7 outdoor images. The\nimages were hand-segmented to create a classification for every pixel. Each\ninstance is a 3x3 region.\n\n \n\n **Major changes w.r.t. version 2: ignored first two variables as they do not\nfit the classification task (they reflect the location of the sample in the\noriginal image). The 3rd is constant, so should also be ignored.**\n\n \n\n### Attribute Information\n\n \n\n \n\n 1. short-line-density-5: the results of a line extractoin algorithm that \ncounts how many lines of length 5 (any orientation) with \nlow contrast, less than or equal to 5, go through the region.\n\n \n\n 2. short-line-density-2: same as short-line-density-5 but counts lines \nof high contrast, greater than 5.\n\n \n\n 3. vedge-mean: measure the contrast of horizontally \nadjacent pixels in the region. There are 6, the mean and \nstandard deviation are given. This attribute is used as \na vertical edge detector.\n\n \n\n 4. vegde-sd: (see 6)\n \n\n 5. hedge-mean: measures the contrast of vertically adjacent \npixels. Used for horizontal line detection.\n\n \n\n 6. hedge-sd: (see 8).\n \n\n 7. intensity-mean: the average over the region of (R + G + B)/3\n \n\n 8. rawred-mean: the average over the region of the R value.\n \n\n 9. rawblue-mean: the average over the region of the B value.\n \n\n 10. rawgreen-mean: the average over the region of the G value.\n \n\n 11. exred-mean: measure the excess red: (2R - (G + B))\n \n\n 12. exblue-mean: measure th", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df660739-9aee-423a-a44e-df9b637cfe1b&revisionId=a0a9b64d-774e-438d-b13c-c9c20e220da0&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -5155,7 +5155,7 @@ "platform": "ai4experiments", "platform_identifier": "313", "name": "pc1", - "description": "https://openml.org

                                                                      Author: Mike Chapman, NASA

                                                                      Source: tera-PROMISE - 2004

                                                                      Please cite: Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE Repository of Software Engineering Databases. School of Information Technology and Engineering, University of Ottawa, Canada.


                                                                      PC1 Software defect prediction

                                                                      One of the NASA Metrics Data Program defect data sets. Data from flight software for earth orbiting satellite. Data comes from McCabe and Halstead features extractors of source code. These features were defined in the 70s in an attempt to objectively characterize code features that are associated with software quality.


                                                                      Attribute Information



                                                                      1. loc : numeric % McCabe's line count of code

                                                                      2. v(g) : numeric % McCabe \"cyclomatic complexity\"

                                                                      3. ev(g) : numeric % McCabe \"essential complexity\"

                                                                      4. iv(g) : numeric % McCabe \"design complexity\"

                                                                      5. n : numeric % Halstead total operators + operands

                                                                      6. v : numeric % Halstead \"volume\"

                                                                      7. l : numeric % Halstead \"program length\"

                                                                      8. d : numeric % Halstead \"difficulty\"

                                                                      9. i : numeric % Halstead \"intelligence\"

                                                                      10. e : numeric % Halstead \"effort\"

                                                                      11. b : numeric % Halstead

                                                                      12. t : numeric % Halstead's time estimator

                                                                      13. lOCode : numeric % Halstead's line count

                                                                      14. lOComment : numeric % Halstead's count of lines of comments

                                                                      15. lOBlank : nume", + "description": "https://openml.org \n\n**Author** : Mike Chapman, NASA \n \n **Source** : [tera-\nPROMISE](http://openscience.us/repo/defect/mccabehalsted/pc1.html) \\- 2004 \n \n **Please cite** : Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE\nRepository of Software Engineering Databases. School of Information Technology\nand Engineering, University of Ottawa, Canada.\n\n \n\n**PC1 Software defect prediction** \n \nOne of the NASA Metrics Data Program defect data sets. Data from flight\nsoftware for earth orbiting satellite. Data comes from McCabe and Halstead\nfeatures extractors of source code. These features were defined in the 70s in\nan attempt to objectively characterize code features that are associated with\nsoftware quality.\n\n \n\n### Attribute Information\n\n \n\n \n\n 1. loc : numeric % McCabe's line count of code\n \n\n 2. v(g) : numeric % McCabe \"cyclomatic complexity\"\n \n\n 3. ev(g) : numeric % McCabe \"essential complexity\"\n \n\n 4. iv(g) : numeric % McCabe \"design complexity\"\n \n\n 5. n : numeric % Halstead total operators + operands\n \n\n 6. v : numeric % Halstead \"volume\"\n \n\n 7. l : numeric % Halstead \"program length\"\n \n\n 8. d : numeric % Halstead \"difficulty\"\n \n\n 9. i : numeric % Halstead \"intelligence\"\n \n\n 10. e : numeric % Halstead \"effort\"\n \n\n 11. b : numeric % Halstead \n \n\n 12. t : numeric % Halstead's time estimator\n \n\n 13. lOCode : numeric % Halstead's line count\n \n\n 14. lOComment : numeric % Halstead's count of lines of comments\n \n\n 15. lOBlank : numeric % Halstead's count of blank lines\n \n\n 16. lOCodeAndComment: numeric\n \n\n 17. uniq_Op : numeric % unique operators\n \n\n 18. uniq_Opnd : numeric % unique operands\n \n\n 19. total_Op : numeric % total operators\n \n\n 20. total_Opnd : numeric % total operands\n \n\n 21. branchCount : numeric % of t", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e20b38c6-c46c-4cf6-96cf-c3ce14285c88&revisionId=c63c438d-ba5f-4544-94f2-8be84fb8e252&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5293,7 +5293,7 @@ "platform": "ai4experiments", "platform_identifier": "320", "name": "SensorThings_API_connector", - "description": "

                                                                        This is a generic connector for the SensorThings API. It will allow you to connect to any SensorThings API in the world and therefore potentially recover data on any domain. For example, this would facilitate the retrieval of public Covid19 data, harvested from various sources including Johns Hopkins and RKI, or from near-real-time air quality across Europe, from both national sources (harvested from AT SOS and WFS) and Europe (EEA).

                                                                        To illustrate the potential uses of these different SensorThings API (with a single connector), one can take a look at these different applications: a visualisation tool[1] bringing together French and German flow data, a covid-19 dashboard[2] and the Windy Web site[3] focused on the weather forecast.


                                                                        [1] https://wg-brgm.k8s.ilt-dmz.iosb.fraunhofer.de/servlet/is/110/


                                                                        [2] http://www.covid19dashboard.org/


                                                                        [3] https://www.windy.com/fr/-NO2-no2?camsEu,no2,47.905,1.908,5

                                                                        ", + "description": "This is a generic connector for the SensorThings API. It will allow you to\nconnect to any SensorThings API in the world and therefore potentially recover\ndata on any domain. For example, this would facilitate the retrieval of public\nCovid19 data, harvested from various sources including Johns Hopkins and RKI,\nor from near-real-time air quality across Europe, from both national sources\n(harvested from AT SOS and WFS) and Europe (EEA).\n\nTo illustrate the potential uses of these different SensorThings API (with a\nsingle connector), one can take a look at these different applications: a\nvisualisation tool[[1]](about:blank) bringing together French and German flow\ndata, a covid-19 dashboard[[2]](about:blank) and the [Windy Web\nsite](https://www.windy.com/fr/-NO2-no2?camsEu,no2,47.905,1.908,5)[[3]](about:blank)\nfocused on the weather forecast.\n\n \n\n[[1]](about:blank) \n\n \n\n[[2]](about:blank) \n\n \n\n[[3]](about:blank)\n\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&revisionId=baf4c46b-673b-48d0-ac27-1fa2a87ba625&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.3", @@ -5385,7 +5385,7 @@ "platform": "ai4experiments", "platform_identifier": "327", "name": "IoTxKG", - "description": "

                                                                        The Internet of Things (IoT) primary objective is to make a hyper-connected world for various application domains. However, IoT suffers from a lack of interoperability leading to a substantial threat to the predicted economic value. Schema.org provides semantic interoperability to structure heterogeneous data on the Web. An extension of this vocabulary for the IoT domain (iot.schema.org) is an ongoing research effort to address semantic interoperability for the Web of Things (WoT). To design this vocabulary, a central challenge is to identify the main topics (concepts and properties) automatically from existing knowledge in IoT applications. IoTxKG automatically 1) identifies the most important topics from existing ontologies of the 4 KE4WoT challenge domains \u2013 smart building, mobility, energy and weather \u2013 based on suitable language models and 2) visualises the topics using both wordclouds and interactive graph-based word clouds. 


                                                                        The following technologies are employed in IoTxKG

                                                                        • W3C Semantic Web Technologies (e.g. RDF, OWL, SPARQL, SKOS)
                                                                        • Deep Learning Language Models (Word2vec, BERT, ERNIE, GPT)
                                                                        • Clustering Algorithms (e.g. k-means clustering)
                                                                        • Graph-based Visualization
                                                                        ", + "description": "The Internet of Things (IoT) primary objective is to make a hyper-connected\nworld for various application domains. However, IoT suffers from a lack of\ninteroperability leading to a substantial threat to the predicted economic\nvalue. Schema.org provides semantic interoperability to structure\nheterogeneous data on the Web. An extension of this vocabulary for the IoT\ndomain (iot.schema.org) is an ongoing research effort to address semantic\ninteroperability for the Web of Things (WoT). To design this vocabulary, a\ncentral challenge is to identify the main topics (concepts and properties)\nautomatically from existing knowledge in IoT applications. IoTxKG\nautomatically 1) identifies the most important topics from existing ontologies\nof the 4 KE4WoT challenge domains \u2013 smart building, mobility, energy and\nweather \u2013 based on suitable language models and 2) visualises the topics using\nboth wordclouds and interactive graph-based word clouds.\n\n \n\nThe following technologies are employed in IoTxKG\n\n * W3C Semantic Web Technologies (e.g. RDF, OWL, SPARQL, SKOS)\n * Deep Learning Language Models (Word2vec, BERT, ERNIE, GPT)\n * Clustering Algorithms (e.g. k-means clustering)\n * Graph-based Visualization\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e55074a8-d66b-4e83-84c9-e0cd4371c79b&revisionId=75ce6a2f-1762-4907-8b94-a12ec9607f23&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5431,7 +5431,7 @@ "platform": "ai4experiments", "platform_identifier": "328", "name": "ai4eu-robotics-pump-6144-raw-broker", - "description": "

                                                                        The robotic pump demonstrator represents a hydraulic pump that can be mounted on an industrial robot, for example, to pump liquid paint for spray painting. On this pump, one accelerometer is mounted for vibration monitoring and recording.

                                                                        The pump can be controlled in terms of speed (rotations per minute, rpm), affecting the throughput of paint and the pressure in and out of the pump.

                                                                        The dataset consists of 380 million measurements of several sensor data of the pump system in 1-second intervals over two months in 2020.

                                                                        The complete dataset & documentation is available on Zenodo.

                                                                        ", + "description": "The robotic pump demonstrator represents a hydraulic pump that can be mounted\non an industrial robot, for example, to pump liquid paint for spray painting.\nOn this pump, one accelerometer is mounted for vibration monitoring and\nrecording.\n\nThe pump can be controlled in terms of speed (rotations per minute, rpm),\naffecting the throughput of paint and the pressure in and out of the pump.\n\nThe dataset consists of 380 million measurements of several sensor data of the\npump system in 1-second intervals over two months in 2020.\n\n[The complete dataset & documentation is available on\nZenodo.](https://zenodo.org/record/5729187)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e5be6960-bea7-4d62-8301-be494ab1ac46&revisionId=2626b3dc-d3a3-4f3c-b7b9-e523758dd5b5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.2", @@ -5523,7 +5523,7 @@ "platform": "ai4experiments", "platform_identifier": "330", "name": "TEK_THOR_DATA_CURATION", - "description": "

                                                                        AI4EU - THOR THOR solution consists in a Hybrid optimization solution to make the right decision on the amount spare parts in stock, considering past sales and forecasts. The purchase decision considers as input information current stock status, production needs, production forecast, sales forecast, variability Price of stock material and several restriction parameters.

                                                                        Data-Curation. Different datasets extracted from company ERP are analyzed and normalized by a \u2018Quality\u2019 module, which uses different statistical techniques to calculate quality metrics and fix missing values.

                                                                        ", + "description": "**AI4EU - THOR **THOR solution consists in a Hybrid optimization solution to\nmake the right decision on the amount spare parts in stock, considering past\nsales and forecasts. The purchase decision considers as input information\ncurrent stock status, production needs, production forecast, sales forecast,\nvariability Price of stock material and several restriction parameters.\n\n **Data-Curation**. Different datasets extracted from company ERP are analyzed\nand normalized by a \u2018Quality\u2019 module, which uses different statistical\ntechniques to calculate quality metrics and fix missing values.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e6d5038c-424a-44ce-9415-34fa129bf9a5&revisionId=bf98fd1e-fdf2-4ada-9a9a-c30fb1a90fea&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5569,7 +5569,7 @@ "platform": "ai4experiments", "platform_identifier": "331", "name": "breast-cancer", - "description": "https://openml.org

                                                                        Author:

                                                                        Source: Unknown -

                                                                        Please cite:


                                                                        Citation Request:
                                                                        This breast cancer domain was obtained from the University Medical Centre,
                                                                        Institute of Oncology, Ljubljana, Yugoslavia. Thanks go to M. Zwitter and
                                                                        M. Soklic for providing the data. Please include this citation if you plan
                                                                        to use this database.




                                                                        1. Title: Breast cancer data (Michalski has used this)




                                                                        2. Sources:
                                                                          -- Matjaz Zwitter & Milan Soklic (physicians)
                                                                          Institute of Oncology
                                                                          University Medical Center
                                                                          Ljubljana, Yugoslavia
                                                                          -- Donors: Ming Tan and Jeff Schlimmer (Jeffrey.Schlimmer@a.gp.cs.cmu.edu)
                                                                          -- Date: 11 July 1988




                                                                        3. Past Usage: (Several: here are some)
                                                                          -- Michalski,R.S., Mozetic,I., Hong,J., & Lavrac,N. (1986). The
                                                                          Multi-Purpose Incremental Learning System AQ15 and its Testing
                                                                          Application to Three Medical Domains. In Proceedings of the
                                                                          Fifth National Conference on Artificial Intelligence, 1041-1045,
                                                                          Philadelphia, PA: Morgan Kaufmann.
                                                                          -- accuracy range: 66%-72%
                                                                          -- Clark,P. & Niblett,T. (1987). Induction in Noisy Domains. In
                                                                          Progress in Machine Learning (from the Proceedings of the 2nd
                                                                          European Working Session on Learning), 11-30, Bled,
                                                                          Yugoslavia: Sigma Press.
                                                                          -- 8 test results given: 65%-72% accuracy range
                                                                          -- Tan, M., & Eshelman, L. (1988). Using weighted networks to
                                                                          represent classification knowledge in noisy domains. Proceedings
                                                                          of the Fifth International Confere", + "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - \n \n**Please cite** :\n\n \n\nCitation Request: \nThis breast cancer domain was obtained from the University Medical Centre, \nInstitute of Oncology, Ljubljana, Yugoslavia. Thanks go to M. Zwitter and \nM. Soklic for providing the data. Please include this citation if you plan \nto use this database.\n\n \n\n \n\n 1. \n\nTitle: Breast cancer data (Michalski has used this)\n\n \n\n \n\n 2. \n\nSources: \n\\-- Matjaz Zwitter & Milan Soklic (physicians) \nInstitute of Oncology \nUniversity Medical Center \nLjubljana, Yugoslavia \n\\-- Donors: Ming Tan and Jeff Schlimmer (Jeffrey.Schlimmer@a.gp.cs.cmu.edu) \n\\-- Date: 11 July 1988\n\n \n\n \n\n 3. \n\nPast Usage: (Several: here are some) \n\\-- Michalski,R.S., Mozetic,I., Hong,J., & Lavrac,N. (1986). The \nMulti-Purpose Incremental Learning System AQ15 and its Testing \nApplication to Three Medical Domains. In Proceedings of the \nFifth National Conference on Artificial Intelligence, 1041-1045, \nPhiladelphia, PA: Morgan Kaufmann. \n\\-- accuracy range: 66%-72% \n\\-- Clark,P. & Niblett,T. (1987). Induction in Noisy Domains. In \nProgress in Machine Learning (from the Proceedings of the 2nd \nEuropean Working Session on Learning), 11-30, Bled, \nYugoslavia: Sigma Press. \n\\-- 8 test results given: 65%-72% accuracy range \n\\-- Tan, M., & Eshelman, L. (1988). Using weighted networks to \nrepresent classification knowledge in noisy domains. Proceedings \nof the Fifth International Conference on Machine Learning, 121-134, \nAnn Arbor, MI. \n\\-- 4 systems tested: accuracy range was 68%-73.5% \n\\-- Cestnik,G., Konenenko,I, & Bratko,I. (1987). Assistant-86: A \nKnowledge-Elicitation Tool for Sophisticated Users. In I.Bratko \n& N.Lavrac (Eds.) Progress in Machine Learning, 31-45, Sig", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e76a359c-ad44-48f2-a5be-f969434c0079&revisionId=62a3f013-f8ae-46b8-9887-aadd4b079659&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5615,7 +5615,7 @@ "platform": "ai4experiments", "platform_identifier": "335", "name": "GAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_50_EDM-2_001", - "description": "https://openml.org

                                                                          GAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_50_EDM-2_001-pmlb

                                                                          ", + "description": "https://openml.org \n\nGAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_50_EDM-2_001-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ea98c298-5fcb-4b37-8262-828d3605cfaf&revisionId=70f884e0-9a7e-458b-bdf0-ad3bba0667dc&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5661,7 +5661,7 @@ "platform": "ai4experiments", "platform_identifier": "347", "name": "thyroid-new", - "description": "https://openml.org

                                                                          new-thyroid-pmlb

                                                                          ", + "description": "https://openml.org \n\nnew-thyroid-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=edc15172-70fb-489b-bff1-c1c28c61ce6b&revisionId=96529752-d961-4e5e-8f0f-b104c3e1b603&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5707,7 +5707,7 @@ "platform": "ai4experiments", "platform_identifier": "348", "name": "churn", - "description": "https://openml.org

                                                                          Author: Unknown

                                                                          Source: PMLB, BigML, Supposedly from UCI but I can't find it there.

                                                                          Please cite:


                                                                          A dataset relating characteristics of telephony account features and usage and whether or not the customer churned. Originally used in Discovering Knowledge in Data: An Introduction to Data Mining.

                                                                          ", + "description": "https://openml.org \n\n**Author** : Unknown \n \n **Source** : [PMLB](https://github.com/EpistasisLab/penn-ml-\nbenchmarks/tree/master/datasets/classification),\n[BigML](https://bigml.com/user/francisco/gallery/dataset/5163ad540c0b5e5b22000383),\nSupposedly from UCI but I can't find it there. \n \n **Please cite** :\n\n \n\nA dataset relating characteristics of telephony account features and usage and\nwhether or not the customer churned. Originally used in [Discovering Knowledge\nin Data: An Introduction to Data Mining](http://secs.ac.in/wp-\ncontent/CSE_PORTAL/DataMining_Daniel.pdf).\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ee42788e-0ec5-45a9-97e4-6a0634ac84e2&revisionId=8cf5e565-aff3-41fd-ac89-c428b59a0a21&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5753,7 +5753,7 @@ "platform": "ai4experiments", "platform_identifier": "349", "name": "blood-transfusion-service-center", - "description": "https://openml.org

                                                                          Author: Prof. I-Cheng Yeh

                                                                          Source: UCI

                                                                          Please cite: Yeh, I-Cheng, Yang, King-Jang, and Ting, Tao-Ming, \"Knowledge discovery on RFM model using Bernoulli sequence\", Expert Systems with Applications, 2008.


                                                                          Blood Transfusion Service Center Data Set

                                                                          Data taken from the Blood Transfusion Service Center in Hsin-Chu City in Taiwan -- this is a classification problem.


                                                                          To demonstrate the RFMTC marketing model (a modified version of RFM), this study adopted the donor database of Blood Transfusion Service Center in Hsin-Chu City in Taiwan. The center passes their blood transfusion service bus to one university in Hsin-Chu City to gather blood donated about every three months. To build an FRMTC model, we selected 748 donors at random from the donor database.


                                                                          Attribute Information



                                                                          • V1: Recency - months since last donation

                                                                          • V2: Frequency - total number of donation

                                                                          • V3: Monetary - total blood donated in c.c.

                                                                          • V4: Time - months since first donation), and a binary variable representing whether he/she donated blood in March 2007 (1 stand for donating blood; 0 stands for not donating blood).


                                                                          The target attribute is a binary variable representing whether he/she donated blood in March 2007 (2 stands for donating blood; 1 stands for not donating blood).

                                                                          ", + "description": "https://openml.org \n\n**Author** : Prof. I-Cheng Yeh \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/Blood+Transfusion+Service+Center) \n \n **Please cite** : Yeh, I-Cheng, Yang, King-Jang, and Ting, Tao-Ming,\n\"Knowledge discovery on RFM model using Bernoulli sequence\", Expert Systems\nwith Applications, 2008.\n\n \n\n**Blood Transfusion Service Center Data Set** \n \nData taken from the Blood Transfusion Service Center in Hsin-Chu City in\nTaiwan -- this is a classification problem.\n\n \n\nTo demonstrate the RFMTC marketing model (a modified version of RFM), this\nstudy adopted the donor database of Blood Transfusion Service Center in Hsin-\nChu City in Taiwan. The center passes their blood transfusion service bus to\none university in Hsin-Chu City to gather blood donated about every three\nmonths. To build an FRMTC model, we selected 748 donors at random from the\ndonor database.\n\n \n\n### Attribute Information\n\n \n\n \n\n * V1: Recency - months since last donation\n \n\n * V2: Frequency - total number of donation\n \n\n * V3: Monetary - total blood donated in c.c.\n \n\n * V4: Time - months since first donation), and a binary variable representing whether he/she donated blood in March 2007 (1 stand for donating blood; 0 stands for not donating blood).\n \n\n \n\nThe target attribute is a binary variable representing whether he/she donated\nblood in March 2007 (2 stands for donating blood; 1 stands for not donating\nblood).\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eea25848-33cf-4b43-9677-1e932d8e710a&revisionId=9b1bfbf7-438a-45a7-99b0-c3c470a2551c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5799,7 +5799,7 @@ "platform": "ai4experiments", "platform_identifier": "351", "name": "edm_aad_data_node_cl", - "description": "

                                                                          EDM RL Controller predictions (Solution Provider: Artificialy SA)


                                                                          Reinforcement learning applied to Electrical discharge machining (EDM) control for the AI4EU project with Agie Charmilles SA. 


                                                                          The solution consist of two nodes: `data_node` server which streams a DataFrame of observations (EDM machine states) read from the path provided by the client (`infile`); and an `agent_node` server which predicts control actions based on the agent / controller specified by the client. Output predictions are stored inside the `./data_predictions/` folder of the `agent_node` Docker container.


                                                                          To use this solution, please use the Docker container and the additional files (which are in the Documents tap of the model in the marketplace) from both the `data_node` and `agent_node`. They are both in the AI4EU platform market place named as `edm_aad_agent_node_cl` and `edm_aad_data_node_cl`

                                                                          ", + "description": "**EDM RL Controller predictions (Solution Provider: Artificialy SA)**\n\n \n\nReinforcement learning applied to Electrical discharge machining (EDM) control\nfor the AI4EU project with Agie Charmilles SA.\n\n \n\nThe solution consist of two nodes: `data_node` server which streams a\nDataFrame of observations (EDM machine states) read from the path provided by\nthe client (`infile`); and an `agent_node` server which predicts control\nactions based on the agent / controller specified by the client. Output\npredictions are stored inside the `./data_predictions/` folder of the\n`agent_node` Docker container.\n\n \n\nTo use this solution, please use the Docker container and the additional files\n(which are in the Documents tap of the model in the marketplace) from both the\n`data_node` and `agent_node`. They are both in the AI4EU platform market place\nnamed as `edm_aad_agent_node_cl` and `edm_aad_data_node_cl`\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f257ab28-e846-4d05-8fc1-9e53cddab23a&revisionId=0672b76d-0046-4ff5-afc1-5e7a64554451&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5845,7 +5845,7 @@ "platform": "ai4experiments", "platform_identifier": "352", "name": "led24", - "description": "https://openml.org

                                                                          led24-pmlb

                                                                          ", + "description": "https://openml.org \n\nled24-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f7d84667-d8e6-4dc3-af68-0845d7e984e2&revisionId=27254760-7bc4-4b93-b466-3e5c93490461&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5891,7 +5891,7 @@ "platform": "ai4experiments", "platform_identifier": "354", "name": "ilpd", - "description": "https://openml.org

                                                                          Author: Bendi Venkata Ramana, M. Surendra Prasad Babu, N. B. Venkateswarlu

                                                                          Source: UCI - 2012

                                                                          Please cite: UCI


                                                                          Indian Liver Patient Dataset

                                                                          This data set contains 416 liver patient records and 167 non liver patient records.The data set was collected from north east of Andhra Pradesh, India. The class label divides the patients into 2 groups (liver patient or not). This data set contains 441 male patient records and 142 female patient records.


                                                                          Attribute Information


                                                                          V1. Age of the patient. Any patient whose age exceeded 89 is listed as being of age \"90\".

                                                                          V2. Gender of the patient

                                                                          V3. Total Bilirubin

                                                                          V4. Direct Bilirubin

                                                                          V5. Alkphos Alkaline Phosphatase

                                                                          V6. Sgpt Alanine Aminotransferase

                                                                          V7. Sgot Aspartate Aminotransferase

                                                                          V8. Total Proteins

                                                                          V9. Albumin

                                                                          V10. A/G Ratio Albumin and Globulin Ratio


                                                                          A feature indicating a train-test split has been removed.


                                                                          Relevant Papers



                                                                          1. Bendi Venkata Ramana, Prof. M. S. Prasad Babu and Prof. N. B. Venkateswarlu, A Critical Comparative Study of Liver Patients from USA and INDIA: An Exploratory Analysis\u009d, International Journal of Computer Science Issues, ISSN:1694-0784, May 2012.

                                                                          2. Bendi Venkata Ramana, Prof. M. S. Prasad Babu and Prof. N. B. Venkateswarlu, A Critical Study of Selected Classification Algorithms for Liver Disease Diagnosis, International Journal of Database Management Systems (IJDMS), Vo", + "description": "https://openml.org \n\n**Author** : Bendi Venkata Ramana, M. Surendra Prasad Babu, N. B.\nVenkateswarlu \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/ILPD+\\(Indian+Liver+Patient+Dataset\\))\n\\- 2012 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Indian Liver Patient Dataset** \n \nThis data set contains 416 liver patient records and 167 non liver patient\nrecords.The data set was collected from north east of Andhra Pradesh, India.\nThe class label divides the patients into 2 groups (liver patient or not).\nThis data set contains 441 male patient records and 142 female patient\nrecords.\n\n \n\n### Attribute Information\n\n \n\nV1. Age of the patient. Any patient whose age exceeded 89 is listed as being\nof age \"90\". \n \nV2. Gender of the patient \n \nV3. Total Bilirubin \n \nV4. Direct Bilirubin \n \nV5. Alkphos Alkaline Phosphatase \n \nV6. Sgpt Alanine Aminotransferase \n \nV7. Sgot Aspartate Aminotransferase \n \nV8. Total Proteins \n \nV9. Albumin \n \nV10. A/G Ratio Albumin and Globulin Ratio\n\n \n\nA feature indicating a train-test split has been removed.\n\n \n\n### Relevant Papers\n\n \n\n \n\n 1. Bendi Venkata Ramana, Prof. M. S. Prasad Babu and Prof. N. B. Venkateswarlu, A Critical Comparative Study of Liver Patients from USA and INDIA: An Exploratory Analysis\u009d, International Journal of Computer Science Issues, ISSN:1694-0784, May 2012. \n \n\n 2. Bendi Venkata Ramana, Prof. M. S. Prasad Babu and Prof. N. B. Venkateswarlu, A Critical Study of Selected Classification Algorithms for Liver Disease Diagnosis, International Journal of Database Management Systems (IJDMS), Vol.3, No.2, ISSN : 0975-5705, PP 101-114, May 2011.\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8016853-8e2c-45f3-8326-bd38387351e7&revisionId=050f2f0a-629d-4f41-a381-14220bd76465&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5937,7 +5937,7 @@ "platform": "ai4experiments", "platform_identifier": "355", "name": "ModelInitializer", - "description": "

                                                                            The Model Initializer is an infrastructure node that can provide initial config parameters to a model.

                                                                            ", + "description": "The Model Initializer is an infrastructure node that can provide initial\nconfig parameters to a model.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f888ec3c-1076-4e57-b56a-05f055aa4760&revisionId=76c80c0b-1883-4cb3-8f6c-4857c77ac4d5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5983,7 +5983,7 @@ "platform": "ai4experiments", "platform_identifier": "356", "name": "solar-flare", - "description": "https://openml.org

                                                                            flare-pmlb

                                                                            ", + "description": "https://openml.org \n\nflare-pmlb\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&revisionId=276fb3d2-00a8-4695-abdc-bbcc8d8ed604&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.2", @@ -6029,7 +6029,7 @@ "platform": "ai4experiments", "platform_identifier": "357", "name": "solar-flare", - "description": "https://openml.org

                                                                            Author: Gary Bradshaw

                                                                            Source: UCI

                                                                            Please cite:


                                                                            Solar Flare database
                                                                            Relevant Information:
                                                                            -- The database contains 3 potential classes, one for the number of times a
                                                                            certain type of solar flare occured in a 24 hour period.
                                                                            -- Each instance represents captured features for 1 active region on the
                                                                            sun.
                                                                            -- The data are divided into two sections. The second section (flare.data2)
                                                                            has had much more error correction applied to the it, and has
                                                                            consequently been treated as more reliable.


                                                                            Number of Instances: flare.data1: 323, flare.data2: 1066


                                                                            Number of attributes: 13 (includes 3 class attributes)


                                                                            Attribute Information


                                                                            1. Code for class (modified Zurich class)  (A,B,C,D,E,F,H)
                                                                            2. Code for largest spot size (X,R,S,A,H,K)
                                                                            3. Code for spot distribution (X,O,I,C)
                                                                            4. Activity (1 = reduced, 2 = unchanged)
                                                                            5. Evolution (1 = decay, 2 = no growth,
                                                                            3 = growth)
                                                                            6. Previous 24 hour flare activity code (1 = nothing as big as an M1,
                                                                            2 = one M1,
                                                                            3 = more activity than one M1)
                                                                            7. Historically-complex (1 = Yes, 2 = No)
                                                                            8. Did region become historically complex (1 = yes, 2 = no)
                                                                            on this pass across the sun's disk
                                                                            9. Area (1 = small, 2 = large)


                                                                            1. Area", + "description": "https://openml.org \n\n**Author** : Gary Bradshaw \n \n**Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/solar+flare) \n \n**Please cite** :\n\n \n\n**Solar Flare database** \nRelevant Information: \n\\-- The database contains 3 potential classes, one for the number of times a \ncertain type of solar flare occured in a 24 hour period. \n\\-- Each instance represents captured features for 1 active region on the \nsun. \n\\-- The data are divided into two sections. The second section (flare.data2) \nhas had much more error correction applied to the it, and has \nconsequently been treated as more reliable.\n\n \n\nNumber of Instances: flare.data1: 323, flare.data2: 1066\n\n \n\nNumber of attributes: 13 (includes 3 class attributes)\n\n \n\n### Attribute Information\n\n \n\n \n \n 1. Code for class (modified Zurich class) (A,B,C,D,E,F,H) \n 2. Code for largest spot size (X,R,S,A,H,K) \n 3. Code for spot distribution (X,O,I,C) \n 4. Activity (1 = reduced, 2 = unchanged) \n 5. Evolution (1 = decay, 2 = no growth, \n 3 = growth) \n 6. Previous 24 hour flare activity code (1 = nothing as big as an M1, \n 2 = one M1, \n 3 = more activity than one M1) \n 7. Historically-complex (1 = Yes, 2 = No) \n 8. Did region become historically complex (1 = yes, 2 = no) \n on this pass across the sun's disk \n 9. Area (1 = small, 2 = large) \n \n\n \n\n \n\n 1. Area of the largest spot (1 = <=5, 2 = >5)\n \n\n \n\nFrom all these predictors three classes of flares are predicted, which a", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&revisionId=91ca0a1e-60e1-45ce-a2c0-7c3f79498739&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6075,7 +6075,7 @@ "platform": "ai4experiments", "platform_identifier": "358", "name": "solar-flare", - "description": "https://openml.org

                                                                              Author: Gary Bradshaw

                                                                              Source: UCI

                                                                              Please cite:


                                                                              Solar Flare database
                                                                              Relevant Information:
                                                                              -- The database contains 3 potential classes, one for the number of times a
                                                                              certain type of solar flare occured in a 24 hour period.
                                                                              -- Each instance represents captured features for 1 active region on the
                                                                              sun.
                                                                              -- The data are divided into two sections. The second section (flare.data2)
                                                                              has had much more error correction applied to the it, and has
                                                                              consequently been treated as more reliable.


                                                                              Number of Instances: flare.data1: 323, flare.data2: 1066


                                                                              Number of attributes: 13 (includes 3 class attributes)


                                                                              Attribute Information


                                                                              1. Code for class (modified Zurich class)  (A,B,C,D,E,F,H)
                                                                              2. Code for largest spot size (X,R,S,A,H,K)
                                                                              3. Code for spot distribution (X,O,I,C)
                                                                              4. Activity (1 = reduced, 2 = unchanged)
                                                                              5. Evolution (1 = decay, 2 = no growth,
                                                                              3 = growth)
                                                                              6. Previous 24 hour flare activity code (1 = nothing as big as an M1,
                                                                              2 = one M1,
                                                                              3 = more activity than one M1)
                                                                              7. Historically-complex (1 = Yes, 2 = No)
                                                                              8. Did region become historically complex (1 = yes, 2 = no)
                                                                              on this pass across the sun's disk
                                                                              9. Area (1 = small, 2 = large)


                                                                              1. Area", + "description": "https://openml.org \n\n**Author** : Gary Bradshaw \n \n**Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/solar+flare) \n \n**Please cite** :\n\n \n\n**Solar Flare database** \nRelevant Information: \n\\-- The database contains 3 potential classes, one for the number of times a \ncertain type of solar flare occured in a 24 hour period. \n\\-- Each instance represents captured features for 1 active region on the \nsun. \n\\-- The data are divided into two sections. The second section (flare.data2) \nhas had much more error correction applied to the it, and has \nconsequently been treated as more reliable.\n\n \n\nNumber of Instances: flare.data1: 323, flare.data2: 1066\n\n \n\nNumber of attributes: 13 (includes 3 class attributes)\n\n \n\n### Attribute Information\n\n \n\n \n \n 1. Code for class (modified Zurich class) (A,B,C,D,E,F,H) \n 2. Code for largest spot size (X,R,S,A,H,K) \n 3. Code for spot distribution (X,O,I,C) \n 4. Activity (1 = reduced, 2 = unchanged) \n 5. Evolution (1 = decay, 2 = no growth, \n 3 = growth) \n 6. Previous 24 hour flare activity code (1 = nothing as big as an M1, \n 2 = one M1, \n 3 = more activity than one M1) \n 7. Historically-complex (1 = Yes, 2 = No) \n 8. Did region become historically complex (1 = yes, 2 = no) \n on this pass across the sun's disk \n 9. Area (1 = small, 2 = large) \n \n\n \n\n \n\n 1. Area of the largest spot (1 = <=5, 2 = >5)\n \n\n \n\nFrom all these predictors three classes of flares are predicted, which a", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&revisionId=f333bc3c-b87a-42b8-a5e9-5290036cc520&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -6121,7 +6121,7 @@ "platform": "ai4experiments", "platform_identifier": "360", "name": "ai4eu-robotics-wrist-6144-fft-broker", - "description": "

                                                                                The robotic wrist demonstrator represents a mechanical wrist with three axes that can hold tools, e.g. for spray painting in combination with a pump. On this robotic wrist, two accelerometers are mounted for vibration monitoring and recording: one in the movable front part of the wrist and one in the shaft. The wrist can be controlled through the torque or the designated position of each axis\u2019 motor.

                                                                                The dataset consists of 1.8 billion measurements of several sensor data of the robotic wrist in 1-second intervals over six months in 2020.

                                                                                The complete dataset & description is available on Zenodo

                                                                                ", + "description": "The robotic wrist demonstrator represents a mechanical wrist with three axes\nthat can hold tools, e.g. for spray painting in combination with a pump. On\nthis robotic wrist, two accelerometers are mounted for vibration monitoring\nand recording: one in the movable front part of the wrist and one in the\nshaft. The wrist can be controlled through the torque or the designated\nposition of each axis\u2019 motor.\n\nThe dataset consists of 1.8 billion measurements of several sensor data of the\nrobotic wrist in 1-second intervals over six months in 2020.\n\n[The complete dataset & description is available on\nZenodo](https://zenodo.org/record/5729818)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f9425d71-4d33-4af5-b4ba-25d6fa8aa3c4&revisionId=2399eb3e-67fb-419f-a630-df48c3cf138a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6167,7 +6167,7 @@ "platform": "ai4experiments", "platform_identifier": "363", "name": "climate-model-simulation-crashes", - "description": "https://openml.org

                                                                                Author: D. Lucas, R. Klein, J. Tannahill, D. Ivanova, S. Brandon, D. Domyancic, Y. Zhang.


                                                                                Source: UCI


                                                                                Please Cite:
                                                                                Lucas, D. D., Klein, R., Tannahill, J., Ivanova, D., Brandon, S., Domyancic, D., and Zhang, Y.: Failure analysis of parameter-induced simulation crashes in climate models, Geosci. Model Dev. Discuss., 6, 585-623, Web Link, 2013.


                                                                                Source:


                                                                                D. Lucas (ddlucas .at. alum.mit.edu), Lawrence Livermore National Laboratory; R. Klein (rklein .at. astron.berkeley.edu), Lawrence Livermore National Laboratory & U.C. Berkeley; J. Tannahill (tannahill1 .at. llnl.gov), Lawrence Livermore National Laboratory; D. Ivanova (ivanova2 .at. llnl.gov), Lawrence Livermore National Laboratory; S. Brandon (brandon1 .at. llnl.gov), Lawrence Livermore National Laboratory; D. Domyancic (domyancic1 .at. llnl.gov), Lawrence Livermore National Laboratory; Y. Zhang (zhang24 .at. llnl.gov), Lawrence Livermore National Laboratory .


                                                                                This data was constructed using LLNL's UQ Pipeline, was created under the auspices of the US Department of Energy by Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344, was funded by LLNL's Uncertainty Quantification Strategic Initiative Laboratory Directed Research and Development Project under tracking code 10-SI-013, and is released under UCRL number LLNL-MISC-633994.


                                                                                Data Set Information:


                                                                                This dataset contains records of simulation crashes encountered during climate model uncertainty quantification (UQ) ensembles. Ensemb", + "description": "https://openml.org \n\n**Author** : D. Lucas, R. Klein, J. Tannahill, D. Ivanova, S. Brandon, D.\nDomyancic, Y. Zhang.\n\n \n\n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/climate+model+simulation+crashes)\n\n \n\n **Please Cite** : \nLucas, D. D., Klein, R., Tannahill, J., Ivanova, D., Brandon, S., Domyancic,\nD., and Zhang, Y.: Failure analysis of parameter-induced simulation crashes in\nclimate models, Geosci. Model Dev. Discuss., 6, 585-623, [Web\nLink](http://www.geosci-model-dev-\ndiscuss.net/6/585/2013/gmdd-6-585-2013.html), 2013.\n\n \n\nSource:\n\n \n\nD. Lucas (ddlucas .at. alum.mit.edu), Lawrence Livermore National Laboratory;\nR. Klein (rklein .at. astron.berkeley.edu), Lawrence Livermore National\nLaboratory & U.C. Berkeley; J. Tannahill (tannahill1 .at. llnl.gov), Lawrence\nLivermore National Laboratory; D. Ivanova (ivanova2 .at. llnl.gov), Lawrence\nLivermore National Laboratory; S. Brandon (brandon1 .at. llnl.gov), Lawrence\nLivermore National Laboratory; D. Domyancic (domyancic1 .at. llnl.gov),\nLawrence Livermore National Laboratory; Y. Zhang (zhang24 .at. llnl.gov),\nLawrence Livermore National Laboratory .\n\n \n\nThis data was constructed using LLNL's UQ Pipeline, was created under the\nauspices of the US Department of Energy by Lawrence Livermore National\nLaboratory under Contract DE-AC52-07NA27344, was funded by LLNL's Uncertainty\nQuantification Strategic Initiative Laboratory Directed Research and\nDevelopment Project under tracking code 10-SI-013, and is released under UCRL\nnumber LLNL-MISC-633994.\n\n \n\nData Set Information:\n\n \n\nThis dataset contains records of simulation crashes encountered during climate\nmodel uncertainty quantification (UQ) ensembles. Ensemble members were\nconstructed using a Latin hypercube method in LLNL's UQ Pipeline software\nsystem to", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fba9e526-edb4-4fb0-9cb1-31ea29f07a2f&revisionId=6b6905e7-2855-43c9-a344-c01991e4efca&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", diff --git a/src/connectors/example/resources/resource/experiments.json b/src/connectors/example/resources/resource/experiments.json index 39ee9194..f0d4a311 100644 --- a/src/connectors/example/resources/resource/experiments.json +++ b/src/connectors/example/resources/resource/experiments.json @@ -93,7 +93,7 @@ "platform": "ai4experiments", "platform_identifier": "368", "name": "AI4IoT-Calibration-Solution", - "description": "

                                                                                This solution implements a pipeline to air quality sensor calibration in the context of the AI4IoT pilot, consisting of three modules which, together, offer a solution for calibration of low-cost air quality sensors in the city of Trondheim, Norway. The modules are: a data source which fetches data from several external APIs and concatenates them, a calibration which predicts the true value at the sensor location and a simple visualization module which implements a web interface to analyze the output of the calibration procedure.


                                                                                More info on the pipeline can be found at the github repository: https://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.

                                                                                ", + "description": "This solution implements a pipeline to air quality sensor calibration in the\ncontext of the AI4IoT pilot, consisting of three modules which, together,\noffer a solution for calibration of low-cost air quality sensors in the city\nof Trondheim, Norway. The modules are: a `data source` which fetches data from\nseveral external APIs and concatenates them, a `calibration` which predicts\nthe true value at the sensor location and a simple `visualization` module\nwhich implements a web interface to analyze the output of the calibration\nprocedure.\n\n \n\nMore info on the pipeline can be found at the github repository:\nhttps://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&revisionId=478028bb-1c58-4641-9bc0-eba716119aec&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.2", @@ -138,7 +138,7 @@ "platform": "ai4experiments", "platform_identifier": "369", "name": "AI4IoT-Calibration-Solution", - "description": "

                                                                                This solution implements a pipeline to air quality sensor calibration in the context of the AI4IoT pilot, consisting of three modules which, together, offer a solution for calibration of low-cost air quality sensors in the city of Trondheim, Norway. The modules are: a data source which fetches data from several external APIs and concatenates them, a calibration which predicts the true value at the sensor location and a simple visualization module which implements a web interface to analyze the output of the calibration procedure.


                                                                                More info on the pipeline can be found at the github repository: https://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.

                                                                                ", + "description": "This solution implements a pipeline to air quality sensor calibration in the\ncontext of the AI4IoT pilot, consisting of three modules which, together,\noffer a solution for calibration of low-cost air quality sensors in the city\nof Trondheim, Norway. The modules are: a `data source` which fetches data from\nseveral external APIs and concatenates them, a `calibration` which predicts\nthe true value at the sensor location and a simple `visualization` module\nwhich implements a web interface to analyze the output of the calibration\nprocedure.\n\n \n\nMore info on the pipeline can be found at the github repository:\nhttps://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&revisionId=478028bb-1c58-4641-9bc0-eba716119aec&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -183,7 +183,7 @@ "platform": "ai4experiments", "platform_identifier": "370", "name": "AI4IoT-Calibration-Solution", - "description": "

                                                                                This solution implements a pipeline to air quality sensor calibration in the context of the AI4IoT pilot, consisting of three modules which, together, offer a solution for calibration of low-cost air quality sensors in the city of Trondheim, Norway. The modules are: a data source which fetches data from several external APIs and concatenates them, a calibration which predicts the true value at the sensor location and a simple visualization module which implements a web interface to analyze the output of the calibration procedure.


                                                                                More info on the pipeline can be found at the github repository: https://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.

                                                                                ", + "description": "This solution implements a pipeline to air quality sensor calibration in the\ncontext of the AI4IoT pilot, consisting of three modules which, together,\noffer a solution for calibration of low-cost air quality sensors in the city\nof Trondheim, Norway. The modules are: a `data source` which fetches data from\nseveral external APIs and concatenates them, a `calibration` which predicts\nthe true value at the sensor location and a simple `visualization` module\nwhich implements a web interface to analyze the output of the calibration\nprocedure.\n\n \n\nMore info on the pipeline can be found at the github repository:\nhttps://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&revisionId=478028bb-1c58-4641-9bc0-eba716119aec&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.1", @@ -228,7 +228,7 @@ "platform": "ai4experiments", "platform_identifier": "371", "name": "MusicDetection-pipeline", - "description": "

                                                                                This simple pipeline automatically analyzes audio content with the MusicDetection model and annotates music attributes like genre and tempo.

                                                                                Content to be analyzed can be provided via file upload, detection results will be presented in WebUI and can be downloaded.


                                                                                Remark: Since MusicDetection model is not publicly accessible, for the deployment of this pipeline it is necessary to acquire access credentials. Please send you requests to ai-assets@idmt.fraunhofer.de


                                                                                ", + "description": "This simple pipeline automatically analyzes audio content with the\nMusicDetection model and annotates music attributes like genre and tempo.\n\nContent to be analyzed can be provided via file upload, detection results will\nbe presented in WebUI and can be downloaded.\n\n \n\nRemark: Since MusicDetection model is not publicly accessible, for the\ndeployment of this pipeline it is necessary to acquire access credentials.\nPlease send you requests to[ ai-assets@idmt.fraunhofer.de](mailto:ai-\nassets@idmt.fraunhofer.de??subject=Music-Detection-pipeline)\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fc0b6dc-46e5-468b-9adf-841d9b062e51&revisionId=1b067b23-4730-4dc1-95aa-0bfc78b0a6ce&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "0.9.0", @@ -273,7 +273,7 @@ "platform": "ai4experiments", "platform_identifier": "372", "name": "clinical_evida_text_classifier", - "description": "

                                                                                This model let us to classify clinical text related to colon cancer or non-colon cancer texts based on ICD10 categories. The main objective is to get a label (1 or 0) depending if the input text belongs to C18 ICD category, which corresponds to Colon Cancer Category. The model is based on distilBERT transformer model and was trained using CodiEsp dataset. The input is a plain text and the output will be a number label.

                                                                                ", + "description": "This model let us to classify clinical text related to colon cancer or non-\ncolon cancer texts based on ICD10 categories. The main objective is to get a\nlabel (1 or 0) depending if the input text belongs to C18 ICD category, which\ncorresponds to Colon Cancer Category. The model is based on distilBERT\ntransformer model and was trained using CodiEsp dataset. The input is a plain\ntext and the output will be a number label.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=13648d7f-5002-4fd8-98f7-27d50d2d964e&revisionId=65657060-5fac-48d5-bdf8-e75dab26ae23&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.1", @@ -318,7 +318,7 @@ "platform": "ai4experiments", "platform_identifier": "373", "name": "sentiment-analysis-pipeline", - "description": "

                                                                                Sentiment analysis pipeline.



                                                                                It takes the query text from the user and connects to the prediction model. The results can then be viewed on the Prediction model's UI.

                                                                                ", + "description": "Sentiment analysis pipeline.\n\n \n\n \n\nIt takes the query text from the user and connects to the prediction model.\nThe results can then be viewed on the Prediction model's UI.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=24269432-3dcf-42a8-a04e-463ed0c59757&revisionId=a951dffc-98f8-4914-a1d5-0fa79cb76640&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -363,7 +363,7 @@ "platform": "ai4experiments", "platform_identifier": "374", "name": "TrainingPipeline", - "description": "

                                                                                Overview:

                                                                                The training pipeline for the news_training example consists of 4 main nodes,

                                                                                1) News-Classifier - The core of the pipeline

                                                                                2) trainer-model - Facilitates the training process

                                                                                3) Tensorboard - Provides diagnostics preview

                                                                                4) News-Databroker - Starting point for data feed

                                                                                 

                                                                                Note:

                                                                                Apart from demonstrating a training scenario, this example also shows the use of a shared folder for common file access for pipeline nodes.

                                                                                Each of the 4 mentioned nodes are also available as independent models here.

                                                                                 

                                                                                Repository link:

                                                                                Please refer the following link for the code that represents the training pipeline in the Eclipse Graphene platform - https://gitlab.eclipse.org/eclipse/graphene/tutorials/-/tree/main/news_training

                                                                                ", + "description": "**Overview:**\n\nThe training pipeline for the news_training example consists of 4 main nodes,\n\n1) News-Classifier - The core of the pipeline\n\n2) trainer-model - Facilitates the training process\n\n3) Tensorboard - Provides diagnostics preview\n\n4) News-Databroker - Starting point for data feed\n\n\n\nNote:\n\nApart from demonstrating a training scenario, this example also shows the use\nof a shared folder for common file access for pipeline nodes.\n\nEach of the 4 mentioned nodes are also available as independent models here.\n\n\n\nRepository link:\n\nPlease refer the following link for the code that represents the training\npipeline in the Eclipse Graphene platform -\n\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2d4b9dff-c822-4fb6-9b5f-06f761fcbe2c&revisionId=7e95c907-2bdf-405d-8da4-4961e785514b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "v2", @@ -408,7 +408,7 @@ "platform": "ai4experiments", "platform_identifier": "375", "name": "ucrsuite-dtw-pip", - "description": "

                                                                                Overview

                                                                                The UCR Suite DTW pipeline ucrsuite-dtw-pip implements fast nearest-neighbor retrieval under the dynamic time warping (DTW)

                                                                                ucrsuite-config data broker is a starting point for the pipeline that process files and parameters to perform subsequence search in time series. ucrsuite-dtw calculates the nearest neighbor of a times series in a larger time series expressed as location and distance, using the UCR suite DTW algorithm.

                                                                                Usage

                                                                                To use the ucrsuite-dtw-pip solution, you can either download it from the Marketplace or run it on the Playground. Once the solution is deployed in the Playground, open the Web-UI of the ucrsuite-config model and enter the following information:


                                                                                * Data file: The path to the file containing the long time series.

                                                                                * Query file: The path to the file containing the query time series.

                                                                                * R: The size of the warping window. The value is in range 0-1, e.g., R=0.05 means windows of size +/-5%.


                                                                                Then, Run the solution. The distance calculation will start in the background.

                                                                                The result of calculation, expressed as location and distance, will be stored in the shared folder as a `dtw_distance.txt` file.

                                                                                Detailed result also available in the logs of the ucrsuite-dtw model in the following format:

                                                                                ------------------------

                                                                                Location: 756562

                                                                                Distance: 3.77562

                                                                                This is the assembled Solution of the AI4Industry Pilot of the AI4EU project. To run the solution, please use \"Deploy to local\" in the AI4EU Experiments Platform on this solution and follow the readme in the package or the YouTube Tutorial (Deploy and Run).


                                                                                This solution is the result of a collaboration between

                                                                                • Siemens Germany - Ivan Gocev
                                                                                • Fraunhofer IAIS - Raoul Blankertz, Nico H\u00f6ft
                                                                                • Technische Universit\u00e4t Wien - Peter Sch\u00fcller





                                                                                Contact:

                                                                                Peter Sch\u00fcller

                                                                                ", + "description": "This is the assembled Solution of the [AI4Industry\nPilot](https://www.ai4europe.eu/node/106) of the [AI4EU\nproject](www.ai4europe.eu). To run the solution, please use \"Deploy to local\"\nin the [AI4EU Experiments Platform](https://acumos-int-fhg.ai4eu.eu/) on this\nsolution and follow the readme in the package or the [YouTube Tutorial (Deploy\nand Run)](https://www.youtube.com/watch?v=gM-HRMNOi4w).\n\n \n\nThis solution is the result of a collaboration between\n\n * Siemens Germany - Ivan Gocev\n * Fraunhofer IAIS - Raoul Blankertz, Nico H\u00f6ft\n * Technische Universit\u00e4t Wien - Peter Sch\u00fcller\n\n \n\n \n\n \n\n \n\nContact:\n\n[Peter Sch\u00fcller](mailto:peter@peterschueller.com)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=33b984f4-fa6e-42e3-9af7-8cb3464ae10b&revisionId=1f58db48-282b-4629-a596-c379c9550f66&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "2", @@ -498,7 +498,7 @@ "platform": "ai4experiments", "platform_identifier": "377", "name": "AI4IndustryPilot", - "description": "

                                                                                This is the assembled Solution of the AI4Industry Pilot of the AI4EU project. To run the solution, please use \"Deploy to local\" in the AI4EU Experiments Platform on this solution and follow the readme in the package or the YouTube Tutorial (Deploy and Run).


                                                                                This solution is the result of a collaboration between

                                                                                • Siemens Germany - Ivan Gocev
                                                                                • Fraunhofer IAIS - Raoul Blankertz, Nico H\u00f6ft
                                                                                • Technische Universit\u00e4t Wien - Peter Sch\u00fcller





                                                                                Contact:

                                                                                Peter Sch\u00fcller

                                                                                ", + "description": "This is the assembled Solution of the [AI4Industry\nPilot](https://www.ai4europe.eu/node/106) of the [AI4EU\nproject](www.ai4europe.eu). To run the solution, please use \"Deploy to local\"\nin the [AI4EU Experiments Platform](https://acumos-int-fhg.ai4eu.eu/) on this\nsolution and follow the readme in the package or the [YouTube Tutorial (Deploy\nand Run)](https://www.youtube.com/watch?v=gM-HRMNOi4w).\n\n \n\nThis solution is the result of a collaboration between\n\n * Siemens Germany - Ivan Gocev\n * Fraunhofer IAIS - Raoul Blankertz, Nico H\u00f6ft\n * Technische Universit\u00e4t Wien - Peter Sch\u00fcller\n\n \n\n \n\n \n\n \n\nContact:\n\n[Peter Sch\u00fcller](mailto:peter@peterschueller.com)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=33b984f4-fa6e-42e3-9af7-8cb3464ae10b&revisionId=1f58db48-282b-4629-a596-c379c9550f66&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1", @@ -543,7 +543,7 @@ "platform": "ai4experiments", "platform_identifier": "378", "name": "house-prices-pipeline", - "description": "

                                                                                Overview description

                                                                                The House Prices Pipeline is a simple example pipeline that predicts house prices. The pipeline illustrates how the price development is predicted by entering relevant parameters that provide information about the status of a property.

                                                                                 

                                                                                Use case example

                                                                                As an interested house owner, an estimate can be made based on the AI forecast, how much the property will increase in value or not.

                                                                                 

                                                                                Usage

                                                                                Select the \"houseprice-pipeline\" solution in the Marketplace or in the Design Studio. It is possible to download the solution or to run it on the Playground for testing purposes. When the solution is deployed in the Playground, select the Web-UI of the databroker and fill in the parameters. Then go back to the Playground and run the solution once and open the Web-UI (interface) of the model. In the second interface you will get the prediction based on your input.

                                                                                 

                                                                                Support

                                                                                The solution is part of the tutorials with developer documentation and source code available. For further construction feel free to reach out to the AI.Lab team ai-lab@iais.fraunhofer.de or directly with the developer of the technology. The developer teams are generally open for feedback and happy about co-creation opportunities.


                                                                                ", + "description": "**Overview description**\n\nThe House Prices Pipeline is a simple example pipeline that predicts house\nprices. The pipeline illustrates how the price development is predicted by\nentering relevant parameters that provide information about the status of a\nproperty.\n\n ** **\n\n **Use case example**\n\nAs an interested house owner, an estimate can be made based on the AI\nforecast, how much the property will increase in value or not.\n\n ** **\n\n **Usage**\n\nSelect the \"houseprice-pipeline\" solution in the Marketplace or in the Design\nStudio. It is possible to download the solution or to run it on the Playground\nfor testing purposes. When the solution is deployed in the Playground, select\nthe Web-UI of the databroker and fill in the parameters. Then go back to the\nPlayground and run the solution once and open the Web-UI (interface) of the\nmodel. In the second interface you will get the prediction based on your\ninput.\n\n\n\n **Support**\n\nThe solution is part of the tutorials with developer documentation and source\ncode available. For further construction feel free to reach out to the AI.Lab\nteam ai-lab@iais.fraunhofer.de or directly with the developer of the\ntechnology. The developer teams are generally open for feedback and happy\nabout co-creation opportunities.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=42bdc41c-6144-4c7b-88b6-4509999bff6d&revisionId=ec4a4a98-d37a-49c5-aaa1-97437d8a5a31&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -588,7 +588,7 @@ "platform": "ai4experiments", "platform_identifier": "379", "name": "Sudoku-Tutorial-Stream", - "description": "

                                                                                This is the streaming version of the deployable Solution of the AI4EU Experiments Sudoku Hello World!

                                                                                It is a Proof of Concept for a Sudoku design assistant based on ASP, gRPC, and Protobuf, deployable in the AI4EU Experiments Platform.

                                                                                The Git repository holding this component of the Hello World is publicly available here: https://github.com/peschue/ai4eu-sudoku

                                                                                A Tutorial video about this \"Sudoku Hello World\" can be found here: https://www.youtube.com/watch?v=gM-HRMNOi4w

                                                                                ", + "description": "This is the **streaming** version of the deployable **Solution** of the\n**AI4EU Experiments Sudoku Hello World**!\n\nIt is a Proof of Concept for a Sudoku design assistant based on ASP, gRPC, and\nProtobuf, deployable in the AI4EU Experiments Platform.\n\nThe Git repository holding this component of the Hello World is publicly\navailable here: \n\nA Tutorial video about this \"Sudoku Hello World\" can be found here:\n\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=516d1afa-44ae-4315-be0a-88232698778d&revisionId=72489923-f34e-454a-85ef-2a0b8a54ed54&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1", @@ -633,7 +633,7 @@ "platform": "ai4experiments", "platform_identifier": "380", "name": "Hubeau_Piezo_Stations", - "description": "

                                                                                This is an example of solution to access data of the French groundwater level stations (piezometer sensor).

                                                                                Data from the \"Piezometry\" API come from the ADES portal (national portal for access to French groundwater data). They relate to piezometric measurements (water level in groundwater tables), throughout France, from all the partners of the water information system (see metadata).


                                                                                The updates are integrated daily into the API.


                                                                                Data is expressed


                                                                                in NGF meters for levels (or ratings);

                                                                                in meters in relation to the measurement mark for the depths.


                                                                                ", + "description": "This is an example of solution to access data of the French groundwater level\nstations (piezometer sensor).\n\nData from the \"Piezometry\" API come from the ADES portal (national portal for\naccess to French groundwater data). They relate to piezometric measurements\n(water level in groundwater tables), throughout France, from all the partners\nof the water information system (see metadata).\n\n \n\nThe updates are integrated daily into the API.\n\n \n\nData is expressed\n\n \n\nin NGF meters for levels (or ratings);\n\nin meters in relation to the measurement mark for the depths.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5a56cb42-bfc5-48c6-a92b-92bb06a2b308&revisionId=780ab7bd-c541-4e36-9493-f80dcd67f743&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -678,7 +678,7 @@ "platform": "ai4experiments", "platform_identifier": "381", "name": "Iris_Pipeline", - "description": "

                                                                                Iris Pipeline: Made use of generic data broker to connect to iris dataset.

                                                                                ", + "description": "Iris Pipeline: Made use of generic data broker to connect to iris dataset.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5b367470-e405-44de-b930-4c1e5f3e7161&revisionId=8b2b253f-3bd1-4719-8d0a-9f1084bf15bf&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -723,7 +723,7 @@ "platform": "ai4experiments", "platform_identifier": "382", "name": "ner-pipeline", - "description": "

                                                                                This is the ner-pipeline, which represents a deep learning Entity Recognizer in German.

                                                                                After successfully deploying ner-pipeline in the KI.NRW Playground, submit the desired text via ner-databroker's Web UI first (1), then RUN the pipeline (2) and go to the Web UI of the ner-model (3). You will see a list of processed texts, with the most recent provided text on top of the list.


                                                                                For each new NER request to the deployed ner-pipeline, repeat the steps from 1 to 3.

                                                                                ", + "description": "This is the ner-pipeline, which represents a deep learning Entity Recognizer\nin German.\n\nAfter successfully deploying ner-pipeline in the KI.NRW Playground, submit the\ndesired text via ner-databroker's Web UI first (1), then RUN the pipeline (2)\nand go to the Web UI of the ner-model (3). You will see a list of processed\ntexts, with the most recent provided text on top of the list.\n\n \n\nFor each new NER request to the deployed ner-pipeline, repeat the steps from 1\nto 3.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=612a819c-66fe-4ac4-86ae-b04e95ef4624&revisionId=a63bc9db-1691-45ca-a022-98e89ff43fd5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -768,7 +768,7 @@ "platform": "ai4experiments", "platform_identifier": "383", "name": "advice-inference-pipeline", - "description": "

                                                                                The process is divided into two independent workflows, the first one is the prediction, and includes the advice-img-databroker, advice-road-crop and advice-yolo nodes, which will perform the whole process of label prediction. On the other hand, the advice-label-assitant node allows the user to perform the relabelling task while the inference process is performed in the background

                                                                                ", + "description": "The process is divided into two independent workflows, the first one is the\nprediction, and includes the advice-img-databroker, advice-road-crop and\nadvice-yolo nodes, which will perform the whole process of label prediction.\nOn the other hand, the advice-label-assitant node allows the user to perform\nthe relabelling task while the inference process is performed in the\nbackground\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=659ee5a9-0fbe-4676-8b1f-bb27d8379c30&revisionId=bae9c467-8208-47cc-b46f-ba6c97e9930d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "st3", @@ -813,7 +813,7 @@ "platform": "ai4experiments", "platform_identifier": "384", "name": "ADVICE", - "description": "

                                                                                AI-baseD predictiVe road maIntenanCe for a safer Europe (ADVICE) consist of a two stages pipeline for pothole detection, pothole size estimation and pothole formation forecasting. The pipeline is expected to evolve into a hybrid solution of edge and cloud computing.

                                                                                ", + "description": "**A** I-base **D** predicti **V** e road ma **I** ntenan **C** e for a safer\n**E** urope (ADVICE) consist of a two stages pipeline for pothole detection,\npothole size estimation and pothole formation forecasting. The pipeline is\nexpected to evolve into a hybrid solution of edge and cloud computing.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6a58218e-ae25-446e-96b0-ebbb954f76e9&revisionId=5487352a-0934-465d-a9bd-feb927033a82&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "0.0.1", @@ -858,7 +858,7 @@ "platform": "ai4experiments", "platform_identifier": "385", "name": "Hubeau_Piezo_Chroniques", - "description": "

                                                                                This is an example of solution to access data of the French groundwater level observations timeseries (from piezometer sensor).

                                                                                Data from the \"Piezometry\" API come from the ADES portal (national portal for access to French groundwater data). They relate to piezometric measurements (water level in groundwater tables), throughout France, from all the partners of the water information system (see metadata).


                                                                                The updates are integrated daily into the API.


                                                                                Data is expressed


                                                                                in NGF meters for levels (or ratings);

                                                                                in meters in relation to the measurement mark for the depths.


                                                                                ", + "description": "This is an example of **solution** to access data of the French groundwater\nlevel observations timeseries (from piezometer sensor).\n\nData from the \"Piezometry\" API come from the ADES portal (national portal for\naccess to French groundwater data). They relate to piezometric measurements\n(water level in groundwater tables), throughout France, from all the partners\nof the water information system (see metadata).\n\n \n\nThe updates are integrated daily into the API.\n\n \n\nData is expressed\n\n \n\nin NGF meters for levels (or ratings);\n\nin meters in relation to the measurement mark for the depths.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7694139a-dabf-4aa3-98ba-40ffe4c5fcad&revisionId=19527676-2736-419a-be52-0fa6895b2c50&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -903,7 +903,7 @@ "platform": "ai4experiments", "platform_identifier": "386", "name": "AI_REGIO_NLP_DSS", - "description": "

                                                                                AI Regio Pipeline structured to receive natural language text from a mic client over internet, transforming audio into text and using the produced text to help an operator in manufacturing domain.

                                                                                NLP is coupled with a self-learning DSS system that updates probability tables based on past answers given by the operator. It is able to understand short answers, like yes / no / don't know. NLP module, instead, maps a full sentence into well-know problem, allowing the system to ask the right first question

                                                                                ", + "description": "AI Regio Pipeline structured to receive natural language text from a mic\nclient over internet, transforming audio into text and using the produced text\nto help an operator in manufacturing domain.\n\nNLP is coupled with a self-learning DSS system that updates probability tables\nbased on past answers given by the operator. It is able to understand short\nanswers, like yes / no / don't know. NLP module, instead, maps a full sentence\ninto well-know problem, allowing the system to ask the right first question\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8556ecaf-35ef-4b40-91bb-699165f89d71&revisionId=41386cb6-d281-429a-9415-b9c20c0cc9cb&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.1", @@ -948,7 +948,7 @@ "platform": "ai4experiments", "platform_identifier": "387", "name": "AI_REGIO_NLP_DSS", - "description": "

                                                                                AI Regio Pipeline structured to receive natural language text from a mic client over internet, transforming audio into text and using the produced text to help an operator in manufacturing domain.

                                                                                NLP is coupled with a self-learning DSS system that updates probability tables based on past answers given by the operator. It is able to understand short answers, like yes / no / don't know. NLP module, instead, maps a full sentence into well-know problem, allowing the system to ask the right first question

                                                                                ", + "description": "AI Regio Pipeline structured to receive natural language text from a mic\nclient over internet, transforming audio into text and using the produced text\nto help an operator in manufacturing domain.\n\nNLP is coupled with a self-learning DSS system that updates probability tables\nbased on past answers given by the operator. It is able to understand short\nanswers, like yes / no / don't know. NLP module, instead, maps a full sentence\ninto well-know problem, allowing the system to ask the right first question\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8556ecaf-35ef-4b40-91bb-699165f89d71&revisionId=41386cb6-d281-429a-9415-b9c20c0cc9cb&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -1128,7 +1128,7 @@ "platform": "ai4experiments", "platform_identifier": "391", "name": "Standard_STA_Flow", - "description": "

                                                                                This is an example of solution exploiting the generic connector for the SensorThings API. This connector allows to connect to any SensorThings API in the world and therefore potentially recover data on any domain. For example, this would facilitate the retrieval of public Covid19 data, harvested from various sources including Johns Hopkins and RKI, or from near-real-time air quality across Europe, from both national sources (harvested from AT SOS and WFS) and Europe (EEA).

                                                                                To illustrate the potential uses of these different SensorThings API (with a single connector), one can take a look at these different applications: a visualisation tool[1] bringing together French and German flow data, a covid-19 dashboard[2] and the Windy Web site[3] focused on the weather forecast.


                                                                                [1] https://wg-brgm.k8s.ilt-dmz.iosb.fraunhofer.de/servlet/is/110/


                                                                                [2] http://www.covid19dashboard.org/


                                                                                [3] https://www.windy.com/fr/-NO2-no2?camsEu,no2,47.905,1.908,5

                                                                                ", + "description": "This is an example of **solution** exploiting the generic connector for the\nSensorThings API. This connector allows to connect to any SensorThings API in\nthe world and therefore potentially recover data on any domain. For example,\nthis would facilitate the retrieval of public Covid19 data, harvested from\nvarious sources including Johns Hopkins and RKI, or from near-real-time air\nquality across Europe, from both national sources (harvested from AT SOS and\nWFS) and Europe (EEA).\n\nTo illustrate the potential uses of these different SensorThings API (with a\nsingle connector), one can take a look at these different applications: a\nvisualisation tool[[1]](about:blank) bringing together French and German flow\ndata, a covid-19 dashboard[[2]](about:blank) and the [Windy Web\nsite](https://www.windy.com/fr/-NO2-no2?camsEu,no2,47.905,1.908,5)[[3]](about:blank)\nfocused on the weather forecast.\n\n \n\n[[1]](about:blank) \n\n \n\n[[2]](about:blank) \n\n \n\n[[3]](about:blank)\n\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a5a36ff2-f9f7-4272-abde-b81cf4cbbb80&revisionId=8caf7a53-d01e-4ea7-8c43-fc5dc27fcbc3&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -1353,7 +1353,7 @@ "platform": "ai4experiments", "platform_identifier": "396", "name": "ObjectDetectionP", - "description": "

                                                                                This is a simple pipeline wrapping the object detection model.

                                                                                ", + "description": "This is a simple pipeline wrapping the object detection model.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b08401ec-f24a-452b-bf42-c57cb91b21e8&revisionId=490b5ed8-b498-4ddb-a99b-0cb1662f533c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1398,7 +1398,7 @@ "platform": "ai4experiments", "platform_identifier": "397", "name": "aqpredvisualize", - "description": "

                                                                                Air Quality Prediction and Visualization Pipeline for the area of Trondheim. The pipeline consists of 3 modules, a databroker module, a prediction module based on a pre-trained machine learning model and a visualization module with a web interface. More information and instructions can be found in the github repository: https://github.com/EliasKal/ai4eu_pipeline_visualization

                                                                                ", + "description": "Air Quality Prediction and Visualization Pipeline for the area of Trondheim.\nThe pipeline consists of 3 modules, a databroker module, a prediction module\nbased on a pre-trained machine learning model and a visualization module with\na web interface. More information and instructions can be found in the github\nrepository: \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c4d4ec8b-1e43-4bf7-941e-8d81612cb71e&revisionId=3d63a545-e260-46a1-a743-298902fb2818&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -1443,7 +1443,7 @@ "platform": "ai4experiments", "platform_identifier": "398", "name": "ObjectDetectionPipeline", - "description": "

                                                                                This is a simple pipeline wrapping the object detection model. The underlying object detection model in this pipeline is a public image.

                                                                                ", + "description": "This is a simple pipeline wrapping the object detection model. The underlying\nobject detection model in this pipeline is a public image.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cd303086-6599-41cf-b89b-66f31f7c4f44&revisionId=0d4d73db-e069-447f-949f-2eb1bc9e98e5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -1488,7 +1488,7 @@ "platform": "ai4experiments", "platform_identifier": "399", "name": "Hubeau_Hydro_Observations", - "description": "

                                                                                Example of solution to retrieve French hydrology observations data using the \"Grpc hydro hubeau\" component.

                                                                                ", + "description": "Example of **solution** to retrieve French hydrology observations data using\nthe \"Grpc hydro hubeau\" component.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d95fa687-97d9-45b4-bda6-cadddebb6343&revisionId=1ee16b73-9874-413d-ba66-33502c2bb689&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -1758,7 +1758,7 @@ "platform": "ai4experiments", "platform_identifier": "405", "name": "MusicDetectionPL", - "description": "

                                                                                This pipeline is designed to use the MusicDetection model for the analysis of single audio files that are provided by file upload. Results of the MusicDetection are provided via WebUI.

                                                                                Since MusicDetection model is not publicly accessible, for the deployment of this pipeline it is necessary to acquire access credentials from the provider of the MusicDetection model. NB: Access can not be provided from the publisher of this pipeline.

                                                                                ", + "description": "This pipeline is designed to use the MusicDetection model for the analysis of\nsingle audio files that are provided by file upload. Results of the\nMusicDetection are provided via WebUI.\n\nSince MusicDetection model is not publicly accessible, for the deployment of\nthis pipeline it is necessary to acquire access credentials from the provider\nof the MusicDetection model. NB: Access can not be provided from the publisher\nof this pipeline.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eea265e1-f1b8-4f5d-8694-299b37fc3d0d&revisionId=a44f39bb-56b2-4d5e-b72c-f36cd24a9992&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1803,7 +1803,7 @@ "platform": "ai4experiments", "platform_identifier": "406", "name": "Hubeau_Hydro_Stations", - "description": "

                                                                                Example of solution to retrieve French hydrology stations data using the \"Grpc hydro hubeau\" component.

                                                                                This service makes it possible to query the stations in the French hydrometric reference system. A station can carry height and / or flow observations (directly measured or calculated from a rating curve).



                                                                                ", + "description": "Example of **solution** to retrieve French hydrology stations data using the\n\"Grpc hydro hubeau\" component.\n\nThis service makes it possible to query the stations in the French hydrometric\nreference system. A station can carry height and / or flow observations\n(directly measured or calculated from a rating curve).\n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f476f311-e38c-4c60-a550-605a8b7c5af0&revisionId=4ae0dfe8-95c8-47ae-877d-b9247a249e77&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -1848,7 +1848,7 @@ "platform": "ai4experiments", "platform_identifier": "407", "name": "ucrsuite-ed-pip", - "description": "

                                                                                Overview

                                                                                The UCR Suite ED pipeline ucrsuite-ed-pip implements fast nearest-neighbor retrieval under the Euclidean Distance (ED)

                                                                                ucrsuite-config data broker is a starting point for the pipeline that process files and parameters to perform subsequence search in time series. ucrsuite-ed calculates the nearest neighbor of a times series in a larger time series expressed as location and distance, using the UCR suite ED algorithm.


                                                                                Usage

                                                                                To use the ucrsuite-ed-pip solution, you can either download it from the Marketplace or run it on the Playground. Once the solution is deployed in the Playground, open the Web-UI of the ucrsuite-config model and enter the following information:

                                                                                * Data file: The path to the file containing the long time series.

                                                                                * Query file: The path to the file containing the query time series.

                                                                                Then, Run the solution. The distance calculation will start in the background.

                                                                                The result of calculation, expressed as location and distance, will be stored in the shared folder as a `ed_distance.txt` file.

                                                                                Detailed result also available in the logs of the ucrsuite-ed model in the following format:

                                                                                ----------------------------------------------------

                                                                                Location : 347236

                                                                                Distance : 7.03705

                                                                                Data Scanned : 1000000

                                                                                Total Execution Time : 1.029 sec

                                                                                ", + "description": "## Overview\n\nThe UCR Suite ED pipeline **ucrsuite-ed-pip** implements fast nearest-neighbor\nretrieval under the Euclidean Distance (ED)\n\n **ucrsuite-config** data broker is a starting point for the pipeline that\nprocess files and parameters to perform subsequence search in time series.\n**ucrsuite-ed** calculates the nearest neighbor of a times series in a larger\ntime series expressed as location and distance, using the UCR suite ED\nalgorithm.\n\n \n\n## Usage\n\nTo use the **ucrsuite-ed-pip** solution, you can either download it from the\nMarketplace or run it on the Playground. Once the solution is deployed in the\nPlayground, open the Web-UI of the **ucrsuite-config** model and enter the\nfollowing information:\n\n* **Data file:** The path to the file containing the long time series.\n\n* **Query file:** The path to the file containing the query time series.\n\nThen, **Run** the solution. The distance calculation will start in the\nbackground.\n\nThe result of calculation, expressed as location and distance, will be stored\nin the shared folder as a `ed_distance.txt` file.\n\nDetailed result also available in the logs of the **ucrsuite-ed** model in the\nfollowing format:\n\n\\----------------------------------------------------\n\nLocation : 347236\n\nDistance : 7.03705\n\nData Scanned : 1000000\n\nTotal Execution Time : 1.029 sec\n\n\\----------------------------------------------------\n\nThe `Location` field specifies the starting location of the nearest neighbor\nof the given query, of size M, in the data file. Note that location starts\nfrom 0.\n\nThe `Distance` field specifies the distance between the nearest neighbor and\nthe query.\n\nThe `Data Scanned` field specifies the number of data points in the input data\nfile.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f87058b4-a1f1-4e0e-a944-ece53adcf8b3&revisionId=20402b92-1b2e-4547-b1e0-e2866c439645&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -1893,7 +1893,7 @@ "platform": "ai4experiments", "platform_identifier": "408", "name": "RecognaizePipeline", - "description": "

                                                                                The RecognAIze pipeline coverts images to text including layout detection and table handling and consists of our microservices:

                                                                                Databroker with UI, Preprocessing, Segmentation and OCR.


                                                                                ", + "description": "**The RecognAIze** pipeline coverts images to text including layout detection\nand table handling and consists of our microservices:\n\nDatabroker with UI, Preprocessing, Segmentation and OCR.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fca70f4f-d6b7-4fed-a98a-8800b7831ef8&revisionId=c7b3cfaf-7960-472b-91e3-03b930dca96a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.1.1", @@ -1938,7 +1938,7 @@ "platform": "ai4experiments", "platform_identifier": "409", "name": "ai4eu-sec-pilot", - "description": "

                                                                                This simulation can detect threads in network traffic. To train the model connect the model with the training data interface from the databroker container. The train data are made with benign traffic and does not contain any fraud because the model should lern how benign traffic looks like.

                                                                                To predict traffic connect the prediction data output from the databroker container with the prediction interface. The traffic to predict includes benign and fraud traffic. The output will be a number between 0 and 1. You can set the threshold according to your data. The best threshold cna be found in the model validation folder insider the model container.

                                                                                ", + "description": "This simulation can detect threads in network traffic. To train the model\nconnect the model with the training data interface from the databroker\ncontainer. The train data are made with benign traffic and does not contain\nany fraud because the model should lern how benign traffic looks like.\n\nTo predict traffic connect the prediction data output from the databroker\ncontainer with the prediction interface. The traffic to predict includes\nbenign and fraud traffic. The output will be a number between 0 and 1. You can\nset the threshold according to your data. The best threshold cna be found in\nthe model validation folder insider the model container.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ff236ff3-f08e-40d1-9b76-a42f7e792b96&revisionId=bd6920a5-6998-470b-a4d0-cb0ed9ea73ec&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "0.0.2", diff --git a/src/connectors/example/resources/resource/ml_models.json b/src/connectors/example/resources/resource/ml_models.json index dcc88d91..6c34df4a 100644 --- a/src/connectors/example/resources/resource/ml_models.json +++ b/src/connectors/example/resources/resource/ml_models.json @@ -7,7 +7,7 @@ "status": "draft" }, "name": "AIM4PS", - "description": "

                                                                                AIM4PS employs state-of-the-art AI methodologies for intaking and processing public procurement data, taking as a reference the specific production- and product-related information collected from manufacturing EISs.

                                                                                ", + "description": "AIM4PS employs state-of-the-art AI methodologies for intaking and processing\npublic procurement data, taking as a reference the specific production- and\nproduct-related information collected from manufacturing EISs.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0146cfdb-7853-48aa-b4b2-76183a3f3c14&revisionId=7c089fc1-a981-4c93-9137-dfef1bc19bd8&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -50,7 +50,7 @@ "status": "draft" }, "name": "AudioSpeechToTextGerman", - "description": "

                                                                                This model converts an audio segment to German text.

                                                                                ", + "description": "This model converts an audio segment to German text.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=015a16fd-8fea-495a-ae94-1fc92384d2b3&revisionId=0e5ad85f-29df-4d60-9b7d-178c1382abe0&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -93,7 +93,7 @@ "status": "draft" }, "name": "Doc2Answer", - "description": "

                                                                                The model implements 2 main tasks of the AI4EU call. It is able to parse and extract information from 2 type of INPS documents: \"O7\" and \"SocialCard\".

                                                                                The first type it locates cells and extract the content as text (i.e. numbers, dates).

                                                                                The second type locates stamps and classify them.

                                                                                ", + "description": "The model implements 2 main tasks of the AI4EU call. It is able to parse and\nextract information from 2 type of INPS documents: \"O7\" and \"SocialCard\".\n\nThe first type it locates cells and extract the content as text (i.e. numbers,\ndates).\n\nThe second type locates stamps and classify them.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=01742dd8-cc32-4332-93ca-a181be3853e7&revisionId=d5cab0b1-4827-4b75-b270-8b11a2e08b99&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -136,7 +136,7 @@ "status": "draft" }, "name": "CODE_V2", - "description": "

                                                                                The main objective of the challenge is to develop an method for automatic classification of clinical narratives to ICD-10 codes.

                                                                                Our approach for semantic text classification has three core components: (1) Formalization of domain knowledge of medical information and techniques of semantic data fusion; (2) Multilingual NLP techniques for document preprocessing including all or some of: data cleaning, data normalization, data augmentation, transitive connections analysis, data balancing, expert\u2019s heuristics. For medical data based on our expertise on DOID, ICD-O, ICD-9, ICD-10, MESH, MONDO, UMLS, Orphanet, SNOMED classification, data augmentation including typos simulation and synonym replacement will be used; (3) Multilingual deep learning methods for supervised classification of disease into its corresponding class from the ICD-10. We are fine tuning pretrained BERT family models (bioBERT, clinicalBERT, MultilingualBERT, PubMedBERT, etc.) with domain specific terminology for the target language. Additional corpora generated from public documents and linked open data is used for fine-tuning of the deep learning classification model for the specific ICD-10 classification.

                                                                                ", + "description": "The main objective of the challenge is to develop an method for automatic\nclassification of clinical narratives to ICD-10 codes.\n\nOur approach for semantic text classification has three core components: (1)\nFormalization of domain knowledge of medical information and techniques of\nsemantic data fusion; (2) Multilingual NLP techniques for document\npreprocessing including all or some of: data cleaning, data normalization,\ndata augmentation, transitive connections analysis, data balancing, expert\u2019s\nheuristics. For medical data based on our expertise on DOID, ICD-O, ICD-9,\nICD-10, MESH, MONDO, UMLS, Orphanet, SNOMED classification, data augmentation\nincluding typos simulation and synonym replacement will be used; (3)\nMultilingual deep learning methods for supervised classification of disease\ninto its corresponding class from the ICD-10. We are fine tuning pretrained\nBERT family models (bioBERT, clinicalBERT, MultilingualBERT, PubMedBERT, etc.)\nwith domain specific terminology for the target language. Additional corpora\ngenerated from public documents and linked open data is used for fine-tuning\nof the deep learning classification model for the specific ICD-10\nclassification.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=01d95f4f-3bb4-4807-b6af-eb2d35d352cf&revisionId=2dc164ec-b92a-4413-a78e-70efc6643bc5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -179,7 +179,7 @@ "status": "draft" }, "name": "i-nergy-load-forecasting-nbeats", - "description": "

                                                                                This is a time series forecasting service for predicting of the Portuguese aggregated electricity load series (15-min resolution, 24hr forecasting horizon). This service is based on an NBEATS model trained in the context of I-NERGY project. The model has been trained on the Portuguese timeseries from 2013 to 2019 validated on year 2020 and tested on 2021 with Mean Absolute Percentage Error (MAPE) = 2.35%. No time covariates or external variables have been included in the model. The lookback window of the model is 10 days. The model can be used to produce forecasts for periods from 2022 and later for Portugal. Other transmission system operators may use it as well, however expecting lower performance in general. No external variables have been considered. Please keep in mind that the effects of the pandemic on national loads can negatively affect the model\u2019s performance. For more information please go to ReadME.md in the Documents section.

                                                                                ", + "description": "This is a time series forecasting service for predicting of the Portuguese\naggregated electricity load series (15-min resolution, 24hr forecasting\nhorizon). This service is based on an NBEATS model trained in the context of\n[I-NERGY](https://www.i-nergy.eu/) project. The model has been trained on the\nPortuguese timeseries from 2013 to 2019 validated on year 2020 and tested on\n2021 with Mean Absolute Percentage Error (MAPE) = 2.35%. No time covariates or\nexternal variables have been included in the model. The lookback window of the\nmodel is 10 days. The model can be used to produce forecasts for periods from\n2022 and later for Portugal. Other transmission system operators may use it as\nwell, however expecting lower performance in general. No external variables\nhave been considered. Please keep in mind that the effects of the pandemic on\nnational loads can negatively affect the model\u2019s performance. For more\ninformation please go to ReadME.md in the Documents section.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0440778a-74e8-4d7f-950f-e6e1ce6bc29e&revisionId=3622c8ba-999d-4ce3-b711-b2bf4b43fa88&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -222,7 +222,7 @@ "status": "draft" }, "name": "road-damage-detector", - "description": "

                                                                                # AI4EU Pluto two-stage detector


                                                                                The model is a two stage detector based on [YOLOv5](https://github.com/ultralytics/yolov5).


                                                                                The object detector will detect objects of the following classes:

                                                                                 - Rutting

                                                                                 - Pothole

                                                                                 - Manhole

                                                                                 - Gully

                                                                                 - EdgeDeterioration

                                                                                 - Cracking


                                                                                The second stage classifier will, for `Potholes`, also classify the depth as 1 of 4 discrete values:


                                                                                 - lt2

                                                                                 - 2to5

                                                                                 - 5to10

                                                                                 - gt10



                                                                                # Example client


                                                                                ```python

                                                                                import os

                                                                                import grpc

                                                                                import model_pb2

                                                                                import model_pb2_grpc


                                                                                ## Setup

                                                                                port_addr = 'localhost:8061'


                                                                                # open a gRPC channel

                                                                                channel_opt = [('grpc.max_send_message_length', 512 * 1024 * 1024),

                                                                                        ('grpc.max_receive_message_length', 512 * 1024 * 1024)]

                                                                                channel = grpc.insecure_channel(port_addr, options = channel_opt)

                                                                                stub = model_pb2_grpc.PredictStub(channel)



                                                                                ## Make prediction

                                                                                filepath = \"assets/test.png\"


                                                                                with open(filepath, 'rb') as f:

                                                                                  content = f.read()


                                                                                responsePrediction = stub.make_prediction(requestPrediction)



                                                                                ## Interpret result

                                                                                for annotation in responsePrediction.annotations:

                                                                                 print(f\"Detections: {annotation}\")


                                                                                ```


                                                                                ", + "description": "# AI4EU Pluto two-stage detector\n\n \n\nThe model is a two stage detector based on\n[YOLOv5](https://github.com/ultralytics/yolov5).\n\n \n\nThe object detector will detect objects of the following classes:\n\n \\- Rutting\n\n \\- Pothole\n\n \\- Manhole\n\n \\- Gully\n\n \\- EdgeDeterioration\n\n \\- Cracking\n\n \n\nThe second stage classifier will, for `Potholes`, also classify the depth as 1\nof 4 discrete values:\n\n \n\n \\- lt2\n\n \\- 2to5\n\n \\- 5to10\n\n \\- gt10\n\n \n\n \n\n# Example client\n\n \n\n```python\n\nimport os\n\nimport grpc\n\nimport model_pb2\n\nimport model_pb2_grpc\n\n \n\n## Setup\n\nport_addr = 'localhost:8061'\n\n \n\n# open a gRPC channel\n\nchannel_opt = [('grpc.max_send_message_length', 512 * 1024 * 1024),\n\n ('grpc.max_receive_message_length', 512 * 1024 * 1024)]\n\nchannel = grpc.insecure_channel(port_addr, options = channel_opt)\n\nstub = model_pb2_grpc.PredictStub(channel)\n\n \n\n \n\n## Make prediction\n\nfilepath = \"assets/test.png\"\n\n \n\nwith open(filepath, 'rb') as f:\n\n content = f.read()\n\n \n\nresponsePrediction = stub.make_prediction(requestPrediction)\n\n \n\n \n\n## Interpret result\n\nfor annotation in responsePrediction.annotations:\n\n print(f\"Detections: {annotation}\")\n\n \n\n```\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=075252b1-3ff7-424d-ab6d-19ca2d90f0f0&revisionId=8297b2b4-2260-42ec-bb89-072918b7c843&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -265,7 +265,7 @@ "status": "draft" }, "name": "iOCR", - "description": "


                                                                                iOCR can easily convert scanned or photographed documents into digital text using its underlying Deep Learning technologies in order to automatically localize and recognize the text inside of these images.

                                                                                With our innovative product you will reduce the amount of effort required to digitize your data as iOCR ensures the data is not lost and correctly digitized. The need for specialized scanners or high manual effort will decrease as iOCR aims to improve and scale with your business returning the costs required for this kind of effort back to you, offering you more opportunities to extend your company.

                                                                                ", + "description": " \n\n**iOCR** can easily convert scanned or photographed documents into digital\ntext using its underlying **Deep Learning** technologies in order to\nautomatically localize and recognize the text inside of these images.\n\nWith our innovative product you will reduce the amount of effort required to\ndigitize your data as iOCR ensures the data is not lost and correctly\ndigitized. The need for specialized scanners or high manual effort will\ndecrease as iOCR aims to improve and scale with your business returning the\ncosts required for this kind of effort back to you, offering you more\nopportunities to extend your company.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=08be83e3-f261-428d-846a-99f2fb0d46fb&revisionId=e74c2c19-130d-451f-a095-86c01e6739a6&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -308,7 +308,7 @@ "status": "draft" }, "name": "Text2ImageSearch", - "description": "

                                                                                This model implements a text-to-image search engine: it searches images in a publicly available database (MIRFlickr100K) using natural language sentences as a query.

                                                                                ", + "description": "This model implements a text-to-image search engine: it searches images in a\npublicly available database (MIRFlickr100K) using natural language sentences\nas a query.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=090281fe-4884-4ff8-80e1-fb87a41aa327&revisionId=cbe08f0a-9266-498a-a4ca-ab4f1edf5462&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -351,7 +351,7 @@ "status": "draft" }, "name": "AWDrugsModel", - "description": "






                                                                                The first draft of the drug decision support system (ANN model) determines a status of candidate drug molecules as approved or withdrawn categories by means of molecular descriptors.  The dataset has 44 features for analyzing the drugs and contains 220 drugs having 110 approved and 110 withdrawn drugs. We calculated molecular descriptors (760 descriptors) for all molecules in the drug datasets and selected the most effective attributes (44 features) to reduce the dimensionality of data on the drug dataset.




                                                                                ", + "description": " \n\n \n\n \n\n \n\n \n\nThe first draft of the drug decision support system (ANN model) determines a\nstatus of candidate drug molecules as approved or withdrawn categories by\nmeans of molecular descriptors. The dataset has 44 features for analyzing the\ndrugs and contains 220 drugs having 110 approved and 110 withdrawn drugs. We\ncalculated molecular descriptors (760 descriptors) for all molecules in the\ndrug datasets and selected the most effective attributes (44 features) to\nreduce the dimensionality of data on the drug dataset.\n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fd660e7-7a8c-4616-98af-75a866065b40&revisionId=1c0d6691-fc28-4fd4-bb27-8ad6c3b69bf6&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -394,7 +394,7 @@ "status": "draft" }, "name": "FH_SWF_SAT", - "description": "

                                                                                Documentation of the concept for Reinforcement learning based machine tool control

                                                                                The following description is part of the submitted solution approach of the AI4EU Challenge and explains the interrelationships of the submitted documents. As part of the challenge, sample data was generated, which are similar to the described input and output data. Some of the approaches developed contained detailed explanations and implementations as well as secret solutions that were re-elaborated as pseudo-code. If our solution is among the finalists, the secret solutions will be explained further.

                                                                                Structuring the system solution as Docker container

                                                                                An important aspect of the challenge is modularity and flexibility. For this reason, the developed solution approach is implemented as Docker container. The developed solution is connected via port 8061 with 8 inputs (float - machine parameter) and generates 1 output (float - threshold). The designed solution based on an artificial intelligence reinforcement learner. The developed solution is a reinforcement agent. These generates on the basis of the trained knowledge an action (threshold) which is given as parameter to the environment (rib and surface machine). From the environment the current reward (KPI) and state (8 Inputs) are feedback to the agent (developed solution).

                                                                                Included documents in the Docker container

                                                                                For the realisation of the solution approach different python files and data protocols are realised. An overview of the generated files can be seen in the following listing.

                                                                                actor.pth - data.csv - network.py - README.md - define_threshold.py - license.jason - model.proto - model_pb2.py - model_pb2_grpc.py - requirements.txt - threshold_genera", + "description": "# Documentation of the concept for Reinforcement learning based machine tool\ncontrol\n\nThe following description is part of the submitted solution approach of the\nAI4EU Challenge and explains the interrelationships of the submitted\ndocuments. As part of the challenge, sample data was generated, which are\nsimilar to the described input and output data. Some of the approaches\ndeveloped contained detailed explanations and implementations as well as\nsecret solutions that were re-elaborated as pseudo-code. If our solution is\namong the finalists, the secret solutions will be explained further.\n\n## Structuring the system solution as Docker container\n\nAn important aspect of the challenge is modularity and flexibility. For this\nreason, the developed solution approach is implemented as Docker container.\nThe developed solution is connected via port 8061 with 8 inputs (float -\nmachine parameter) and generates 1 output (float - threshold). The designed\nsolution based on an artificial intelligence reinforcement learner. The\ndeveloped solution is a reinforcement agent. These generates on the basis of\nthe trained knowledge an action (threshold) which is given as parameter to the\nenvironment (rib and surface machine). From the environment the current reward\n(KPI) and state (8 Inputs) are feedback to the agent (developed solution).\n\n## Included documents in the Docker container\n\nFor the realisation of the solution approach different python files and data\nprotocols are realised. An overview of the generated files can be seen in the\nfollowing listing.\n\nactor.pth - data.csv - network.py - README.md - define_threshold.py -\nlicense.jason - model.proto - model_pb2.py - model_pb2_grpc.py -\nrequirements.txt - threshold_generator_client.py -\nthreshold_generator_server.py\n\nThe python file network.", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=108c1bd1-a0f7-4ada-8d39-a72b1b56fe2e&revisionId=fc31a182-5bfd-48fc-b5ea-a55034a70c41&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -437,7 +437,7 @@ "status": "draft" }, "name": "DSS4TB-IMECH", - "description": "

                                                                                A modification on the module \"AI REGIO DSS4TB\" an intelligent troubleshooting system that able to identify the component that is most probably faulty after a series of closed-ended questions answered by the operator.

                                                                                The system works on a probabilistic model that selects the most suitable question to ask the operator on the basis of:

                                                                                1. Information matrix established by an expert
                                                                                2. Previous answers
                                                                                3. Description given by the user (interpreted by the NLP-IMECH module)

                                                                                Operator knowledge is made available to the algorithm in the form of csv files that contain dynamic information matrices that are updated after every troubleshooting session. The use of these files means the system can quickly be adapted to a different contexts by simply that switching out the information matrix.

                                                                                Responding to the questions asked with YES, NO or DON'T KNOW the operator can quickly arrive at the identification of the fault. The system demonstrates a level of resilience in its ability to arrive at the correct diagnosis despite a some errors and uncertainty in the answers given.

                                                                                The module is intended for use in conjunction with the following AI4EU modules:

                                                                                1. NLP-IMECH
                                                                                2. AudioFileBroker
                                                                                3. ConvertAudioToTextEng
                                                                                4. FileViewer
                                                                                5. SharedFolderProvider
                                                                                6. 4 x FileUploadDataBroker

                                                                                Overview:

                                                                                The NewsTrainer module facilitates the training process by specifying the classifier node with the required hyperparameters. The number of epochs, batch size, validation ratio and model filename are the different parameters available in the web-UI.

                                                                                Repository link:

                                                                                Please refer the following link for the code that represents the trainer module in the Eclipse Graphene platform - https://gitlab.eclipse.org/eclipse/graphene/tutorials/-/tree/main/news_training/trainer

                                                                                ", + "description": "**Overview:**\n\nThe NewsTrainer module facilitates the training process by specifying the\nclassifier node with the required hyperparameters. The number of epochs, batch\nsize, validation ratio and model filename are the different parameters\navailable in the web-UI.\n\n **Repository link:**\n\nPlease refer the following link for the code that represents the trainer\nmodule in the Eclipse Graphene platform -\n\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=13416c8e-ae15-488a-b1f3-db33b799eb1a&revisionId=cda82f21-469f-4101-a82f-d1c34b819b74&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -523,7 +523,7 @@ "status": "draft" }, "name": "Fraunhofer-uncertainty-metrics-for-classification-tasks", - "description": "

                                                                                Uncertainty Metric for Classification tasks


                                                                                Implements uncertainty estimation metrics for classification tasks.

                                                                                Input

                                                                                The input to the metric computation module is a prediction from multiple forward passes of Monte Carlo Dropout or the models in an ensemble. The prediction is expected as a single data point, so the shape is N x C where N is the number of forward passes, and C is the number of classes.

                                                                                Metrics

                                                                                The metrics used to quantify uncertainty in the predictions are entropy, mutual information and variance.

                                                                                ", + "description": "# Uncertainty Metric for Classification tasks\n\n \n\nImplements uncertainty estimation metrics for classification tasks.\n\n## Input\n\nThe input to the metric computation module is a prediction from multiple\nforward passes of Monte Carlo Dropout or the models in an ensemble. The\nprediction is expected as a single data point, so the shape is N x C where N\nis the number of forward passes, and C is the number of classes.\n\n## Metrics\n\nThe metrics used to quantify uncertainty in the predictions are entropy,\nmutual information and variance.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=13f5a196-0775-4730-88a0-a62f911ddb3a&revisionId=a549ad83-c0b9-48cb-a43e-0c5be7f4f9fd&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -566,7 +566,7 @@ "status": "draft" }, "name": "tensorflow-iris-model", - "description": "

                                                                                Classify Iris Blossoms with a tensorflow model

                                                                                ", + "description": "Classify Iris Blossoms with a tensorflow model\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=15a3f457-401e-466e-9b85-1e25d8ae0b69&revisionId=42f38ede-7feb-4ebe-ba7c-2a6912aad332&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -609,7 +609,7 @@ "status": "draft" }, "name": "INERGY_Cold_Decision", - "description": "

                                                                                This service is based on a decision support system (DSS) implemented in context of I-NERGY project. The overall vision of I-NERGY is to promote AI in the energy sector by delivering:

                                                                                • An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.
                                                                                • Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.

                                                                                This is a DSS service for for help in the decision on which energy source (for cold generation) use in a Spanish Hospital in hourly basis. The data was provided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital complex have a district heating network. The layout of this district heating network is a ring system composed by two independent rings for heating and cooling. This ring just provides energy for heating and Domestic Hot Water (DHW).

                                                                                Apart from being a district heating network, this system is complex due to the different production sources used for heating and cooling. In this facility heat, cold and steam are produced by using different sources.

                                                                                For more information on how to use the service, please see Documents section.

                                                                                The project leading to this service has received funding from the European Union\u2019s Horizon 2020 research and innovation programme under grant agreement No 101016508

                                                                                ", + "description": "This service is based on a decision support system (DSS) implemented in\ncontext of I-NERGY project. The overall vision of I-NERGY is to promote AI in\nthe energy sector by delivering:\n\n * An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.\n * Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.\n\nThis is a DSS service for for help in the decision on which energy source (for\ncold generation) use in a Spanish Hospital in hourly basis. The data was\nprovided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital\ncomplex have a district heating network. The layout of this district heating\nnetwork is a ring system composed by two independent rings for heating and\ncooling. This ring just provides energy for heating and Domestic Hot Water\n(DHW).\n\nApart from being a district heating network, this system is complex due to the\ndifferent production sources used for heating and cooling. In this facility\nheat, cold and steam are produced by using different sources.\n\nFor more information on how to use the service, please see Documents section.\n\n _The project leading to this service has received funding from the European\nUnion\u2019s Horizon 2020 research and innovation programme under grant agreement\nNo 101016508_\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=169c308d-3451-4bb9-9fe1-84316863c18b&revisionId=68550ad2-0036-4e2d-a29c-99dc940cb235&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -652,7 +652,7 @@ "status": "draft" }, "name": "i-nergy-load-forecasting-ren-hourly-lstm-2018-2019", - "description": "

                                                                                This is a forecasting service for predicting the aggregated hourly net electrical load of the Portuguese transmission system operator (REN). The core of the service is a totally recurrent LSTM deep neural network. The model has been trained on the REN load time series for the years 2018 and 2019 (except December 2019). The service is served as a docker container and a client script is also provided to help the user form their inference requests. The model is totally configurable in terms of:

                                                                                1. Provided ground truth data points: The client can update the existing model with the desired length of new data points that have been observed. The provided input should follow the format of the csv file history_sample.csv.
                                                                                2. Forecast horizons: The client can request a forecast horizon of their preference. It should be noted that large forecast horizons lead to worse results due to the error propagation caused by the LSTM recurrence.

                                                                                This model has been developed within I-NERGY EU project.


                                                                                ", + "description": "This is a forecasting service for predicting the aggregated hourly net\nelectrical load of the Portuguese transmission system operator (REN). The core\nof the service is a totally recurrent LSTM deep neural network. The model has\nbeen trained on the REN load time series for the years 2018 and 2019 (except\nDecember 2019). The service is served as a docker container and a client\nscript is also provided to help the user form their inference requests. The\nmodel is totally configurable in terms of:\n\n 1. **Provided ground truth data points:** The client can update the existing model with the desired length of new data points that have been observed. The provided input should follow the format of the csv file history_sample.csv.\n 2. **Forecast horizons:** The client can request a forecast horizon of their preference. It should be noted that large forecast horizons lead to worse results due to the error propagation caused by the LSTM recurrence.\n\nThis model has been developed within [I-NERGY EU](https://i-nergy.eu/)\nproject.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=16d39167-1650-487a-ab25-29eee8eb838f&revisionId=b2c1b964-aab1-4002-bbe7-d4d5ae438e61&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -695,7 +695,7 @@ "status": "draft" }, "name": "AI4agriNDVI", - "description": "

                                                                                AI4AGRI model for correcting NDVI information from satellite images

                                                                                ", + "description": "AI4AGRI model for correcting NDVI information from satellite images\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=178e0fdf-05ec-42ad-9e0a-da5f147de7fd&revisionId=af75387e-635b-46d1-a442-a47b993b061b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -738,7 +738,7 @@ "status": "draft" }, "name": "SAPI_wheat_model_v0", - "description": "

                                                                                SAPI machine learning regression model based on satellite productivity maps is a powerful tool for predicting crop yields in agriculture. By utilizing advanced algorithms, this model analyzes data from satellite imagery to estimate the expected yield of wheat. The output from the model is predicted yield for particular parcel. The model learns from past data to establish patterns and relationships between the satellite imagery and crop yields. It then applies this knowledge to make predictions for the test parcel. This regression model provides a non-invasive and cost-effective method for yield prediction, as it eliminates the need for manual data collection or extensive field visits.

                                                                                ", + "description": "SAPI machine learning regression model based on satellite productivity maps is\na powerful tool for predicting crop yields in agriculture. By utilizing\nadvanced algorithms, this model analyzes data from satellite imagery to\nestimate the expected yield of wheat. The output from the model is predicted\nyield for particular parcel. The model learns from past data to establish\npatterns and relationships between the satellite imagery and crop yields. It\nthen applies this knowledge to make predictions for the test parcel. This\nregression model provides a non-invasive and cost-effective method for yield\nprediction, as it eliminates the need for manual data collection or extensive\nfield visits.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=195181e4-090f-45e9-91cc-5919718ad0d9&revisionId=ac253be9-81ee-43f2-8a24-79369b10a45c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -781,7 +781,7 @@ "status": "draft" }, "name": "ObjectDetection", - "description": "

                                                                                Detection of physical objects in still images or videos


                                                                                The object detection mining service allows to detect one or more physical objects to be found in images and videos. 


                                                                                Input: Image file or video file. You can specify which frames are to be processed for a video.


                                                                                Output: A set of detected objects will be returned for the image or each processed frame. For each detected object an axially parallel bounding box, an object category and a rating are returned. The rating indicates the certainty of the model regarding the category of the identified object within a bounding box.

                                                                                In addition, an automatically generated ID is assigned to each detected object to allow the unambiguous identification of all detected objects in one media file. This ID has no relation to the category of the detected Object.

                                                                                ", + "description": "Detection of physical objects in still images or videos\n\n \n\nThe object detection mining service allows to detect one or more physical\nobjects to be found in images and videos.\n\n \n\nInput: Image file or video file. You can specify which frames are to be\nprocessed for a video.\n\n \n\nOutput: A set of detected objects will be returned for the image or each\nprocessed frame. For each detected object an axially parallel bounding box, an\nobject category and a rating are returned. The rating indicates the certainty\nof the model regarding the category of the identified object within a bounding\nbox.\n\nIn addition, an automatically generated ID is assigned to each detected object\nto allow the unambiguous identification of all detected objects in one media\nfile. This ID has no relation to the category of the detected Object.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&revisionId=6efaddee-cb74-4995-a8c3-9bc8e3f9c29b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -867,7 +867,7 @@ "status": "draft" }, "name": "ObjectDetection", - "description": "

                                                                                Detection of physical objects in still images or videos


                                                                                The object detection mining service allows to detect one or more physical objects to be found in images and videos. 


                                                                                Input: Image file or video file. You can specify which frames are to be processed for a video.


                                                                                Output: A set of detected objects will be returned for the image or each processed frame. For each detected object an axially parallel bounding box, an object category and a rating are returned. The rating indicates the certainty of the model regarding the category of the identified object within a bounding box.

                                                                                In addition, an automatically generated ID is assigned to each detected object to allow the unambiguous identification of all detected objects in one media file. This ID has no relation to the category of the detected Object.

                                                                                ", + "description": "Detection of physical objects in still images or videos\n\n \n\nThe object detection mining service allows to detect one or more physical\nobjects to be found in images and videos.\n\n \n\nInput: Image file or video file. You can specify which frames are to be\nprocessed for a video.\n\n \n\nOutput: A set of detected objects will be returned for the image or each\nprocessed frame. For each detected object an axially parallel bounding box, an\nobject category and a rating are returned. The rating indicates the certainty\nof the model regarding the category of the identified object within a bounding\nbox.\n\nIn addition, an automatically generated ID is assigned to each detected object\nto allow the unambiguous identification of all detected objects in one media\nfile. This ID has no relation to the category of the detected Object.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&revisionId=f85ede77-a094-46e4-9147-fb9e595f2b91&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -910,7 +910,7 @@ "status": "draft" }, "name": "QRUL", - "description": "

                                                                                The model processes quality test data results and estimate the Remaining Useful Life (RUL) of a produced pump from the Pfeiffer company. The provided solution offers 2 classification techniques estimating whether a pump is going to fail in the first year of operation or not, or estimating the time range that the pump will fail.

                                                                                ", + "description": "The model processes quality test data results and estimate the Remaining\nUseful Life (RUL) of a produced pump from the Pfeiffer company. The provided\nsolution offers 2 classification techniques estimating whether a pump is going\nto fail in the first year of operation or not, or estimating the time range\nthat the pump will fail.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1ee174ca-e7c4-405e-8137-27611cb0b6bc&revisionId=6dc27e5f-72b7-406e-a5fb-6db99737b816&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -953,7 +953,7 @@ "status": "draft" }, "name": "VideoSegmentation", - "description": "

                                                                                The Video Segmentation model splits the incoming video into scene segments

                                                                                ", + "description": "The Video Segmentation model splits the incoming video into scene segments\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=21459f4b-ed64-455b-93ae-5e345f046148&revisionId=9113a839-bfa1-470a-b4c2-7714be30a03c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -996,7 +996,7 @@ "status": "draft" }, "name": "SAPI_maize_model_v0", - "description": "

                                                                                SAPI machine learning regression model based on satellite productivity maps is a powerful tool for predicting crop yields in agriculture. By utilizing advanced algorithms, this model analyzes data from satellite imagery to estimate the expected yield of maize. The output from the model is predicted yield for particular parcel. The model learns from past data to establish patterns and relationships between the satellite imagery and crop yields. It then applies this knowledge to make predictions for the test parcel. This regression model provides a non-invasive and cost-effective method for yield prediction, as it eliminates the need for manual data collection or extensive field visits.

                                                                                ", + "description": "SAPI machine learning regression model based on satellite productivity maps is\na powerful tool for predicting crop yields in agriculture. By utilizing\nadvanced algorithms, this model analyzes data from satellite imagery to\nestimate the expected yield of maize. The output from the model is predicted\nyield for particular parcel. The model learns from past data to establish\npatterns and relationships between the satellite imagery and crop yields. It\nthen applies this knowledge to make predictions for the test parcel. This\nregression model provides a non-invasive and cost-effective method for yield\nprediction, as it eliminates the need for manual data collection or extensive\nfield visits.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=21a28a9d-bc8b-490e-85e5-e1452ad74e3e&revisionId=b11fdff7-5654-48de-bd4e-70d3f1131703&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1039,7 +1039,7 @@ "status": "draft" }, "name": "polaris_mep_ai", - "description": "

                                                                                Polaris MEP AI module is an addon for Polaris MEP, an execution planner to optimize production plannings using restrictions-based solvers. The new AI module adds features to predict and forecast the inputs of the planner. So production demand and resource availability can be predicted with AI and optimized with OR. Regression methods Linear Regressi\u00f3n, Lasso, Gradient Boosting, Random Forest, and K-NN are included. Autoregressive methods ARIMA, SARIMA, VARMA, LSTM, and Fuzzy NN are included.



                                                                                ", + "description": "Polaris MEP AI module is an addon for Polaris MEP, an execution planner to\noptimize production plannings using restrictions-based solvers. The new AI\nmodule adds features to predict and forecast the inputs of the planner. So\nproduction demand and resource availability can be predicted with AI and\noptimized with OR. Regression methods Linear Regressi\u00f3n, Lasso, Gradient\nBoosting, Random Forest, and K-NN are included. Autoregressive methods ARIMA,\nSARIMA, VARMA, LSTM, and Fuzzy NN are included.\n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=24f4722f-9c82-489c-b9b0-359976eb792f&revisionId=76dbff09-04b5-4ec6-af32-8a3e82b60ded&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1082,7 +1082,7 @@ "status": "draft" }, "name": "dummy-environment-clarspy", - "description": "

                                                                                Dummy model for 1st Call for Solutions

                                                                                ", + "description": "Dummy model for 1st Call for Solutions\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2532264a-b2aa-4cf4-8a90-8eb5f0546b9f&revisionId=558d248e-bd5d-4e53-a360-8bdc95dc8cc0&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1125,7 +1125,7 @@ "status": "draft" }, "name": "AI_REGIO_CUSUM_RLS_filter", - "description": "

                                                                                CUSUM RLS filter contains a change detection algorithm for multiple sensors, using the Recursive Least Squares (RLS) and Cumulative Sum (CUSUM) methods [F. Gustafsson. Adaptive Filtering and Change Detection. John Willey & Sons, LTD 2000].

                                                                                As an AI resource the \u201cCUSUMRLSfilter\" asset is currently implemented as Open Source Solution whose main aim is to detect abrupt changes on the measurements recorded by a set of sensors.The asset was implemented as part of one of the experiment of the AI REGIO project, and subsequently adapted for general use.

                                                                                ", + "description": "**CUSUM RLS filter** contains a change detection algorithm for multiple\nsensors, using the Recursive Least Squares (RLS) and Cumulative Sum (CUSUM)\nmethods [F. Gustafsson. _Adaptive Filtering and Change Detection_. John Willey\n& Sons, LTD 2000].\n\nAs an AI resource the \u201c _CUSUMRLSfilter_ \" asset is currently implemented as\nOpen Source Solution whose main aim is to detect abrupt changes on the\nmeasurements recorded by a set of sensors.The asset was implemented as part of\none of the experiment of the AI REGIO project, and subsequently adapted for\ngeneral use.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=259afff9-66a4-47e7-b55c-4f19b2d75b8d&revisionId=f3b61e6d-904c-48ab-9930-72eedd3eb62c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1168,7 +1168,7 @@ "status": "draft" }, "name": "ner-model", - "description": "

                                                                                This is the ner-model component of the ner-pipeline.


                                                                                Through the Web UI of the ner-model, you can access the results of the entity recognition task on a given text. The most recent result will show on top of the results. An entity is defined within \"|\", followed by its type and confidence score in round brackets.

                                                                                Make sure to run ner-pipeline, instead of ner-model as a standalone component. As ner-pipeline is successfully deployed, first submit the text via ner-databroker, then RUN the pipeline and go to the Web UI of the ner-model. You will see a list of processed texts, with the most recent provided text on top of the list.

                                                                                ", + "description": "This is the ner-model component of the ner-pipeline.\n\n \n\nThrough the Web UI of the ner-model, you can access the results of the entity\nrecognition task on a given text. The most recent result will show on top of\nthe results. An entity is defined within \"|\", followed by its type and\nconfidence score in round brackets.\n\nMake sure to run ner-pipeline, instead of ner-model as a standalone component.\nAs ner-pipeline is successfully deployed, first submit the text via ner-\ndatabroker, then RUN the pipeline and go to the Web UI of the ner-model. You\nwill see a list of processed texts, with the most recent provided text on top\nof the list.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=27e777bc-2968-427c-9df5-9f5593613475&revisionId=77f58af9-73d4-48b8-9237-7c6e1d3cdb97&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1211,7 +1211,7 @@ "status": "draft" }, "name": "cnext_decision_intelligence", - "description": "

                                                                                The published model is a result of the AI4EU challenge \u201cDecision Intelligence for Healthcare\u201d, and is focused on delivering data-driven decision support on the question \u201cwhat is the next step in handling patient test/diagnoses related to suspected COVID infection.   

                                                                                As part of this challenge, we needed to validate a Machine Learning Model \u2013 published on the AI4EU marketplace \u2013 using GRPC (protobuf) as inference endpoint and docker container image as packaging model could act as a decision brick and as such be plugged in into our Decision Intelligence Platform.


                                                                                More information about the solution can be found in the accompanying AI4EU_Cnext.pdf file.




                                                                                ", + "description": "The published model is a result of the AI4EU challenge \u201cDecision Intelligence\nfor Healthcare\u201d, and is focused on delivering data-driven decision support on\nthe question \u201cwhat is the next step in handling patient test/diagnoses related\nto suspected COVID infection. \n\nAs part of this challenge, we needed to validate a Machine Learning Model \u2013\npublished on the AI4EU marketplace \u2013 using GRPC (protobuf) as inference\nendpoint and docker container image as packaging model could act as a decision\nbrick and as such be plugged in into our Decision Intelligence Platform.\n\n \n\nMore information about the solution can be found in the accompanying\nAI4EU_Cnext.pdf file.\n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2b3f75d9-a480-4589-9992-457b0863b7b5&revisionId=cb074874-ee6b-458c-a825-e5d129ca4635&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.6", @@ -1254,7 +1254,7 @@ "status": "draft" }, "name": "Vibration_analysis", - "description": "

                                                                                This model allows the analysis of vibration of rotating machines. It is based on vibration measurements in the three spatial directions, on strategic measurement points: MDE (Motor driven end) and MNDE (Motor non driven end).  It allows to detect if a machine presents a faulty behaviour and to establish the cause of this problem and to evaluate its intensity on a scale from 1 to 3.

                                                                                ", + "description": "This model allows the analysis of vibration of rotating machines. It is based\non vibration measurements in the three spatial directions, on strategic\nmeasurement points: MDE (Motor driven end) and MNDE (Motor non driven end).\nIt allows to detect if a machine presents a faulty behaviour and to establish\nthe cause of this problem and to evaluate its intensity on a scale from 1 to\n3.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4&revisionId=42a20377-3b6f-41c5-88b2-76b07993aa0b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1297,7 +1297,7 @@ "status": "draft" }, "name": "Vibration_analysis", - "description": "

                                                                                This model allows from acceleration mesurements in the three directions on the measurement points mde (motor driven end) and mnde (motor non driven end), to detect a machine malfunction and to establish its nature. The type of failure detected in this version are unbalance issue and bearing issue . Other types of failure will be supported in the next versions, stay tuned.

                                                                                ", + "description": "This model allows from acceleration mesurements in the three directions on the\nmeasurement points mde (motor driven end) and mnde (motor non driven end), to\ndetect a machine malfunction and to establish its nature. The type of failure\ndetected in this version are unbalance issue and bearing issue . Other types\nof failure will be supported in the next versions, stay tuned.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4&revisionId=533fbe3c-2b51-48ef-89bd-fe9ee96cf13a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -1340,7 +1340,7 @@ "status": "draft" }, "name": "INERGY_Cold_Demand_Prediction", - "description": "

                                                                                This service is based on a Random Forest model implemented in context of I-NERGY project. The overall vision of I-NERGY is to promote AI in the energy sector by delivering:

                                                                                • An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.
                                                                                • Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.

                                                                                This is a forecasting service for predicting thermal load (cold energy) of a Spanish Hospital in hourly basis. The data was provided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital complex have a district heating network. The layout of this district heating network is a ring system composed by two independent rings for heating and cooling. This ring just provides energy for heating and Domestic Hot Water (DHW).

                                                                                Apart from being a district heating network, this system is complex due to the different production sources used for heating and cooling. In this facility heat, cold and steam are produced by using different sources.

                                                                                For more information on how to use the service, please see Documents section.

                                                                                The project leading to this service has received funding from the European Union\u2019s Horizon 2020 research and innovation programme under grant agreement No 101016508

                                                                                ", + "description": "This service is based on a Random Forest model implemented in context of\nI-NERGY project. The overall vision of I-NERGY is to promote AI in the energy\nsector by delivering:\n\n * An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.\n * Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.\n\nThis is a forecasting service for predicting thermal load (cold energy) of a\nSpanish Hospital in hourly basis. The data was provided by VEOLIA, from the\nhospital complex in C\u00f3rdoba (Spain). The hospital complex have a district\nheating network. The layout of this district heating network is a ring system\ncomposed by two independent rings for heating and cooling. This ring just\nprovides energy for heating and Domestic Hot Water (DHW).\n\nApart from being a district heating network, this system is complex due to the\ndifferent production sources used for heating and cooling. In this facility\nheat, cold and steam are produced by using different sources.\n\nFor more information on how to use the service, please see Documents section.\n\n _The project leading to this service has received funding from the European\nUnion\u2019s Horizon 2020 research and innovation programme under grant agreement\nNo 101016508_\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2ef3e3fb-afe9-422a-b037-88168d219a80&revisionId=8fc73f14-3456-4eda-af0a-68af28faada0&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1426,7 +1426,7 @@ "status": "draft" }, "name": "O7_information_extractor", - "description": "

                                                                                This model is implemented to extract O7 information from Italian social workers' cards.

                                                                                ", + "description": "This model is implemented to extract **O7** information from Italian social\nworkers' cards.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&revisionId=b4d4ea0c-c723-4dca-9066-5af00f2d9133&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.5", @@ -1512,7 +1512,7 @@ "status": "draft" }, "name": "advice-yolo", - "description": "

                                                                                advice-yolo is the implementation of YOLOv4 deep learning model. The model is already trained for detecting road defects

                                                                                ", + "description": "advice-yolo is the implementation of YOLOv4 deep learning model. The model is\nalready trained for detecting road defects\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3332868c-0248-4f2c-8401-1464faf56166&revisionId=3cc90b52-2567-4432-b6bb-6368ab68ad6f&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -1598,7 +1598,7 @@ "status": "draft" }, "name": "Drug-Attrition-Oracle", - "description": "

                                                                                Drug Attrition Oracle is a deep neural network model, based on the chemical structure of the compounds, which can predict the probability of withdrawal from the market for compounds that have passed initial trials. The model provides an interpretable layer which can find chemical substructures that are most influential for making the prediction as well as additional drug and molecular properties which can influence the probability of withdrawal. The model takes as an input only the SMILES string of the molecule and outputs a conformal prediction whether the molecule is approved or withdrawn along with a confidence score. The explanation for a prediction is given using the GNN Explainer. To improve the GCN model predictions we trained additional graph neural network models for predicting molecular properties: Bioavailability, Clearance Hepatocyte, CYP2C9 Substrate and Toxicity (nr-ppar-gamma). These predictions are used with the base GCN model for predicting the withdrawal in an XGBoost model which uses SHAP values for interpretation.\ufeff

                                                                                Code is available on", + "description": "Drug Attrition Oracle is a deep neural network model, based on the chemical\nstructure of the compounds, which can predict the probability of withdrawal\nfrom the market for compounds that have passed initial trials. The model\nprovides an interpretable layer which can find chemical substructures that are\nmost influential for making the prediction as well as additional drug and\nmolecular properties which can influence the probability of withdrawal. The\nmodel takes as an input only the SMILES string of the molecule and outputs a\n[conformal prediction](http://alrw.net/articles/06.pdf) whether the molecule\nis approved or withdrawn along with a confidence score. The explanation for a\nprediction is given using the [GNN\nExplainer](http://snap.stanford.edu/gnnexplainer/). To improve the GCN model\npredictions we trained additional graph neural network models for predicting\nmolecular properties: Bioavailability, Clearance Hepatocyte, CYP2C9 Substrate\nand Toxicity (nr-ppar-gamma). These predictions are used with the base GCN\nmodel for predicting the withdrawal in an XGBoost model which uses [SHAP\nvalues](https://shap.readthedocs.io/en/latest/index.html) for interpretation.\ufeff\n\nCode is available on [Github](https://github.com/dionizijefa/Drug-Attrition-\nOracle)\n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=33de7b45-cc1e-4ff4-b01a-7eb08c5859e9&revisionId=b8f10760-6b7d-4b6c-aea9-74a7851e2027&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1684,7 +1684,7 @@ "status": "draft" }, "name": "innerpageanalysis", - "description": "

                                                                                Advanced deep learning models are trained separately and applied for each type of information, and then put together in **Inner Page Analysis** pipeline. The pipeline extracts the information from historical data from Italian workers' social security cards.

                                                                                Analysis of stamps data and extraction of their key informations is the main goal of this project.

                                                                                input and output of this project will be like below:

                                                                                1. input is a full page of stamps in both raw scanned files or ordinary images in .png or .jpg format. file name will be like 11831_2b.

                                                                                2. output will be a .csv file that contains below information for each stamp as columns:

                                                                                  * filename,ID,xb,stamp_id,stamp_class,price,face,color




                                                                                ", + "description": "Advanced deep learning models are trained separately and applied for each type\nof information, and then put together in **Inner Page Analysis** pipeline. The\npipeline extracts the information from historical data from Italian workers'\nsocial security cards.\n\nAnalysis of stamps data and extraction of their key informations is the main\ngoal of this project.\n\ninput and output of this project will be like below:\n\n1\\. input is a full page of stamps in both raw scanned files or ordinary\nimages in .png or .jpg format. file name will be like 11831_2b.\n\n2\\. output will be a .csv file that contains below information for each stamp\nas columns:\n\n * filename,ID,xb,stamp_id,stamp_class,price,face,color\n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3664c82c-39e1-4fd8-bf0a-ee7c7e745068&revisionId=b29ec7cf-9cdc-4cc3-9864-d2c607bab121&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -1727,7 +1727,7 @@ "status": "draft" }, "name": "OHDSI_PLP_PILOT", - "description": "

                                                                                Pilot for Patient level Prediction for the AI4EU challenge.

                                                                                ", + "description": "Pilot for Patient level Prediction for the AI4EU challenge.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=367469d8-cbd1-42c9-b3e9-ecd670e95ce8&revisionId=c2da9001-caf3-4594-9fe9-cccd84aa4181&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1770,7 +1770,7 @@ "status": "draft" }, "name": "OpenWIDE", - "description": "

                                                                                OpenWIDE


                                                                                Trustworthy Detection of mouthes for automated swab robot.


                                                                                The service finds a mouth in the image securely by cascading 3 detectors, person->face->mouth and evaluate how open the mouth is.It will only give one mouth per image, which is the dominant mouth.The result is given as a DICT where the most relevant information is:


                                                                                1. mouthbox: \tbbox of mouth in format x1,y1,x2,y2
                                                                                2. facebox: \t\tbbox of face in format x1,y1,x2,y2
                                                                                3. personbox:\tbbox of person in format x1,y1,x2,y2
                                                                                4. Score: \t\t\tCollective score of the three models
                                                                                5. Open: \t\t\tA measure of openness. >0.8 tends to be WIDE open.
                                                                                6. H: \t\t\t\tHow centered is the mouth horizontally. ~0 = looking straight into the camera.
                                                                                7. V: \t\t\t\tHow centered is the mouth vertically. ~-.3 = looking straight into the camera.


                                                                                Cloud host


                                                                                It is hosted as a RPC service in Azure

                                                                                * openwide.northeurope.azurecontainer.io:8061


                                                                                Dockerhub


                                                                                * dtivisionboxcloud/openwide:v1.1


                                                                                Test

                                                                                Included is a test image and a test script. 


                                                                                Just run :


                                                                                * python testRPCService.py


                                                                                and you should receive a dict with information about the mouth.


                                                                                ", + "description": "# OpenWIDE\n\n# \n\n###### Trustworthy Detection of mouthes for automated swab robot.\n\n \n\nThe service finds a mouth in the image securely by cascading 3 detectors,\nperson->face->mouth and evaluate how open the mouth is.It will only give one\nmouth per image, which is the dominant mouth.The result is given as a DICT\nwhere the most relevant information is:\n\n \n\n 1. mouthbox: bbox of mouth in format x1,y1,x2,y2\n 2. facebox: bbox of face in format x1,y1,x2,y2\n 3. personbox: bbox of person in format x1,y1,x2,y2\n 4. Score: Collective score of the three models\n 5. Open: A measure of openness. >0.8 tends to be WIDE open.\n 6. H: How centered is the mouth horizontally. ~0 = looking straight into the camera.\n 7. V: How centered is the mouth vertically. ~-.3 = looking straight into the camera.\n\n \n\n## Cloud host\n\n \n\nIt is hosted as a RPC service in Azure\n\n* openwide.northeurope.azurecontainer.io:8061\n\n \n\n## Dockerhub\n\n \n\n* dtivisionboxcloud/openwide:v1.1\n\n \n\n## Test\n\nIncluded is a test image and a test script.\n\n \n\nJust run :\n\n \n\n* python testRPCService.py\n\n \n\nand you should receive a dict with information about the mouth.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=36ae858b-6486-46ae-8e8c-01d644b93d4d&revisionId=515a1a44-4ad1-4b29-b4f4-efadfa665dee&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1813,7 +1813,7 @@ "status": "draft" }, "name": "divis_pump_lifetime_classification", - "description": "

                                                                                The image provides a model for the classifiction on vacuum pumps into the categories \"short living\" (less than one year) and \"long living\". The data needed is specific to the format of a challenge owner of the AI4EU project.

                                                                                ", + "description": "The image provides a model for the classifiction on vacuum pumps into the\ncategories \"short living\" (less than one year) and \"long living\". The data\nneeded is specific to the format of a challenge owner of the AI4EU project.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=36e5b789-fdb8-4016-84d6-829423b58ffc&revisionId=ca6c26a5-9252-4fa0-81c3-aea31d26dca8&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1856,7 +1856,7 @@ "status": "draft" }, "name": "edm_aad_agent_node_cl", - "description": "

                                                                                EDM RL Controller predictions (Solution Provider: Artificialy SA)


                                                                                Reinforcement learning applied to Electrical discharge machining (EDM) control for the AI4EU project with Agie Charmilles SA. For in Depth instructions of how to use this model, please follow the README.pdf which is placed in the Documents tab.







                                                                                The solution consists of two nodes: `data_node` server which streams a DataFrame of observations (EDM machine states) read from the path provided by the client (`infile`); and an `agent_node` server which predicts control actions based on the agent / controller specified by the client. Output predictions are stored inside the `./data_predictions/` folder of the `agent_node` Docker container.


                                                                                To use this solution, please use the Docker container and the additional files (which are in the Documents tap of the model in the marketplace) from both the `data_node` and `agent_node`. They are both in the AI4EU platform market place named as `edm_aad_agent_node_cl` and `edm_aad_data_node_cl`:

                                                                                ", + "description": "**EDM RL Controller predictions (Solution Provider: Artificialy SA)**\n\n \n\nReinforcement learning applied to Electrical discharge machining (EDM) control\nfor the AI4EU project with Agie Charmilles SA. _For in Depth instructions of\nhow to use this model, please follow the README.pdf which is placed in the\nDocuments tab._\n\n \n\n \n\n \n\n \n\n \n\n \n\nThe solution consists of two nodes: `data_node` server which streams a\nDataFrame of observations (EDM machine states) read from the path provided by\nthe client (`infile`); and an `agent_node` server which predicts control\nactions based on the agent / controller specified by the client. Output\npredictions are stored inside the `./data_predictions/` folder of the\n`agent_node` Docker container.\n\n \n\nTo use this solution, please use the Docker container and the additional files\n(which are in the Documents tap of the model in the marketplace) from both the\n`data_node` and `agent_node`. They are both in the AI4EU platform market place\nnamed as `edm_aad_agent_node_cl` and `edm_aad_data_node_cl`:\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=414791ed-55f9-457d-b377-f790161e2cd6&revisionId=7622a8e4-d52f-4288-9bc6-88d64da6f7f6&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1899,7 +1899,7 @@ "status": "draft" }, "name": "ICD-10-CM-classifier", - "description": "

                                                                                ICD-10-CM classifier


                                                                                The ICD-10-CM classifier is docker image containing two neural classifier models contained within a gRPC server that allows for classification of medical texts in Spanish or English.

                                                                                Fine-tuned on the CodiEsp dataset, the models for both languages are built upon the Bert architecture. The Spanish model achieves a 0.5980 MAP score across the test set of the CodiEsp-Diagnostic dataset, whereas the English version achieves a 0.5249 MAP score.

                                                                                This module may provide help for researchers or other data-science enthusiasts that are looking into building tools to automatically diagnose medical descriptions.


                                                                                ", + "description": "# ICD-10-CM classifier\n\n \n\nThe ICD-10-CM classifier is docker image containing two neural classifier\nmodels contained within a gRPC server that allows for classification of\nmedical texts in Spanish or English.\n\nFine-tuned on the CodiEsp dataset, the models for both languages are built\nupon the Bert architecture. The Spanish model achieves a 0.5980 MAP score\nacross the test set of the CodiEsp-Diagnostic dataset, whereas the English\nversion achieves a 0.5249 MAP score.\n\nThis module may provide help for researchers or other data-science enthusiasts\nthat are looking into building tools to automatically diagnose medical\ndescriptions.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4356534c-aec1-4271-8eda-f125cb08909b&revisionId=ee4f05c5-b86d-423c-b1d6-21b24b14be4d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1942,7 +1942,7 @@ "status": "draft" }, "name": "AudioSegmentation", - "description": "

                                                                                This model splits an audio file into segments like one speaker and removes silence.

                                                                                ", + "description": "This model splits an audio file into segments like one speaker and removes\nsilence.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4366dce4-cd87-4a51-bd39-2dbfe5fd5b6c&revisionId=4a4c3771-6c63-46b6-aad6-d5cf78e1a03f&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -1985,7 +1985,7 @@ "status": "draft" }, "name": "CODE", - "description": "

                                                                                The main objective of the challenge is to develop an method for automatic classification of clinical narratives to ICD-10 codes.

                                                                                Our approach for semantic text classification has three core components: (1) Formalization of domain knowledge of medical information and techniques of semantic data fusion; (2) Multilingual NLP techniques for document preprocessing including all or some of: data cleaning, data normalization, data augmentation, transitive connections analysis, data balancing, expert\u2019s heuristics. For medical data based on our expertise on DOID, ICD-O, ICD-9, ICD-10, MESH, MONDO, UMLS, Orphanet, SNOMED classification, data augmentation including typos simulation and synonym replacement will be used; (3) Multilingual deep learning methods for supervised classification of disease into its corresponding class from the ICD-10. We are fine tuning pretrained BERT family models (bioBERT, clinicalBERT, MultilingualBERT, PubMedBERT, etc.) with domain specific terminology for the target language. Additional corpora generated from public documents and linked open data is used for fine-tuning of the deep learning classification model for the specific ICD-10 classification.

                                                                                ", + "description": "The main objective of the challenge is to develop an method for automatic\nclassification of clinical narratives to ICD-10 codes.\n\nOur approach for semantic text classification has three core components: (1)\nFormalization of domain knowledge of medical information and techniques of\nsemantic data fusion; (2) Multilingual NLP techniques for document\npreprocessing including all or some of: data cleaning, data normalization,\ndata augmentation, transitive connections analysis, data balancing, expert\u2019s\nheuristics. For medical data based on our expertise on DOID, ICD-O, ICD-9,\nICD-10, MESH, MONDO, UMLS, Orphanet, SNOMED classification, data augmentation\nincluding typos simulation and synonym replacement will be used; (3)\nMultilingual deep learning methods for supervised classification of disease\ninto its corresponding class from the ICD-10. We are fine tuning pretrained\nBERT family models (bioBERT, clinicalBERT, MultilingualBERT, PubMedBERT, etc.)\nwith domain specific terminology for the target language. Additional corpora\ngenerated from public documents and linked open data is used for fine-tuning\nof the deep learning classification model for the specific ICD-10\nclassification.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=47920b57-7ab9-4abe-9881-f77d57144944&revisionId=6fdf671b-38d8-4995-b924-30ef638df116&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2028,7 +2028,7 @@ "status": "draft" }, "name": "aquila-ai-service", - "description": "

                                                                                The experiment aims to compare the design of an electronic product, represented by a CAD file, with the picture of a real artifact of the product.

                                                                                The proposed solution consists of two main phases. First, the system establishes a machine learning flow that utilizes a neural architecture to address the issue of component recognition (Object Detection) in panel images. Second, the system exploits Answer Set Programming (ASP) to compare the reconstructed scheme from the image with the original patterns to detect any misalignments or errors.

                                                                                ", + "description": "The experiment aims to compare the design of an electronic product,\nrepresented by a CAD file, with the picture of a real artifact of the product.\n\nThe proposed solution consists of two main phases. First, the system\nestablishes a machine learning flow that utilizes a neural architecture to\naddress the issue of component recognition (Object Detection) in panel images.\nSecond, the system exploits Answer Set Programming (ASP) to compare the\nreconstructed scheme from the image with the original patterns to detect any\nmisalignments or errors.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=48053444-5100-4476-a8c3-53db3108dcdb&revisionId=94d411e7-3383-47e5-a923-581e7a6f5a1f&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2071,7 +2071,7 @@ "status": "draft" }, "name": "CDSICD10", - "description": "

                                                                                1st Call for Solutions, ICD10 classification using medical trained BERT and QA. \u201cOur solution combines two different approaches: one to identify the relevant disease (ICD-10 category) and the other one to determine the subcategory (the digits after the period). 

                                                                                The \u201ccategory-classifier\u201d is based on Spanish BERT (BETO) fine-tuned on Spanish clinical text (CodiEsp corpus). 

                                                                                In order to determine the subcategories of each ICD-10 category, we will use a question-answering approach based on a structured version of the ICD-10 dictionary created be NER.

                                                                                ", + "description": "1st Call for Solutions, ICD10 classification using medical trained BERT and\nQA. \u201cOur solution combines two different approaches: one to identify the\nrelevant disease (ICD-10 category) and the other one to determine the\nsubcategory (the digits after the period).\n\nThe \u201ccategory-classifier\u201d is based on Spanish BERT (BETO) fine-tuned on\nSpanish clinical text (CodiEsp corpus).\n\nIn order to determine the subcategories of each ICD-10 category, we will use a\nquestion-answering approach based on a structured version of the ICD-10\ndictionary created be NER.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4af0b85d-6d3e-4678-a991-865366ce4152&revisionId=b7ed24a9-c8fa-42cf-8f72-58acbb6f9435&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2114,7 +2114,7 @@ "status": "draft" }, "name": "aipanel_repurposing", - "description": "

                                                                                Goal:

                                                                                To design a model that allows repurposing of already approved drugs i.e., the model predicts if a drug can be used to fight another disease or protein target that relates to the disease.


                                                                                Approach:

                                                                                To achieve this goal, another Deep Convolutional Neural Network (D-CNN) has been implemented on molecular descriptors obtained for the drugs and Protein Descriptors obtained for targets, to develop a prediction model which predicts the IC50 value where IC50 refers to Half-maximal inhibitory concentration, the most widely used and informative measure of a drug's efficacy.


                                                                                To prepare the dataset, following drugs, targets and their combined activities were obtained from specific databases:

                                                                                1. 1651 Approved Drugs from CHEMBL Database with IC50 Bio-Activities
                                                                                2. 1975 Targets from CHEMBL Database


                                                                                Approx. 40000 activities were obtained for above mentioned drugs and targets, where the activities belonged to phase 4 studies. Phase 4 refers to the Stage where a drug is accepted since it shows desired results towards a specific Target. Around 53% of activities consis", + "description": "**_Goal:_**\n\nTo design a model that allows repurposing of already approved drugs i.e., the\nmodel predicts if a drug can be used to fight another disease or protein\ntarget that relates to the disease.\n\n \n\n ** _Approach:_**\n\nTo achieve this goal, another Deep Convolutional Neural Network (D-CNN) has\nbeen implemented on molecular descriptors obtained for the drugs and Protein\nDescriptors obtained for targets, to develop a prediction model which predicts\nthe IC50 value where IC50 refers to Half-maximal inhibitory concentration, the\nmost widely used and informative measure of a drug's efficacy.\n\n \n\nTo prepare the dataset, following drugs, targets and their combined activities\nwere obtained from specific databases:\n\n 1. 1651 Approved Drugs from CHEMBL Database with IC50 Bio-Activities\n 2. 1975 Targets from CHEMBL Database\n\n \n\nApprox. 40000 activities were obtained for above mentioned drugs and targets,\nwhere the activities belonged to phase 4 studies. Phase 4 refers to the Stage\nwhere a drug is accepted since it shows desired results towards a specific\nTarget. Around 53% of activities consisted of IC50 values less than 1000 nM.\nTherefore, activities were divided into two classes, active interaction (IC50\n<= 1000 nM) and inactive interaction (IC50 > 1000 nM). This allowed us to\ndevelop a binary classification model. Active refers to a positive response of\na drug towards a target.\n\n \n\nFor the obtained Drugs, their SMILES were extracted from CHEMBL. SMILES are\nnotations for describing the structure of chemical species using short ASCII\nstrings. The SMILES were further used to extract 881 PUBCHEM Molecular\nDescriptors using PaDEL, a software to calculate molecular descriptors and\nfingerprints. To obtain features for target proteins, a python based library\npropy3 was used w", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4c32c784-dd97-466c-b533-e4e8e541b80a&revisionId=fd42128d-cd93-4b30-89b7-4c1f756da6b2&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.2", @@ -2157,7 +2157,7 @@ "status": "draft" }, "name": "Tag-my-outfit", - "description": "

                                                                                The Tag My Outfit service predicts the category and the attributes of a piece of clothing viewed in a given image. The prediction model is the Visual Semantic Attention Model (VSAM), and is supervised by automatic pose extraction creating a discriminative feature space. This particular classifier was trained with the open source DeepFashion dataset. For further detail see http://physicalai.isr.tecnico.ulisboa.pt/tagmyoutfit.html


                                                                                The model accepts an image as input and outputs the labels corresponding to category (e.g. dress), subcategory (Evening Dress) and attributes ( short, long sleeve, round neckline)

                                                                                ", + "description": "The **Tag My Outfit** service predicts the category and the attributes of a\npiece of clothing viewed in a given image. The prediction model is the\n[_Visual Semantic Attention Model_\n(VSAM)](http://openaccess.thecvf.com/content_ICCVW_2019/papers/CVFAD/Ferreira_Pose_Guided_Attention_for_Multi-\nLabel_Fashion_Image_Classification_ICCVW_2019_paper.pdf), and is supervised by\nautomatic pose extraction creating a discriminative feature space. This\nparticular classifier was trained with the open source DeepFashion dataset.\nFor further detail see\n\n\n \n\nThe model accepts an image as input and outputs the labels corresponding to\ncategory (e.g. dress), subcategory (Evening Dress) and attributes ( short,\nlong sleeve, round neckline)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4c8cf4f7-670c-4ee8-9c20-f0fff3dc2b1d&revisionId=bb44d189-da04-4eea-9d55-7d2b5518a3e3&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2200,7 +2200,7 @@ "status": "draft" }, "name": "Urban4Cast", - "description": "

                                                                                Docker Image for Parking Predictions. It allows you to obtain parking predictions, with various levels of spacial granularity. It uses gRPC and protobuf as interfaces to the developed model. Please see the README of the project in order to understand how to use it.


                                                                                The inputs of the model define the spacial granularity (None, Neighborhood, Street, Sensor). Apart from that, you can define the temporal granularity (15 minutes, 1 hour, 1 day) and how many steps in the future you want to predict. The results are the predictions for these steps, including the upper and lower bounds of the prediciton.

                                                                                ", + "description": "Docker Image for Parking Predictions. It allows you to obtain parking\npredictions, with various levels of spacial granularity. It uses gRPC and\nprotobuf as interfaces to the developed model. Please see the README of the\nproject in order to understand how to use it.\n\n \n\nThe inputs of the model define the spacial granularity (None, Neighborhood,\nStreet, Sensor). Apart from that, you can define the temporal granularity (15\nminutes, 1 hour, 1 day) and how many steps in the future you want to predict.\nThe results are the predictions for these steps, including the upper and lower\nbounds of the prediciton.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4d22b7a8-240f-4e3b-a359-018819d779b3&revisionId=09c477af-508f-4cdc-806e-ce0462ae07cd&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2243,7 +2243,7 @@ "status": "draft" }, "name": "AudioSpeakerRecognition", - "description": "

                                                                                This model add speaker recognition to audio mining pipelines.

                                                                                ", + "description": "This model add speaker recognition to audio mining pipelines.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4f57c704-10c2-43ec-93ae-d2183b3180f1&revisionId=374b55ac-3579-4ee1-8f7b-c1f6f5779e7e&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2286,7 +2286,7 @@ "status": "draft" }, "name": "Generic-CODE", - "description": "

                                                                                The proposed solution is based on fine-tuned with Spanish medical texts of the pre-trained BERT family language models (transformers clinicalBERT and multilingualBERT). The designed text-based classification service predicts ICD-10 codes for clinical text in Spanish for the Colorectal cancer (CRC) and associated diagnoses. The service output contains the ICD-10 \u201csubclassification\u201d (4 sign) codes that gives additional information about manifestation, severity and location of the injury or disease for a wider range of disease (4227) ICD-10 codes. The prediction models for ICD-10 codes are with high accuracy: clinicalBERT: 0.949 AUC ROC score and  multilingualBERT: 0.950 AUC ROC score. The service allows the user to switch between two models (clinicalBERT and multilingualBERT) and to set the parameter N for top N diagnoses according to the specific needs.

                                                                                This module implements fast nearest-neighbor retrieval of a times series in a larger time series expressed as location and distance using the UCR suite Euclidean Distance (ED) algorithm.

                                                                                ", + "description": "This module implements fast nearest-neighbor retrieval of a times series in a\nlarger time series expressed as location and distance using the UCR suite\nEuclidean Distance (ED) algorithm.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=50ebce0a-f91f-46eb-be32-b36574a1e068&revisionId=7b642559-fd32-41d5-ae18-753d03f5014a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2372,7 +2372,7 @@ "status": "draft" }, "name": "SmartRiver", - "description": "

                                                                                The Digital Twin solution for AI-driven hydropower energy forecasting

                                                                                 

                                                                                 

                                                                                 

                                                                                 

                                                                                River discharge rules energy production for Hydropower plants.

                                                                                Prediction of water resources for the next day, month, season, challenges every energy producer and trader.

                                                                                Such knowledge supports optimal energy production, avoiding wastes (underestimation) or empty reservoirs (overestimation). 


                                                                                ", + "description": "**The Digital Twin solution for AI-driven hydropower energy forecasting**\n\n ** **\n\n ** **\n\n ** **\n\n ** **\n\n **River discharge** rules energy production for Hydropower plants.\n\nPrediction of water resources for the next day, month, season, challenges\nevery energy producer and trader.\n\nSuch knowledge supports optimal energy production, avoiding wastes\n(underestimation) or empty reservoirs (overestimation).\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=52471527-6ec1-4233-8c8e-e8d412b300b7&revisionId=7391c733-e008-4467-9965-c905c536ffba&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2415,7 +2415,7 @@ "status": "draft" }, "name": "Aquila", - "description": "

                                                                                The experiment aims to compare the design of an electronic product, represented by a CAD file, with the picture of a real artifact of the product.

                                                                                The proposed solution consists of two main phases. First, the system establishes a machine learning flow that utilizes a neural architecture to address the issue of component recognition (Object Detection) in panel images. Second, the system exploits Answer Set Programming (ASP) to compare the reconstructed scheme from the image with the original patterns to detect any misalignments or errors.

                                                                                ", + "description": "The experiment aims to compare the design of an electronic product,\nrepresented by a CAD file, with the picture of a real artifact of the product.\n\nThe proposed solution consists of two main phases. First, the system\nestablishes a machine learning flow that utilizes a neural architecture to\naddress the issue of component recognition (Object Detection) in panel images.\nSecond, the system exploits Answer Set Programming (ASP) to compare the\nreconstructed scheme from the image with the original patterns to detect any\nmisalignments or errors.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5357697a-114b-4649-8065-3c2108652ab3&revisionId=66f1c27a-797a-458e-9da2-c837e9e0402d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0", @@ -2458,7 +2458,7 @@ "status": "draft" }, "name": "ai4opti", - "description": "

                                                                                This model is for production line prediction. More specifically based on the historical data the model is able to predict if the production will be late or on time.

                                                                                ", + "description": "This model is for production line prediction. More specifically based on the\nhistorical data the model is able to predict if the production will be late or\non time.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=54c201d7-caf2-4803-8321-6d5ab1ecf2ea&revisionId=10aface4-cf1c-4123-84dc-f91746ef6232&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2501,7 +2501,7 @@ "status": "draft" }, "name": "aquila-webapp", - "description": "

                                                                                The experiment aims to compare the design of an electronic product, represented by a CAD file, with the picture of a real artifact of the product.

                                                                                The proposed solution consists of two main phases. First, the system establishes a machine learning flow that utilizes a neural architecture to address the issue of component recognition (Object Detection) in panel images. Second, the system exploits Answer Set Programming (ASP) to compare the reconstructed scheme from the image with the original patterns to detect any misalignments or errors.

                                                                                ", + "description": "The experiment aims to compare the design of an electronic product,\nrepresented by a CAD file, with the picture of a real artifact of the product.\n\nThe proposed solution consists of two main phases. First, the system\nestablishes a machine learning flow that utilizes a neural architecture to\naddress the issue of component recognition (Object Detection) in panel images.\nSecond, the system exploits Answer Set Programming (ASP) to compare the\nreconstructed scheme from the image with the original patterns to detect any\nmisalignments or errors.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5613118f-b66c-4cd7-b925-ea537d5a9c6c&revisionId=985597a7-a6e9-4a3f-a0b6-5fc0f90065c2&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2544,7 +2544,7 @@ "status": "draft" }, "name": "VideoShotDetection", - "description": "

                                                                                The shot detection system will detect the boundaries between video shots by detecting a change between visual scenes. 

                                                                                • Input: A video file. For a more accurate result, all frames need to be assessed. 
                                                                                • Output: Detection result will be a file where each row contains the start and the end frames of each shot in the video

                                                                                Model: The underlying model for the shot detection is a deep learning-based model called TransNetV2. This model has been trained on datasets with combination of real (15%) and synthetic (85%) shot transitions (cuts) created from two datasets IACC.3 and ClipShots.

                                                                                Evaluation: This model achieves the F1 score of 0.898 on TRECVID 2007 dataset. Annotations are provided by TRECVID and downloaded from their website. It appears that the ground truth annotations differ about 2 frames from the actual cuts. As a result, a tolerance of 2 frames is considered when applying the evaluation.

                                                                                ", + "description": "The shot detection system will detect the boundaries between video shots by\ndetecting a change between visual scenes.\n\n * Input: A video file. For a more accurate result, all frames need to be assessed. \n * Output: Detection result will be a file where each row contains the start and the end frames of each shot in the video\n\n **Model** : The underlying model for the shot detection is a deep learning-\nbased model called TransNetV2. This model has been trained on datasets with\ncombination of real (15%) and synthetic (85%) shot transitions (cuts) created\nfrom two datasets IACC.3 and ClipShots.\n\n**Evaluation** : This model achieves the F1 score of 0.898 on TRECVID 2007\ndataset. Annotations are provided by TRECVID and downloaded from their\n[website](https://www-\nnlpir.nist.gov/projects/tv2007/pastdata/master.shot.reference/). It appears\nthat the ground truth annotations differ about 2 frames from the actual cuts.\nAs a result, a tolerance of 2 frames is considered when applying the\nevaluation.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=56258e93-1bdf-4640-93f5-b3786e591acc&revisionId=91d5c71f-e984-4bb0-9c2b-aa2b15bea5e5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2587,7 +2587,7 @@ "status": "draft" }, "name": "Idiap_BEAT_Face_Recognition_-_FaceNET", - "description": "

                                                                                A face recognition algorithm to compare one probe image against a set of template images.

                                                                                The images must be gray-scale and should contain the face region only.Internally, the images are resized to 160x160 pixels.

                                                                                This algorithm expects the pre-trained FaceNet model to be provided as input as well.

                                                                                The model can be downloaded from https://drive.google.com/file/d/0B5MzpY9kBtDVZ2RpVDYwWmxoSUkwhich was made available in https://github.com/davidsandberg/facenet/tree/b95c9c3290455cabc425dc3f9435650679a74c50

                                                                                Reference experiment on the BEAT platform is amohammadi/amohammadi/atnt_eigenfaces/1/atnt1.

                                                                                ", + "description": "A face recognition algorithm to compare one probe image against a set of\ntemplate images.\n\nThe images must be gray-scale and should contain the face region\nonly.Internally, the images are resized to 160x160 pixels.\n\nThis algorithm expects the pre-trained FaceNet model to be provided as input\nas well.\n\nThe model can be downloaded from\nhttps://drive.google.com/file/d/0B5MzpY9kBtDVZ2RpVDYwWmxoSUkwhich was made\navailable in\nhttps://github.com/davidsandberg/facenet/tree/b95c9c3290455cabc425dc3f9435650679a74c50\n\nReference experiment on the BEAT platform is\n[amohammadi/amohammadi/atnt_eigenfaces/1/atnt1](https://www.idiap.ch/software/beat/platform/experiments/amohammadi/amohammadi/atnt_eigenfaces/1/atnt1/).\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5889ec5c-8f7b-44b0-bb6b-164a8fa98fd9&revisionId=09d2cbe8-7eeb-4214-8826-b4665f4ebb8c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2630,7 +2630,7 @@ "status": "draft" }, "name": "RoadDefectsDetection", - "description": "

                                                                                The model detects common road defects as well as gullies and manhole covers. It is trained on image from the UK.

                                                                                Furthermore, it exposes a classfification model for pothole depths.

                                                                                ", + "description": "The model detects common road defects as well as gullies and manhole covers.\nIt is trained on image from the UK.\n\nFurthermore, it exposes a classfification model for pothole depths.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5a5ab3be-eddf-4956-829c-acb1934b7ead&revisionId=2a788999-6aec-4e2e-b1b6-30c9d1b39d78&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -2673,7 +2673,7 @@ "status": "draft" }, "name": "cso", - "description": "

                                                                                AI-service to optimize stock management of components based on forecasting models and historical data analysis

                                                                                ", + "description": "AI-service to optimize stock management of components based on forecasting\nmodels and historical data analysis\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5c2fbf7d-4417-49da-8714-7e37b925d81b&revisionId=a8e9a9ea-aa80-40e7-91b3-fb2a0fdc1504&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.6", @@ -2716,7 +2716,7 @@ "status": "draft" }, "name": "lexatexer-ai4hydro-proxy", - "description": "

                                                                                LexaTexer provides an Enterprise AI platform to support the energy value chain with prebuilt, configurable AI applications addressing CAPEX intense hydro assets like Pelton and Francis turbines and pumps. In this project we combine our Enterprise AI platform and existing operational data to model the remaining useful life (RUL) of Pelton turbines based on real-world operational and environmental data. Thus, increasing RUL, efficiency and availability significantly. AI4Hydro plans to extent the remaining useful life of hydro turbines by up to 30%.

                                                                                ", + "description": "LexaTexer provides an Enterprise AI platform to support the energy value chain\nwith prebuilt, configurable AI applications addressing CAPEX intense hydro\nassets like Pelton and Francis turbines and pumps. In this project we combine\nour Enterprise AI platform and existing operational data to model the\nremaining useful life (RUL) of Pelton turbines based on real-world operational\nand environmental data. Thus, increasing RUL, efficiency and availability\nsignificantly. AI4Hydro plans to extent the remaining useful life of hydro\nturbines by up to 30%.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=600e7b34-68eb-4cff-892a-42b77eb71fbb&revisionId=8abc36f4-23a4-44bf-9d79-ad18f2d65dc9&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2759,7 +2759,7 @@ "status": "draft" }, "name": "mytestmodel", - "description": "

                                                                                Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat.

                                                                                ", + "description": "Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor\nincidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis\nnostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=61134a6f-511f-4144-ba26-1ae017bffa36&revisionId=6c316365-742b-43d9-96e4-54d4aa962d48&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2802,7 +2802,7 @@ "status": "draft" }, "name": "coverpageanalysis", - "description": "
                                                                                Key information extraction from document images is of paramount importance in office automation. \nEach cover card includes many words that are not required to be extracted. To extract the crucial key information, this repository works in three-folds:\n\n1. Text detection with YOLOv5 \n2. Text recognition with TRBA \n3. Text recognition enhancement with natural language processing\n\nFor more information, feel free to contact info@cogniteye.com\n




                                                                                ", + "description": "\n Key information extraction from document images is of paramount importance in office automation. \n Each cover card includes many words that are not required to be extracted. To extract the crucial key information, this repository works in three-folds:\n \n 1. Text detection with YOLOv5 \n 2. Text recognition with TRBA \n 3. Text recognition enhancement with natural language processing\n \n For more information, feel free to contact info@cogniteye.com\n \n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6297165d-d2f9-4617-90c5-d6586d34c84a&revisionId=b301cf36-fb4e-46cf-9425-a6dd1495d58c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -2845,7 +2845,7 @@ "status": "draft" }, "name": "flask-model", - "description": "

                                                                                The initial model

                                                                                ", + "description": "The initial model\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=63a30a14-770e-43d1-a929-1e1f1759af69&revisionId=ddc8368d-6dda-42c6-985a-66b7551e970b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2888,7 +2888,7 @@ "status": "draft" }, "name": "pumplife-prediction", - "description": "

                                                                                This repository contains the implementation of a service that performs a prediction on the expected running time of a pump. The prediction is made using a series of parameters recorded during the pump's testing, that happens before the pump is sent to the customer.

                                                                                Model description

                                                                                A series of different models have been tested and evaluated during the model selection phase. A Random Forest resulted to be the best performing model across the validation set, and was thus implemented in the API in this repository.

                                                                                The input data is the csv file output of the test bench performed on the pumps. The csv should contain a specific set of parameters, that are listed in the Readme in this repository.

                                                                                The model classifies the expected running time of the pump into 5 classes:

                                                                                • [min,180] ~ \"< 6 months\",
                                                                                • (180,365] ~ \"6 months ~ 1 year\",
                                                                                • (365,730] ~ \"1 year ~ 2 years\",
                                                                                • (730,1e+03] ~ \"2 years ~ 3 years\",
                                                                                • (1e+03,max] ~ \"> 3 years\".

                                                                                The prediction output of the Random Forest is than binarized to obtain the classification between the two classes [< 1 year, > 1 year]. The final output of the model is one of this two classes.

                                                                                ", + "description": "This repository contains the implementation of a service that performs a\nprediction on the expected running time of a pump. The prediction is made\nusing a series of parameters recorded during the pump's testing, that happens\nbefore the pump is sent to the customer.\n\n## Model description\n\nA series of different models have been tested and evaluated during the model\nselection phase. A Random Forest resulted to be the best performing model\nacross the validation set, and was thus implemented in the API in this\nrepository.\n\nThe input data is the csv file output of the test bench performed on the\npumps. The csv should contain a specific set of parameters, that are listed in\nthe Readme in this repository.\n\nThe model classifies the expected running time of the pump into 5 classes:\n\n * `[min,180]` ~ \"< 6 months\",\n * `(180,365]` ~ \"6 months ~ 1 year\",\n * `(365,730]` ~ \"1 year ~ 2 years\",\n * `(730,1e+03]` ~ \"2 years ~ 3 years\",\n * `(1e+03,max]` ~ \"> 3 years\".\n\nThe prediction output of the Random Forest is than binarized to obtain the\nclassification between the two classes `[< 1 year, > 1 year]`. The final\noutput of the model is one of this two classes.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=63bfe768-8f18-4265-89fc-18b77b10b4e5&revisionId=9358a7a6-141a-4b36-aabf-8e8ec6f3d6e9&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -2931,7 +2931,7 @@ "status": "draft" }, "name": "AI4EU-AgriCounting", - "description": "

                                                                                This model is part of the AI4EU Agriculture Pilot, where academia researchers, IT partners and smart agriculture companies showcase the opportunities of the AI4EU environment for unlikely stakeholders, like rural partners.

                                                                                Collectively, this consortium has produced a set of tools that exploit satellite image, UAVs technologies, robotics and the latest trends in IA to help manage and predict the quality and productivity of vineyards.

                                                                                This models deal with detection of cluster of grapes of a minimum quality and maturation in an image, informing of the visual metrics of the detected regions.

                                                                                ", + "description": "This model is part of the AI4EU Agriculture Pilot, where academia researchers,\nIT partners and smart agriculture companies showcase the opportunities of the\nAI4EU environment for unlikely stakeholders, like rural partners.\n\nCollectively, this consortium has produced a set of tools that exploit\nsatellite image, UAVs technologies, robotics and the latest trends in IA to\nhelp manage and predict the quality and productivity of vineyards.\n\nThis models deal with detection of cluster of grapes of a minimum quality and\nmaturation in an image, informing of the visual metrics of the detected\nregions.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6436a5d2-81d6-440d-9703-25eeede9ca73&revisionId=650ef51a-7c3b-404f-98e5-c85f7c2e1a30&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -2974,7 +2974,7 @@ "status": "draft" }, "name": "advice-road-crop", - "description": "

                                                                                advice-road-crop is a semantic segmentation model that detects the region of interest (ROI) of the image and crops this area to speed up the inference process. In the context of this project, the region of interest consists of the road 

                                                                                ", + "description": "advice-road-crop is a semantic segmentation model that detects the region of\ninterest (ROI) of the image and crops this area to speed up the inference\nprocess. In the context of this project, the region of interest consists of\nthe road\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=64d59631-44f5-4179-9b2f-9b6b4fce0fff&revisionId=848cb306-75ee-4a5c-98c7-c9857b5f2afd&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3017,7 +3017,7 @@ "status": "draft" }, "name": "INERGY_Heat_Demand_Prediction", - "description": "

                                                                                This service is based on a Random Forest model implemented in context of I-NERGY project. The overall vision of I-NERGY is to promote AI in the energy sector by delivering:

                                                                                • An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.
                                                                                • Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.

                                                                                This is a forecasting service for predicting thermal load (heat demand) of a Spanish Hospital in hourly basis. The data was provided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital complex have a district heating network. The layout of this district heating network is a ring system composed by two independent rings for heating and cooling. This ring just provides energy for heating and Domestic Hot Water (DHW).

                                                                                Apart from being a district heating network, this system is complex due to the different production sources used for heating and cooling. In this facility heat, cold and steam are produced by using different sources.

                                                                                For more information on how to use the service, please see Documents section.

                                                                                The project leading to this service has received funding from the European Union\u2019s Horizon 2020 research and innovation programme under grant agreement No 101016508

                                                                                ", + "description": "This service is based on a Random Forest model implemented in context of\nI-NERGY project. The overall vision of I-NERGY is to promote AI in the energy\nsector by delivering:\n\n * An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.\n * Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.\n\nThis is a forecasting service for predicting thermal load (heat demand) of a\nSpanish Hospital in hourly basis. The data was provided by VEOLIA, from the\nhospital complex in C\u00f3rdoba (Spain). The hospital complex have a district\nheating network. The layout of this district heating network is a ring system\ncomposed by two independent rings for heating and cooling. This ring just\nprovides energy for heating and Domestic Hot Water (DHW).\n\nApart from being a district heating network, this system is complex due to the\ndifferent production sources used for heating and cooling. In this facility\nheat, cold and steam are produced by using different sources.\n\nFor more information on how to use the service, please see Documents section.\n\n _The project leading to this service has received funding from the European\nUnion\u2019s Horizon 2020 research and innovation programme under grant agreement\nNo 101016508_\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=64d9f84f-bd62-4da3-8571-756c79f9451e&revisionId=33554300-4673-481f-8203-3c37ec015440&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3060,7 +3060,7 @@ "status": "draft" }, "name": "SpeechRecognition", - "description": "

                                                                                Speech recognition reliably translates spoken information into digital text.

                                                                                Main characteristics:

                                                                                • highly reliable speech recognition
                                                                                • robust against noise, e.g. in an industrial setting
                                                                                • can be combined with automatic speaker recognition
                                                                                • language models available for German and English
                                                                                • word and phoneme output to subsequent systems
                                                                                ", + "description": "Speech recognition reliably translates spoken information into digital text.\n\n# Main characteristics:\n\n * highly reliable speech recognition\n * robust against noise, e.g. in an industrial setting\n * can be combined with automatic speaker recognition\n * language models available for German and English\n * word and phoneme output to subsequent systems\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=65f43abe-ea13-45d1-9078-ce7fbbcb0d07&revisionId=3057c3ee-99e6-42f8-b398-05290d643917&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -3103,7 +3103,7 @@ "status": "draft" }, "name": "SpeechRecognition", - "description": "

                                                                                Speech recognition reliably translates spoken information into digital text.

                                                                                Main characteristics:

                                                                                • highly reliable speech recognition
                                                                                • robust against noise, e.g. in an industrial setting
                                                                                • can be combined with automatic speaker recognition
                                                                                • language models available for German and English
                                                                                • word and phoneme output to subsequent systems


                                                                                ", + "description": "Speech recognition reliably translates spoken information into digital text.\n\n# Main characteristics:\n\n * highly reliable speech recognition\n * robust against noise, e.g. in an industrial setting\n * can be combined with automatic speaker recognition\n * language models available for German and English\n * word and phoneme output to subsequent systems\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=65f43abe-ea13-45d1-9078-ce7fbbcb0d07&revisionId=9d6dadf1-ee95-4b9c-8f7b-ade96563bd64&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3146,7 +3146,7 @@ "status": "draft" }, "name": "rebase-model", - "description": "

                                                                                This is a LightGBM time-series forecasting model. LightGBM is a gradient boosting decision tree framework developed by Microsoft. It works by recursively partitioning the feature-space into hyperrectangles and utilising the mean (or median) of the target in the specific hyperrectangle as prediction. Every one step recursion is made to reduce the prediction errors of the previous model iteration. One of the advantages with LightGBM over other gradient boosting decision tree frameworks is its efficiency and the ability to predict quantile distributions. 

                                                                                The asset provides a user interface where you can upload a train set and a set to predict on. The prediction is then displayed in a chart and can be downloaded from the user-interface. It also exposes the rpc Predict() to be able to be called from another service. Here is a video demonstration. Please refer to this readme for more information about how to use and install.

                                                                                This project has received funding from the European Union's Horizon 2020 research and innovation programme within the framework of the I-NE", + "description": "This is a LightGBM time-series forecasting model. LightGBM is a gradient\nboosting decision tree framework developed by Microsoft. It works by\nrecursively partitioning the feature-space into hyperrectangles and utilising\nthe mean (or median) of the target in the specific hyperrectangle as\nprediction. Every one step recursion is made to reduce the prediction errors\nof the previous model iteration. One of the advantages with LightGBM over\nother gradient boosting decision tree frameworks is its efficiency and the\nability to predict quantile distributions.\n\nThe asset provides a user interface where you can upload a train set and a set\nto predict on. The prediction is then displayed in a chart and can be\ndownloaded from the user-interface. It also exposes the rpc Predict() to be\nable to be called from another service. Here is a video\n[demonstration](https://drive.google.com/file/d/1GpD9hEg498Ic2H76Vh4uGzF_k4EVKa2j/view?usp=sharing).\nPlease refer to this[ ](https://github.com/rebase-energy/ai4eu-\nexperiment/tree/master/model)[readme](https://github.com/rebase-energy/ai4eu-\nexperiment/tree/master/model) for more information about how to use and\ninstall.\n\nThis project has received funding from the European Union's Horizon 2020\nresearch and innovation programme within the framework of the I-NERGY Project,\nfunded under grant agreement No 101016508\n\nWebsite:[ https://www.rebase.energy/](https://www.rebase.energy/)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6662fc35-2e6c-4f48-8e26-f7b677acbb62&revisionId=97313833-7e70-47b1-8524-139c2dc26a78&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3189,7 +3189,7 @@ "status": "draft" }, "name": "i-nergy-load-forecasting-lightgbm", - "description": "

                                                                                This is a forecasting service for predicting of the Portuguese aggregated electricity load time series (15-min resolution, 24hr forecasting horizon). This service is based on a LightGBM model implemented in the context of I-NERGY project. For more information on how to use the solution, please see README.pdf in the Documents section.

                                                                                ", + "description": "This is a forecasting service for predicting of the Portuguese aggregated\nelectricity load time series (15-min resolution, 24hr forecasting horizon).\nThis service is based on a LightGBM model implemented in the context of\n[I-NERGY](https://www.i-nergy.eu/) project. For more information on how to use\nthe solution, please see README.pdf in the Documents section.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=693e5d71-2141-4078-9bf8-0b8b0a9d28fd&revisionId=dccbd07e-3522-4aca-a479-62581058c352&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3232,7 +3232,7 @@ "status": "draft" }, "name": "SSC-Demo", - "description": "

                                                                                Model for finding stamps in the image and determining their value.

                                                                                ", + "description": "Model for finding stamps in the image and determining their value.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=69e1bd04-c689-44e0-8cb6-e7c45ba4d5c6&revisionId=2fbe123c-09ac-4fdb-9af7-c610a541d709&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3576,7 +3576,7 @@ "status": "draft" }, "name": "covid_predict", - "description": "

                                                                                Two trained Convolutional networks with capabilities to determine automatically if a patient has pneumonia based on computer tomography (CT) scans or x-ray images. The raining phase is hidden to end users. It is a constant process based on gathering open or anonymized clinical images.

                                                                                The end users will be supplied with a docker. The communication with which is based on grpc proto buffer. End users will supply a link to X-ray or CT image and will obtain diagnosis and it\u2019s probability.

                                                                                ", + "description": "Two trained Convolutional networks with capabilities to determine\nautomatically if a patient has pneumonia based on computer tomography (CT)\nscans or x-ray images. The raining phase is hidden to end users. It is a\nconstant process based on gathering open or anonymized clinical images.\n\nThe end users will be supplied with a docker. The communication with which is\nbased on grpc proto buffer. End users will supply a link to X-ray or CT image\nand will obtain diagnosis and it\u2019s probability.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=ba5f9197-f3dd-469c-ae3f-0fec081ac81a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.8", @@ -3662,7 +3662,7 @@ "status": "draft" }, "name": "rp4pl-classification", - "description": "

                                                                                rp4pl-classification (Reliable Prediction for Pump Lifetime) is a classification model used to predict pump failures within a year of installation. The model input is the final quality test data from the pump manufacturing process and the output is the failure prediction (whether the pump is predicted to fail within a year installation - yes - or to not fail within a year of installation - no). The model pipeline included data transformation and feature inference. Additionally, it includes a feature selection step to select the most relevant features from the input data.

                                                                                ", + "description": "rp4pl-classification (Reliable Prediction for Pump Lifetime) is a\nclassification model used to predict pump failures within a year of\ninstallation. The model input is the final quality test data from the pump\nmanufacturing process and the output is the failure prediction (whether the\npump is predicted to fail within a year installation - yes - or to not fail\nwithin a year of installation - no). The model pipeline included data\ntransformation and feature inference. Additionally, it includes a feature\nselection step to select the most relevant features from the input data.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6f0368f1-77c2-4bfe-b632-98ecd9c87bd9&revisionId=151771e8-422b-4a7b-9d87-8edbadfa6def&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -3748,7 +3748,7 @@ "status": "draft" }, "name": "adios-apply", - "description": "

                                                                                I-NERGY - TTP1 - ADIOS APPLY MODEL


                                                                                Applies anomaly detection for electric power grids model to a full dataset. In this phase, we use the previously trained models to label the unknown alarms. Scikit-learn allows to save trained models to binary files on disk, so in this phase we first load our pretrained model and then we load also the one-hot encoder in case we are willing to use categorical data, or the text processing module if we want to use the text-based classification. Once the pre-trained model is loaded, it can be used to predict the labels of unknown alarms.

                                                                                AIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-apply-model

                                                                                Attribution

                                                                                This project has received funding from the European Union's Horizon 2020 research and innovation programme within the framework of the I-NERGY Project, funded under grant agreement No 101016508


                                                                                ", + "description": "I-NERGY - TTP1 - ADIOS APPLY MODEL\n\n \n\nApplies anomaly detection for electric power grids model to a full dataset. In\nthis phase, we use the previously trained models to label the unknown alarms.\nScikit-learn allows to save trained models to binary files on disk, so in this\nphase we first load our pretrained model and then we load also the one-hot\nencoder in case we are willing to use categorical data, or the text processing\nmodule if we want to use the text-based classification. Once the pre-trained\nmodel is loaded, it can be used to predict the labels of unknown alarms.\n\nAIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-apply-\nmodel\n\nAttribution\n\nThis project has received funding from the European Union's Horizon 2020\nresearch and innovation programme within the framework of the I-NERGY Project,\nfunded under grant agreement No 101016508\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6f212625-4d1c-4f13-9f0b-fcfcd6bca65c&revisionId=4888be04-de9c-48b3-b9b4-3e45102956f1&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3791,7 +3791,7 @@ "status": "draft" }, "name": "ML_Assistant_for_Vibration_Monitoring", - "description": "

                                                                                The models deployed to AI4EU experiment platform are based on the data provided by HAT Analytics as part of the AI4EU challenge entitled \"ML assistant for vibration monitoring\".


                                                                                Three models have been developed corresponding to three different asset types:

                                                                                1. Direct fans
                                                                                2. Feet-mounted fans
                                                                                3. Flange-mounted fans


                                                                                The measurements are gathered from different measurement points namely

                                                                                1. FAN: Fan casing
                                                                                2. MDE: Motor-Drive End
                                                                                3. MNDE: Motor-Non-Drive End

                                                                                Note that: Not all asset types provide data from all 3 measurement points.


                                                                                Measurements from each measurement point can be provided from three axes Axial (A), vertical (V), and Horizontal (H)

                                                                                ", + "description": "The models deployed to AI4EU experiment platform are based on the data\nprovided by HAT Analytics as part of the AI4EU challenge entitled \"ML\nassistant for vibration monitoring\".\n\n \n\nThree models have been developed corresponding to three different asset types:\n\n 1. Direct fans\n 2. Feet-mounted fans\n 3. Flange-mounted fans\n\n \n\nThe measurements are gathered from different measurement points namely\n\n 1. **FAN** : Fan casing\n 2. **MDE** : Motor-Drive End \n 3. **MNDE** : Motor-Non-Drive End\n\nNote that: Not all asset types provide data from all 3 measurement points.\n\n \n\nMeasurements from each measurement point can be provided from three axes Axial\n( **A** ), vertical ( **V** ), and Horizontal ( **H** )\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7264d5a0-ee24-497a-853d-acdf6b8bdd51&revisionId=23318740-fcef-4e42-8f59-c56ab7b8e72f&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3834,7 +3834,7 @@ "status": "draft" }, "name": "aipanel_approvedvswithdrawn", - "description": "

                                                                                Goal:

                                                                                To design a model that is able to predict whether a drug compound is approved or has potential tendencies to be withdrawn.

                                                                                Approach:

                                                                                To achieve this goal, a Deep Convolutional Neural Network (D-CNN) has been implemented on molecular descriptors obtained for the drugs, to develop a 2-class predictive model where the classes are 0: Approved, 1: Withdrawn. 

                                                                                To prepare the dataset, following drugs were obtained from specific databases:

                                                                                1. 270 Withdrawn Drugs from Charite Database
                                                                                2. 2800 Approved Drugs from CHEMBL Database

                                                                                Due to the imbalanced ratio of withdrawn and approved drugs, certain steps were taken during data preparation to help the model learn a better representation from the dataset. These steps are discussed in the later slides.

                                                                                For the obtained Drugs, their SMILES were extracted from CHEMBL. SMILES are notations for describing the structure of chemical species using short ASCII strings. The SMILES were further used to extract 881 PUBCHEM Molecular Descriptors using PaDEL, a software to cal", + "description": "**_Goal:_**\n\nTo design a model that is able to predict whether a drug compound is approved\nor has potential tendencies to be withdrawn.\n\n ** _Approach:_**\n\nTo achieve this goal, a Deep Convolutional Neural Network (D-CNN) has been\nimplemented on molecular descriptors obtained for the drugs, to develop a\n2-class predictive model where the classes are 0: Approved, 1: Withdrawn.\n\nTo prepare the dataset, following drugs were obtained from specific databases:\n\n 1. 270 Withdrawn Drugs from Charite Database\n 2. 2800 Approved Drugs from CHEMBL Database\n\nDue to the imbalanced ratio of withdrawn and approved drugs, certain steps\nwere taken during data preparation to help the model learn a better\nrepresentation from the dataset. These steps are discussed in the later\nslides.\n\nFor the obtained Drugs, their SMILES were extracted from CHEMBL. SMILES are\nnotations for describing the structure of chemical species using short ASCII\nstrings. The SMILES were further used to extract 881 PUBCHEM Molecular\nDescriptors using PaDEL, a software to calculate molecular descriptors and\nfingerprints. Furthermore, 729 Chemotype features were also extracted for all\ndrugs, where a chemotype describes the subspecies of a drug using its\nmolecular structure. In total 1610 features were\nprepared.![](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABCAAAAL9CAYAAAD6nBeuAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAP+lSURBVHhe7N0FnBz1+cfx53K5XNzd3UNcCBbc3f5YIVCktIUCpUWKtlDc2kKRFofiboEQJBAS4k7c3eWS3OVu//v53fySyWb3crbJXe775rXkdnZ8ZmfneX4yKZEoExERERERERFJonLBvyIiIiIiIiIiSaMEhIiIiIiIiIgknRIQIiIiIiIiIpJ0SkCIiIiIiIiISNIpASEiIiIiIiIiSacEhIiIiIiIiIgknRIQIiIiIiIiIpJ0SkCIiIiIiIiISNIpASEiIiIiIiIiSacEhIiIiIiIiIgknRIQIiIiIiIiIpJ0SkCIiIiIiIiISNIpASEiIiIiIiIiSacEhIiIiIiIiIgknRIQIiIiIiIiIpJ0SkCIiIiIiI", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=72bbafe5-031c-4a8c-ad21-42d1388b00fd&revisionId=8b6967d7-fd07-4a8d-b6e6-f66ed2a360ad&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -3877,7 +3877,7 @@ "status": "draft" }, "name": "TEK_THOR_SIMULATION", - "description": "

                                                                                AI4EU - THOR THOR solution consists in a Hybrid optimization solution to make the right decision on the amount spare parts in stock, considering past sales and forecasts. The purchase decision considers as input information current stock status, production needs, production forecast, sales forecast, variability Price of stock material and several restriction parameters.

                                                                                Cash-Flow Simulation. A probabilistic Monte Carlo simulator of cash-flow, taking into account existing datasets and forecasts.

                                                                                ", + "description": "**AI4EU - THOR **THOR solution consists in a Hybrid optimization solution to\nmake the right decision on the amount spare parts in stock, considering past\nsales and forecasts. The purchase decision considers as input information\ncurrent stock status, production needs, production forecast, sales forecast,\nvariability Price of stock material and several restriction parameters.\n\n **Cash-Flow Simulation**. A probabilistic Monte Carlo simulator of cash-flow,\ntaking into account existing datasets and forecasts.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=731f99e5-8aef-4375-832f-8d5ababf21b3&revisionId=999f0664-c19c-4492-8520-cf467abc4b14&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -3920,7 +3920,7 @@ "status": "draft" }, "name": "pddl-planners-ffi", - "description": "

                                                                                An ACUMOS component which, acting as a gRPC server, is able to call a number of PDDL action planners (ff, fd, popf and optic for now).

                                                                                Asset produced by the AIPlan4EU project.




                                                                                This project contains an ACUMOS component which, acting as a gRPC server, is able to call a number of PDDL action planners (ff, fd, popf and optic for now).

                                                                                This is more a proof of concept on how to integrate PDDL Planner within a docker made available for ACUMOS Hybrid Pipelines.

                                                                                If you want to run the server locally, each of these planners needs to be installed separately and have to be available in your PATH. Otherwise, you can use the Dockerize version (see Docker version on this page which contains all of them), still you will need the client.


                                                                                The supported planners for now are:


                                                                                • ff is pretty straighforward to install FF homepage
                                                                                • fd fast downward is easy to install too Fast Downward homepage
                                                                                • popf, I would not know, I grabbed the binary from the ROSPlan distribution (bad bad\u2026\u200b), but here is the POPF homepage
                                                                                • optic is a pain to install, the Cmake files are broken\u2026\u200b Check OPTIC homepage, you may find th", + "description": "An ACUMOS component which, acting as a gRPC server, is able to call a number\nof PDDL action planners (`ff, fd, popf` and `optic` for now).\n\nAsset produced by the [AIPlan4EU](https://aiplan4eu.fbk.eu/) project.\n\n \n\n \n\n \n\nThis project contains an ACUMOS component which, acting as a gRPC server, is\nable to call a number of PDDL action planners (`ff`, `fd`, `popf` and `optic`\nfor now).\n\nThis is more a proof of concept on how to integrate PDDL Planner within a\ndocker made available for ACUMOS Hybrid Pipelines.\n\nIf you want to run the server locally, each of these planners needs to be\ninstalled separately and have to be available in your PATH. Otherwise, you can\nuse the Dockerize version (see [Docker\nversion](https://github.com/aiplan4eu/acumos-planners#Docker_version) on this\npage which contains all of them), still you will need the client.\n\n \n\nThe supported planners for now are:\n\n \n\n * `ff` is pretty straighforward to install [FF homepage](https://fai.cs.uni-saarland.de/hoffmann/ff.html)\n * `fd` fast downward is easy to install too [Fast Downward homepage](http://www.fast-downward.org/HomePage)\n * `popf`, I would not know, I grabbed the binary from the ROSPlan distribution (bad bad\u2026\u200b), but here is the [POPF homepage](https://nms.kcl.ac.uk/planning/software/popf.html)\n * `optic` is a pain to install, the Cmake files are broken\u2026\u200b Check [OPTIC homepage](https://nms.kcl.ac.uk/planning/software/optic.html), you may find the proper binary for you\u2026\u200b\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=73a6170b-47a0-4f99-bf95-af01798f693b&revisionId=e72ada49-fffb-45d3-9ef9-9e2b749cbd19&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -3963,7 +3963,7 @@ "status": "draft" }, "name": "trondheim-rl-agent", - "description": "

                                                                                  SUMO/RL implements a pipeline with a traffic simulator of the city of Trondheim, Norway, and a reinforcement learning autonomous agent that learns and implements traffic control policies with the goal of minimizing the number of pollution peaks above a given threshold. Each component can be ran stand alone.

                                                                                  This resource contains a trained Reinforcement Learning agent to interact with the 'trondheim-simulator' traffic simulator with the goal of reducing pollution peaks.

                                                                                  For a more detailed description check the github repository of the resource: https://github.com/tsveiga/AI4EU-RL-Trondheim

                                                                                  ", + "description": "SUMO/RL implements a pipeline with a traffic simulator of the city of\nTrondheim, Norway, and a reinforcement learning autonomous agent that learns\nand implements traffic control policies with the goal of minimizing the number\nof pollution peaks above a given threshold. Each component can be ran stand\nalone.\n\nThis resource contains a trained Reinforcement Learning agent to interact with\nthe 'trondheim-simulator' traffic simulator with the goal of reducing\npollution peaks.\n\nFor a more detailed description check the github repository of the resource:\nhttps://github.com/tsveiga/AI4EU-RL-Trondheim\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=78591f43-c83a-45bb-b5fe-1d79d15cfdde&revisionId=bf5bcfff-4c70-4ca3-bf20-0c6d88f352f7&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4006,7 +4006,7 @@ "status": "draft" }, "name": "Molecule-Trainer", - "description": "

                                                                                  Molecule Trainer is a modelling pipeline for optimization, training and deployment of models for molecular single prediction tasks. Molecule Trainer optimizes and trains a graph neural network based on Efficient Graph Convolution with fully connected layers at the end, which can produce accurate models with lower memory consumption and latency. As input it requires only a SMILES string of the molecules along with a binary or continuous target variable. The pipeline automatically checks if the task is classification or regression and optimizes the classification or regression metrics accordingly. Molecule Trainer offers methods for optimization, training and prediction. The description of these methods is given in the user guide.

                                                                                  ", + "description": "Molecule Trainer is a modelling pipeline for optimization, training and\ndeployment of models for molecular single prediction tasks. Molecule Trainer\noptimizes and trains a graph neural network based on Efficient Graph\nConvolution with fully connected layers at the end, which can produce accurate\nmodels with lower memory consumption and latency. As input it requires only a\nSMILES string of the molecules along with a binary or continuous target\nvariable. The pipeline automatically checks if the task is classification or\nregression and optimizes the classification or regression metrics accordingly.\nMolecule Trainer offers methods for optimization, training and prediction. The\ndescription of these methods is given in the user guide.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7a343bda-ecb5-4c6d-8a17-88c8d9139f50&revisionId=1626f215-66ff-4dbe-b4a1-17e3f74b64c5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4049,7 +4049,7 @@ "status": "draft" }, "name": "ucrsuite-dtw", - "description": "

                                                                                  This module implements fast nearest-neighbor retrieval of a times series in a larger time series expressed as location and distance using the UCR suite Dynamic Time Wrapping (DTW) algorithm.


                                                                                  ", + "description": "This module implements fast nearest-neighbor retrieval of a times series in a\nlarger time series expressed as location and distance using the UCR suite\nDynamic Time Wrapping (DTW) algorithm.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7cc68464-54e3-4a57-9e36-afdd04af7b74&revisionId=aeafd55f-59f5-4191-a34a-16ad0f7433d6&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4092,7 +4092,7 @@ "status": "draft" }, "name": "atranscribe", - "description": "

                                                                                  ATransCribe is a speech to text service. It uses whisper model for transcription. Whisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification.Also using its underlying deep learning technology it process soundclips and removes background noises etc. for better results.The app is developed and used for the H2020 project AI-PROFICIENT.








                                                                                  ", + "description": "ATransCribe is a speech to text service. It uses whisper model for\ntranscription. Whisper is a general-purpose speech recognition model. It is\ntrained on a large dataset of diverse audio and is also a multi-task model\nthat can perform multilingual speech recognition as well as speech translation\nand language identification.Also using its underlying deep learning technology\nit process soundclips and removes background noises etc. for better\nresults.The app is developed and used for the H2020 project AI-PROFICIENT.\n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7ed5c850-a7a4-4f71-bf97-c07be436424f&revisionId=b5057270-26f1-49da-b650-610d88fd6df1&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4135,7 +4135,7 @@ "status": "draft" }, "name": "entity_extractor", - "description": "

                                                                                  Extracts personally identifiable information from documents of different formats. Entities detected include names, addresses, or faces.

                                                                                  ", + "description": "Extracts personally identifiable information from documents of different\nformats. Entities detected include names, addresses, or faces.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=86b14065-b351-4e37-a394-a401a997c542&revisionId=fd34ef22-937c-4bec-9a02-f4af848e0c3b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4178,7 +4178,7 @@ "status": "draft" }, "name": "critical-part-classifier", - "description": "

                                                                                  This is a composite pipeline consisting of the Tensorflow model created for critical part prediction along with a generic data broken block that is used to match the contents of the CSV input file to the expected input features of the model. Given a set of features that describe the production line characteristics or factory conditions, the model we have built predicts whether a particular component part is critical or not to the supply chain. The end goal is the optimization of the stock management.

                                                                                  ", + "description": "This is a composite pipeline consisting of the Tensorflow model created for\ncritical part prediction along with a generic data broken block that is used\nto match the contents of the CSV input file to the expected input features of\nthe model. Given a set of features that describe the production line\ncharacteristics or factory conditions, the model we have built predicts\nwhether a particular component part is critical or not to the supply chain.\nThe end goal is the optimization of the stock management.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=88e79675-8008-4b48-bbac-67e7b5c519ed&revisionId=f6e7ad03-637f-490e-babb-36eb7544cf59&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "0", @@ -4350,7 +4350,7 @@ "status": "draft" }, "name": "ai4iot-calibration", - "description": "

                                                                                  The Calibration component is part of the AI4IoT Calibration pipeline. It includes a machine learning model that predicts the calibrated values of raw data coming from low-cost sensors, such that the output is as close as possible to reference values. The component is deployed with a pre-trained model and outputs the calibrated values for PM2.5 and PM10 measurements. Inputs are PM measurements from the sensor and meteorological data.

                                                                                  ", + "description": "The Calibration component is part of the AI4IoT Calibration pipeline. It\nincludes a machine learning model that predicts the calibrated values of raw\ndata coming from low-cost sensors, such that the output is as close as\npossible to reference values. The component is deployed with a pre-trained\nmodel and outputs the calibrated values for PM2.5 and PM10 measurements.\nInputs are PM measurements from the sensor and meteorological data.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=4afa4cfa-ee5d-4ffa-b114-1f9f093a2ac6&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.4", @@ -4436,7 +4436,7 @@ "status": "draft" }, "name": "SmartProc", - "description": "

                                                                                  Based on the given and read-in data of a time series, the algorithm calculates a forecast of how the data will develop further in a freely definable time horizon. Trends are recognised and taken into account in the forecast, as are seasonalities and similar dependencies that are contained in the input data and are recognised by the algorithm. The algorithm can be used for all types of data where a forecast makes sense, such as sales figures for a product or parts requirements for purchasing from a supplier. It must be said, however, that extraordinary events such as corona or disasters cannot be predicted by any AI-based algorithm - and it is true that it is only a prediction that does not necessarily reflect reality. The readme.txt file contains an example of a client script that addresses the algorithm and displays the result of the algorithm in a browser.

                                                                                  ", + "description": "Based on the given and read-in data of a time series, the algorithm calculates\na forecast of how the data will develop further in a freely definable time\nhorizon. Trends are recognised and taken into account in the forecast, as are\nseasonalities and similar dependencies that are contained in the input data\nand are recognised by the algorithm. The algorithm can be used for all types\nof data where a forecast makes sense, such as sales figures for a product or\nparts requirements for purchasing from a supplier. It must be said, however,\nthat extraordinary events such as corona or disasters cannot be predicted by\nany AI-based algorithm - and it is true that it is only a prediction that does\nnot necessarily reflect reality. The readme.txt file contains an example of a\nclient script that addresses the algorithm and displays the result of the\nalgorithm in a browser.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8c424961-1218-492f-b041-2653a84817a4&revisionId=e4572dcc-8e52-4207-91f3-897f17cd7861&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4479,7 +4479,7 @@ "status": "draft" }, "name": "NLP-IMECH", - "description": "

                                                                                  The module uses Natural Language Processing (cosine difference) to compare input text with a list of sentences contained in a csv file and returns the most similar description from the csv file along with its index in the csv file.

                                                                                  ", + "description": "The module uses Natural Language Processing (cosine difference) to compare\ninput text with a list of sentences contained in a csv file and returns the\nmost similar description from the csv file along with its index in the csv\nfile.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=93301148-af5f-4647-bd0c-51180d6d3688&revisionId=23be4e3a-e8e5-4066-b668-5590f78e5f20&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4522,7 +4522,7 @@ "status": "draft" }, "name": "IoTConceptExtraction", - "description": "

                                                                                  We developed an AI-based tool that automatically extracts knowledge from IoT ontologies to support the construction of a unified ontology for Web of Things. The following technologies are used: W3C semantic web technologies (such as RDF, OWL, SPARQL, SKOS), Deep learning model (Word2vec) and unsupervised clustering algorithms (K-means).

                                                                                  ", + "description": "We developed an AI-based tool that automatically extracts knowledge from IoT\nontologies to support the construction of a unified ontology for Web of\nThings. The following technologies are used: W3C semantic web technologies\n(such as RDF, OWL, SPARQL, SKOS), Deep learning model (Word2vec) and\nunsupervised clustering algorithms (K-means).\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9629027c-1030-446d-80ad-dec86ddeadeb&revisionId=8daafca8-0c5d-4266-a25b-6c0aa4af0a79&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4565,7 +4565,7 @@ "status": "draft" }, "name": "traffic-scene-segmentation-deeplab-xception65-cityscapes", - "description": "

                                                                                  This module provides a semantic segmentation using the `xception65_cityscapes_trainfine` model from the tensorflow model zoo1.

                                                                                  Here is the table of the cityscapes train classes with their id and their RGB color values used by the model and output of the module.

                                                                                  | class name    |   ID |    R |    G |    B |\n| ------------- | ---: | ---: | ---: | ---: |\n| ROAD          |    0 |  128 |   64 |  128 |\n| SIDEWALK      |    1 |  244 |   35 |  232 |\n| BUILDING      |    2 |   70 |   70 |   70 |\n| WALL          |    3 |  102 |  102 |  156 |\n| FENCE         |    4 |  190 |  153 |  153 |\n| POLE          |    5 |  153 |  153 |  153 |\n| TRAFFIC LIGHT |    6 |  250 |  170 |   30 |\n| TRAFFIC SIGN  |    7 |  220 |  220 |    0 |\n| VEGETATION    |    8 |  107 |  142 |   35 |\n| TERRAIN       |    9 |  152 |  251 |  152 |\n| SKY           |   10 |   70 |  130 |  180 |\n| PERSON        |   11 |  220 |   20 |   60 |\n| RIDE",
                                                                                  +    "description": "This module provides a semantic segmentation using the\n`xception65_cityscapes_trainfine` model from the tensorflow model zoo1.\n\nHere is the table of the cityscapes train classes with their id and their RGB\ncolor values used by the model and output of the module.\n\n    \n    \n    | class name    |   ID |    R |    G |    B |\n    | ------------- | ---: | ---: | ---: | ---: |\n    | ROAD          |    0 |  128 |   64 |  128 |\n    | SIDEWALK      |    1 |  244 |   35 |  232 |\n    | BUILDING      |    2 |   70 |   70 |   70 |\n    | WALL          |    3 |  102 |  102 |  156 |\n    | FENCE         |    4 |  190 |  153 |  153 |\n    | POLE          |    5 |  153 |  153 |  153 |\n    | TRAFFIC LIGHT |    6 |  250 |  170 |   30 |\n    | TRAFFIC SIGN  |    7 |  220 |  220 |    0 |\n    | VEGETATION    |    8 |  107 |  142 |   35 |\n    | TERRAIN       |    9 |  152 |  251 |  152 |\n    | SKY           |   10 |   70 |  130 |  180 |\n    | PERSON        |   11 |  220 |   20 |   60 |\n    | RIDER         |   12 |  255 |    0 |    0 |\n    | CAR           |   13 |    0 |    0 |  142 |\n    | TRUCK         |   14 |    0 |    0 |   70 |\n    | BUS           |   15 |    0 |   60 |  100 |\n    | TRAIN         |   16 |    0 |   80 |  100 |\n    | MOTOCYCLE     |   17 |    0 |    0 |  230 |\n    | BICYCLE       |   18 |  119 |   11 |   32 |\n    \n\nThe resolution of images is restricted by the model. The maximum width is 2049\nand maximum height is 1025.\n\nCommunication of image data happens via filepaths that specfiy the location\nrelative to a docker volume mount path. Docker volume mount is expected to be\ncommuncated via a environment variable `SHARED_FOLDER_PATH`.\n\nThere are two outputs of the model. The first is a paletted image, where the\npalette index is the class and the color the cityscapes dataset colo",
                                                                                       "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=98febe4e-ce6d-4f33-90b1-7a87c6c1638b&revisionId=7a8c8f00-2f8d-47dc-91e6-7f02536c2498&parentUrl=marketplace#md-model-detail-template",
                                                                                       "date_published": "2023-09-01T15:15:00.000",
                                                                                       "version": "1.0.0",
                                                                                  @@ -4608,7 +4608,7 @@
                                                                                         "status": "draft"
                                                                                       },
                                                                                       "name": "advice-converter-pipeline",
                                                                                  -    "description": "

                                                                                  In this pipeline, the label format converter node reads the annotations from the shared folder and converts from one standard format to another

                                                                                  ", + "description": "In this pipeline, the label format converter node reads the annotations from\nthe shared folder and converts from one standard format to another\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9c590181-fcdd-4f08-afdb-d00cc8ae094c&revisionId=e37153fb-c912-4fe8-a95c-8dbcd52b94e5&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "st3", @@ -4651,7 +4651,7 @@ "status": "draft" }, "name": "ithermai-quality-check-service", - "description": "

                                                                                  This is an AI model for classification of normal and faulty products of the injection molding process. It uses RGBT camera frames as input and labels them as faulty and normal products.

                                                                                  ", + "description": "This is an AI model for classification of normal and faulty products of the\ninjection molding process. It uses RGBT camera frames as input and labels them\nas faulty and normal products.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9ddecdf3-9be6-4b4e-a74b-eccfe1c1a6e8&revisionId=21f1a7b0-3e82-492f-95bc-7b3e78d7cf36&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4694,7 +4694,7 @@ "status": "draft" }, "name": "i-nergy-load-forecasting", - "description": "

                                                                                  This is a forecasting service for predicting electrical load of a boiler room in a large District Heating Network in hourly basis.

                                                                                  This service is based on a Seasonal ARIMA model implemented in context of I-NERGY project.

                                                                                  For more information on how to use the solution, please see README.pdf in Documents section.







                                                                                  ", + "description": "This is a forecasting service for predicting electrical load of a boiler room\nin a large District Heating Network in hourly basis.\n\nThis service is based on a Seasonal ARIMA model implemented in context of\n[I-NERGY](https://www.i-nergy.eu/) project.\n\nFor more information on how to use the solution, please see README.pdf in\nDocuments section.\n\n \n\n \n\n \n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9fc0357c-2b50-4733-8225-44f78a9d5421&revisionId=ae6bd423-aa37-411f-a8f1-40aeb6b0bd4d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4737,7 +4737,7 @@ "status": "draft" }, "name": "house-prices-databroker", - "description": "

                                                                                  Databroker of the House Price Prediction Pipeline.

                                                                                   The databroker is responsible for the transfer of house-price dataset to the model. The features are selected based on higher correlation coeffecient. It has a WebUI that can be used to feed new/ unseen input to the model that predicts the sales price of a house.

                                                                                   Repository:

                                                                                  Please refer the following link for the houseprice-prediction code in the Eclipse Graphene platform - https://gitlab.eclipse.org/eclipse/graphene/tutorials/-/tree/main/House_Price_Prediction


                                                                                  ", + "description": "Databroker of the House Price Prediction Pipeline.\n\n The databroker is responsible for the transfer of house-price dataset to the\nmodel. The features are selected based on higher correlation coeffecient. It\nhas a WebUI that can be used to feed new/ unseen input to the model that\npredicts the sales price of a house.\n\n **Repository:**\n\nPlease refer the following link for the houseprice-prediction code in the\nEclipse Graphene platform -\n\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a03e571c-634f-4da5-83cd-1cd069e304e0&revisionId=b577c72a-0f61-4d72-b04c-823ed54f4fa8&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4780,7 +4780,7 @@ "status": "draft" }, "name": "VideoObjectRecognition", - "description": "

                                                                                  The video object recognition model detects and classifies objects in a video segment

                                                                                  ", + "description": "The video object recognition model detects and classifies objects in a video\nsegment\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a2dd4a73-eae7-4c03-9e10-d07de158d040&revisionId=e2e04665-c00e-4363-9d29-837af49a370d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4823,7 +4823,7 @@ "status": "draft" }, "name": "keras-iris-model", - "description": "

                                                                                  Classify Iris blossoms with a keras model

                                                                                  ", + "description": "Classify Iris blossoms with a keras model\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a48fdedd-0ba3-49a4-befe-046467110a6e&revisionId=988e80a4-0629-48d4-8805-ce3cc7f71429&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4866,7 +4866,7 @@ "status": "draft" }, "name": "predictive-maintenance", - "description": "

                                                                                  Neural network trained in a Federated Learning way for predicting the failure of motors based on a set of features. The network is trained in an experiment in the DIH4AI project in a collaboration of the South Netherlands DIH and Fortiss. The federated learning process was executed on an International Data Spaces architecture with the whole process being recorded by the Evidencia plugin, of which the factsheet is uploaded as document.

                                                                                  ", + "description": "Neural network trained in a Federated Learning way for predicting the failure\nof motors based on a set of features. The network is trained in an experiment\nin the DIH4AI project in a collaboration of the South Netherlands DIH and\nFortiss. The federated learning process was executed on an International Data\nSpaces architecture with the whole process being recorded by the Evidencia\nplugin, of which the factsheet is uploaded as document.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a90b4145-51ec-4345-be5f-21d2c8e9a214&revisionId=c4624a34-affb-417b-b004-d30809697b49&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4909,7 +4909,7 @@ "status": "draft" }, "name": "Healthymity", - "description": "

                                                                                  Complex natural language processing model based on cognitive linguistics and semi-supervised learning using neural networks. The model is used to predict ICD code through medical notes text. 

                                                                                  ", + "description": "Complex natural language processing model based on cognitive linguistics and\nsemi-supervised learning using neural networks. The model is used to predict\nICD code through medical notes text.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=acb05f2a-d6ed-491d-9d70-bea6b8092ca9&revisionId=73b36c23-5849-4ac1-95f1-753070175bd3&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -4952,7 +4952,7 @@ "status": "draft" }, "name": "forWoT", - "description": "


                                                                                  The model is built to preprocess digitized worker cards. The model crops the workercard in the image and performs morphological transformations to remove occulusions.

                                                                                  ", + "description": "The model is built to preprocess digitized worker cards. The model crops the\nworkercard in the image and performs morphological transformations to remove\nocculusions.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0&revisionId=88630572-3464-444b-9ed5-86bf4dde7c56&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -5210,7 +5210,7 @@ "status": "draft" }, "name": "adios-train", - "description": "


                                                                                  I-NERGY - TTP1 - ADIOS TRAIN MODEL

                                                                                  Trains model for anomaly detection in power grid SCADA output. Given the alarm labelled set, which can be extended using the labelling system described above, we train a machine learning model to predict its category. The available alarms are randomly split in half, and the first part is used as a training set and the latter as a test set, on which we evaluate the performance.

                                                                                  AIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-training-model

                                                                                  Attribution

                                                                                  This project has received funding from the European Union's Horizon 2020 research and innovation programme within the framework of the I-NERGY Project, funded under grant agreement No 101016508


                                                                                  ", + "description": " \n\nI-NERGY - TTP1 - ADIOS TRAIN MODEL\n\nTrains model for anomaly detection in power grid SCADA output. Given the alarm\nlabelled set, which can be extended using the labelling system described\nabove, we train a machine learning model to predict its category. The\navailable alarms are randomly split in half, and the first part is used as a\ntraining set and the latter as a test set, on which we evaluate the\nperformance.\n\nAIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-\ntraining-model\n\nAttribution\n\nThis project has received funding from the European Union's Horizon 2020\nresearch and innovation programme within the framework of the I-NERGY Project,\nfunded under grant agreement No 101016508\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b5664ace-53a0-4739-bf3d-8f549091f871&revisionId=0010242a-25ea-4ba2-b3fd-46f938004671&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5253,7 +5253,7 @@ "status": "draft" }, "name": "house-prices-model", - "description": "

                                                                                  Prediction model of the House Price Prediction Pipeline. 

                                                                                  The houseprice-prediction model trains with the dataset from the databroker. Once trained, the model can then predict the sales price of houses for new unseen input data. It has a WebUI that displays the predicted sale price of the house for corresponding inputs from the user. 

                                                                                  Repository link: 

                                                                                  Please refer the following link for the houseprice-prediction code in the Eclipse Graphene platform - https://gitlab.eclipse.org/eclipse/graphene/tutorials/-/tree/main/House_Price_Prediction


                                                                                  ", + "description": "Prediction model of the House Price Prediction Pipeline.\n\nThe houseprice-prediction model trains with the dataset from the databroker.\nOnce trained, the model can then predict the sales price of houses for new\nunseen input data. It has a WebUI that displays the predicted sale price of\nthe house for corresponding inputs from the user.\n\n **Repository link: **\n\nPlease refer the following link for the houseprice-prediction code in the\nEclipse Graphene platform -\n\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b59939e2-76ef-4d82-b869-e96b89e6e175&revisionId=ae1f9926-f865-4467-8d56-b5e9a33fb193&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5296,7 +5296,7 @@ "status": "draft" }, "name": "sentiment-analysis-databroker", - "description": "

                                                                                  The model is the databroker for the Sentiment Analysis pipeline.

                                                                                  It has a user interface(UI) that takes the query text from the user and connects to the prediction model. The results can then be viewed on the Prediction model's UI.

                                                                                  ", + "description": "The model is the databroker for the Sentiment Analysis pipeline.\n\nIt has a user interface(UI) that takes the query text from the user and\nconnects to the prediction model. The results can then be viewed on the\nPrediction model's UI.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b6adb7b2-d8f6-47c6-9702-d8a16338a8e1&revisionId=86d03e8a-619f-4f79-8759-10566671f01d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5339,7 +5339,7 @@ "status": "draft" }, "name": "SwabAI", - "description": "

                                                                                  Adaptive optimization model for Electric Discharge Machining.

                                                                                  ", + "description": "Adaptive optimization model for Electric Discharge Machining.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b810ae05-a50e-4dd6-80ff-02384d56ca04&revisionId=257af2ec-9e0f-405d-852e-a6c7b8f73532&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5425,7 +5425,7 @@ "status": "draft" }, "name": "adios-label-extend", - "description": "

                                                                                  I-NERGY - TTP1 - ADIOS LABEL EXTEND MODEL

                                                                                  This model extends train model dataset labels using one hot encoding and closest distance matching. 

                                                                                  The label extension mechanism uses similarity between alarms to associate each unknown alarm with its most similar known one. We pick a reduced portion of the overall dataset (50k alarms) to extend the training set. The features of the dataset are mainly string fields, except for the Priority file, which is numerical. The similarity between each two alarms is measured in terms of the number of different features that they present. 

                                                                                  AIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-label-extend

                                                                                  Attribution

                                                                                  This project has received funding from the European Union's Horizon 2020 research and innovation programme within the framework of the I-NERGY Project, funded under grant agreement No 101016508

                                                                                  ", + "description": "I-NERGY - TTP1 - ADIOS LABEL EXTEND MODEL\n\nThis model extends train model dataset labels using one hot encoding and\nclosest distance matching.\n\nThe label extension mechanism uses similarity between alarms to associate each\nunknown alarm with its most similar known one. We pick a reduced portion of\nthe overall dataset (50k alarms) to extend the training set. The features of\nthe dataset are mainly string fields, except for the Priority file, which is\nnumerical. The similarity between each two alarms is measured in terms of the\nnumber of different features that they present.\n\nAIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-label-\nextend\n\nAttribution\n\nThis project has received funding from the European Union's Horizon 2020\nresearch and innovation programme within the framework of the I-NERGY Project,\nfunded under grant agreement No 101016508\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b9748aa3-9340-4f27-a7b9-59cea5d80d3c&revisionId=4613434d-2ef5-4e60-9fb9-26382dafb97c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5468,7 +5468,7 @@ "status": "draft" }, "name": "Multimodal_AI", - "description": "

                                                                                  The proposed model is a draft solution for the challenge titled \"Enhancing Clinical AI workflow\". The model is based on multi-modality which takes in multi modal data features after translating, co-aligning and fusion. The main objective is to integrate the model into the clinical decision support system .

                                                                                  ", + "description": "The proposed model is a draft solution for the challenge titled \"Enhancing\nClinical AI workflow\". The model is based on multi-modality which takes in\nmulti modal data features after translating, co-aligning and fusion. The main\nobjective is to integrate the model into the clinical decision support system\n.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b97a16cd-e475-4f5f-83e5-f1d042a3772a&revisionId=34816a52-7ba9-4890-8203-c0a6dd5fe270&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5511,7 +5511,7 @@ "status": "draft" }, "name": "rp4pl-rul", - "description": "

                                                                                  This model was developed part of the AI4EU program. RP4PL - RUL (Reliable Prediction for Pump Lifetime - Remaining useful lifetime) is used to predict the remaining useful lifetime for manufactured pumps. It takes as input final quality test data from the pump manufacturing process and outputs a lifetime prediction. The model pipeline contains data transformation and feature inference. It is constructed using a random forest regression algorithm, along with a feature selection step to reduce the set of features to a smaller subset.


                                                                                  ", + "description": "This model was developed part of the AI4EU program. RP4PL - RUL (Reliable\nPrediction for Pump Lifetime - Remaining useful lifetime) is used to predict\nthe remaining useful lifetime for manufactured pumps. It takes as input final\nquality test data from the pump manufacturing process and outputs a lifetime\nprediction. The model pipeline contains data transformation and feature\ninference. It is constructed using a random forest regression algorithm, along\nwith a feature selection step to reduce the set of features to a smaller\nsubset.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb45c963-205b-4d3b-aad4-9968dce77ee5&revisionId=cd27d33d-3a04-4cb1-be7c-b36522d0f8e1&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5554,7 +5554,7 @@ "status": "draft" }, "name": "SWE_predictor", - "description": "

                                                                                  A regression ML model that provides a Snow Water Equivalent indicator from Earth Observation data and data from climate models (ERA5) in river watersheds in the Alpine area. This model has been developed by Amigo srl (https://amigoclimate.com) for SnowPower, an innovative Software-as-a-Service to assist hydropower operators that is part of the I-NERGY 1st Open call. In particular, this model is at the core of the Snow module of SnowPower.Details about data input requirements and model performance are provided in the related entry in the AIOD Catalog (HERE).






                                                                                  Image: flaticon.com

                                                                                  ", + "description": "A regression ML model that provides a Snow Water Equivalent indicator from\nEarth Observation data and data from climate models (ERA5) in river watersheds\nin the Alpine area. This model has been developed by Amigo srl\n([https://amigoclimate.com](https://amigoclimate.com/)) for SnowPower, an\ninnovative Software-as-a-Service to assist hydropower operators that is part\nof the[ I-NERGY 1st Open call](https://www.ai4europe.eu/ai-\ncommunity/projects/i-nergy). In particular, this model is at the core of the\nSnow module of SnowPower.Details about data input requirements and model\nperformance are provided in the related entry in the AIOD Catalog\n([HERE](https://www.ai4europe.eu/research/ai-catalog/description-and-setup-ml-\nmodels-estimation-snow-water-equivalent-swe-and-runoff)).\n\n \n\n \n\n \n\n \n\n \n\nImage: flaticon.com\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=be8d1f46-c311-4578-a6cd-69dc8d3fa33b&revisionId=c310c554-9bfa-4146-9e21-6fff647f5abe&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5597,7 +5597,7 @@ "status": "draft" }, "name": "ai4eu-kewot", - "description": "

                                                                                  The main objective of this work is to deal with the semantic interoperability challenge, where several entities exist in cross-domain ontologies describing the same concept. Our main contributions can be summarized as follow:

                                                                                  \u00b7      A thorough analysis of several ontologies belonging to two domains (city and mobility) was conducted. The ontological entities were enriched with Google embeddings and plotted in 2-dimensions, revealing concepts of high similarity, not only in terms of semantic but also of syntactic similarity.  

                                                                                  \u00b7      An AI approach was followed in order to automatically extract the topics existing in ontologies of different domains. A detailed evaluation of the AI method was performed, showing qualitative and promising results. A visualization tool was deployed for easier exploration and contrast of the topics.

                                                                                  \u00b7      A Search Mechanism was prepared which takes as input the detected (or any other provided) topics T and an ontology O and returns as output a concept o \\in O which is the most similar to a topic t \\in T

                                                                                  ", + "description": "The main objective of this work is to deal with the semantic interoperability\nchallenge, where several entities exist in cross-domain ontologies describing\nthe same concept. Our main contributions can be summarized as follow:\n\n\u00b7 A thorough analysis of several ontologies belonging to two domains\n(city and mobility) was conducted. The ontological entities were enriched with\nGoogle embeddings and plotted in 2-dimensions, revealing concepts of high\nsimilarity, not only in terms of semantic but also of syntactic similarity. \n\n\u00b7 An AI approach was followed in order to automatically extract the\ntopics existing in ontologies of different domains. A detailed evaluation of\nthe AI method was performed, showing qualitative and promising results. A\nvisualization tool was deployed for easier exploration and contrast of the\ntopics.\n\n\u00b7 A Search Mechanism was prepared which takes as input the detected (or\nany other provided) topics T and an ontology O and returns as output a concept\no \\in O which is the most similar to a topic t \\in T\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c4cfb4bb-4ac6-4303-acd2-8eb3664c4138&revisionId=8676b4dc-21c3-4d65-b13b-8089ecbb33fc&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -5640,7 +5640,7 @@ "status": "draft" }, "name": "ai4eu-kewot", - "description": "

                                                                                  The main objective of this work is to deal with the semantic interoperability challenge, where several entities exist in cross-domain ontologies describing the same concept. Our main contributions can be summarized as follow:

                                                                                  \u00b7      A thorough analysis of several ontologies belonging to two domains (city and mobility) was conducted. The ontological entities were enriched with Google embeddings and plotted in 2-dimensions, revealing concepts of high similarity, not only in terms of semantic but also of syntactic similarity.  

                                                                                  \u00b7      An AI approach was followed in order to automatically extract the topics existing in ontologies of different domains. A detailed evaluation of the AI method was performed, showing qualitative and promising results. A visualization tool was deployed for easier exploration and contrast of the topics.

                                                                                  \u00b7      A Search Mechanism was prepared which takes as input the detected (or any other provided) topics T and an ontology O and returns as output a concept o \\in O which is the most similar to a topic t \\in T

                                                                                  ", + "description": "The main objective of this work is to deal with the semantic interoperability\nchallenge, where several entities exist in cross-domain ontologies describing\nthe same concept. Our main contributions can be summarized as follow:\n\n\u00b7 A thorough analysis of several ontologies belonging to two domains\n(city and mobility) was conducted. The ontological entities were enriched with\nGoogle embeddings and plotted in 2-dimensions, revealing concepts of high\nsimilarity, not only in terms of semantic but also of syntactic similarity. \n\n\u00b7 An AI approach was followed in order to automatically extract the\ntopics existing in ontologies of different domains. A detailed evaluation of\nthe AI method was performed, showing qualitative and promising results. A\nvisualization tool was deployed for easier exploration and contrast of the\ntopics.\n\n\u00b7 A Search Mechanism was prepared which takes as input the detected (or\nany other provided) topics T and an ontology O and returns as output a concept\no \\in O which is the most similar to a topic t \\in T\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c4cfb4bb-4ac6-4303-acd2-8eb3664c4138&revisionId=d97c57a2-7f37-40ec-8ffd-b45f2f69c297&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5726,7 +5726,7 @@ "status": "draft" }, "name": "DAISY", - "description": "

                                                                                  Combination of our expertise in vibration analysis with AI models that will contribute to the diagnosis of rotating machinery

                                                                                  ", + "description": "Combination of our expertise in vibration analysis with AI models that will\ncontribute to the diagnosis of rotating machinery\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e&revisionId=ee78edad-6fa5-456f-8bd1-6cc82fcffb33&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5769,7 +5769,7 @@ "status": "draft" }, "name": "CODE-CRC", - "description": "

                                                                                  The proposed solution is based on fine-tuned with Spanish medical texts of the pre-trained BERT family language models (transformers clinicalBERT and multilingualBERT). The designed text-based classification service predicts ICD-10 codes for clinical text in Spanish for the Colorectal cancer (CRC) and associated diagnoses. The service output contains the ICD-10 \u201ccategory\u201d (3 sign) codes that describe the basic manifestations of injury or sickness for 158 types of diseases related to CRC. The prediction models for ICD-10 codes are with high accuracy: clinicalBERT: 0.794 AUC ROC score and  multilingualBERT: 0.806 AUC ROC score. The service allows the user to switch between two models (clinicalBERT and multilingualBERT) and to set the parameter N for top N diagnoses according to the specific needs.

                                                                                  This model extracts the topics from an audio segment.

                                                                                  ", + "description": "This model extracts the topics from an audio segment.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c8b8a888-ae48-41d4-8476-f2ca6851daa7&revisionId=5eacd881-de83-42f4-bf5a-6ca728f4f082&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -5941,7 +5941,7 @@ "status": "draft" }, "name": "FaceAI", - "description": "

                                                                                  This model provides a solution for swab robot finding the position of the mouth, considering the MDR safety regulations. The position finding alogithm is based on deep learning and AI.

                                                                                  ", + "description": "This model provides a solution for swab robot finding the position of the\nmouth, considering the MDR safety regulations. The position finding alogithm\nis based on deep learning and AI.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=93e8df6b-6226-4674-9963-6d0aa6ddcc3c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -6027,7 +6027,7 @@ "status": "draft" }, "name": "FaceAI", - "description": "






                                                                                  This model provides a solution for swab robot finding the position of the mouth, considering the MDR safety regulations. The position finding alogithm is based on deep learning and AI.

                                                                                  The docker image is based on Python 3.9 slim buster. Scikit-learn and pandas are installed.

                                                                                  ", + "description": "This model provides a solution for swab robot finding the position of the\nmouth, considering the MDR safety regulations. The position finding alogithm\nis based on deep learning and AI.\n\nThe docker image is based on Python 3.9 slim buster. Scikit-learn and pandas\nare installed.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=e0e410f4-12d8-4fec-9113-3b01be44ad62&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.4", @@ -6113,7 +6113,7 @@ "status": "draft" }, "name": "iSolutions", - "description": "

                                                                                  The proposed model is used detection of the teeth and the lips to identification position the mouth detects. The model is including a decision-making process for robots in a medical context.

                                                                                  ", + "description": "The proposed model is used detection of the teeth and the lips to\nidentification position the mouth detects. The model is including a decision-\nmaking process for robots in a medical context.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cb4a33c5-a9a6-4432-bd49-10336956b6b0&revisionId=8e9f567a-8231-4f59-9aef-bfa8e6b79fc0&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6156,7 +6156,7 @@ "status": "draft" }, "name": "MusicDetection", - "description": "

                                                                                  With the software tool for automatic detection of music in combination with detection of speech sequences, Fraunhofer IDMT offers a highly effective solution to determine the exact amount of music and speech in radio and TV programs. The tools can be used to optimize broadcasting programs or provide accurate accounting for copyright agencies.

                                                                                  Less work: Using Fraunhofer IDMT\u2019s software tools, the amount of music and speech in radio and TV programs no longer needs to be determined by means of tedious manual work (typically personnel reading through audio content lists). The tool is able to detect and measure general audio categories (music, speech, music and speech combined, other content) both in live streams and in stored digital audio files.

                                                                                  Easy integration: The tools are scalable and can easily be integrated with standard workflows and components. It can be used in production and live streaming environments, both online and offline.

                                                                                  Easy data export: The tools easily integrate with content management systems. For data output, users may choose between XML files, cue sheets, or other standard data export formats.

                                                                                  ", + "description": "With the software tool for automatic detection of music in combination with\ndetection of speech sequences, Fraunhofer IDMT offers a highly effective\nsolution to determine the exact amount of music and speech in radio and TV\nprograms. The tools can be used to optimize broadcasting programs or provide\naccurate accounting for copyright agencies.\n\nLess work: Using Fraunhofer IDMT\u2019s software tools, the amount of music and\nspeech in radio and TV programs no longer needs to be determined by means of\ntedious manual work (typically personnel reading through audio content lists).\nThe tool is able to detect and measure general audio categories (music,\nspeech, music and speech combined, other content) both in live streams and in\nstored digital audio files.\n\nEasy integration: The tools are scalable and can easily be integrated with\nstandard workflows and components. It can be used in production and live\nstreaming environments, both online and offline.\n\nEasy data export: The tools easily integrate with content management systems.\nFor data output, users may choose between XML files, cue sheets, or other\nstandard data export formats.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cd6d1c1b-896e-4c62-9312-14416d5d411f&revisionId=b836fd7f-e5bf-4879-8d1a-c4ff5df393a9&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6199,7 +6199,7 @@ "status": "draft" }, "name": "Idiap_BEAT_Handwritten_Digit_Recognition_-_Multiclass_Logistic_Regressor_trained_on_M-NIST", - "description": "

                                                                                  This algorithm contains a logistic regression model trained on the MNIST database.It takes as input images of digits and outputs the classification label of images.

                                                                                  To test drive it, the MNIST data broker can be used.This model does not require any configuration and thus can be used as is.

                                                                                  The reference experiment on the BEAT platform is amohammadi/amohammadi/mnist_simple/1/mnist1

                                                                                  ", + "description": "This algorithm contains a logistic regression model trained on the MNIST\ndatabase.It takes as input images of digits and outputs the classification\nlabel of images.\n\nTo test drive it, the MNIST data broker can be used.This model does not\nrequire any configuration and thus can be used as is.\n\nThe reference experiment on the BEAT platform is\n[amohammadi/amohammadi/mnist_simple/1/mnist1](https://www.idiap.ch/software/beat/platform/experiments/amohammadi/amohammadi/mnist_simple/1/mnist1/)\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ce1b6792-889d-46cf-9529-3215802f729c&revisionId=eb3669aa-0889-42e9-a89f-7dab1b12baf1&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6242,7 +6242,7 @@ "status": "draft" }, "name": "news-classifier", - "description": "

                                                                                  Overview:

                                                                                  The classifier module is the core of the entire news training pipeline. It is responsible for the following activities,

                                                                                  1.Training process: Upon receiving the training parameters from the trainer node, the classifier node starts the training process.

                                                                                  2.Saving the trained models: Upon successful training, the models are saved in both the h5 and onnx format available in the shared folder.

                                                                                  3.Classifying the results: The Reuters dataset newswires are labeled over 46 topics. The test sequences are thereupon classified based on these topics.

                                                                                  Repository link:

                                                                                  Please refer the following link for the code that represents the trainer module in the Eclipse Graphene platform - https://gitlab.eclipse.org/eclipse/graphene/tutorials/-/tree/main/news_training/classifier

                                                                                  ", + "description": "**Overview:**\n\nThe classifier module is the core of the entire news training pipeline. It is\nresponsible for the following activities,\n\n1.Training process: Upon receiving the training parameters from the trainer\nnode, the classifier node starts the training process.\n\n2.Saving the trained models: Upon successful training, the models are saved in\nboth the h5 and onnx format available in the shared folder.\n\n3.Classifying the results: The Reuters dataset newswires are labeled over 46\ntopics. The test sequences are thereupon classified based on these topics.\n\n **Repository link:**\n\nPlease refer the following link for the code that represents the trainer\nmodule in the Eclipse Graphene platform -\n\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d05e44a5-d3e6-4730-b8ab-dd7a23fd52d4&revisionId=28719994-c987-4ce9-b88f-4f9d5e4129fc&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6285,7 +6285,7 @@ "status": "draft" }, "name": "viume-pic2text", - "description": "

                                                                                  The proposed model will address the detection of stamps and named entities from the images. To do that, the structure is split into two main modules. 1)Extractor 2)Analyzer. As Extractor, different models based on convolutional and recurrent neural networks will be trained to detect the stamps, signatures, and text. As Analyzer,

                                                                                  The trained NLP model will crop the document and use a custom trained model to extract the relevant information and all the relations inside the document. The extracted information from the document will be assigned with a unique ID and the corresponding columns will be filled with the extracted data.





                                                                                  ", + "description": "The proposed model will address the detection of stamps and named entities\nfrom the images. To do that, the structure is split into two main modules.\n1)Extractor 2)Analyzer. As Extractor, different models based on convolutional\nand recurrent neural networks will be trained to detect the stamps,\nsignatures, and text. As Analyzer,\n\nThe trained NLP model will crop the document and use a custom trained model to\nextract the relevant information and all the relations inside the document.\nThe extracted information from the document will be assigned with a unique ID\nand the corresponding columns will be filled with the extracted data.\n\n \n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d0882778-0ca2-4028-b90c-6c91da657817&revisionId=c2fe1abd-af13-4e90-a176-a44fdc5e4912&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6328,7 +6328,7 @@ "status": "draft" }, "name": "AI_REGIO_DSS4TB", - "description": "

                                                                                  This module is an intelligent troubleshooting system that is able to identify the component that is most probably damaged after a series of closed-ended questions answered by the operator. Such systems are built upon a given knowledge, which is an information matrix that represents the relationship between the possible symptoms and the failure components. The probability evolution is mainly based on the Bayes theorem which can elaborate the conditional probability. It consists of computing the likelihood of a general event occurrence based on the prior probability and the new information provided by each answer. More specifically, each answer allows updating the probability associated with each failure, based on which the next question will be selected.

                                                                                  ", + "description": "This module is an intelligent troubleshooting system that is able to identify\nthe component that is most probably damaged after a series of closed-ended\nquestions answered by the operator. Such systems are built upon a given\nknowledge, which is an information matrix that represents the relationship\nbetween the possible symptoms and the failure components. The probability\nevolution is mainly based on the Bayes theorem which can elaborate the\nconditional probability. It consists of computing the likelihood of a general\nevent occurrence based on the prior probability and the new information\nprovided by each answer. More specifically, each answer allows updating the\nprobability associated with each failure, based on which the next question\nwill be selected.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d1ce6215-8102-4b46-b495-5907bea57ba1&revisionId=d43ac2fe-3d60-4198-a664-7eed1ef2d152&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6371,7 +6371,7 @@ "status": "draft" }, "name": "sentiment-analysis-model", - "description": "

                                                                                  The model is part of the Sentiment Analysis pipeline.

                                                                                  It analysis the sentiment of the query text sent by the databroker and returns the prediction. This prediction can also be viewed on the user interface.

                                                                                  ", + "description": "The model is part of the Sentiment Analysis pipeline.\n\nIt analysis the sentiment of the query text sent by the databroker and returns\nthe prediction. This prediction can also be viewed on the user interface.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d2cfc140-0d61-41fb-86ef-fbe2f192c4d2&revisionId=cfec1423-8627-4669-92a1-ca5497743b70&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6414,7 +6414,7 @@ "status": "draft" }, "name": "INERGY_Heat_Decision", - "description": "

                                                                                  This service is based on a decision support system (DSS) implemented in context of I-NERGY project. The overall vision of I-NERGY is to promote AI in the energy sector by delivering:

                                                                                  • An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.
                                                                                  • Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.

                                                                                  This is a DSS service for for help in the decision on which energy source (for heat generation) use in a Spanish Hospital in hourly basis. The data was provided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital complex have a district heating network. The layout of this district heating network is a ring system composed by two independent rings for heating and cooling. This ring just provides energy for heating and Domestic Hot Water (DHW).

                                                                                  Apart from being a district heating network, this system is complex due to the different production sources used for heating and cooling. In this facility heat, cold and steam are produced by using different sources.

                                                                                  For more information on how to use the service, please see Documents section.

                                                                                  The project leading to this service has received funding from the European Union\u2019s Horizon 2020 research and innovation programme under grant agreement No 101016508

                                                                                  ", + "description": "This service is based on a decision support system (DSS) implemented in\ncontext of I-NERGY project. The overall vision of I-NERGY is to promote AI in\nthe energy sector by delivering:\n\n * An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.\n * Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.\n\nThis is a DSS service for for help in the decision on which energy source (for\nheat generation) use in a Spanish Hospital in hourly basis. The data was\nprovided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital\ncomplex have a district heating network. The layout of this district heating\nnetwork is a ring system composed by two independent rings for heating and\ncooling. This ring just provides energy for heating and Domestic Hot Water\n(DHW).\n\nApart from being a district heating network, this system is complex due to the\ndifferent production sources used for heating and cooling. In this facility\nheat, cold and steam are produced by using different sources.\n\nFor more information on how to use the service, please see Documents section.\n\n _The project leading to this service has received funding from the European\nUnion\u2019s Horizon 2020 research and innovation programme under grant agreement\nNo 101016508_\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d30263f9-9902-407d-b0c8-f389b541e98d&revisionId=97e3b739-a584-4b83-a25b-43e6a0bfaf39&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6457,7 +6457,7 @@ "status": "draft" }, "name": "AudioSpeechToTextEnglish", - "description": "

                                                                                  This model converts an audio segment to english text.

                                                                                  ", + "description": "This model converts an audio segment to english text.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5418d91-8eda-42ff-9348-570e5ba0a110&revisionId=ef9a485f-d31d-4f1b-be03-205d112a6b59&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6500,7 +6500,7 @@ "status": "draft" }, "name": "PII_Detector", - "description": "

                                                                                  PII Detector automatically detects personally identifiable information in unstructured files (documents or images).The face detection model analyzes an image file to find faces. The method returns a list of items, each of which contains the coordinates of a face that was detected in the file.

                                                                                  ", + "description": "PII Detector automatically detects personally identifiable information in\nunstructured files (documents or images).The face detection model analyzes an\nimage file to find faces. The method returns a list of items, each of which\ncontains the coordinates of a face that was detected in the file.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ffc67f-b5ef-42c6-a97b-238546af935a&revisionId=b4adbc99-9aec-4ec1-bb58-abbd40f5b75b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6543,7 +6543,7 @@ "status": "draft" }, "name": "openpose", - "description": "

                                                                                  Openpose is a Real-time multi-person keypoint detection model for body, face, hands, and foot estimation, originally developed by CMU but includes several updates.

                                                                                  Openpose-AI4EU is a component that uses an improved version (Mobilenet v2) and can be included in pipelines built with AI4EU experiments or can run standalone as a dockerized grpc service. For that we include test scripts. The component input is one image and outputs the parameters of all body keypoints detected (index of the skeleton keypoin, x and y coordinates in the image and the confidence score).

                                                                                  ", + "description": "Openpose is a Real-time multi-person keypoint detection model for body, face,\nhands, and foot estimation, originally developed by CMU but includes several\nupdates.\n\nOpenpose-AI4EU is a component that uses an improved version (Mobilenet v2) and\ncan be included in pipelines built with AI4EU experiments or can run\nstandalone as a dockerized grpc service. For that we include test scripts. The\ncomponent input is one image and outputs the parameters of all body keypoints\ndetected (index of the skeleton keypoin, x and y coordinates in the image and\nthe confidence score).\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d7e0ffc8-afcd-42a3-8d8a-01ea395d1303&revisionId=2beda89e-c87e-416c-980e-fe4908f8c87d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6586,7 +6586,7 @@ "status": "draft" }, "name": "TEK_THOR_OPTIMIZATION", - "description": "

                                                                                  AI4EU - THOR THOR solution consists in a Hybrid optimization solution to make the right decision on the amount spare parts in stock, considering past sales and forecasts. The purchase decision considers as input information current stock status, production needs, production forecast, sales forecast, variability Price of stock material and several restriction parameters.

                                                                                  Optimization model. EDA and Genetic search have been implemented to minimizing the total cost of spare parts procurement as well as covering cash-flow restrictions and production needs. This optimization provides as a result the procurement plan

                                                                                  ", + "description": "**AI4EU - THOR **THOR solution consists in a Hybrid optimization solution to\nmake the right decision on the amount spare parts in stock, considering past\nsales and forecasts. The purchase decision considers as input information\ncurrent stock status, production needs, production forecast, sales forecast,\nvariability Price of stock material and several restriction parameters.\n\n **Optimization model**. EDA and Genetic search have been implemented to\nminimizing the total cost of spare parts procurement as well as covering cash-\nflow restrictions and production needs. This optimization provides as a result\nthe procurement plan\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d8745d72-f4c0-49f6-8d20-514e8ad74f86&revisionId=644482dc-abd6-4805-b46a-4cd98192ae1c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -6629,7 +6629,7 @@ "status": "draft" }, "name": "Daisy_software", - "description": "

                                                                                  Even though maintenance of rotating machines, such as motors, pumps and fans through Vibration Monitoring (VM) has been a proven process, it requires an experienced 3rd party service engineer to attend the vessel onboard for vibration data collection and onshore vibration analysis and machinery condition reporting and that attendance onboard in many cases is not feasible.

                                                                                  To give a response to this problem, Daisy allows to apply AI to the large amount of mechanical vibration data of different assets, in order to build computational models that help in the classification and early detection of the faults that the rotating machinery of ships could have.

                                                                                  With this software, the user can load vibration data, apply signal processing techniques and train machine learning (ML) models with no prior programming experience on artificial intelligence (AI) and signal processing knowledge.

                                                                                  ", + "description": "Even though maintenance of rotating machines, such as motors, pumps and fans\nthrough Vibration Monitoring (VM) has been a proven process, it requires an\nexperienced 3rd party service engineer to attend the vessel onboard for\nvibration data collection and onshore vibration analysis and machinery\ncondition reporting and that attendance onboard in many cases is not feasible.\n\nTo give a response to this problem, Daisy allows to apply AI to the large\namount of mechanical vibration data of different assets, in order to build\ncomputational models that help in the classification and early detection of\nthe faults that the rotating machinery of ships could have.\n\nWith this software, the user can load vibration data, apply signal processing\ntechniques and train machine learning (ML) models with no prior programming\nexperience on artificial intelligence (AI) and signal processing knowledge.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d89b25d2-fcf8-48ae-9858-3f32cf047d8d&revisionId=ec81a5a2-0f51-4254-94a7-b80e92c6560a&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6672,7 +6672,7 @@ "status": "draft" }, "name": "Time-prediction-for-example-manufacturing", - "description": "

                                                                                  This module provides a manufacturing time prediction for an example manufacturing process.

                                                                                  The example manufacturing process is a conveyor belt machine, that takes a number of inputs to process. There are 2 processing stations and depending on the input item only either or both of these stations can be used to process the item.

                                                                                  The model was trained on simulated data.

                                                                                  Input

                                                                                  The input is a list of products characterized by a type (3 different colors) and the time of entering the process.

                                                                                  Model

                                                                                  For the prediction a stacked LSTM model with spatial dropout is used.

                                                                                  ", + "description": "This module provides a manufacturing time prediction for an example\nmanufacturing process.\n\nThe example manufacturing process is a conveyor belt machine, that takes a\nnumber of inputs to process. There are 2 processing stations and depending on\nthe input item only either or both of these stations can be used to process\nthe item.\n\nThe model was trained on simulated data.\n\n## Input\n\nThe input is a list of products characterized by a type (3 different colors)\nand the time of entering the process.\n\n## Model\n\nFor the prediction a stacked LSTM model with spatial dropout is used.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&revisionId=6385bf2f-ef0a-4481-a13c-35ef3859a82e&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.2", @@ -6715,7 +6715,7 @@ "status": "draft" }, "name": "Time-prediction-for-example-manufacturing", - "description": "

                                                                                  This module provides a manufacturing time prediction for an example manufacturing process.

                                                                                  The example manufacturing process is a conveyor belt machine, that takes a number of inputs to process. There are 2 processing stations and depending on the input item only either or both of these stations can be used to process the item.

                                                                                  The model was trained on simulated data.

                                                                                  Input

                                                                                  The input is a list of products characterized by a type (3 different colors) and the time of entering the process.

                                                                                  Model

                                                                                  For the prediction a stacked LSTM model with spatial dropout is used.

                                                                                  ", + "description": "This module provides a manufacturing time prediction for an example\nmanufacturing process.\n\nThe example manufacturing process is a conveyor belt machine, that takes a\nnumber of inputs to process. There are 2 processing stations and depending on\nthe input item only either or both of these stations can be used to process\nthe item.\n\nThe model was trained on simulated data.\n\n## Input\n\nThe input is a list of products characterized by a type (3 different colors)\nand the time of entering the process.\n\n## Model\n\nFor the prediction a stacked LSTM model with spatial dropout is used.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&revisionId=6faffcf2-e451-4973-b768-cfa4bf01469b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -6758,7 +6758,7 @@ "status": "draft" }, "name": "Time-prediction-for-example-manufacturing", - "description": "

                                                                                  This module provides a manufacturing time prediction for an example manufacturing process.

                                                                                  The example manufacturing process is a conveyor belt machine, that takes a number of inputs to process. There are 2 processing stations and depending on the input item only either or both of these stations can be used to process the item.

                                                                                  The model was trained on simulated data.

                                                                                  Input

                                                                                  The input is a list of products characterized by a type (3 different colors) and the time of entering the process.

                                                                                  Model

                                                                                  For the prediction a stacked LSTM model with spatial dropout is used.

                                                                                  ", + "description": "This module provides a manufacturing time prediction for an example\nmanufacturing process.\n\nThe example manufacturing process is a conveyor belt machine, that takes a\nnumber of inputs to process. There are 2 processing stations and depending on\nthe input item only either or both of these stations can be used to process\nthe item.\n\nThe model was trained on simulated data.\n\n## Input\n\nThe input is a list of products characterized by a type (3 different colors)\nand the time of entering the process.\n\n## Model\n\nFor the prediction a stacked LSTM model with spatial dropout is used.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&revisionId=d42e8f33-0bb7-407b-b72d-9fde9a276bd7&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6801,7 +6801,7 @@ "status": "draft" }, "name": "lane-detector", - "description": "

                                                                                  # AI4EU Pluto lane-detector

                                                                                  The model runs a lane detector over an image.


                                                                                  The image can be send as bytes, and the result will be the corresponding bytes for the keypoints detected along with the shape of the keypoints array. This helps reconstruct the multidimensional array from the bytes returned. The same for the `results` which is the original image overlayed with the detected keypoints, the `results_shape` provides the shape to reconstruct the array.


                                                                                  ### Example


                                                                                  ```python


                                                                                  import grpc

                                                                                  from PIL import Image

                                                                                  import numpy as np


                                                                                  start_ch = timer()

                                                                                  port_addr = 'localhost:8061'


                                                                                  # open a gRPC channel

                                                                                  channel = grpc.insecure_channel(port_addr)


                                                                                  filepath = \"assets/test.png\"


                                                                                  with open(filepath, 'rb') as f:

                                                                                    content = f.read()


                                                                                  requestPrediction = model_pb2.Features(img=content)


                                                                                  responsePrediction = stub.make_prediction(requestPrediction)


                                                                                  print('The prediction is :', responsePrediction.results)



                                                                                  # Recreate image:


                                                                                  img_shape = [*responsePrediction.results_shape]

                                                                                  np_img = np.frombuffer(responsePrediction.results, dtype=np.uint8).reshape(img_shape)


                                                                                  image = Image.fromarray(np_img).convert('RGB'))

                                                                                  ```


                                                                                  ", + "description": "# AI4EU Pluto lane-detector\n\nThe model runs a lane detector over an image.\n\n \n\nThe image can be send as bytes, and the result will be the corresponding bytes\nfor the keypoints detected along with the shape of the keypoints array. This\nhelps reconstruct the multidimensional array from the bytes returned. The same\nfor the `results` which is the original image overlayed with the detected\nkeypoints, the `results_shape` provides the shape to reconstruct the array.\n\n \n\n### Example\n\n \n\n```python\n\n \n\nimport grpc\n\nfrom PIL import Image\n\nimport numpy as np\n\n \n\nstart_ch = timer()\n\nport_addr = 'localhost:8061'\n\n \n\n# open a gRPC channel\n\nchannel = grpc.insecure_channel(port_addr)\n\n \n\nfilepath = \"assets/test.png\"\n\n \n\nwith open(filepath, 'rb') as f:\n\n content = f.read()\n\n \n\nrequestPrediction = model_pb2.Features(img=content)\n\n \n\nresponsePrediction = stub.make_prediction(requestPrediction)\n\n \n\nprint('The prediction is :', responsePrediction.results)\n\n \n\n \n\n# Recreate image:\n\n \n\nimg_shape = [*responsePrediction.results_shape]\n\nnp_img = np.frombuffer(responsePrediction.results,\ndtype=np.uint8).reshape(img_shape)\n\n \n\nimage = Image.fromarray(np_img).convert('RGB'))\n\n```\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dc34b9b5-3990-41fb-93b7-1a56cf1016cc&revisionId=23c1693f-08f7-4175-9b72-f2d999b24a98&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6844,7 +6844,7 @@ "status": "draft" }, "name": "ai4eu-competences", - "description": "

                                                                                  This tool computes the match between text and concepts from ESCO based on the ESCO model itself and the FastText computation model. Trustworthy is ensured in part by these models and their developers. Given a free text description, and the weight parameters, the service produces a set of matches that represent the corresponding ESCO competence (text and URI) and the similarity measure.


                                                                                  ", + "description": "This tool computes the match between text and concepts from ESCO based on the\nESCO model itself and the FastText computation model. Trustworthy is ensured\nin part by these models and their developers. Given a free text description,\nand the weight parameters, the service produces a set of matches that\nrepresent the corresponding ESCO competence (text and URI) and the similarity\nmeasure.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dc67374a-0a1c-4477-86b2-9db8f0a1faed&revisionId=977872e8-b343-4fa4-b5fe-31afc77c9e05&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6887,7 +6887,7 @@ "status": "draft" }, "name": "medical_notes_classification", - "description": "

                                                                                  Our solution is a NLP classification model fine-tuned on Spanish free text (medical notes) to predict IDC-10 codes. We will start from a transformer model trained on a Spanish corpus, such as BETO, and fine-tune it on general Spanish medical corpus (research paper or anonymized data delivered by Amadix and its partners), with pre-training tasks such as Masked Language Modeling. We will then fine-tune it on free-text data provided by AMADIX (medical notes) in order to predict the target ICD-10 codes.

                                                                                  We will also create a prediction explanation module to our product, in order for the end user to be able to understand the model prediction by visualizing the words in the input free text that push the model toward the predicted ICD-10 code. In order to do that, we will use SHAP values, which have demonstrated their performance for such tasks.

                                                                                  ", + "description": "Our solution is a NLP classification model fine-tuned on Spanish free text\n(medical notes) to predict IDC-10 codes. We will start from a transformer\nmodel trained on a Spanish corpus, such as BETO, and fine-tune it on general\nSpanish medical corpus (research paper or anonymized data delivered by Amadix\nand its partners), with pre-training tasks such as Masked Language Modeling.\nWe will then fine-tune it on free-text data provided by AMADIX (medical notes)\nin order to predict the target ICD-10 codes.\n\nWe will also create a prediction explanation module to our product, in order\nfor the end user to be able to understand the model prediction by visualizing\nthe words in the input free text that push the model toward the predicted\nICD-10 code. In order to do that, we will use SHAP values, which have\ndemonstrated their performance for such tasks.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dd0d9853-2060-44d3-94c7-208a0423609d&revisionId=19a88d46-b9df-47e2-bb53-38ac4fe02eec&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6930,7 +6930,7 @@ "status": "draft" }, "name": "AudioPunctuationEnglish", - "description": "

                                                                                  This model add english punctuation to an audio segment.

                                                                                  ", + "description": "This model add english punctuation to an audio segment.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ddf8de5f-5391-48be-a457-bce86757f8ba&revisionId=1846bb25-f697-4091-ba13-79f0ebb3147c&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -6973,7 +6973,7 @@ "status": "draft" }, "name": "AI4IMS", - "description": "

                                                                                  AI-based Inventory Management System, AI4IMS, integrates simulation, optimization and search algorithms in an advanced inventory management approach for an adaptive and dynamic response.

                                                                                  Firstly, we acquire and cleanse the required data to obtain a reliable dataset including product prices and demand forecasting. As a result of the forecasting, the uncertainty associated with material resources prices and demand is also characterized.

                                                                                  Secondly, we capture the production plant and procurement system in a simulation environment.

                                                                                  Thirdly, a direct randomized sampling method generates alternative scenarios for handling the uncertainty characterized during the forecasting step.

                                                                                  Next, a simulation-based optimization system finds an improved procurement policy within the solution space.

                                                                                  Finally, a variability analysis generates alternative solutions, which are provided for decision-maker support.

                                                                                  ", + "description": "AI-based Inventory Management System, AI4IMS, integrates simulation,\noptimization and search algorithms in an advanced inventory management\napproach for an adaptive and dynamic response.\n\nFirstly, we acquire and cleanse the required data to obtain a reliable dataset\nincluding product prices and demand forecasting. As a result of the\nforecasting, the uncertainty associated with material resources prices and\ndemand is also characterized.\n\nSecondly, we capture the production plant and procurement system in a\nsimulation environment.\n\nThirdly, a direct randomized sampling method generates alternative scenarios\nfor handling the uncertainty characterized during the forecasting step.\n\nNext, a simulation-based optimization system finds an improved procurement\npolicy within the solution space.\n\nFinally, a variability analysis generates alternative solutions, which are\nprovided for decision-maker support.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df50bc0b-e499-4249-b468-b94c0a1cf9fc&revisionId=ee9a1418-2876-414f-982f-84960e811a6d&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -7016,7 +7016,7 @@ "status": "draft" }, "name": "AI-Panel", - "description": "

                                                                                  The following notebook shows a sample model highlighting a preliminary step taken towards supporting pharmaceutical & nutraceutical drug discovery based on qualitative compound properties and customer requirements. The goal is to create a sophisticated predictive model capable of providing suggestions/predictions regarding compounds that have specific therapeutic advantages as well as their interaction with other compounds. The current model utilizes an exemplary dataset that contains for each substance/compound, a set of quantitative features describing the compound's efficacy. It is envisaged that the dataset will comprise multi-modal features such as physiochemical parameters, drug status, regulatory & safety data, and company-internal data. This numeric, textual, and image data is extracted and consolidated from open access chemical dataspaces/databases. This diversity of data will facilitate the design of a predictive model that filters drugs and related compounds based on product development and customer needs.

                                                                                  This is a Tensorflow model created for critical part prediction. Given a set of features that describe the production line characteristics or factory conditions, the model we have built predicts whether a particular component part is critical or not to the supply chain. The end goal is the optimization of the stock management.

                                                                                  ", + "description": "This is a Tensorflow model created for critical part prediction. Given a set\nof features that describe the production line characteristics or factory\nconditions, the model we have built predicts whether a particular component\npart is critical or not to the supply chain. The end goal is the optimization\nof the stock management.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e1a73166-03a3-4b93-a785-28d0591d7271&revisionId=3ef45d82-30f5-4f98-b9c8-44afe80b44a9&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -7102,7 +7102,7 @@ "status": "draft" }, "name": "EntityRecognizer", - "description": "

                                                                                  The entity recognizer is a deep learning-based solution that takes a text document as input and returns a list of instances of pre-defined entities (Person, Location, Organization, Miscellaneous). 


                                                                                  It uses bidirectional LSTM networks to generate informative word representations that capture the contextual dependencies between words in a sentence. Additionally, a CRF layer is added on top for a higher tagging accuracy. The models have been built using FlairNLP, a PyTorch-based NLP framework. 


                                                                                  This tool includes a multilingual NER model supporting English, German and Dutch.

                                                                                  ", + "description": "The entity recognizer is a deep learning-based solution that takes a text\ndocument as input and returns a list of instances of pre-defined entities\n(Person, Location, Organization, Miscellaneous).\n\n \n\nIt uses bidirectional LSTM networks to generate informative word\nrepresentations that capture the contextual dependencies between words in a\nsentence. Additionally, a CRF layer is added on top for a higher tagging\naccuracy. The models have been built using FlairNLP, a PyTorch-based NLP\nframework.\n\n \n\nThis tool includes a multilingual NER model supporting English, German and\nDutch.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&revisionId=41df686d-9fa3-4104-996f-fa926332adbb&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -7188,7 +7188,7 @@ "status": "draft" }, "name": "EntityRecognizer", - "description": "

                                                                                  The entity recognizer is a deep learning-based solution that takes a text document as input and returns a list of instances of pre-defined entities (Person, Location, Organization, Miscellaneous). 


                                                                                  It uses bidirectional LSTM networks to generate informative word representations that capture the contextual dependencies between words in a sentence. Additionally, a CRF layer is added on top for a higher tagging accuracy. The models have been built using FlairNLP, a PyTorch-based NLP framework. 


                                                                                  This tool includes a multilingual NER model supporting English, German and Dutch. 

                                                                                  ", + "description": "The entity recognizer is a deep learning-based solution that takes a text\ndocument as input and returns a list of instances of pre-defined entities\n(Person, Location, Organization, Miscellaneous).\n\n \n\nIt uses bidirectional LSTM networks to generate informative word\nrepresentations that capture the contextual dependencies between words in a\nsentence. Additionally, a CRF layer is added on top for a higher tagging\naccuracy. The models have been built using FlairNLP, a PyTorch-based NLP\nframework.\n\n \n\nThis tool includes a multilingual NER model supporting English, German and\nDutch.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&revisionId=f7447500-0c8d-4ca7-be7e-24ce3fefd144&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -7231,7 +7231,7 @@ "status": "draft" }, "name": "Explanations4AdultClassification", - "description": "

                                                                                  This tool provides predictions and explanations for the classification of instances of Adult Census dataset. The explanation method is called LionForests, while the prediction is based on a random forests model. The corresponding paper supporting this technique can be found here: http://ceur-ws.org/Vol-2659/mollas.pdf in Proceedings of the First International Workshop on New Foundations for Human-Centered AI (NeHuAI) co-located with 24th European Conference on Artificial Intelligence (ECAI 2020).

                                                                                  ", + "description": "This tool provides predictions and explanations for the classification of\ninstances of [Adult Census](https://archive.ics.uci.edu/ml/datasets/adult)\ndataset. The explanation method is called LionForests, while the prediction is\nbased on a random forests model. The corresponding paper supporting this\ntechnique can be found here: http://ceur-ws.org/Vol-2659/mollas.pdf in\nProceedings of the First International Workshop on New Foundations for Human-\nCentered AI (NeHuAI) co-located with 24th European Conference on Artificial\nIntelligence (ECAI 2020).\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4208fe5-3b5c-4fe0-9cff-c28b828db530&revisionId=5d31e250-36f3-4033-9ab9-17a9213f96ae&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -7274,7 +7274,7 @@ "status": "draft" }, "name": "edm-agent", - "description": "

                                                                                  EDM agent component is an RL based agent that controls the EDM environment (AI4EU component edm-env) based on the observed voltage and frequencies. It is based on the PPO algorithm and was trained using the `train.py` script that is available in the github repository of the component: https://github.com/threethirds/edm.

                                                                                  This component has a user interface via 8062 port which can be used to run a small demo control scenario. It also has a protobuf API via 8061 port in order to connect to the EDM environment.


                                                                                  ", + "description": "EDM agent component is an RL based agent that controls the EDM environment\n(AI4EU component edm-env) based on the observed voltage and frequencies. It is\nbased on the PPO algorithm and was trained using the `train.py` script that is\navailable in the github repository of the component:\nhttps://github.com/threethirds/edm.\n\nThis component has a user interface via 8062 port which can be used to run a\nsmall demo control scenario. It also has a protobuf API via 8061 port in order\nto connect to the EDM environment.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&revisionId=05ea3f80-92b1-4ffc-b1ab-1b3bb38cee7b&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.2", @@ -7403,7 +7403,7 @@ "status": "draft" }, "name": "ai4eu-security-pilot-model", - "description": "

                                                                                  This container provides a model for Thread Prediction in Network Traffic.

                                                                                  Therefore, this container can detect malicious traffic.


                                                                                  This container can be trained with the training interface and predict traffic with the prediction interface.

                                                                                  This container provides two inputs and one output.

                                                                                  The training input is to provide training data. You can connect this input with the ai4eu-security-databroker training output. After starting the training the data will be transfered to train the model.

                                                                                  The second input is the prediction input. You can connect this input with the ai4eu-security-databroker prediction output. After starting the model you can see the prediction results in the prediction output. There, you get a number between 0 and 1. According to your data you have to set a threshold to specify if the data are fraud or benign. The threshold can be found in the evaluation container of the model.

                                                                                  ", + "description": "This container provides a model for Thread Prediction in Network Traffic.\n\nTherefore, this container can detect malicious traffic.\n\n \n\nThis container can be trained with the training interface and predict traffic\nwith the prediction interface.\n\nThis container provides two inputs and one output.\n\nThe training input is to provide training data. You can connect this input\nwith the ai4eu-security-databroker training output. After starting the\ntraining the data will be transfered to train the model.\n\nThe second input is the prediction input. You can connect this input with the\nai4eu-security-databroker prediction output. After starting the model you can\nsee the prediction results in the prediction output. There, you get a number\nbetween 0 and 1. According to your data you have to set a threshold to specify\nif the data are fraud or benign. The threshold can be found in the evaluation\ncontainer of the model.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e8c82055-1afc-444c-9c21-3d64ea601b28&revisionId=0b99f79d-5e7c-4b0f-850f-bae2b6e710ce&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -7446,7 +7446,7 @@ "status": "draft" }, "name": "TransformersGraphAlgorithmsAgainstColonCancer", - "description": "

                                                                                  Automatic classification of ICD-10 codes from free text and medical records based on BERT model. The application of NLP (textual information extraction) tasks in combination with other numerical biomarkers will involve that the model will improve in accuracy and a greater number of cancer patients will be detected earlier, improving the future healthcare system.

                                                                                  Moreover, the automatic identification or classification in ICD-10 codes from free text not only helps to improve the predictive model but also avoids the manual assigning codes that is expensive, time consuming and error prone.


                                                                                  ", + "description": "Automatic classification of ICD-10 codes from free text and medical records\nbased on BERT model. The application of NLP (textual information extraction)\ntasks in combination with other numerical biomarkers will involve that the\nmodel will improve in accuracy and a greater number of cancer patients will be\ndetected earlier, improving the future healthcare system.\n\nMoreover, the automatic identification or classification in ICD-10 codes from\nfree text not only helps to improve the predictive model but also avoids the\nmanual assigning codes that is expensive, time consuming and error prone.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=0859679b-e1ba-4d89-8093-2313212216af&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.1", @@ -7489,7 +7489,7 @@ "status": "draft" }, "name": "TransformersGraphAlgorithmsAgainstColonCancer", - "description": "

                                                                                  Automatic classification of ICD-10 codes from medical records based on Transformers. The application of NLP tasks in combination with other numerical biomarkers will involve that the model will improve in accuracy and a greater number of cancer patients will be detected earlier, improving the future healthcare system.

                                                                                  Moreover, the automatic identification of ICD-10 codes from free text not only helps to improve the predictive model but also avoids the manual assigning codes that is expensive, time consuming and error prone.

                                                                                  ", + "description": "Automatic classification of ICD-10 codes from medical records based on\nTransformers. The application of NLP tasks in combination with other numerical\nbiomarkers will involve that the model will improve in accuracy and a greater\nnumber of cancer patients will be detected earlier, improving the future\nhealthcare system.\n\nMoreover, the automatic identification of ICD-10 codes from free text not only\nhelps to improve the predictive model but also avoids the manual assigning\ncodes that is expensive, time consuming and error prone.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=50c071a6-aba1-4f90-97a3-2ab1108a0d22&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.5", @@ -7618,7 +7618,7 @@ "status": "draft" }, "name": "TransformersGraphAlgorithmsAgainstColonCancer", - "description": "

                                                                                  Automatic classification of ICD-10 codes from free text and medical records based on BERT model. The application of NLP (textual information extraction) tasks in combination with other numerical biomarkers will involve that the model will improve in accuracy and a greater number of cancer patients will be detected earlier, improving the future healthcare system.

                                                                                  Moreover, the automatic identification or classification in ICD-10 codes from free text not only helps to improve the predictive model but also avoids the manual assigning codes that is expensive, time consuming and error prone.


                                                                                  ", + "description": "Automatic classification of ICD-10 codes from free text and medical records\nbased on BERT model. The application of NLP (textual information extraction)\ntasks in combination with other numerical biomarkers will involve that the\nmodel will improve in accuracy and a greater number of cancer patients will be\ndetected earlier, improving the future healthcare system.\n\nMoreover, the automatic identification or classification in ICD-10 codes from\nfree text not only helps to improve the predictive model but also avoids the\nmanual assigning codes that is expensive, time consuming and error prone.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=94e9caf4-0568-4125-b2e2-03872507d1d0&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.2", @@ -7704,7 +7704,7 @@ "status": "draft" }, "name": "TEK_THOR_FORECAST", - "description": "

                                                                                  AI4EU - THOR THOR solution consists in a Hybrid optimization solution to make the right decision on the amount spare parts in stock, considering past sales and forecasts. The purchase decision considers as input information current stock status, production needs, production forecast, sales forecast, variability Price of stock material and several restriction parameters.

                                                                                  Forecast. An auto-adjustable predictive model forecasts the short-term expected sales of end products as well as the expected price evolution of spared parts.

                                                                                  ", + "description": "**AI4EU - THOR **THOR solution consists in a Hybrid optimization solution to\nmake the right decision on the amount spare parts in stock, considering past\nsales and forecasts. The purchase decision considers as input information\ncurrent stock status, production needs, production forecast, sales forecast,\nvariability Price of stock material and several restriction parameters.\n\n **Forecast**. An auto-adjustable predictive model forecasts the short-term\nexpected sales of end products as well as the expected price evolution of\nspared parts.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ebcb6fba-d8f4-4010-a6b2-8386040c9030&revisionId=afc31a74-dcad-4b4e-a691-b31750478365&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.4", @@ -7747,7 +7747,7 @@ "status": "draft" }, "name": "Idiap_BEAT_Face_Recognition_-_Eigenfaces_trained_on_ATNT", - "description": "

                                                                                  A biometrics algorithm that compares a probe image to a set of template images and outputs a comparison score.

                                                                                  This algorithm was trained on the ATNT database and reproduces the EigenFaces face recognition baseline.

                                                                                  The input images must be gray-scale and of the size of 92x92. The training data comes from the BOB atnt database package.

                                                                                  Reference experiment on the BEAT platform is amohammadi/amohammadi/atnt_eigenfaces/1/atnt1.




                                                                                  ", + "description": "A biometrics algorithm that compares a probe image to a set of template images\nand outputs a comparison score.\n\nThis algorithm was trained on the ATNT database and reproduces the EigenFaces\nface recognition baseline.\n\nThe input images must be gray-scale and of the size of 92x92. The training\ndata comes from the [BOB atnt database\n](https://www.idiap.ch/software/bob/docs/bob/bob.db.atnt/master/index.html)package.\n\nReference experiment on the BEAT platform is\n[amohammadi/amohammadi/atnt_eigenfaces/1/atnt1](https://www.idiap.ch/software/beat/platform/experiments/amohammadi/amohammadi/atnt_eigenfaces/1/atnt1/).\n\n \n\n \n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ec640afb-9d7a-499d-977c-bceb435acff7&revisionId=2043f0e1-b332-499d-8472-c946faccd8c2&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -7790,7 +7790,7 @@ "status": "draft" }, "name": "AudioPunctuationGerman", - "description": "

                                                                                  This model adds German punctuation to an audio mining pipeline.

                                                                                  ", + "description": "This model adds German punctuation to an audio mining pipeline.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ed1a8947-f102-4786-9dbb-412568317a3f&revisionId=ace9dada-2a60-4530-b264-f4edb8511ca8&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -7833,7 +7833,7 @@ "status": "draft" }, "name": "atcrecognize", - "description": "

                                                                                  Atcrecognize extracts text from images that contain label tags. Using its underlying deep learning technology, atcrecognize enhances the image, removes the unnecessary parts of the image, and feeds into the ocr model that extracts the text with more precision. The app is developed and used for the H2020 project AI-PROFICIENT.

                                                                                  ", + "description": "Atcrecognize extracts text from images that contain label tags. Using its\nunderlying deep learning technology, atcrecognize enhances the image, removes\nthe unnecessary parts of the image, and feeds into the ocr model that extracts\nthe text with more precision. The app is developed and used for the H2020\nproject AI-PROFICIENT.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f24a9c17-5f4f-4bec-b0f5-5fd20e4669a7&revisionId=af42ba9b-ec9e-4f37-8a46-e581c9f3d811&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -7876,7 +7876,7 @@ "status": "draft" }, "name": "pomdp-ir", - "description": "

                                                                                  Partially Observable Markov Decision Processes with Information Rewards (POMDP-IR) is a framework to compute policies for autonomous agents with the goal of gathering information about particular features on the environment. SymbolicPerseus-IR extends one of the most knowns POMDP solvers to include Information Rewards. It lets you compute and test policies for a given input environment model.


                                                                                  Check the github repository of the resource for a more detailed overview: https://github.com/tsveiga/ai4eu-pomdp-ir

                                                                                  ", + "description": "Partially Observable Markov Decision Processes with Information Rewards\n(POMDP-IR) is a framework to compute policies for autonomous agents with the\ngoal of gathering information about particular features on the environment.\nSymbolicPerseus-IR extends one of the most knowns POMDP solvers to include\nInformation Rewards. It lets you compute and test policies for a given input\nenvironment model.\n\n \n\nCheck the github repository of the resource for a more detailed overview:\nhttps://github.com/tsveiga/ai4eu-pomdp-ir\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f7fc6fdd-3bc0-4443-a28d-dc08109d0ffd&revisionId=d5743c46-5b96-4d8a-90be-fdeb5e248f45&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -7962,7 +7962,7 @@ "status": "draft" }, "name": "AI4Agri-qualitypredictor", - "description": "

                                                                                  This component generates a set of models in order to predict one grape yield and three different grape quality indicators related to the AI4EU agriculture pilot.




                                                                                  To do that, the current component connects to the AI4EU agriculture pilot Knowledge graph and retrieves all the required data (according to the dates and parcel information provided in the prediction request and the target variable requested) to generate different models that will be evaluated and used to provide the best prediction possible.


                                                                                  ", + "description": "This component generates a set of models in order to predict one grape yield\nand three different grape quality indicators related to the [AI4EU agriculture\npilot](https://www.ai4eu.eu/ai4agriculture).\n\n \n\n \n\n \n\nTo do that, the current component connects to the AI4EU agriculture pilot\nKnowledge graph and retrieves all the required data (according to the dates\nand parcel information provided in the prediction request and the target\nvariable requested) to generate different models that will be evaluated and\nused to provide the best prediction possible.\n\n \n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fb06bc01-1ac9-4a7b-bcdc-cae78e970796&revisionId=d34ae15a-d648-4c34-ae31-7f5ca2abc7a2&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -8005,7 +8005,7 @@ "status": "draft" }, "name": "ai4eu-lexatexer-pump-rul", - "description": "

                                                                                  Provides access to a REST API which consumes a pumps quality assurance data and delivers failure probabilities and MTTF densities.

                                                                                  ", + "description": "Provides access to a REST API which consumes a pumps quality assurance data\nand delivers failure probabilities and MTTF densities.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fbbe4dff-5eaa-4171-b15a-d8035a79a035&revisionId=7bcfcec0-10e7-4c4f-af17-be65b435c5b3&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", @@ -8048,7 +8048,7 @@ "status": "draft" }, "name": "pira-analyzer", - "description": "

                                                                                  This component leverages AI technologies for information extraction to identify semantically-relevant structured information from semi-/un-structured documents. This information is classified as personally identifiable information (PII) entities or not by leveraging named entity recognition. Identified PII entities are further classified into different categories depending on their nature.

                                                                                  ", + "description": "This component leverages AI technologies for information extraction to\nidentify semantically-relevant structured information from semi-/un-structured\ndocuments. This information is classified as personally identifiable\ninformation (PII) entities or not by leveraging named entity recognition.\nIdentified PII entities are further classified into different categories\ndepending on their nature.\n\n", "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fe6bca3a-9583-4f6c-993a-ec104226a679&revisionId=353595da-df92-4d10-8690-d6e1665040af&parentUrl=marketplace#md-model-detail-template", "date_published": "2023-09-01T15:15:00.000", "version": "1.0.0", From 78024964c8262da2f139c3227dd1024bff7f6f8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 13 Sep 2023 09:02:54 +0200 Subject: [PATCH 29/79] platform added to mappings --- Dockerfile | 1 + es/setup/dataset.json | 1 - es/setup/experiment.json | 1 - es/setup/ml_model.json | 1 - es/setup/publication.json | 1 - es/setup/service.json | 1 - src/main.py | 4 +++- src/routers/search_router.py | 30 +++++++++++++++----------- src/routers/search_routers/__init__.py | 11 ++++++++++ 9 files changed, 33 insertions(+), 18 deletions(-) diff --git a/Dockerfile b/Dockerfile index d91d001b..2bbc2266 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,6 +14,7 @@ ENV PATH="${PATH}:/home/apprunner/.local/bin" # Install python packages globally, so that it can also be used from cron dockers (running as root) COPY ./pyproject.toml /app/pyproject.toml RUN pip install . +RUN pip install elasticsearch # This can be overwritten by a live volume, to support live code changes COPY ./src /app diff --git a/es/setup/dataset.json b/es/setup/dataset.json index e779ead0..395f68c8 100644 --- a/es/setup/dataset.json +++ b/es/setup/dataset.json @@ -83,7 +83,6 @@ }, "platform" : { "type" : "text", - "index" : false, "fields" : { "keyword" : { "type" : "keyword", diff --git a/es/setup/experiment.json b/es/setup/experiment.json index 6673fe33..4787149b 100644 --- a/es/setup/experiment.json +++ b/es/setup/experiment.json @@ -84,7 +84,6 @@ }, "platform" : { "type" : "text", - "index" : false, "fields" : { "keyword" : { "type" : "keyword", diff --git a/es/setup/ml_model.json b/es/setup/ml_model.json index 381a182e..d847b674 100644 --- a/es/setup/ml_model.json +++ b/es/setup/ml_model.json @@ -74,7 +74,6 @@ }, "platform" : { "type" : "text", - "index" : false, "fields" : { "keyword" : { "type" : "keyword", diff --git a/es/setup/publication.json b/es/setup/publication.json index 4a4641f2..5d07ad11 100644 --- a/es/setup/publication.json +++ b/es/setup/publication.json @@ -92,7 +92,6 @@ }, "platform" : { "type" : "text", - "index" : false, "fields" : { "keyword" : { "type" : "keyword", diff --git a/es/setup/service.json b/es/setup/service.json index 22b57190..0fc18eaf 100644 --- a/es/setup/service.json +++ b/es/setup/service.json @@ -46,7 +46,6 @@ }, "platform" : { "type" : "text", - "index" : false, "fields" : { "keyword" : { "type" : "keyword", diff --git a/src/main.py b/src/main.py index 73c565dc..c066f1fb 100644 --- a/src/main.py +++ b/src/main.py @@ -19,7 +19,8 @@ from database.model.platform.platform import Platform from database.model.platform.platform_names import PlatformName from database.setup import sqlmodel_engine -from routers import resource_routers, parent_routers, enum_routers +from routers import (resource_routers, parent_routers, enum_routers, + search_routers) def _parse_args() -> argparse.Namespace: @@ -70,6 +71,7 @@ def test_authorization(user: Json = Depends(get_current_user)) -> dict: + routers.other_routers + parent_routers.router_list + enum_routers.router_list + + search_routers.router_list ): app.include_router(router.create(engine, url_prefix)) diff --git a/src/routers/search_router.py b/src/routers/search_router.py index a6b5c920..a64e4ed1 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -8,10 +8,10 @@ from sqlalchemy.engine import Engine from starlette import status -from authentication import get_current_user, has_role +from authentication import get_current_user#, has_role from database.model.concept.aiod_entry import AIoDEntryRead from database.model.resource_read_and_create import resource_read -from routers.router import AIoDRouter +#from routers.router import AIoDRouter SORT = {"identifier": "asc"} LIMIT_MAX = 1000 @@ -25,7 +25,7 @@ class SearchResult(BaseModel, Generic[RESOURCE]): next_offset: list | None -class SearchRouter(AIoDRouter, Generic[RESOURCE], abc.ABC): +class SearchRouter(Generic[RESOURCE], abc.ABC): """ Providing search functionality in ElasticSearch """ @@ -64,7 +64,7 @@ def search( name: str = "", limit: int = 10, offset: str | None = None, # TODO: this should not be a string - user: dict = Depends(get_current_user), + user: dict = {Depends(get_current_user)}, ) -> SearchResult[read_class]: # type: ignore f""" Search for {self.resource_name_plural}. @@ -75,17 +75,23 @@ def search( detail=f"The limit should be maximum {LIMIT_MAX}. If you want more results, " f"use pagination.", ) - - if not has_role(user, os.getenv("ES_ROLE")): - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You do not have permission to search Aiod resources.", - ) - + +# if not has_role(user, os.getenv("ES_ROLE")): +# raise HTTPException( +# status_code=status.HTTP_403_FORBIDDEN, +# detail="You do not have permission to search Aiod resources.", +# ) + query = {"bool": {"must": {"match": {"name": name}}}} - result = self.client.search( + # Just to test + client = Elasticsearch("http://localhost:9200", + basic_auth=("elastic", "changeme")) + result = client.search( index=self.es_index, query=query, size=limit, sort=SORT, search_after=offset ) +# result = self.client.search( +# index=self.es_index, query=query, size=limit, sort=SORT, search_after=offset +# ) total_hits = result["hits"]["total"]["value"] resources: list[read_class] = [ # type: ignore diff --git a/src/routers/search_routers/__init__.py b/src/routers/search_routers/__init__.py index e69de29b..a8be1c6e 100644 --- a/src/routers/search_routers/__init__.py +++ b/src/routers/search_routers/__init__.py @@ -0,0 +1,11 @@ +from database.model.dataset.dataset import Dataset +from database.model.knowledge_asset.publication import Publication +from .search_router_datasets import SearchRouterDatasets +from .search_router_publications import SearchRouterPublications +from ..search_router import SearchRouter + +router_list: list[SearchRouter] = [ + SearchRouterDatasets(Dataset), + SearchRouterPublications(Publication) +] + From fdee2b7d124f17b4b07f5fb1a836f8f9c238fce8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 13 Sep 2023 09:16:51 +0200 Subject: [PATCH 30/79] elasticsearch query example completed --- es/elasticsearch_query.py | 53 +++++++++++++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 8 deletions(-) diff --git a/es/elasticsearch_query.py b/es/elasticsearch_query.py index 6b93c1df..5a1b7a9d 100755 --- a/es/elasticsearch_query.py +++ b/es/elasticsearch_query.py @@ -23,14 +23,51 @@ def main(index, search_concept, platforms): es_client = Elasticsearch("http://localhost:9200", basic_auth=(ELASTIC_USER, elastic_password)) # Prepare query - platform_identifiers = [{"match": {"platform_identifier": p}} for p in platforms] - query = { - "bool": { - "must": {"match": {"title": search_concept}}, - "must": {"bool": {"should": platform_identifiers}}, + # ------------------------------------------------------------------------- + + # Search fields corresponding to the indices + match_fields = ['name', 'description'] + if ('dataset' in index) or ('publication' in index): + match_fields.append('issn') + if 'publication' in index: + match_fields.append('isbn') + if 'service' in index: + match_fields.append('slogan') + + # Matches of the search concept for each field + query_matches = [{'match': {f: search_concept}} for f in match_fields] + + if platforms: + + # Matches of the platform field for each selected platform + platform_matches = [{'match': {'platform': p}} for p in platforms] + + # Query must match platform and search concept on at least one field + query = { + 'bool': { + 'must': { + 'bool': { + 'should': platform_matches, + 'minimum_should_match': 1 + } + }, + 'should': query_matches, + 'minimum_should_match': 1 + } } - } - + + else: + + # Query must match search concept on at least one field + query = { + 'bool': { + 'should': query_matches, + 'minimum_should_match': 1 + } + } + + # ------------------------------------------------------------------------- + # Perform first search result = es_client.search(index=index, query=query, size=SIZE, sort=SORT) @@ -57,5 +94,5 @@ def main(index, search_concept, platforms): if __name__ == "__main__": index = ["publication"] # List of assets search_concept = "in" # Search concept - platforms = ["2", "4", "9"] # List of platforms + platforms = ["example", "ai4experiments"] # List of platforms main(index, search_concept, platforms) From 2c3e9d7cb773b151aaabe3d7c6082b1991f11d05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 13 Sep 2023 16:53:41 +0200 Subject: [PATCH 31/79] First version of search service working --- es/elasticsearch_query.py | 10 +- src/routers/search_router.py | 106 +++++++++++------- src/routers/search_routers/__init__.py | 22 +++- .../search_routers/search_router_datasets.py | 4 + .../search_router_experiments.py | 20 ++++ .../search_routers/search_router_ml_models.py | 20 ++++ .../search_router_publications.py | 6 +- .../search_routers/search_router_services.py | 20 ++++ 8 files changed, 158 insertions(+), 50 deletions(-) create mode 100644 src/routers/search_routers/search_router_experiments.py create mode 100644 src/routers/search_routers/search_router_ml_models.py create mode 100644 src/routers/search_routers/search_router_services.py diff --git a/es/elasticsearch_query.py b/es/elasticsearch_query.py index 5a1b7a9d..7b4204aa 100755 --- a/es/elasticsearch_query.py +++ b/es/elasticsearch_query.py @@ -5,11 +5,9 @@ from elasticsearch import Elasticsearch # Global parameters -ELASTIC_USER = "elastic" SIZE = 2 SORT = {"identifier": "asc"} - def main(index, search_concept, platforms): # Get elasticsearch password @@ -17,10 +15,12 @@ def main(index, search_concept, platforms): for line in f: if "ES_PASSWORD" in line: elastic_password = line.split("=")[1][:-1] - break - + if "ES_USER" in line: + elastic_user = line.split("=")[1][:-1] + # Generate client - es_client = Elasticsearch("http://localhost:9200", basic_auth=(ELASTIC_USER, elastic_password)) + es_client = Elasticsearch("http://localhost:9200", + basic_auth=(elastic_user, elastic_password)) # Prepare query # ------------------------------------------------------------------------- diff --git a/src/routers/search_router.py b/src/routers/search_router.py index a64e4ed1..0255aac8 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -1,9 +1,9 @@ import abc import os -from typing import TypeVar, Generic, Any, Type +from typing import TypeVar, Generic, Any, Type, Annotated from elasticsearch import Elasticsearch -from fastapi import APIRouter, Depends, HTTPException +from fastapi import APIRouter, Depends, HTTPException, Query from pydantic import BaseModel from sqlalchemy.engine import Engine from starlette import status @@ -11,7 +11,6 @@ from authentication import get_current_user#, has_role from database.model.concept.aiod_entry import AIoDEntryRead from database.model.resource_read_and_create import resource_read -#from routers.router import AIoDRouter SORT = {"identifier": "asc"} LIMIT_MAX = 1000 @@ -29,86 +28,115 @@ class SearchRouter(Generic[RESOURCE], abc.ABC): """ Providing search functionality in ElasticSearch """ - + def __init__(self, client: Elasticsearch): self.client: Elasticsearch = client - + @property @abc.abstractmethod def es_index(self) -> str: """The name of the elasticsearch index""" - + @property @abc.abstractmethod def resource_name_plural(self) -> str: """The name of the resource (plural)""" - + @property def key_translations(self) -> dict[str, str]: - """If an attribute is called differently in elasticsearch than in our metadata model, - you can define a translation dictionary here. The key should be the name in - elasticsearch, the value the name in our data model.""" + """If an attribute is called differently in elasticsearch than in our + metadata model, you can define a translation dictionary here. The key + should be the name in elasticsearch, the value the name in our data + model.""" return {} - + @property @abc.abstractmethod def resource_class(self) -> RESOURCE: """The resource class""" - + def create(self, engine: Engine, url_prefix: str) -> APIRouter: router = APIRouter() read_class = resource_read(self.resource_class) # type: ignore - - @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", tags=["search"]) + + @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", + tags=["search"]) def search( - name: str = "", + platforms: Annotated[list[str] | None, Query()] = None, + search_query: str = "", limit: int = 10, - offset: str | None = None, # TODO: this should not be a string - user: dict = {Depends(get_current_user)}, + offset: Annotated[list[str] | None, Query()] = None ) -> SearchResult[read_class]: # type: ignore f""" Search for {self.resource_name_plural}. """ + if limit > LIMIT_MAX: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The limit should be maximum {LIMIT_MAX}. If you want more results, " - f"use pagination.", + detail=f"The limit should be maximum {LIMIT_MAX}. " + f"If you want more results, use pagination." ) + # Prepare query + # ----------------------------------------------------------------- -# if not has_role(user, os.getenv("ES_ROLE")): -# raise HTTPException( -# status_code=status.HTTP_403_FORBIDDEN, -# detail="You do not have permission to search Aiod resources.", -# ) + # Matches of the search concept for each field + query_matches = [{'match': {f: search_query}} + for f in self.match_fields] + + if platforms: + + # Matches of the platform field for each selected platform + platform_matches = [{'match': {'platform': p}} + for p in platforms] + + # Must match platform and search query on at least one field + query = { + 'bool': { + 'must': { + 'bool': { + 'should': platform_matches, + 'minimum_should_match': 1 + } + }, + 'should': query_matches, + 'minimum_should_match': 1 + } + } + + else: + + # Must match search concept on at least one field + query = { + 'bool': { + 'should': query_matches, + 'minimum_should_match': 1 + } + } + + # ----------------------------------------------------------------- + + result = self.client.search(index=self.es_index, query=query, + size=limit, sort=SORT, + search_after=offset) - query = {"bool": {"must": {"match": {"name": name}}}} - # Just to test - client = Elasticsearch("http://localhost:9200", - basic_auth=("elastic", "changeme")) - result = client.search( - index=self.es_index, query=query, size=limit, sort=SORT, search_after=offset - ) -# result = self.client.search( -# index=self.es_index, query=query, size=limit, sort=SORT, search_after=offset -# ) - total_hits = result["hits"]["total"]["value"] resources: list[read_class] = [ # type: ignore self._cast_resource(read_class, hit["_source"]) # type: ignore for hit in result["hits"]["hits"] ] next_offset = ( - result["hits"]["hits"][-1]["sort"] if len(result["hits"]["hits"]) > 0 else None + result["hits"]["hits"][-1]["sort"] + if len(result["hits"]["hits"]) > 0 else None ) return SearchResult[read_class]( # type: ignore total_hits=total_hits, next_offset=next_offset, - resources=resources, + resources=resources ) - + return router - + def _cast_resource( self, resource_class: RESOURCE, resource_dict: dict[str, Any] ) -> Type[RESOURCE]: diff --git a/src/routers/search_routers/__init__.py b/src/routers/search_routers/__init__.py index a8be1c6e..37f4fa35 100644 --- a/src/routers/search_routers/__init__.py +++ b/src/routers/search_routers/__init__.py @@ -1,11 +1,27 @@ +import os +from elasticsearch import Elasticsearch + from database.model.dataset.dataset import Dataset +from database.model.models_and_experiments.experiment import Experiment +from database.model.models_and_experiments.ml_model import MLModel from database.model.knowledge_asset.publication import Publication +from database.model.service.service import Service from .search_router_datasets import SearchRouterDatasets +from .search_router_experiments import SearchRouterExperiments +from .search_router_ml_models import SearchRouterMLModels from .search_router_publications import SearchRouterPublications +from .search_router_services import SearchRouterServices from ..search_router import SearchRouter +# Elasticsearch client +user = os.getenv("ES_USER") +pw = os.getenv("ES_PASSWORD") +es_client = Elasticsearch("http://elasticsearch:9200", basic_auth=(user, pw)) + router_list: list[SearchRouter] = [ - SearchRouterDatasets(Dataset), - SearchRouterPublications(Publication) + SearchRouterDatasets(client=es_client), + SearchRouterExperiments(client=es_client), + SearchRouterMLModels(client=es_client), + SearchRouterPublications(client=es_client), + SearchRouterServices(client=es_client) ] - diff --git a/src/routers/search_routers/search_router_datasets.py b/src/routers/search_routers/search_router_datasets.py index b0b2ed43..e30c404d 100644 --- a/src/routers/search_routers/search_router_datasets.py +++ b/src/routers/search_routers/search_router_datasets.py @@ -14,3 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return Dataset + + @property + def match_fields(self): + return ['name', 'description', 'issn'] diff --git a/src/routers/search_routers/search_router_experiments.py b/src/routers/search_routers/search_router_experiments.py new file mode 100644 index 00000000..e255b2db --- /dev/null +++ b/src/routers/search_routers/search_router_experiments.py @@ -0,0 +1,20 @@ +from database.model.models_and_experiments.experiment import Experiment +from routers.search_router import SearchRouter + + +class SearchRouterExperiments(SearchRouter[Experiment]): + @property + def es_index(self) -> str: + return "experiment" + + @property + def resource_name_plural(self) -> str: + return "experiments" + + @property + def resource_class(self): + return Experiment + + @property + def match_fields(self): + return ['name', 'description'] diff --git a/src/routers/search_routers/search_router_ml_models.py b/src/routers/search_routers/search_router_ml_models.py new file mode 100644 index 00000000..a172b740 --- /dev/null +++ b/src/routers/search_routers/search_router_ml_models.py @@ -0,0 +1,20 @@ +from database.model.models_and_experiments.ml_model import MLModel +from routers.search_router import SearchRouter + + +class SearchRouterMLModels(SearchRouter[MLModel]): + @property + def es_index(self) -> str: + return "ml_model" + + @property + def resource_name_plural(self) -> str: + return "ml_models" + + @property + def resource_class(self): + return MLModel + + @property + def match_fields(self): + return ['name', 'description'] diff --git a/src/routers/search_routers/search_router_publications.py b/src/routers/search_routers/search_router_publications.py index bf213445..5af2eac8 100644 --- a/src/routers/search_routers/search_router_publications.py +++ b/src/routers/search_routers/search_router_publications.py @@ -14,7 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return Publication - + @property - def key_translations(self) -> dict: - return {"publication_type": "type"} + def match_fields(self): + return ['name', 'description', 'issn', 'isbn'] diff --git a/src/routers/search_routers/search_router_services.py b/src/routers/search_routers/search_router_services.py new file mode 100644 index 00000000..cbcef5da --- /dev/null +++ b/src/routers/search_routers/search_router_services.py @@ -0,0 +1,20 @@ +from database.model.service.service import Service +from routers.search_router import SearchRouter + + +class SearchRouterServices(SearchRouter[Service]): + @property + def es_index(self) -> str: + return "service" + + @property + def resource_name_plural(self) -> str: + return "services" + + @property + def resource_class(self): + return Service + + @property + def match_fields(self): + return ['name', 'description', 'slogan'] From 0280c26214d43608fdd4a8da6284dad0a00c1374 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 14 Sep 2023 17:11:19 +0200 Subject: [PATCH 32/79] Search router tests implemented --- es/elasticsearch_query.py | 6 ++- src/routers/search_router.py | 1 + .../elasticsearch/dataset_search.json | 52 +++++++++++++++++++ .../elasticsearch/experiment_search.json | 52 +++++++++++++++++++ .../elasticsearch/ml_model_search.json | 51 ++++++++++++++++++ .../elasticsearch/publication_search.json | 51 ++++++++++++++++++ .../elasticsearch/service_search.json | 44 ++++++++++++++++ src/tests/routers/search_routers/__init__.py | 0 .../test_search_router_datasets.py | 48 +++++++++++++++++ .../test_search_router_experiments.py | 48 +++++++++++++++++ .../test_search_router_ml_model.py | 45 ++++++++++++++++ .../test_search_router_publications.py | 48 +++++++++++++++++ .../test_search_router_services.py | 44 ++++++++++++++++ 13 files changed, 488 insertions(+), 2 deletions(-) create mode 100644 src/tests/resources/elasticsearch/dataset_search.json create mode 100644 src/tests/resources/elasticsearch/experiment_search.json create mode 100644 src/tests/resources/elasticsearch/ml_model_search.json create mode 100644 src/tests/resources/elasticsearch/publication_search.json create mode 100644 src/tests/resources/elasticsearch/service_search.json create mode 100644 src/tests/routers/search_routers/__init__.py create mode 100644 src/tests/routers/search_routers/test_search_router_datasets.py create mode 100644 src/tests/routers/search_routers/test_search_router_experiments.py create mode 100644 src/tests/routers/search_routers/test_search_router_ml_model.py create mode 100644 src/tests/routers/search_routers/test_search_router_publications.py create mode 100644 src/tests/routers/search_routers/test_search_router_services.py diff --git a/es/elasticsearch_query.py b/es/elasticsearch_query.py index 7b4204aa..5d2c4c6e 100755 --- a/es/elasticsearch_query.py +++ b/es/elasticsearch_query.py @@ -70,7 +70,9 @@ def main(index, search_concept, platforms): # Perform first search result = es_client.search(index=index, query=query, size=SIZE, sort=SORT) - + + print(json.dumps(dict(result), indent=4)) + # Print total number of results print(f"TOTAL RESULTS: {result['hits']['total']['value']}") @@ -93,6 +95,6 @@ def main(index, search_concept, platforms): if __name__ == "__main__": index = ["publication"] # List of assets - search_concept = "in" # Search concept + search_concept = "name" # Search concept platforms = ["example", "ai4experiments"] # List of platforms main(index, search_concept, platforms) diff --git a/src/routers/search_router.py b/src/routers/search_router.py index 0255aac8..c695be11 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -77,6 +77,7 @@ def search( detail=f"The limit should be maximum {LIMIT_MAX}. " f"If you want more results, use pagination." ) + # Prepare query # ----------------------------------------------------------------- diff --git a/src/tests/resources/elasticsearch/dataset_search.json b/src/tests/resources/elasticsearch/dataset_search.json new file mode 100644 index 00000000..73493b2f --- /dev/null +++ b/src/tests/resources/elasticsearch/dataset_search.json @@ -0,0 +1,52 @@ +{ + "took" : 2, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 6.3386726, + "hits" : [ + { + "_index" : "dataset", + "_id" : "dataset_2", + "_score" : 6.3386726, + "_ignored" : [ + "description.keyword" + ], + "_source" : { + "license" : "https://test_resource.test", + "date_created" : "2023-09-01T00:00:00.000Z", + "date_published" : "2023-09-01T00:00:00.000Z", + "version" : "1.0.0", + "measurement_technique" : "", + "temporal_coverage" : "", + "issn" : "00000000", + "type" : "dataset", + "platform_identifier" : "6", + "resource_identifier" : 4, + "date_modified" : "2023-09-01T00:00:00.000Z", + "asset_identifier" : 4, + "@version" : "1", + "status" : "draft", + "description" : "A description", + "platform" : "example", + "name" : "A name", + "same_as" : "https://test_resource.test", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "identifier" : 2 + }, + "sort": [ + 1 + ] + } + ] + } +} diff --git a/src/tests/resources/elasticsearch/experiment_search.json b/src/tests/resources/elasticsearch/experiment_search.json new file mode 100644 index 00000000..6fb41161 --- /dev/null +++ b/src/tests/resources/elasticsearch/experiment_search.json @@ -0,0 +1,52 @@ +{ + "took" : 1, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 4.3783627, + "hits" : [ + { + "_index" : "experiment", + "_id" : "experiment_40", + "_score" : 4.3783627, + "_ignored" : [ + "description.keyword" + ], + "_source" : { + "license" : "https://test_resource.test", + "date_created" : "2023-09-01T00:00:00.000Z", + "date_published" : "2023-09-01T00:00:00.000Z", + "version" : "1.0.0", + "reproducibility_explanation" : "", + "type" : "experiment", + "experimental_workflow" : "", + "platform_identifier" : "405", + "resource_identifier" : 179, + "date_modified" : "2023-09-01T00:00:00.000Z", + "asset_identifier" : 177, + "@version" : "1", + "status" : "draft", + "description" : "A description", + "platform" : "example", + "name" : "A name", + "same_as" : "https://test_resource.test", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "identifier" : 40, + "execution_settings" : "" + }, + "sort": [ + 1 + ] + } + ] + } +} diff --git a/src/tests/resources/elasticsearch/ml_model_search.json b/src/tests/resources/elasticsearch/ml_model_search.json new file mode 100644 index 00000000..c47dfa44 --- /dev/null +++ b/src/tests/resources/elasticsearch/ml_model_search.json @@ -0,0 +1,51 @@ +{ + "took" : 4, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 6.245174, + "hits" : [ + { + "_index" : "ml_model", + "_id" : "ml_model_3", + "_score" : 6.245174, + "_ignored" : [ + "description.keyword" + ], + "_source" : { + "license" : "https://test_resource.test", + "date_created" : "2023-09-01T00:00:00.000Z", + "date_published" : "2023-09-01T00:00:00.000Z", + "version" : "1.0.1", + "type" : "ml_model", + "platform_identifier" : "3", + "resource_identifier" : 186, + "date_modified" : "2023-09-01T00:00:00.000Z", + "asset_identifier" : 184, + "@version" : "1", + "status" : "draft", + "description" : "A description", + "platform" : "example", + "name" : "A name", + "same_as" : "https://test_resource.test", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "identifier" : 3, + "ml_model_type" : "" + }, + "sort": [ + 1 + ] + } + ] + } +} + diff --git a/src/tests/resources/elasticsearch/publication_search.json b/src/tests/resources/elasticsearch/publication_search.json new file mode 100644 index 00000000..a3a45f78 --- /dev/null +++ b/src/tests/resources/elasticsearch/publication_search.json @@ -0,0 +1,51 @@ +{ + "took" : 2, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "publication", + "_id" : "publication_1", + "_score" : 1.0, + "_source" : { + "date_created" : "2023-09-01T00:00:00.000Z", + "date_published" : "2023-09-01T00:00:00.000Z", + "type" : "publication", + "isbn" : "0000000000000", + "@version" : "1", + "date_modified" : "2023-09-01T00:00:00.000Z", + "asset_identifier" : 370, + "status" : "draft", + "name" : "A name", + "permanent_identifier" : "https://test_resource.test", + "identifier" : 1, + "license" : "https://test_resource.test", + "knowledge_asset_identifier" : null, + "version" : "1.0.0", + "issn" : "00000000", + "publication_type" : "journal", + "platform_identifier" : "1", + "resource_identifier" : 376, + "description" : "A description", + "platform" : "example", + "same_as" : "https://test_resource.test", + "@timestamp" : "2023-09-01T00:00:00.000Z" + }, + "sort": [ + 1 + ] + } + ] + } +} diff --git a/src/tests/resources/elasticsearch/service_search.json b/src/tests/resources/elasticsearch/service_search.json new file mode 100644 index 00000000..bbe3149e --- /dev/null +++ b/src/tests/resources/elasticsearch/service_search.json @@ -0,0 +1,44 @@ +{ + "took" : 1, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 1.0, + "hits" : [ + { + "_index" : "service", + "_id" : "service_1", + "_score" : 1.0, + "_source" : { + "date_created" : "2023-09-01T00:00:00.000Z", + "type" : "service", + "terms_of_service" : "Some terms", + "slogan" : "A slogan", + "platform_identifier" : "1", + "resource_identifier" : 377, + "date_modified" : "2023-09-01T00:00:00.000Z", + "@version" : "1", + "status" : "draft", + "description" : "A description", + "platform" : "example", + "name" : "A name", + "same_as" : "https://test_resource.test", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "identifier" : 1 + }, + "sort": [ + 1 + ] + } + ] + } +} diff --git a/src/tests/routers/search_routers/__init__.py b/src/tests/routers/search_routers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/tests/routers/search_routers/test_search_router_datasets.py b/src/tests/routers/search_routers/test_search_router_datasets.py new file mode 100644 index 00000000..09a2703b --- /dev/null +++ b/src/tests/routers/search_routers/test_search_router_datasets.py @@ -0,0 +1,48 @@ +import os +import json + +from unittest.mock import Mock +from starlette.testclient import TestClient +from tests.testutils.paths import path_test_resources +from routers.search_routers import SearchRouterDatasets, router_list + +def test_search_happy_path(client: TestClient): + """Tests the Datasets search""" + + # Get the correspondent router instance from the search routers list + search_router = None + for router_instance in router_list: + if isinstance(router_instance, SearchRouterDatasets): + search_router = router_instance + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + mocked_file = os.path.join(resources_path, "dataset_search.json") + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + response = client.get("/search/datasets/v1", + params={'search_query': "description"}) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()['resources'][0] + + # Test the response + assert resource['identifier'] == 2 + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['status'] == "draft" + assert resource['name'] == "A name" + assert resource['description'] == "A description" + assert resource['version'] == "1.0.0" + assert resource['issn'] == "00000000" + assert resource['platform'] == "example" + assert resource['platform_identifier'] == "6" + assert resource['license'] == "https://test_resource.test" + assert resource['date_published'] == "2023-09-01T00:00:00+00:00" + assert resource['measurement_technique'] == "" + assert resource['temporal_coverage'] == "" + assert resource['same_as'] == "https://test_resource.test" diff --git a/src/tests/routers/search_routers/test_search_router_experiments.py b/src/tests/routers/search_routers/test_search_router_experiments.py new file mode 100644 index 00000000..bef1aca9 --- /dev/null +++ b/src/tests/routers/search_routers/test_search_router_experiments.py @@ -0,0 +1,48 @@ +import os +import json + +from unittest.mock import Mock +from starlette.testclient import TestClient +from tests.testutils.paths import path_test_resources +from routers.search_routers import SearchRouterExperiments, router_list + +def test_search_happy_path(client: TestClient): + """Tests the Experiments search""" + + # Get the correspondent router instance from the search routers list + search_router = None + for router_instance in router_list: + if isinstance(router_instance, SearchRouterExperiments): + search_router = router_instance + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + mocked_file = os.path.join(resources_path, "experiment_search.json") + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + response = client.get("/search/experiments/v1", + params={'search_query': "description"}) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()['resources'][0] + + # Test the response + assert resource['identifier'] == 40 + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['status'] == "draft" + assert resource['name'] == "A name" + assert resource['description'] == "A description" + assert resource['version'] == "1.0.0" + assert resource['platform'] == "example" + assert resource['platform_identifier'] == "405" + assert resource['license'] == "https://test_resource.test" + assert resource['date_published'] == "2023-09-01T00:00:00+00:00" + assert resource['reproducibility_explanation'] == "" + assert resource['experimental_workflow'] == "" + assert resource['execution_settings'] == "" + assert resource['same_as'] == "https://test_resource.test" diff --git a/src/tests/routers/search_routers/test_search_router_ml_model.py b/src/tests/routers/search_routers/test_search_router_ml_model.py new file mode 100644 index 00000000..58a4cafb --- /dev/null +++ b/src/tests/routers/search_routers/test_search_router_ml_model.py @@ -0,0 +1,45 @@ +import os +import json + +from unittest.mock import Mock +from starlette.testclient import TestClient +from tests.testutils.paths import path_test_resources +from routers.search_routers import SearchRouterMLModels, router_list + +def test_search_happy_path(client: TestClient): + """Tests the MLModels search""" + + # Get the correspondent router instance from the search routers list + search_router = None + for router_instance in router_list: + if isinstance(router_instance, SearchRouterMLModels): + search_router = router_instance + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + mocked_file = os.path.join(resources_path, "ml_model_search.json") + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + response = client.get("/search/ml_models/v1", + params={'search_query': "description"}) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()['resources'][0] + + # Test the response + assert resource['identifier'] == 3 + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['status'] == "draft" + assert resource['name'] == "A name" + assert resource['description'] == "A description" + assert resource['version'] == "1.0.1" + assert resource['platform'] == "example" + assert resource['platform_identifier'] == "3" + assert resource['license'] == "https://test_resource.test" + assert resource['date_published'] == "2023-09-01T00:00:00+00:00" + assert resource['same_as'] == "https://test_resource.test" diff --git a/src/tests/routers/search_routers/test_search_router_publications.py b/src/tests/routers/search_routers/test_search_router_publications.py new file mode 100644 index 00000000..5793f7bb --- /dev/null +++ b/src/tests/routers/search_routers/test_search_router_publications.py @@ -0,0 +1,48 @@ +import os +import json + +from unittest.mock import Mock +from starlette.testclient import TestClient +from tests.testutils.paths import path_test_resources +from routers.search_routers import SearchRouterPublications, router_list + +def test_search_happy_path(client: TestClient): + """Tests the Publications search""" + + # Get the correspondent router instance from the search routers list + search_router = None + for router_instance in router_list: + if isinstance(router_instance, SearchRouterPublications): + search_router = router_instance + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + mocked_file = os.path.join(resources_path, "publication_search.json") + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + response = client.get("/search/publications/v1", + params={'search_query': "description"}) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()['resources'][0] + + # Test the response + assert resource['identifier'] == 1 + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['status'] == "draft" + assert resource['name'] == "A name" + assert resource['description'] == "A description" + assert resource['version'] == "1.0.0" + assert resource['platform'] == "example" + assert resource['platform_identifier'] == "1" + assert resource['date_published'] == "2023-09-01T00:00:00+00:00" + assert resource['issn'] == "00000000" + assert resource['isbn'] == "0000000000000" + assert resource['permanent_identifier'] == "https://test_resource.test" + assert resource['license'] == "https://test_resource.test" + assert resource['same_as'] == "https://test_resource.test" diff --git a/src/tests/routers/search_routers/test_search_router_services.py b/src/tests/routers/search_routers/test_search_router_services.py new file mode 100644 index 00000000..e4ff9223 --- /dev/null +++ b/src/tests/routers/search_routers/test_search_router_services.py @@ -0,0 +1,44 @@ +import os +import json + +from unittest.mock import Mock +from starlette.testclient import TestClient +from tests.testutils.paths import path_test_resources +from routers.search_routers import SearchRouterServices, router_list + +def test_search_happy_path(client: TestClient): + """Tests the Services search""" + + # Get the correspondent router instance from the search routers list + search_router = None + for router_instance in router_list: + if isinstance(router_instance, SearchRouterServices): + search_router = router_instance + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + mocked_file = os.path.join(resources_path, "service_search.json") + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + response = client.get("/search/services/v1", + params={'search_query': "description"}) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()['resources'][0] + + # Test the response + assert resource['identifier'] == 1 + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['status'] == "draft" + assert resource['name'] == "A name" + assert resource['slogan'] == "A slogan" + assert resource['description'] == "A description" + assert resource['platform'] == "example" + assert resource['platform_identifier'] == "1" + assert resource['terms_of_service'] == "Some terms" + assert resource['same_as'] == "https://test_resource.test" From a0cc086f312cbf92f659fed3bbe556082f722483 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 28 Sep 2023 13:26:17 +0200 Subject: [PATCH 33/79] Search fields selection added --- src/routers/search_router.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/src/routers/search_router.py b/src/routers/search_router.py index c695be11..e1192546 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -64,6 +64,7 @@ def create(self, engine: Engine, url_prefix: str) -> APIRouter: def search( platforms: Annotated[list[str] | None, Query()] = None, search_query: str = "", + search_fields: Annotated[list[str] | None, Query()] = None, limit: int = 10, offset: Annotated[list[str] | None, Query()] = None ) -> SearchResult[read_class]: # type: ignore @@ -82,8 +83,25 @@ def search( # ----------------------------------------------------------------- # Matches of the search concept for each field - query_matches = [{'match': {f: search_query}} - for f in self.match_fields] + if search_fields: + + # The selected fields must be present in the match fields + if not set(search_fields).issubset(set(self.match_fields)): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"The available search fields for this entity " + f"are:{self.match_fields}" + ) + + # Search in specific search fields + query_matches = [{'match': {f: search_query}} + for f in search_fields] + + else: + + # Search in any match field + query_matches = [{'match': {f: search_query}} + for f in self.match_fields] if platforms: From 97b361c4e8119f291e28d3bf3075dcf01d5a5929 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 10 Oct 2023 17:52:22 +0200 Subject: [PATCH 34/79] Added search for event, news, ortganisation and project --- logstash/pipeline/conf/init_temp_table.conf | 88 +++++++++++++++++ logstash/pipeline/conf/sync_temp_table.conf | 96 +++++++++++++++++++ logstash/pipeline/sql/init_temp_event.sql | 26 +++++ logstash/pipeline/sql/init_temp_news.sql | 20 ++++ .../pipeline/sql/init_temp_organisation.sql | 25 +++++ logstash/pipeline/sql/init_temp_project.sql | 24 +++++ logstash/pipeline/sql/sync_temp_event.sql | 27 ++++++ logstash/pipeline/sql/sync_temp_ml_model.sql | 1 + logstash/pipeline/sql/sync_temp_news.sql | 21 ++++ .../pipeline/sql/sync_temp_organisation.sql | 26 +++++ logstash/pipeline/sql/sync_temp_project.sql | 25 +++++ 11 files changed, 379 insertions(+) create mode 100644 logstash/pipeline/sql/init_temp_event.sql create mode 100644 logstash/pipeline/sql/init_temp_news.sql create mode 100644 logstash/pipeline/sql/init_temp_organisation.sql create mode 100644 logstash/pipeline/sql/init_temp_project.sql create mode 100644 logstash/pipeline/sql/sync_temp_event.sql create mode 100644 logstash/pipeline/sql/sync_temp_news.sql create mode 100644 logstash/pipeline/sql/sync_temp_organisation.sql create mode 100644 logstash/pipeline/sql/sync_temp_project.sql diff --git a/logstash/pipeline/conf/init_temp_table.conf b/logstash/pipeline/conf/init_temp_table.conf index a9244e8a..a8c0e46f 100644 --- a/logstash/pipeline/conf/init_temp_table.conf +++ b/logstash/pipeline/conf/init_temp_table.conf @@ -60,6 +60,54 @@ input { statement_filepath => "/usr/share/logstash/sql/init_temp_service.sql" type => "service" } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_temp_news.sql" + type => "news" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_temp_event.sql" + type => "event" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_temp_project.sql" + type => "project" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_temp_organisation.sql" + type => "organisation" + } } # https://www.elastic.co/guide/en/logstash/current/filter-plugins.html+ #filter { @@ -119,4 +167,44 @@ output { document_id => "service_%{identifier}" } } + if [type] == "news" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "news" + document_id => "news_%{identifier}" + } + } + if [type] == "event" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "event" + document_id => "event_%{identifier}" + } + } + if [type] == "project" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "project" + document_id => "project_%{identifier}" + } + } + if [type] == "organisation" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "organisation" + document_id => "organisation_%{identifier}" + } + } } diff --git a/logstash/pipeline/conf/sync_temp_table.conf b/logstash/pipeline/conf/sync_temp_table.conf index 142e1784..a7ba5aa0 100644 --- a/logstash/pipeline/conf/sync_temp_table.conf +++ b/logstash/pipeline/conf/sync_temp_table.conf @@ -70,6 +70,62 @@ input { statement_filepath => "/usr/share/logstash/sql/sync_temp_service.sql" type => "service" } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_temp_news.sql" + type => "news" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_temp_event.sql" + type => "event" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_temp_project.sql" + type => "project" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_temp_organisation.sql" + type => "organisation" + } } # https://www.elastic.co/guide/en/logstash/current/filter-plugins.html+ #filter { @@ -129,4 +185,44 @@ output { document_id => "service_%{identifier}" } } + if [type] == "news" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "news" + document_id => "news_%{identifier}" + } + } + if [type] == "event" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "event" + document_id => "event_%{identifier}" + } + } + if [type] == "project" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "project" + document_id => "project_%{identifier}" + } + } + if [type] == "organisation" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "organisation" + document_id => "organisation_%{identifier}" + } + } } diff --git a/logstash/pipeline/sql/init_temp_event.sql b/logstash/pipeline/sql/init_temp_event.sql new file mode 100644 index 00000000..123ab6ca --- /dev/null +++ b/logstash/pipeline/sql/init_temp_event.sql @@ -0,0 +1,26 @@ +SELECT + -- Concept + event.identifier, + event.platform, + event.platform_identifier, + -- Concept.aiod_entry + event_status.name AS `status`, + event_mode.name AS `mode`, + aiod_entry.date_modified, + aiod_entry.date_created, + agent.type AS `organiser`, + -- Resource + event.ai_resource_id AS `resource_identifier`, + event.name, + event.description, + event.same_as, + event.start_date, + event.end_date, + event.schedule, + event.registration_link, +FROM aiod.event +INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier +LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier +LEFT JOIN aiod.event_mode ON aiod.event.mode_identifier=aiod.event_mode.identifier +ORDER BY aiod.event.identifier diff --git a/logstash/pipeline/sql/init_temp_news.sql b/logstash/pipeline/sql/init_temp_news.sql new file mode 100644 index 00000000..4179ca5a --- /dev/null +++ b/logstash/pipeline/sql/init_temp_news.sql @@ -0,0 +1,20 @@ +SELECT + -- Concept + news.identifier, + news.platform, + news.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + news.ai_resource_id AS `resource_identifier`, + news.name, + news.description, + news.same_as, + news.headline, + news.alternative_headline, +FROM aiod.news +INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +ORDER BY aiod.news.identifier diff --git a/logstash/pipeline/sql/init_temp_organisation.sql b/logstash/pipeline/sql/init_temp_organisation.sql new file mode 100644 index 00000000..58081bd6 --- /dev/null +++ b/logstash/pipeline/sql/init_temp_organisation.sql @@ -0,0 +1,25 @@ +SELECT + -- Concept + organisation.identifier, + organisation.platform, + organisation.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + agent.type AS `agent`, + -- Resource + organisation.ai_resource_id AS `resource_identifier`, + organisation.name, + organisation.description, + organisation.same_as, + organisation.date_founded, + organisation.legal_name, + -- Organisation + organisation_type.name AS `organisation_type` +FROM aiod.organisation +INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +INNER JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier +LEFT JOIN aiod.organisation_type ON aiod.organisation.type_identifier=aiod.organisation_type.identifier +ORDER BY aiod.organisation.identifier diff --git a/logstash/pipeline/sql/init_temp_project.sql b/logstash/pipeline/sql/init_temp_project.sql new file mode 100644 index 00000000..52c26163 --- /dev/null +++ b/logstash/pipeline/sql/init_temp_project.sql @@ -0,0 +1,24 @@ +SELECT + -- Concept + project.identifier, + project.platform, + project.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + project.ai_resource_id AS `resource_identifier`, + project.name, + project.description, + project.same_as, + project.start_date, + project.end_date, + project.total_cost_euro, + project.coordinator_identifier, + organisation.name AS coordinator, +FROM aiod.project +INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +INNER JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier +ORDER BY aiod.project.identifier diff --git a/logstash/pipeline/sql/sync_temp_event.sql b/logstash/pipeline/sql/sync_temp_event.sql new file mode 100644 index 00000000..9eca5888 --- /dev/null +++ b/logstash/pipeline/sql/sync_temp_event.sql @@ -0,0 +1,27 @@ +SELECT + -- Concept + event.identifier, + event.platform, + event.platform_identifier, + -- Concept.aiod_entry + event_status.name AS `status`, + event_mode.name AS `mode`, + aiod_entry.date_modified, + aiod_entry.date_created, + agent.type AS `organiser`, + -- Resource + event.ai_resource_id AS `resource_identifier`, + event.name, + event.description, + event.same_as, + event.start_date, + event.end_date, + event.schedule, + event.registration_link, +FROM aiod.event +INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier +LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier +LEFT JOIN aiod.event_mode ON aiod.event.mode_identifier=aiod.event_mode.identifier +WHERE aiod.aiod_entry.date_modified > :sql_last_value +ORDER BY aiod.event.identifier diff --git a/logstash/pipeline/sql/sync_temp_ml_model.sql b/logstash/pipeline/sql/sync_temp_ml_model.sql index b61d35fa..421b6403 100644 --- a/logstash/pipeline/sql/sync_temp_ml_model.sql +++ b/logstash/pipeline/sql/sync_temp_ml_model.sql @@ -24,4 +24,5 @@ INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entr INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifier LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier +WHERE aiod.aiod_entry.date_modified > :sql_last_value ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/sync_temp_news.sql b/logstash/pipeline/sql/sync_temp_news.sql new file mode 100644 index 00000000..5a460a64 --- /dev/null +++ b/logstash/pipeline/sql/sync_temp_news.sql @@ -0,0 +1,21 @@ +SELECT + -- Concept + news.identifier, + news.platform, + news.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + news.ai_resource_id AS `resource_identifier`, + news.name, + news.description, + news.same_as, + news.headline, + news.alternative_headline, +FROM aiod.news +INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +WHERE aiod.aiod_entry.date_modified > :sql_last_value +ORDER BY aiod.news.identifier diff --git a/logstash/pipeline/sql/sync_temp_organisation.sql b/logstash/pipeline/sql/sync_temp_organisation.sql new file mode 100644 index 00000000..7d94c68f --- /dev/null +++ b/logstash/pipeline/sql/sync_temp_organisation.sql @@ -0,0 +1,26 @@ +SELECT + -- Concept + organisation.identifier, + organisation.platform, + organisation.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + agent.type AS `agent`, + -- Resource + organisation.ai_resource_id AS `resource_identifier`, + organisation.name, + organisation.description, + organisation.same_as, + organisation.date_founded, + organisation.legal_name, + -- Organisation + organisation_type.name AS `organisation_type` +FROM aiod.organisation +INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +INNER JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier +LEFT JOIN aiod.organisation_type ON aiod.organisation.type_identifier=aiod.organisation_type.identifier +WHERE aiod.aiod_entry.date_modified > :sql_last_value +ORDER BY aiod.organisation.identifier diff --git a/logstash/pipeline/sql/sync_temp_project.sql b/logstash/pipeline/sql/sync_temp_project.sql new file mode 100644 index 00000000..20ebd61d --- /dev/null +++ b/logstash/pipeline/sql/sync_temp_project.sql @@ -0,0 +1,25 @@ +SELECT + -- Concept + project.identifier, + project.platform, + project.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + project.ai_resource_id AS `resource_identifier`, + project.name, + project.description, + project.same_as, + project.start_date, + project.end_date, + project.total_cost_euro, + project.coordinator_identifier, + organisation.name AS coordinator, +FROM aiod.project +INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +INNER JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier +WHERE aiod.aiod_entry.date_modified > :sql_last_value +ORDER BY aiod.project.identifier From e69620ccf4e135658ad25a60679869df91291707 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 17 Oct 2023 15:35:44 +0200 Subject: [PATCH 35/79] Added routers for event, news, organisation and project --- connectors/fill-examples.sh | 8 +- es/setup/curl.sh | 4 + es/setup/curl_dockerfile | 4 + es/setup/event.json | 152 ++++++++++++++++++ es/setup/news.json | 120 ++++++++++++++ es/setup/organisation.json | 136 ++++++++++++++++ es/setup/project.json | 127 +++++++++++++++ logstash/pipeline/conf/init_temp_table.conf | 10 ++ logstash/pipeline/conf/sync_temp_table.conf | 10 ++ logstash/pipeline/sql/init_temp_event.sql | 3 +- logstash/pipeline/sql/init_temp_news.sql | 2 +- logstash/pipeline/sql/init_temp_project.sql | 2 +- logstash/pipeline/sql/sync_temp_event.sql | 3 +- logstash/pipeline/sql/sync_temp_news.sql | 2 +- logstash/pipeline/sql/sync_temp_project.sql | 2 +- .../example/resources/resource/events.json | 3 +- .../resources/resource/organisations.json | 2 +- .../example/resources/resource/projects.json | 3 +- src/routers/search_routers/__init__.py | 13 +- .../search_routers/search_router_events.py | 20 +++ .../search_routers/search_router_news.py | 20 +++ .../search_router_organisations.py | 20 +++ .../search_routers/search_router_projects.py | 20 +++ 23 files changed, 668 insertions(+), 18 deletions(-) create mode 100644 es/setup/event.json create mode 100644 es/setup/news.json create mode 100644 es/setup/organisation.json create mode 100644 es/setup/project.json create mode 100644 src/routers/search_routers/search_router_events.py create mode 100644 src/routers/search_routers/search_router_news.py create mode 100644 src/routers/search_routers/search_router_organisations.py create mode 100644 src/routers/search_routers/search_router_projects.py diff --git a/connectors/fill-examples.sh b/connectors/fill-examples.sh index 320a5867..0be8d742 100755 --- a/connectors/fill-examples.sh +++ b/connectors/fill-examples.sh @@ -16,10 +16,6 @@ python3 connectors/synchronization.py \ -c connectors.example.example.ExampleEducationalResourceConnector \ -w /opt/connectors/data/example/educational_resource -python3 connectors/synchronization.py \ - -c connectors.example.example.ExampleEventConnector \ - -w /opt/connectors/data/example/event - python3 connectors/synchronization.py \ -c connectors.example.example.ExampleExperimentConnector \ -w /opt/connectors/data/example/experiment @@ -40,6 +36,10 @@ python3 connectors/synchronization.py \ -c connectors.example.example.ExamplePersonConnector \ -w /opt/connectors/data/example/person +python3 connectors/synchronization.py \ + -c connectors.example.example.ExampleEventConnector \ + -w /opt/connectors/data/example/event + python3 connectors/synchronization.py \ -c connectors.example.example.ExampleProjectConnector \ -w /opt/connectors/data/example/project diff --git a/es/setup/curl.sh b/es/setup/curl.sh index 8e92b07f..f3ab7e91 100755 --- a/es/setup/curl.sh +++ b/es/setup/curl.sh @@ -1,5 +1,9 @@ curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/dataset?pretty -H 'Content-Type: application/json' -d @/dataset.json +curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/event?pretty -H 'Content-Type: application/json' -d @/event.json curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/experiment?pretty -H 'Content-Type: application/json' -d @/experiment.json curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/ml_model?pretty -H 'Content-Type: application/json' -d @/ml_model.json +curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/news?pretty -H 'Content-Type: application/json' -d @/news.json +curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/organisation?pretty -H 'Content-Type: application/json' -d @/organisation.json +curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/project?pretty -H 'Content-Type: application/json' -d @/project.json curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/publication?pretty -H 'Content-Type: application/json' -d @/publication.json curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/service?pretty -H 'Content-Type: application/json' -d @/service.json diff --git a/es/setup/curl_dockerfile b/es/setup/curl_dockerfile index 183cea06..661ed8c8 100644 --- a/es/setup/curl_dockerfile +++ b/es/setup/curl_dockerfile @@ -3,8 +3,12 @@ FROM ubuntu:22.04 RUN apt-get update && apt-get install -y curl COPY dataset.json /dataset.json +COPY event.json /event.json COPY experiment.json /experiment.json COPY ml_model.json /ml_model.json +COPY news.json /news.json +COPY organisation.json /organisation.json +COPY project.json /project.json COPY publication.json /publication.json COPY service.json /service.json COPY curl.sh /curl.sh diff --git a/es/setup/event.json b/es/setup/event.json new file mode 100644 index 00000000..2deb13ef --- /dev/null +++ b/es/setup/event.json @@ -0,0 +1,152 @@ +{ + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date", + "index" : false + }, + "@version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "date_created" : { + "type" : "date", + "index" : false + }, + "date_modified" : { + "type" : "date" + }, + "description" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "end_date" : { + "type" : "date", + "index" : false + }, + "identifier" : { + "type" : "long", + "index" : false + }, + "mode" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "organiser_identifier" : { + "type" : "long", + "index" : false + }, + "organiser_type" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform_identifier" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "registration_link" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "resource_identifier" : { + "type" : "long", + "index" : false + }, + "same_as" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "schedule" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "start_date" : { + "type" : "date", + "index" : false + }, + "status" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "type" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + } + } + } +} diff --git a/es/setup/news.json b/es/setup/news.json new file mode 100644 index 00000000..7349acad --- /dev/null +++ b/es/setup/news.json @@ -0,0 +1,120 @@ +{ + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date", + "index" : false + }, + "@version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "alternative_headline" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "date_created" : { + "type" : "date", + "index" : false + }, + "date_modified" : { + "type" : "date" + }, + "description" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "headline" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "identifier" : { + "type" : "long", + "index" : false + }, + "name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform_identifier" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "resource_identifier" : { + "type" : "long", + "index" : false + }, + "same_as" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "status" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "type" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + } + } + } +} diff --git a/es/setup/organisation.json b/es/setup/organisation.json new file mode 100644 index 00000000..9d02c7b3 --- /dev/null +++ b/es/setup/organisation.json @@ -0,0 +1,136 @@ +{ + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date", + "index" : false + }, + "@version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "agent" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "date_created" : { + "type" : "date", + "index" : false + }, + "date_founded" : { + "type" : "date", + "format": "yyyy-MM-dd", + "index" : false + }, + "date_modified" : { + "type" : "date" + }, + "description" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "identifier" : { + "type" : "long", + "index" : false + }, + "legal_name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "organisation_type" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform_identifier" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "resource_identifier" : { + "type" : "long", + "index" : false + }, + "same_as" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "status" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "type" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + } + } + } +} diff --git a/es/setup/project.json b/es/setup/project.json new file mode 100644 index 00000000..74dfb9cd --- /dev/null +++ b/es/setup/project.json @@ -0,0 +1,127 @@ +{ + "mappings" : { + "properties" : { + "@timestamp" : { + "type" : "date", + "index" : false + }, + "@version" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "coordinator_identifier" : { + "type" : "long", + "index" : false + }, + "coordinator_name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "date_created" : { + "type" : "date", + "index" : false + }, + "date_modified" : { + "type" : "date" + }, + "description" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "end_date" : { + "type" : "date", + "index" : false + }, + "identifier" : { + "type" : "long", + "index" : false + }, + "name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "platform_identifier" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "resource_identifier" : { + "type" : "long", + "index" : false + }, + "same_as" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "start_date" : { + "type" : "date", + "index" : false + }, + "status" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + }, + "total_cost_euro" : { + "type" : "float", + "index" : false + }, + "type" : { + "type" : "text", + "index" : false, + "fields" : { + "keyword" : { + "type" : "keyword", + "ignore_above" : 256 + } + } + } + } + } +} diff --git a/logstash/pipeline/conf/init_temp_table.conf b/logstash/pipeline/conf/init_temp_table.conf index a8c0e46f..5d02c796 100644 --- a/logstash/pipeline/conf/init_temp_table.conf +++ b/logstash/pipeline/conf/init_temp_table.conf @@ -115,6 +115,16 @@ input { # remove_field => ["@version", "@timestamp"] # } #} +filter { + if [type] == "organisation" { + ruby { + code => ' + t = Time.at(event.get("date_founded").to_f) + event.set("date_founded", t.strftime("%Y-%m-%d")) + ' + } + } +} output { # https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html if [type] == "publication" { diff --git a/logstash/pipeline/conf/sync_temp_table.conf b/logstash/pipeline/conf/sync_temp_table.conf index a7ba5aa0..2bd8173c 100644 --- a/logstash/pipeline/conf/sync_temp_table.conf +++ b/logstash/pipeline/conf/sync_temp_table.conf @@ -133,6 +133,16 @@ input { # remove_field => ["@version", "@timestamp", "ts"] # } #} +filter { + if [type] == "organisation" { + ruby { + code => ' + t = Time.at(event.get("date_founded").to_f) + event.set("date_founded", t.strftime("%Y-%m-%d")) + ' + } + } +} output { # https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html if [type] == "publication" { diff --git a/logstash/pipeline/sql/init_temp_event.sql b/logstash/pipeline/sql/init_temp_event.sql index 123ab6ca..ecd2da5e 100644 --- a/logstash/pipeline/sql/init_temp_event.sql +++ b/logstash/pipeline/sql/init_temp_event.sql @@ -8,7 +8,7 @@ SELECT event_mode.name AS `mode`, aiod_entry.date_modified, aiod_entry.date_created, - agent.type AS `organiser`, + agent.type AS `organiser_type`, -- Resource event.ai_resource_id AS `resource_identifier`, event.name, @@ -18,6 +18,7 @@ SELECT event.end_date, event.schedule, event.registration_link, + event.organiser_identifier FROM aiod.event INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier diff --git a/logstash/pipeline/sql/init_temp_news.sql b/logstash/pipeline/sql/init_temp_news.sql index 4179ca5a..b315f19d 100644 --- a/logstash/pipeline/sql/init_temp_news.sql +++ b/logstash/pipeline/sql/init_temp_news.sql @@ -13,7 +13,7 @@ SELECT news.description, news.same_as, news.headline, - news.alternative_headline, + news.alternative_headline FROM aiod.news INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier diff --git a/logstash/pipeline/sql/init_temp_project.sql b/logstash/pipeline/sql/init_temp_project.sql index 52c26163..3ef64a64 100644 --- a/logstash/pipeline/sql/init_temp_project.sql +++ b/logstash/pipeline/sql/init_temp_project.sql @@ -16,7 +16,7 @@ SELECT project.end_date, project.total_cost_euro, project.coordinator_identifier, - organisation.name AS coordinator, + organisation.name AS coordinator_name FROM aiod.project INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier diff --git a/logstash/pipeline/sql/sync_temp_event.sql b/logstash/pipeline/sql/sync_temp_event.sql index 9eca5888..53419164 100644 --- a/logstash/pipeline/sql/sync_temp_event.sql +++ b/logstash/pipeline/sql/sync_temp_event.sql @@ -8,7 +8,7 @@ SELECT event_mode.name AS `mode`, aiod_entry.date_modified, aiod_entry.date_created, - agent.type AS `organiser`, + agent.type AS `organiser_type`, -- Resource event.ai_resource_id AS `resource_identifier`, event.name, @@ -18,6 +18,7 @@ SELECT event.end_date, event.schedule, event.registration_link, + event.organiser_identifier FROM aiod.event INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier diff --git a/logstash/pipeline/sql/sync_temp_news.sql b/logstash/pipeline/sql/sync_temp_news.sql index 5a460a64..dbb0ee8f 100644 --- a/logstash/pipeline/sql/sync_temp_news.sql +++ b/logstash/pipeline/sql/sync_temp_news.sql @@ -13,7 +13,7 @@ SELECT news.description, news.same_as, news.headline, - news.alternative_headline, + news.alternative_headline FROM aiod.news INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier diff --git a/logstash/pipeline/sql/sync_temp_project.sql b/logstash/pipeline/sql/sync_temp_project.sql index 20ebd61d..faa749a9 100644 --- a/logstash/pipeline/sql/sync_temp_project.sql +++ b/logstash/pipeline/sql/sync_temp_project.sql @@ -16,7 +16,7 @@ SELECT project.end_date, project.total_cost_euro, project.coordinator_identifier, - organisation.name AS coordinator, + organisation.name AS coordinator_name FROM aiod.project INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier diff --git a/src/connectors/example/resources/resource/events.json b/src/connectors/example/resources/resource/events.json index bf05e328..c7144d9a 100644 --- a/src/connectors/example/resources/resource/events.json +++ b/src/connectors/example/resources/resource/events.json @@ -2,12 +2,13 @@ { "platform": "example", "platform_identifier": "1", - "name": "Name of the Event", + "name": "The name of the Event", "description": "A description.", "same_as": "https://www.example.com/resource/this_resource", "date_published": "2022-01-01T15:15:00.000", "version": "1.1.0", "pid": "https://doi.org/10.1000/182", + "organiser": 2, "aiod_entry": { "editor": [], "status": "draft" diff --git a/src/connectors/example/resources/resource/organisations.json b/src/connectors/example/resources/resource/organisations.json index 5138f72a..f45a5c3a 100644 --- a/src/connectors/example/resources/resource/organisations.json +++ b/src/connectors/example/resources/resource/organisations.json @@ -69,4 +69,4 @@ ], "type": "Research Institution" } -] \ No newline at end of file +] diff --git a/src/connectors/example/resources/resource/projects.json b/src/connectors/example/resources/resource/projects.json index 9a1250b1..bc1e1d6c 100644 --- a/src/connectors/example/resources/resource/projects.json +++ b/src/connectors/example/resources/resource/projects.json @@ -8,6 +8,7 @@ "date_published": "2022-01-01T15:15:00.000", "version": "1.1.0", "pid": "https://doi.org/10.1000/182", + "coordinator": 1, "aiod_entry": { "editor": [], "status": "draft" @@ -79,4 +80,4 @@ "end_date": "2021-02-03T15:15:00", "total_cost_euro": 10000000 } -] \ No newline at end of file +] diff --git a/src/routers/search_routers/__init__.py b/src/routers/search_routers/__init__.py index 37f4fa35..027f6ef9 100644 --- a/src/routers/search_routers/__init__.py +++ b/src/routers/search_routers/__init__.py @@ -1,14 +1,13 @@ import os from elasticsearch import Elasticsearch -from database.model.dataset.dataset import Dataset -from database.model.models_and_experiments.experiment import Experiment -from database.model.models_and_experiments.ml_model import MLModel -from database.model.knowledge_asset.publication import Publication -from database.model.service.service import Service from .search_router_datasets import SearchRouterDatasets +from .search_router_events import SearchRouterEvents from .search_router_experiments import SearchRouterExperiments from .search_router_ml_models import SearchRouterMLModels +from .search_router_news import SearchRouterNews +from .search_router_organisations import SearchRouterOrganisations +from .search_router_projects import SearchRouterProjects from .search_router_publications import SearchRouterPublications from .search_router_services import SearchRouterServices from ..search_router import SearchRouter @@ -20,8 +19,12 @@ router_list: list[SearchRouter] = [ SearchRouterDatasets(client=es_client), + SearchRouterEvents(client=es_client), SearchRouterExperiments(client=es_client), SearchRouterMLModels(client=es_client), + SearchRouterNews(client=es_client), + SearchRouterOrganisations(client=es_client), + SearchRouterProjects(client=es_client), SearchRouterPublications(client=es_client), SearchRouterServices(client=es_client) ] diff --git a/src/routers/search_routers/search_router_events.py b/src/routers/search_routers/search_router_events.py new file mode 100644 index 00000000..1a669b0b --- /dev/null +++ b/src/routers/search_routers/search_router_events.py @@ -0,0 +1,20 @@ +from database.model.event.event import Event +from routers.search_router import SearchRouter + + +class SearchRouterEvents(SearchRouter[Event]): + @property + def es_index(self) -> str: + return "event" + + @property + def resource_name_plural(self) -> str: + return "events" + + @property + def resource_class(self): + return Event + + @property + def match_fields(self): + return ['name', 'description', 'organiser_type', 'mode'] diff --git a/src/routers/search_routers/search_router_news.py b/src/routers/search_routers/search_router_news.py new file mode 100644 index 00000000..36045d23 --- /dev/null +++ b/src/routers/search_routers/search_router_news.py @@ -0,0 +1,20 @@ +from database.model.news.news import News +from routers.search_router import SearchRouter + + +class SearchRouterNews(SearchRouter[News]): + @property + def es_index(self) -> str: + return "news" + + @property + def resource_name_plural(self) -> str: + return "news" + + @property + def resource_class(self): + return News + + @property + def match_fields(self): + return ['name', 'description', 'headline', 'alternative_headline'] diff --git a/src/routers/search_routers/search_router_organisations.py b/src/routers/search_routers/search_router_organisations.py new file mode 100644 index 00000000..ae7dc8e2 --- /dev/null +++ b/src/routers/search_routers/search_router_organisations.py @@ -0,0 +1,20 @@ +from database.model.agent.organisation import Organisation +from routers.search_router import SearchRouter + + +class SearchRouterOrganisations(SearchRouter[Organisation]): + @property + def es_index(self) -> str: + return "organisation" + + @property + def resource_name_plural(self) -> str: + return "organisations" + + @property + def resource_class(self): + return Organisation + + @property + def match_fields(self): + return ['name', 'legal_name', 'description'] diff --git a/src/routers/search_routers/search_router_projects.py b/src/routers/search_routers/search_router_projects.py new file mode 100644 index 00000000..a6984dba --- /dev/null +++ b/src/routers/search_routers/search_router_projects.py @@ -0,0 +1,20 @@ +from database.model.project.project import Project +from routers.search_router import SearchRouter + + +class SearchRouterProjects(SearchRouter[Project]): + @property + def es_index(self) -> str: + return "project" + + @property + def resource_name_plural(self) -> str: + return "projects" + + @property + def resource_class(self): + return Project + + @property + def match_fields(self): + return ['name', 'description', 'coordinator_name'] From 3e5d5f4a7b0851a5935b31d4ebbf6a5a62b90137 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 23 Oct 2023 12:54:13 +0200 Subject: [PATCH 36/79] Logstash names changed --- .../{init_temp_table.conf => init_table.conf} | 18 +++++++++--------- .../{sync_temp_table.conf => sync_table.conf} | 18 +++++++++--------- logstash/pipeline/pipelines.yml | 8 ++++---- ...{init_temp_dataset.sql => init_dataset.sql} | 0 .../{init_temp_event.sql => init_event.sql} | 0 ...temp_experiment.sql => init_experiment.sql} | 0 ...nit_temp_ml_model.sql => init_ml_model.sql} | 0 .../sql/{init_temp_news.sql => init_news.sql} | 0 ..._organisation.sql => init_organisation.sql} | 0 ...{init_temp_project.sql => init_project.sql} | 0 ...mp_publication.sql => init_publication.sql} | 0 ...{init_temp_service.sql => init_service.sql} | 0 ...{sync_temp_dataset.sql => sync_dataset.sql} | 0 .../{sync_temp_event.sql => sync_event.sql} | 0 ...temp_experiment.sql => sync_experiment.sql} | 0 ...ync_temp_ml_model.sql => sync_ml_model.sql} | 0 .../sql/{sync_temp_news.sql => sync_news.sql} | 0 ..._organisation.sql => sync_organisation.sql} | 0 ...{sync_temp_project.sql => sync_project.sql} | 0 ...mp_publication.sql => sync_publication.sql} | 0 ...{sync_temp_service.sql => sync_service.sql} | 0 21 files changed, 22 insertions(+), 22 deletions(-) rename logstash/pipeline/conf/{init_temp_table.conf => init_table.conf} (90%) rename logstash/pipeline/conf/{sync_temp_table.conf => sync_table.conf} (91%) rename logstash/pipeline/sql/{init_temp_dataset.sql => init_dataset.sql} (100%) rename logstash/pipeline/sql/{init_temp_event.sql => init_event.sql} (100%) rename logstash/pipeline/sql/{init_temp_experiment.sql => init_experiment.sql} (100%) rename logstash/pipeline/sql/{init_temp_ml_model.sql => init_ml_model.sql} (100%) rename logstash/pipeline/sql/{init_temp_news.sql => init_news.sql} (100%) rename logstash/pipeline/sql/{init_temp_organisation.sql => init_organisation.sql} (100%) rename logstash/pipeline/sql/{init_temp_project.sql => init_project.sql} (100%) rename logstash/pipeline/sql/{init_temp_publication.sql => init_publication.sql} (100%) rename logstash/pipeline/sql/{init_temp_service.sql => init_service.sql} (100%) rename logstash/pipeline/sql/{sync_temp_dataset.sql => sync_dataset.sql} (100%) rename logstash/pipeline/sql/{sync_temp_event.sql => sync_event.sql} (100%) rename logstash/pipeline/sql/{sync_temp_experiment.sql => sync_experiment.sql} (100%) rename logstash/pipeline/sql/{sync_temp_ml_model.sql => sync_ml_model.sql} (100%) rename logstash/pipeline/sql/{sync_temp_news.sql => sync_news.sql} (100%) rename logstash/pipeline/sql/{sync_temp_organisation.sql => sync_organisation.sql} (100%) rename logstash/pipeline/sql/{sync_temp_project.sql => sync_project.sql} (100%) rename logstash/pipeline/sql/{sync_temp_publication.sql => sync_publication.sql} (100%) rename logstash/pipeline/sql/{sync_temp_service.sql => sync_service.sql} (100%) diff --git a/logstash/pipeline/conf/init_temp_table.conf b/logstash/pipeline/conf/init_table.conf similarity index 90% rename from logstash/pipeline/conf/init_temp_table.conf rename to logstash/pipeline/conf/init_table.conf index 5d02c796..2560e44d 100644 --- a/logstash/pipeline/conf/init_temp_table.conf +++ b/logstash/pipeline/conf/init_table.conf @@ -9,7 +9,7 @@ input { #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_temp_publication.sql" + statement_filepath => "/usr/share/logstash/sql/init_publication.sql" type => "publication" } jdbc { @@ -21,7 +21,7 @@ input { #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_temp_dataset.sql" + statement_filepath => "/usr/share/logstash/sql/init_dataset.sql" type => "dataset" } jdbc { @@ -33,7 +33,7 @@ input { #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_temp_experiment.sql" + statement_filepath => "/usr/share/logstash/sql/init_experiment.sql" type => "experiment" } jdbc { @@ -45,7 +45,7 @@ input { #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_temp_ml_model.sql" + statement_filepath => "/usr/share/logstash/sql/init_ml_model.sql" type => "ml_model" } jdbc { @@ -57,7 +57,7 @@ input { #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_temp_service.sql" + statement_filepath => "/usr/share/logstash/sql/init_service.sql" type => "service" } jdbc { @@ -69,7 +69,7 @@ input { #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_temp_news.sql" + statement_filepath => "/usr/share/logstash/sql/init_news.sql" type => "news" } jdbc { @@ -81,7 +81,7 @@ input { #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_temp_event.sql" + statement_filepath => "/usr/share/logstash/sql/init_event.sql" type => "event" } jdbc { @@ -93,7 +93,7 @@ input { #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_temp_project.sql" + statement_filepath => "/usr/share/logstash/sql/init_project.sql" type => "project" } jdbc { @@ -105,7 +105,7 @@ input { #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_temp_organisation.sql" + statement_filepath => "/usr/share/logstash/sql/init_organisation.sql" type => "organisation" } } diff --git a/logstash/pipeline/conf/sync_temp_table.conf b/logstash/pipeline/conf/sync_table.conf similarity index 91% rename from logstash/pipeline/conf/sync_temp_table.conf rename to logstash/pipeline/conf/sync_table.conf index 2bd8173c..ff389e8e 100644 --- a/logstash/pipeline/conf/sync_temp_table.conf +++ b/logstash/pipeline/conf/sync_table.conf @@ -11,7 +11,7 @@ input { tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_temp_publication.sql" + statement_filepath => "/usr/share/logstash/sql/sync_publication.sql" type => "publication" } jdbc { @@ -25,7 +25,7 @@ input { tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_temp_dataset.sql" + statement_filepath => "/usr/share/logstash/sql/sync_dataset.sql" type => "dataset" } jdbc { @@ -39,7 +39,7 @@ input { tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_temp_experiment.sql" + statement_filepath => "/usr/share/logstash/sql/sync_experiment.sql" type => "experiment" } jdbc { @@ -53,7 +53,7 @@ input { tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_temp_ml_model.sql" + statement_filepath => "/usr/share/logstash/sql/sync_ml_model.sql" type => "ml_model" } jdbc { @@ -67,7 +67,7 @@ input { tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_temp_service.sql" + statement_filepath => "/usr/share/logstash/sql/sync_service.sql" type => "service" } jdbc { @@ -81,7 +81,7 @@ input { tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_temp_news.sql" + statement_filepath => "/usr/share/logstash/sql/sync_news.sql" type => "news" } jdbc { @@ -95,7 +95,7 @@ input { tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_temp_event.sql" + statement_filepath => "/usr/share/logstash/sql/sync_event.sql" type => "event" } jdbc { @@ -109,7 +109,7 @@ input { tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_temp_project.sql" + statement_filepath => "/usr/share/logstash/sql/sync_project.sql" type => "project" } jdbc { @@ -123,7 +123,7 @@ input { tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_temp_organisation.sql" + statement_filepath => "/usr/share/logstash/sql/sync_organisation.sql" type => "organisation" } } diff --git a/logstash/pipeline/pipelines.yml b/logstash/pipeline/pipelines.yml index 844d2b0c..c0007336 100644 --- a/logstash/pipeline/pipelines.yml +++ b/logstash/pipeline/pipelines.yml @@ -1,5 +1,5 @@ -- pipeline.id: init-temp-table-pipeline - path.config: "/usr/share/logstash/pipeline/init_temp_table.conf" +- pipeline.id: init-table-pipeline + path.config: "/usr/share/logstash/pipeline/init_table.conf" -- pipeline.id: sync-temp-table-pipeline - path.config: "/usr/share/logstash/pipeline/sync_temp_table.conf" +- pipeline.id: sync-table-pipeline + path.config: "/usr/share/logstash/pipeline/sync_table.conf" diff --git a/logstash/pipeline/sql/init_temp_dataset.sql b/logstash/pipeline/sql/init_dataset.sql similarity index 100% rename from logstash/pipeline/sql/init_temp_dataset.sql rename to logstash/pipeline/sql/init_dataset.sql diff --git a/logstash/pipeline/sql/init_temp_event.sql b/logstash/pipeline/sql/init_event.sql similarity index 100% rename from logstash/pipeline/sql/init_temp_event.sql rename to logstash/pipeline/sql/init_event.sql diff --git a/logstash/pipeline/sql/init_temp_experiment.sql b/logstash/pipeline/sql/init_experiment.sql similarity index 100% rename from logstash/pipeline/sql/init_temp_experiment.sql rename to logstash/pipeline/sql/init_experiment.sql diff --git a/logstash/pipeline/sql/init_temp_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql similarity index 100% rename from logstash/pipeline/sql/init_temp_ml_model.sql rename to logstash/pipeline/sql/init_ml_model.sql diff --git a/logstash/pipeline/sql/init_temp_news.sql b/logstash/pipeline/sql/init_news.sql similarity index 100% rename from logstash/pipeline/sql/init_temp_news.sql rename to logstash/pipeline/sql/init_news.sql diff --git a/logstash/pipeline/sql/init_temp_organisation.sql b/logstash/pipeline/sql/init_organisation.sql similarity index 100% rename from logstash/pipeline/sql/init_temp_organisation.sql rename to logstash/pipeline/sql/init_organisation.sql diff --git a/logstash/pipeline/sql/init_temp_project.sql b/logstash/pipeline/sql/init_project.sql similarity index 100% rename from logstash/pipeline/sql/init_temp_project.sql rename to logstash/pipeline/sql/init_project.sql diff --git a/logstash/pipeline/sql/init_temp_publication.sql b/logstash/pipeline/sql/init_publication.sql similarity index 100% rename from logstash/pipeline/sql/init_temp_publication.sql rename to logstash/pipeline/sql/init_publication.sql diff --git a/logstash/pipeline/sql/init_temp_service.sql b/logstash/pipeline/sql/init_service.sql similarity index 100% rename from logstash/pipeline/sql/init_temp_service.sql rename to logstash/pipeline/sql/init_service.sql diff --git a/logstash/pipeline/sql/sync_temp_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql similarity index 100% rename from logstash/pipeline/sql/sync_temp_dataset.sql rename to logstash/pipeline/sql/sync_dataset.sql diff --git a/logstash/pipeline/sql/sync_temp_event.sql b/logstash/pipeline/sql/sync_event.sql similarity index 100% rename from logstash/pipeline/sql/sync_temp_event.sql rename to logstash/pipeline/sql/sync_event.sql diff --git a/logstash/pipeline/sql/sync_temp_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql similarity index 100% rename from logstash/pipeline/sql/sync_temp_experiment.sql rename to logstash/pipeline/sql/sync_experiment.sql diff --git a/logstash/pipeline/sql/sync_temp_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql similarity index 100% rename from logstash/pipeline/sql/sync_temp_ml_model.sql rename to logstash/pipeline/sql/sync_ml_model.sql diff --git a/logstash/pipeline/sql/sync_temp_news.sql b/logstash/pipeline/sql/sync_news.sql similarity index 100% rename from logstash/pipeline/sql/sync_temp_news.sql rename to logstash/pipeline/sql/sync_news.sql diff --git a/logstash/pipeline/sql/sync_temp_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql similarity index 100% rename from logstash/pipeline/sql/sync_temp_organisation.sql rename to logstash/pipeline/sql/sync_organisation.sql diff --git a/logstash/pipeline/sql/sync_temp_project.sql b/logstash/pipeline/sql/sync_project.sql similarity index 100% rename from logstash/pipeline/sql/sync_temp_project.sql rename to logstash/pipeline/sql/sync_project.sql diff --git a/logstash/pipeline/sql/sync_temp_publication.sql b/logstash/pipeline/sql/sync_publication.sql similarity index 100% rename from logstash/pipeline/sql/sync_temp_publication.sql rename to logstash/pipeline/sql/sync_publication.sql diff --git a/logstash/pipeline/sql/sync_temp_service.sql b/logstash/pipeline/sql/sync_service.sql similarity index 100% rename from logstash/pipeline/sql/sync_temp_service.sql rename to logstash/pipeline/sql/sync_service.sql From 2146a3ddc9aad4066f73bfe5b37c739046e9576c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 26 Oct 2023 12:03:16 +0200 Subject: [PATCH 37/79] added logstash_config.py, just for having it there --- logstash/pipeline/logstash_config.py | 386 +++++++++++++++++++++++++++ 1 file changed, 386 insertions(+) create mode 100755 logstash/pipeline/logstash_config.py diff --git a/logstash/pipeline/logstash_config.py b/logstash/pipeline/logstash_config.py new file mode 100755 index 00000000..c75c9fcb --- /dev/null +++ b/logstash/pipeline/logstash_config.py @@ -0,0 +1,386 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import os + +# PATH MACROS +# ============================================================================= + +# Repository base path +REPO_PATH = os.path.join("..", "..") + +# Working path +WORKING_PATH = os.path.join(".") + +# MACROS FOR THE DOCUMENTS GENERATION FUNCTIONS +# ============================================================================= + +INIT_INPUT_BASE = """ jdbc {{ + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{0}" + jdbc_password => "{1}" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_{2}.sql" + type => "{2}" + }} +""" + +SYNC_INPUT_BASE = """ jdbc {{ + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{0}" + jdbc_password => "{1}" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_{2}.sql" + type => "{2}" + }} + jdbc {{ + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{0}" + jdbc_password => "{1}" + use_column_value => true + tracking_column => "datetime_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_{2}.sql" + type => "rm_{2}" + }} +""" + +FILTER = """filter {{ + if [type] == "organisation" {0}{{ + ruby {{ + code => ' + t = Time.at(event.get("date_founded").to_f) + event.set("date_founded", t.strftime("%Y-%m-%d")) + ' + }} + }} +}} +""" + +SYNC_FILTER_ADDON = """or [type] == "rm_organisation" """ + +INIT_OUTPUT_BASE = """ if [type] == "{2}" {{ + elasticsearch {{ + hosts => "elasticsearch:9200" + user => "{0}" + password => "{1}" + ecs_compatibility => disabled + index => "{2}" + document_id => "{2}_%{{identifier}}" + }} + }} +""" + +#TODO: TEST DELETE WITHOUT protocol => "transport" +SYNC_OUTPUT_BASE = """ if [type] == "{2}" {{ + elasticsearch {{ + hosts => "elasticsearch:9200" + user => "{0}" + password => "{1}" + ecs_compatibility => disabled + index => "{2}" + document_id => "{2}_%{{identifier}}" + }} + }} + if [type] == "rm_{2}" {{ + elasticsearch {{ + action => "delete" + hosts => "elasticsearch:9200" + user => "{0}" + password => "{1}" + ecs_compatibility => disabled + index => "{2}" + document_id => "{2}_%{{identifier}}" + }} + }} +""" + +SQL_BASE = """SELECT + -- Concept + {0}.identifier, + {0}.platform, + {0}.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + {0}.ai_resource_id AS `resource_identifier`, + {0}.name, + {0}.description, + {0}.same_as{1}{2}{3}{4}{5}{6}{7} +FROM aiod.{0} +INNER JOIN aiod.aiod_entry ON aiod.{0}.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier{8}{9} +ORDER BY aiod.{0}.identifier +""" + +AI_ASSET_BASE = """, + -- AIAsset + {0}.ai_asset_id AS `asset_identifier`, + {0}.date_published, + {0}.version, + license.name AS `license`""" + +ATTRIBUTES_BASE = """, + -- Attributes + """ + +TYPE_BASE = """, + -- Type + {0}_type.name AS `{0}_type`""" + +MODE_BASE = """, + -- Mode + {0}_mode.name AS `mode`""" + +STATUS_BASE = """, + -- Status + {0}_status.name AS `{0}_status`""" + +AGENT_BASE = """, + -- Agent + agent.type AS `{0}`""" + +ORGANISATION_BASE = """, + -- Organisation + organisation.name AS `{0}`""" + +LEFT_LICENSE = """ +LEFT JOIN aiod.license ON aiod.{0}.license_identifier=aiod.license.identifier""" + +LEFT_TYPE = """ +LEFT JOIN aiod.{0}_type ON aiod.{0}.type_identifier=aiod.{0}_type.identifier""" + +LEFT_MODE = """ +LEFT JOIN aiod.{0}_mode ON aiod.{0}.mode_identifier=aiod.{0}_mode.identifier""" + +LEFT_STATUS = """ +LEFT JOIN aiod.{0}_status ON aiod.{0}.status_identifier=aiod.{0}_status.identifier""" + +LEFT_AGENT = """ +LEFT JOIN aiod.agent ON aiod.{0}.{1}=aiod.agent.identifier""" + +LEFT_ORGANISATION = """ +LEFT JOIN aiod.organisation ON aiod.{0}.{1}=aiod.organisation.identifier""" + +INIT_CLAUSE = """ +WHERE aiod.{0}.datetime_deleted IS NULL""" + +SYNC_CLAUSE = """ +WHERE aiod.{0}.datetime_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" + +RM_CLAUSE = """ +WHERE aiod.{0}.datetime_deleted IS NOT NULL AND aiod.{0}.datetime_deleted > :sql_last_value""" + +# DOCUMENTS GENERATION FUNCTIONS +# ============================================================================= + +def generate_config_file(conf_path, db_user, db_pass, es_user, es_pass, + entities, sync=False): + + if not sync: # init file + file_path = os.path.join(conf_path, "init_table.conf") + input_base = INIT_INPUT_BASE + filter = FILTER.format("") + output_base = INIT_OUTPUT_BASE + else: # sync file + file_path = os.path.join(conf_path, "sync_table.conf") + input_base = SYNC_INPUT_BASE + filter = FILTER.format(SYNC_FILTER_ADDON) + output_base = SYNC_OUTPUT_BASE + + # Generate configuration file + with open(file_path, 'w') as f: + + # Input + f.write("input {\n") + for entity in entities: + f.write(input_base.format(db_user, db_pass, entity)) + f.write("}\n") + + # Filters + if "organisation" in entities: + f.write(filter) + + # Output + f.write("output {\n") + for entity in entities: + f.write(output_base.format(es_user, es_pass, entity)) + f.write("}\n") + +def generate_sql_file(sql_path, entity, sync=False, rm=False): + + # Generate output file path + if rm: # rm (regardless of the value of sync) + file_path = os.path.join(sql_path, f"rm_{entity}.sql") + elif sync: # sync and not rm + file_path = os.path.join(sql_path, f"sync_{entity}.sql") + else: # not sync and not rm + file_path = os.path.join(sql_path, f"init_{entity}.sql") + + # Write the output file + with open(file_path, 'w') as f: + + # Left joins + left_joins = "" + + # For ai_asset entities + ai_asset_attributes = "" + if entity in ai_asset_entities: + ai_asset_attributes = AI_ASSET_BASE.format(entity) + left_joins += LEFT_LICENSE.format(entity) + + # Attributes + entity_attributes = "" + if entity in attributes.keys(): + entity_attributes = (ATTRIBUTES_BASE + + f"{entity}.{attributes[entity][0]}") + for attribute in attributes[entity][1:]: + entity_attributes += f",\n {entity}.{attribute}" + + # For entities with a type relation + type_attribute = "" + if entity in type_entities: + type_attribute = TYPE_BASE.format(entity) + left_joins += LEFT_TYPE.format(entity) + + # For entities with a mode relation + mode_attribute = "" + if entity in mode_entities: + mode_attribute = MODE_BASE.format(entity) + left_joins += LEFT_MODE.format(entity) + + # For entities with a status relation + status_attribute = "" + if entity in status_entities: + status_attribute = STATUS_BASE.format(entity) + left_joins += LEFT_STATUS.format(entity) + + # For entities with an agent relation + agent_attribute = "" + if entity in agent_entities.keys(): + agent_attribute = AGENT_BASE.format(agent_entities[entity][1]) + left_joins += LEFT_AGENT.format(entity, + agent_entities[entity][0]) + + # For entities with an organisation relation + organisation_attribute = "" + if entity in organisation_entities.keys(): + organisation_attribute = ORGANISATION_BASE.format( + organisation_entities[entity][1]) + left_joins += LEFT_ORGANISATION.format(entity, + organisation_entities[entity][0]) + + # Where clause + if rm: # rm (regardless of the value of sync) + where_clause = RM_CLAUSE.format(entity) + elif sync: # sync and not rm + where_clause = SYNC_CLAUSE.format(entity) + else: # not sync and not rm + where_clause = INIT_CLAUSE.format(entity) + + f.write(SQL_BASE.format(entity, ai_asset_attributes, + entity_attributes, type_attribute, + mode_attribute, status_attribute, + agent_attribute, organisation_attribute, + left_joins, where_clause)) + +# MAIN FUNCTION +# ============================================================================= + +def main(base_path, db_user, db_pass, es_user, es_pass, entities, + ai_asset_entities, attributes, type_entities, mode_entities, + status_entities, agent_entities, organisation_entities): + + # Make configuration dirs + conf_path = os.path.join(base_path, "conf") + os.makedirs(conf_path, exist_ok=True) + sql_path = os.path.join(base_path, "sql") + os.makedirs(sql_path, exist_ok=True) + + # Configuration init file + generate_config_file(conf_path, db_user, db_pass, es_user, es_pass, + entities, sync=False) + + # Configuration sync file + generate_config_file(conf_path, db_user, db_pass, es_user, es_pass, + entities, sync=True) + + # Generate SQL init, sync and rm files + for entity in entities: + generate_sql_file(sql_path, entity, sync=False, rm=False) # init + generate_sql_file(sql_path, entity, sync=True, rm=False) # sync + generate_sql_file(sql_path, entity, rm=True) # rm + +if __name__ == "__main__": + + # PATH MACROS + # ------------------------------------------------------------------------- + + # Repository base path + repo_path = REPO_PATH + + # Configuration base path + base_path = WORKING_PATH + + # ------------------------------------------------------------------------- + + # Users and passwords + db_user = "root" + db_pass = "" + es_user = "" + es_pass = "" + with open(os.path.join(repo_path, ".env"), "r") as f: + for line in f: + if "MYSQL_ROOT_PASSWORD" in line: + db_pass = line.split("=")[1][:-1] + if "ES_USER" in line: + es_user = line.split("=")[1][:-1] + if "ES_PASSWORD" in line: + es_pass = line.split("=")[1][:-1] + + # Entities and attributes + entities = ["dataset", "event", "experiment", "ml_model", "news", + "organisation", "project", "publication", "service"] + ai_asset_entities = ["dataset", "experiment", "ml_model", "publication"] + attributes = { + "dataset": ["issn", "measurement_technique", "temporal_coverage"], + "event": ["start_date", "end_date", "schedule", "registration_link", + "organiser_identifier"], + "experiment": ["experimental_workflow", "execution_settings", + "reproducibility_explanation"], + "news": ["headline", "alternative_headline"], + "organisation": ["date_founded", "legal_name"], + "project": ["start_date", "end_date", "total_cost_euro", + "coordinator_identifier"], + "publication": ["permanent_identifier", "isbn", "issn", + "knowledge_asset_id AS `knowledge_asset_identifier`"], + "service": ["slogan", "terms_of_service"] + } + type_entities = ["ml_model", "organisation", "publication"] + mode_entities = ["event"] + status_entities = ["event"] + agent_entities = { + "event": ("organiser_identifier", "organiser_type"), + "organisation": ("agent_id", "agent") + } + organisation_entities = { + "project": ("coordinator_identifier", "coordinator_name") + } + + # Main function + main(base_path, db_user, db_pass, es_user, es_pass, entities, + ai_asset_entities, attributes, type_entities, mode_entities, + status_entities, agent_entities, organisation_entities) From f964457153096a46c9a0b192f073448ea6259d57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 26 Oct 2023 13:00:39 +0200 Subject: [PATCH 38/79] Pagination changed to actual pages --- src/routers/search_router.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/routers/search_router.py b/src/routers/search_router.py index e1192546..f8050902 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -59,6 +59,7 @@ def create(self, engine: Engine, url_prefix: str) -> APIRouter: router = APIRouter() read_class = resource_read(self.resource_class) # type: ignore + # TODO: check parameters correctness @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", tags=["search"]) def search( @@ -66,7 +67,8 @@ def search( search_query: str = "", search_fields: Annotated[list[str] | None, Query()] = None, limit: int = 10, - offset: Annotated[list[str] | None, Query()] = None + page: int = 1 +# offset: Annotated[list[str] | None, Query()] = None ) -> SearchResult[read_class]: # type: ignore f""" Search for {self.resource_name_plural}. @@ -135,9 +137,12 @@ def search( # ----------------------------------------------------------------- +# result = self.client.search(index=self.es_index, query=query, +# size=limit, sort=SORT, +# search_after=offset) + from_ = limit*(page - 1) result = self.client.search(index=self.es_index, query=query, - size=limit, sort=SORT, - search_after=offset) + from_=from_, size=limit, sort=SORT) total_hits = result["hits"]["total"]["value"] resources: list[read_class] = [ # type: ignore From 0fe2fe1a414b59255a51971e4e2b51a879d5b528 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 26 Oct 2023 13:08:38 +0200 Subject: [PATCH 39/79] Pagination changed to actual pages --- src/routers/search_router.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/routers/search_router.py b/src/routers/search_router.py index f8050902..93e2cbc3 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -22,7 +22,8 @@ class SearchResult(BaseModel, Generic[RESOURCE]): total_hits: int resources: list[RESOURCE] next_offset: list | None - + current_page: int + page_size: int class SearchRouter(Generic[RESOURCE], abc.ABC): """ @@ -155,8 +156,10 @@ def search( ) return SearchResult[read_class]( # type: ignore total_hits=total_hits, + resources=resources, next_offset=next_offset, - resources=resources + current_page=page, + page_size=limit ) return router From 464c270b6a208fb1650a4f51e8f58f1725f3d687 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Tue, 31 Oct 2023 12:41:05 +0100 Subject: [PATCH 40/79] Application areas added to elasticsearch resuts --- logstash/pipeline/conf/init_table.conf | 8 +++++ logstash/pipeline/conf/sync_table.conf | 8 +++++ logstash/pipeline/logstash_config.py | 35 ++++++++++++++++----- logstash/pipeline/sql/init_dataset.sql | 7 ++++- logstash/pipeline/sql/init_event.sql | 7 ++++- logstash/pipeline/sql/init_experiment.sql | 7 ++++- logstash/pipeline/sql/init_ml_model.sql | 7 ++++- logstash/pipeline/sql/init_news.sql | 7 ++++- logstash/pipeline/sql/init_organisation.sql | 7 ++++- logstash/pipeline/sql/init_project.sql | 7 ++++- logstash/pipeline/sql/init_publication.sql | 7 ++++- logstash/pipeline/sql/init_service.sql | 7 ++++- logstash/pipeline/sql/sync_dataset.sql | 7 ++++- logstash/pipeline/sql/sync_event.sql | 7 ++++- logstash/pipeline/sql/sync_experiment.sql | 7 ++++- logstash/pipeline/sql/sync_ml_model.sql | 7 ++++- logstash/pipeline/sql/sync_news.sql | 7 ++++- logstash/pipeline/sql/sync_organisation.sql | 7 ++++- logstash/pipeline/sql/sync_project.sql | 7 ++++- logstash/pipeline/sql/sync_publication.sql | 7 ++++- logstash/pipeline/sql/sync_service.sql | 7 ++++- 21 files changed, 151 insertions(+), 26 deletions(-) diff --git a/logstash/pipeline/conf/init_table.conf b/logstash/pipeline/conf/init_table.conf index 2560e44d..2fe9fb98 100644 --- a/logstash/pipeline/conf/init_table.conf +++ b/logstash/pipeline/conf/init_table.conf @@ -116,6 +116,14 @@ input { # } #} filter { + if ![application_area] { + mutate { + replace => {"application_area" => ""} + } + } + mutate { + split => {"application_area" => ","} + } if [type] == "organisation" { ruby { code => ' diff --git a/logstash/pipeline/conf/sync_table.conf b/logstash/pipeline/conf/sync_table.conf index ff389e8e..4be1a0e9 100644 --- a/logstash/pipeline/conf/sync_table.conf +++ b/logstash/pipeline/conf/sync_table.conf @@ -134,6 +134,14 @@ input { # } #} filter { + if ![application_area] { + mutate { + replace => {"application_area" => ""} + } + } + mutate { + split => {"application_area" => ","} + } if [type] == "organisation" { ruby { code => ' diff --git a/logstash/pipeline/logstash_config.py b/logstash/pipeline/logstash_config.py index c75c9fcb..af95e5d6 100755 --- a/logstash/pipeline/logstash_config.py +++ b/logstash/pipeline/logstash_config.py @@ -56,7 +56,20 @@ }} """ -FILTER = """filter {{ +FILTER_BASE = """filter {{ + if ![application_area] {{ + mutate {{ + replace => {{"application_area" => ""}} + }} + }} + mutate {{ + # remove_field => ["@version", "@timestamp"] + split => {{"application_area" => ","}} + }}{0} +}} +""" + +DATE_FILTER = """ if [type] == "organisation" {0}{{ ruby {{ code => ' @@ -65,10 +78,9 @@ ' }} }} -}} """ -SYNC_FILTER_ADDON = """or [type] == "rm_organisation" """ +SYNC_DATE_FILTER_ADDON = """or [type] == "rm_organisation" """ INIT_OUTPUT_BASE = """ if [type] == "{2}" {{ elasticsearch {{ @@ -119,10 +131,15 @@ {0}.ai_resource_id AS `resource_identifier`, {0}.name, {0}.description, - {0}.same_as{1}{2}{3}{4}{5}{6}{7} + {0}.same_as{1}{2}{3}{4}{5}{6}{7}, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.{0} INNER JOIN aiod.aiod_entry ON aiod.{0}.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier{8}{9} +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier{8} +LEFT JOIN aiod.{0}_application_area_link ON aiod.{0}_application_area_link.from_identifier=aiod.{0}.identifier +LEFT JOIN aiod.application_area ON aiod.{0}_application_area_link.linked_identifier=aiod.application_area.identifier{9} +GROUP BY aiod.{0}.identifier ORDER BY aiod.{0}.identifier """ @@ -193,12 +210,12 @@ def generate_config_file(conf_path, db_user, db_pass, es_user, es_pass, if not sync: # init file file_path = os.path.join(conf_path, "init_table.conf") input_base = INIT_INPUT_BASE - filter = FILTER.format("") + date_filter = DATE_FILTER.format("") output_base = INIT_OUTPUT_BASE else: # sync file file_path = os.path.join(conf_path, "sync_table.conf") input_base = SYNC_INPUT_BASE - filter = FILTER.format(SYNC_FILTER_ADDON) + date_filter = DATE_FILTER.format(SYNC_DATE_FILTER_ADDON) output_base = SYNC_OUTPUT_BASE # Generate configuration file @@ -212,7 +229,9 @@ def generate_config_file(conf_path, db_user, db_pass, es_user, es_pass, # Filters if "organisation" in entities: - f.write(filter) + f.write(FILTER_BASE.format(date_filter)) + else: + f.write(FILTER_BASE.format("")) # Output f.write("output {\n") diff --git a/logstash/pipeline/sql/init_dataset.sql b/logstash/pipeline/sql/init_dataset.sql index e39acd5e..5ebdbcc8 100644 --- a/logstash/pipeline/sql/init_dataset.sql +++ b/logstash/pipeline/sql/init_dataset.sql @@ -20,9 +20,14 @@ SELECT -- Dataset dataset.issn, dataset.measurement_technique, - dataset.temporal_coverage + dataset.temporal_coverage, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.dataset INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.dataset.license_identifier=aiod.license.identifier +LEFT JOIN aiod.dataset_application_area_link ON aiod.dataset_application_area_link.from_identifier=aiod.dataset.identifier +LEFT JOIN aiod.application_area ON aiod.dataset_application_area_link.linked_identifier=aiod.application_area.identifier +GROUP BY aiod.dataset.identifier ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/init_event.sql b/logstash/pipeline/sql/init_event.sql index ecd2da5e..324a3264 100644 --- a/logstash/pipeline/sql/init_event.sql +++ b/logstash/pipeline/sql/init_event.sql @@ -18,10 +18,15 @@ SELECT event.end_date, event.schedule, event.registration_link, - event.organiser_identifier + event.organiser_identifier, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.event INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier LEFT JOIN aiod.event_mode ON aiod.event.mode_identifier=aiod.event_mode.identifier +LEFT JOIN aiod.event_application_area_link ON aiod.event_application_area_link.from_identifier=aiod.event.identifier +LEFT JOIN aiod.application_area ON aiod.event_application_area_link.linked_identifier=aiod.application_area.identifier +GROUP BY aiod.event.identifier ORDER BY aiod.event.identifier diff --git a/logstash/pipeline/sql/init_experiment.sql b/logstash/pipeline/sql/init_experiment.sql index 6811d178..25d6afb6 100644 --- a/logstash/pipeline/sql/init_experiment.sql +++ b/logstash/pipeline/sql/init_experiment.sql @@ -20,9 +20,14 @@ SELECT -- Experiment experiment.experimental_workflow, experiment.execution_settings, - experiment.reproducibility_explanation + experiment.reproducibility_explanation, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.experiment INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.experiment.license_identifier=aiod.license.identifier +LEFT JOIN aiod.experiment_application_area_link ON aiod.experiment_application_area_link.from_identifier=aiod.experiment.identifier +LEFT JOIN aiod.application_area ON aiod.experiment_application_area_link.linked_identifier=aiod.application_area.identifier +GROUP BY aiod.experiment.identifier ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/init_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql index de8ce17a..598378b4 100644 --- a/logstash/pipeline/sql/init_ml_model.sql +++ b/logstash/pipeline/sql/init_ml_model.sql @@ -18,10 +18,15 @@ SELECT ml_model.version, license.name AS `license`, -- MLModel - ml_model_type.name AS `ml_model_type` + ml_model_type.name AS `ml_model_type`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifier LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier +LEFT JOIN aiod.ml_model_application_area_link ON aiod.ml_model_application_area_link.from_identifier=aiod.ml_model.identifier +LEFT JOIN aiod.application_area ON aiod.ml_model_application_area_link.linked_identifier=aiod.application_area.identifier +GROUP BY aiod.ml_model.identifier ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql index b315f19d..32d30698 100644 --- a/logstash/pipeline/sql/init_news.sql +++ b/logstash/pipeline/sql/init_news.sql @@ -13,8 +13,13 @@ SELECT news.description, news.same_as, news.headline, - news.alternative_headline + news.alternative_headline, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.news INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.news_application_area_link ON aiod.news_application_area_link.from_identifier=aiod.news.identifier +LEFT JOIN aiod.application_area ON aiod.news_application_area_link.linked_identifier=aiod.application_area.identifier +GROUP BY aiod.news.identifier ORDER BY aiod.news.identifier diff --git a/logstash/pipeline/sql/init_organisation.sql b/logstash/pipeline/sql/init_organisation.sql index 58081bd6..36d2f798 100644 --- a/logstash/pipeline/sql/init_organisation.sql +++ b/logstash/pipeline/sql/init_organisation.sql @@ -16,10 +16,15 @@ SELECT organisation.date_founded, organisation.legal_name, -- Organisation - organisation_type.name AS `organisation_type` + organisation_type.name AS `organisation_type`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.organisation INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier INNER JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier LEFT JOIN aiod.organisation_type ON aiod.organisation.type_identifier=aiod.organisation_type.identifier +LEFT JOIN aiod.organisation_application_area_link ON aiod.organisation_application_area_link.from_identifier=aiod.organisation.identifier +LEFT JOIN aiod.application_area ON aiod.organisation_application_area_link.linked_identifier=aiod.application_area.identifier +GROUP BY aiod.organisation.identifier ORDER BY aiod.organisation.identifier diff --git a/logstash/pipeline/sql/init_project.sql b/logstash/pipeline/sql/init_project.sql index 3ef64a64..59f81839 100644 --- a/logstash/pipeline/sql/init_project.sql +++ b/logstash/pipeline/sql/init_project.sql @@ -16,9 +16,14 @@ SELECT project.end_date, project.total_cost_euro, project.coordinator_identifier, - organisation.name AS coordinator_name + organisation.name AS coordinator_name, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.project INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier INNER JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier +LEFT JOIN aiod.project_application_area_link ON aiod.project_application_area_link.from_identifier=aiod.project.identifier +LEFT JOIN aiod.application_area ON aiod.project_application_area_link.linked_identifier=aiod.application_area.identifier +GROUP BY aiod.project.identifier ORDER BY aiod.project.identifier diff --git a/logstash/pipeline/sql/init_publication.sql b/logstash/pipeline/sql/init_publication.sql index 4119115b..41f3c2b5 100644 --- a/logstash/pipeline/sql/init_publication.sql +++ b/logstash/pipeline/sql/init_publication.sql @@ -23,10 +23,15 @@ SELECT publication.permanent_identifier, publication.isbn, publication.issn, - publication_type.name AS `publication_type` + publication_type.name AS `publication_type`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.publication INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.publication.license_identifier=aiod.license.identifier LEFT JOIN aiod.publication_type ON aiod.publication.type_identifier=aiod.publication_type.identifier +LEFT JOIN aiod.publication_application_area_link ON aiod.publication_application_area_link.from_identifier=aiod.publication.identifier +LEFT JOIN aiod.application_area ON aiod.publication_application_area_link.linked_identifier=aiod.application_area.identifier +GROUP BY aiod.publication.identifier ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/init_service.sql b/logstash/pipeline/sql/init_service.sql index 8c3a7d16..d9b30863 100644 --- a/logstash/pipeline/sql/init_service.sql +++ b/logstash/pipeline/sql/init_service.sql @@ -14,8 +14,13 @@ SELECT service.same_as, -- Service service.slogan, - service.terms_of_service + service.terms_of_service, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.service INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.service_application_area_link ON aiod.service_application_area_link.from_identifier=aiod.service.identifier +LEFT JOIN aiod.application_area ON aiod.service_application_area_link.linked_identifier=aiod.application_area.identifier +GROUP BY aiod.service.identifier ORDER BY aiod.service.identifier diff --git a/logstash/pipeline/sql/sync_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql index 38b9d024..7d104a3d 100644 --- a/logstash/pipeline/sql/sync_dataset.sql +++ b/logstash/pipeline/sql/sync_dataset.sql @@ -20,10 +20,15 @@ SELECT -- Dataset dataset.issn, dataset.measurement_technique, - dataset.temporal_coverage + dataset.temporal_coverage, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.dataset INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.dataset.license_identifier=aiod.license.identifier +LEFT JOIN aiod.dataset_application_area_link ON aiod.dataset_application_area_link.from_identifier=aiod.dataset.identifier +LEFT JOIN aiod.application_area ON aiod.dataset_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value +GROUP BY aiod.dataset.identifier ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/sync_event.sql b/logstash/pipeline/sql/sync_event.sql index 53419164..b840e893 100644 --- a/logstash/pipeline/sql/sync_event.sql +++ b/logstash/pipeline/sql/sync_event.sql @@ -18,11 +18,16 @@ SELECT event.end_date, event.schedule, event.registration_link, - event.organiser_identifier + event.organiser_identifier, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.event INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier LEFT JOIN aiod.event_mode ON aiod.event.mode_identifier=aiod.event_mode.identifier +LEFT JOIN aiod.event_application_area_link ON aiod.event_application_area_link.from_identifier=aiod.event.identifier +LEFT JOIN aiod.application_area ON aiod.event_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value +GROUP BY aiod.event.identifier ORDER BY aiod.event.identifier diff --git a/logstash/pipeline/sql/sync_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql index f100fc39..a2837263 100644 --- a/logstash/pipeline/sql/sync_experiment.sql +++ b/logstash/pipeline/sql/sync_experiment.sql @@ -20,10 +20,15 @@ SELECT -- Experiment experiment.experimental_workflow, experiment.execution_settings, - experiment.reproducibility_explanation + experiment.reproducibility_explanation, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.experiment INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.experiment.license_identifier=aiod.license.identifier +LEFT JOIN aiod.experiment_application_area_link ON aiod.experiment_application_area_link.from_identifier=aiod.experiment.identifier +LEFT JOIN aiod.application_area ON aiod.experiment_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value +GROUP BY aiod.experiment.identifier ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/sync_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql index 421b6403..bbbf00c6 100644 --- a/logstash/pipeline/sql/sync_ml_model.sql +++ b/logstash/pipeline/sql/sync_ml_model.sql @@ -18,11 +18,16 @@ SELECT ml_model.version, license.name AS `license`, -- MLModel - ml_model_type.name AS `ml_model_type` + ml_model_type.name AS `ml_model_type`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifier LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier +LEFT JOIN aiod.ml_model_application_area_link ON aiod.ml_model_application_area_link.from_identifier=aiod.ml_model.identifier +LEFT JOIN aiod.application_area ON aiod.ml_model_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value +GROUP BY aiod.ml_model.identifier ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql index dbb0ee8f..3db5b037 100644 --- a/logstash/pipeline/sql/sync_news.sql +++ b/logstash/pipeline/sql/sync_news.sql @@ -13,9 +13,14 @@ SELECT news.description, news.same_as, news.headline, - news.alternative_headline + news.alternative_headline, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.news INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.news_application_area_link ON aiod.news_application_area_link.from_identifier=aiod.news.identifier +LEFT JOIN aiod.application_area ON aiod.news_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value +GROUP BY aiod.news.identifier ORDER BY aiod.news.identifier diff --git a/logstash/pipeline/sql/sync_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql index 7d94c68f..5d26677d 100644 --- a/logstash/pipeline/sql/sync_organisation.sql +++ b/logstash/pipeline/sql/sync_organisation.sql @@ -16,11 +16,16 @@ SELECT organisation.date_founded, organisation.legal_name, -- Organisation - organisation_type.name AS `organisation_type` + organisation_type.name AS `organisation_type`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.organisation INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier INNER JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier LEFT JOIN aiod.organisation_type ON aiod.organisation.type_identifier=aiod.organisation_type.identifier +LEFT JOIN aiod.organisation_application_area_link ON aiod.organisation_application_area_link.from_identifier=aiod.organisation.identifier +LEFT JOIN aiod.application_area ON aiod.organisation_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value +GROUP BY aiod.organisation.identifier ORDER BY aiod.organisation.identifier diff --git a/logstash/pipeline/sql/sync_project.sql b/logstash/pipeline/sql/sync_project.sql index faa749a9..b6e02c8f 100644 --- a/logstash/pipeline/sql/sync_project.sql +++ b/logstash/pipeline/sql/sync_project.sql @@ -16,10 +16,15 @@ SELECT project.end_date, project.total_cost_euro, project.coordinator_identifier, - organisation.name AS coordinator_name + organisation.name AS coordinator_name, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.project INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier INNER JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier +LEFT JOIN aiod.project_application_area_link ON aiod.project_application_area_link.from_identifier=aiod.project.identifier +LEFT JOIN aiod.application_area ON aiod.project_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value +GROUP BY aiod.project.identifier ORDER BY aiod.project.identifier diff --git a/logstash/pipeline/sql/sync_publication.sql b/logstash/pipeline/sql/sync_publication.sql index 24f199d9..699f747d 100644 --- a/logstash/pipeline/sql/sync_publication.sql +++ b/logstash/pipeline/sql/sync_publication.sql @@ -23,11 +23,16 @@ SELECT publication.permanent_identifier, publication.isbn, publication.issn, - publication_type.name AS `publication_type` + publication_type.name AS `publication_type`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.publication INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.publication.license_identifier=aiod.license.identifier LEFT JOIN aiod.publication_type ON aiod.publication.type_identifier=aiod.publication_type.identifier +LEFT JOIN aiod.publication_application_area_link ON aiod.publication_application_area_link.from_identifier=aiod.publication.identifier +LEFT JOIN aiod.application_area ON aiod.publication_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value +GROUP BY aiod.publication.identifier ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/sync_service.sql b/logstash/pipeline/sql/sync_service.sql index f06aa76f..389b541c 100644 --- a/logstash/pipeline/sql/sync_service.sql +++ b/logstash/pipeline/sql/sync_service.sql @@ -14,9 +14,14 @@ SELECT service.same_as, -- Service service.slogan, - service.terms_of_service + service.terms_of_service, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.service INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.service_application_area_link ON aiod.service_application_area_link.from_identifier=aiod.service.identifier +LEFT JOIN aiod.application_area ON aiod.service_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.aiod_entry.date_modified > :sql_last_value +GROUP BY aiod.service.identifier ORDER BY aiod.service.identifier From 85ba9259633b2f085c09b74d6f6fa3fb17171779 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 8 Nov 2023 12:15:20 +0100 Subject: [PATCH 41/79] First version with deletion --- logstash/pipeline/conf/init_table.conf | 89 +++--- logstash/pipeline/conf/sync_table.conf | 285 +++++++++++++++--- logstash/pipeline/logstash_config.py | 8 +- logstash/pipeline/sql/init_dataset.sql | 3 +- logstash/pipeline/sql/init_event.sql | 17 +- logstash/pipeline/sql/init_experiment.sql | 5 +- logstash/pipeline/sql/init_ml_model.sql | 5 +- logstash/pipeline/sql/init_news.sql | 2 + logstash/pipeline/sql/init_organisation.sql | 9 +- logstash/pipeline/sql/init_project.sql | 7 +- logstash/pipeline/sql/init_publication.sql | 7 +- logstash/pipeline/sql/init_service.sql | 3 +- logstash/pipeline/sql/rm_dataset.sql | 34 +++ logstash/pipeline/sql/rm_event.sql | 39 +++ logstash/pipeline/sql/rm_experiment.sql | 34 +++ logstash/pipeline/sql/rm_ml_model.sql | 33 ++ logstash/pipeline/sql/rm_news.sql | 27 ++ logstash/pipeline/sql/rm_organisation.sql | 33 ++ logstash/pipeline/sql/rm_project.sql | 32 ++ logstash/pipeline/sql/rm_publication.sql | 38 +++ logstash/pipeline/sql/rm_service.sql | 27 ++ logstash/pipeline/sql/sync_dataset.sql | 4 +- logstash/pipeline/sql/sync_event.sql | 18 +- logstash/pipeline/sql/sync_experiment.sql | 4 +- logstash/pipeline/sql/sync_ml_model.sql | 4 +- logstash/pipeline/sql/sync_news.sql | 3 +- logstash/pipeline/sql/sync_organisation.sql | 10 +- logstash/pipeline/sql/sync_project.sql | 8 +- logstash/pipeline/sql/sync_publication.sql | 8 +- logstash/pipeline/sql/sync_service.sql | 4 +- .../example/resources/resource/datasets.json | 44 ++- .../resources/resource/experiments.json | 99 +----- .../example/resources/resource/ml_models.json | 95 +----- 33 files changed, 678 insertions(+), 360 deletions(-) create mode 100644 logstash/pipeline/sql/rm_dataset.sql create mode 100644 logstash/pipeline/sql/rm_event.sql create mode 100644 logstash/pipeline/sql/rm_experiment.sql create mode 100644 logstash/pipeline/sql/rm_ml_model.sql create mode 100644 logstash/pipeline/sql/rm_news.sql create mode 100644 logstash/pipeline/sql/rm_organisation.sql create mode 100644 logstash/pipeline/sql/rm_project.sql create mode 100644 logstash/pipeline/sql/rm_publication.sql create mode 100644 logstash/pipeline/sql/rm_service.sql diff --git a/logstash/pipeline/conf/init_table.conf b/logstash/pipeline/conf/init_table.conf index 2fe9fb98..f14ca5cd 100644 --- a/logstash/pipeline/conf/init_table.conf +++ b/logstash/pipeline/conf/init_table.conf @@ -1,16 +1,14 @@ input { - # https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_publication.sql" - type => "publication" + statement_filepath => "/usr/share/logstash/sql/init_dataset.sql" + type => "dataset" } jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" @@ -18,11 +16,10 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_dataset.sql" - type => "dataset" + statement_filepath => "/usr/share/logstash/sql/init_event.sql" + type => "event" } jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" @@ -30,7 +27,6 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" clean_run => true record_last_run => false statement_filepath => "/usr/share/logstash/sql/init_experiment.sql" @@ -42,7 +38,6 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" clean_run => true record_last_run => false statement_filepath => "/usr/share/logstash/sql/init_ml_model.sql" @@ -54,11 +49,10 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_service.sql" - type => "service" + statement_filepath => "/usr/share/logstash/sql/init_news.sql" + type => "news" } jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" @@ -66,11 +60,10 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_news.sql" - type => "news" + statement_filepath => "/usr/share/logstash/sql/init_organisation.sql" + type => "organisation" } jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" @@ -78,11 +71,10 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_event.sql" - type => "event" + statement_filepath => "/usr/share/logstash/sql/init_project.sql" + type => "project" } jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" @@ -90,11 +82,10 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_project.sql" - type => "project" + statement_filepath => "/usr/share/logstash/sql/init_publication.sql" + type => "publication" } jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" @@ -102,19 +93,12 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" clean_run => true record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_organisation.sql" - type => "organisation" + statement_filepath => "/usr/share/logstash/sql/init_service.sql" + type => "service" } } -# https://www.elastic.co/guide/en/logstash/current/filter-plugins.html+ -#filter { -# mutate { -# remove_field => ["@version", "@timestamp"] -# } -#} filter { if ![application_area] { mutate { @@ -122,6 +106,7 @@ filter { } } mutate { + # remove_field => ["@version", "@timestamp"] split => {"application_area" => ","} } if [type] == "organisation" { @@ -132,27 +117,27 @@ filter { ' } } + } output { - # https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html - if [type] == "publication" { + if [type] == "dataset" { elasticsearch { hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" + index => "dataset" + document_id => "dataset_%{identifier}" } } - if [type] == "dataset" { + if [type] == "event" { elasticsearch { hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "dataset" - document_id => "dataset_%{identifier}" + index => "event" + document_id => "event_%{identifier}" } } if [type] == "experiment" { @@ -175,54 +160,54 @@ output { document_id => "ml_model_%{identifier}" } } - if [type] == "service" { + if [type] == "news" { elasticsearch { hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" + index => "news" + document_id => "news_%{identifier}" } } - if [type] == "news" { + if [type] == "organisation" { elasticsearch { hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "news" - document_id => "news_%{identifier}" + index => "organisation" + document_id => "organisation_%{identifier}" } } - if [type] == "event" { + if [type] == "project" { elasticsearch { hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" + index => "project" + document_id => "project_%{identifier}" } } - if [type] == "project" { + if [type] == "publication" { elasticsearch { hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "project" - document_id => "project_%{identifier}" + index => "publication" + document_id => "publication_%{identifier}" } } - if [type] == "organisation" { + if [type] == "service" { elasticsearch { hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" + index => "service" + document_id => "service_%{identifier}" } } } diff --git a/logstash/pipeline/conf/sync_table.conf b/logstash/pipeline/conf/sync_table.conf index 4be1a0e9..267b67f9 100644 --- a/logstash/pipeline/conf/sync_table.conf +++ b/logstash/pipeline/conf/sync_table.conf @@ -1,18 +1,29 @@ input { - # https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" use_column_value => true tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_publication.sql" - type => "publication" + statement_filepath => "/usr/share/logstash/sql/sync_dataset.sql" + type => "dataset" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_dataset.sql" + type => "rm_dataset" } jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" @@ -20,13 +31,25 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" use_column_value => true tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_dataset.sql" - type => "dataset" + statement_filepath => "/usr/share/logstash/sql/sync_event.sql" + type => "event" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_event.sql" + type => "rm_event" } jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" @@ -34,7 +57,6 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" use_column_value => true tracking_column => "date_modified" tracking_column_type => "timestamp" @@ -48,7 +70,19 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_experiment.sql" + type => "rm_experiment" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" use_column_value => true tracking_column => "date_modified" tracking_column_type => "timestamp" @@ -62,13 +96,12 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" use_column_value => true - tracking_column => "date_modified" + tracking_column => "date_deleted" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_service.sql" - type => "service" + statement_filepath => "/usr/share/logstash/sql/rm_ml_model.sql" + type => "rm_ml_model" } jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" @@ -76,7 +109,6 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" use_column_value => true tracking_column => "date_modified" tracking_column_type => "timestamp" @@ -90,13 +122,38 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_news.sql" + type => "rm_news" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" use_column_value => true tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_event.sql" - type => "event" + statement_filepath => "/usr/share/logstash/sql/sync_organisation.sql" + type => "organisation" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_organisation.sql" + type => "rm_organisation" } jdbc { jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" @@ -104,7 +161,6 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" use_column_value => true tracking_column => "date_modified" tracking_column_type => "timestamp" @@ -118,21 +174,66 @@ input { jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" jdbc_password => "ok" - #sql_log_level => "debug" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_project.sql" + type => "rm_project" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" use_column_value => true tracking_column => "date_modified" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_organisation.sql" - type => "organisation" + statement_filepath => "/usr/share/logstash/sql/sync_publication.sql" + type => "publication" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_publication.sql" + type => "rm_publication" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_service.sql" + type => "service" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_service.sql" + type => "rm_service" } } -# https://www.elastic.co/guide/en/logstash/current/filter-plugins.html+ -#filter { -# mutate { -# remove_field => ["@version", "@timestamp", "ts"] -# } -#} filter { if ![application_area] { mutate { @@ -140,9 +241,10 @@ filter { } } mutate { + # remove_field => ["@version", "@timestamp"] split => {"application_area" => ","} } - if [type] == "organisation" { + if [type] == "organisation" or [type] == "rm_organisation" { ruby { code => ' t = Time.at(event.get("date_founded").to_f) @@ -150,21 +252,22 @@ filter { ' } } + } output { - # https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html - if [type] == "publication" { + if [type] == "dataset" { elasticsearch { hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" + index => "dataset" + document_id => "dataset_%{identifier}" } } - if [type] == "dataset" { + if [type] == "rm_dataset" { elasticsearch { + action => "delete" hosts => "elasticsearch:9200" user => "elastic" password => "changeme" @@ -173,6 +276,27 @@ output { document_id => "dataset_%{identifier}" } } + if [type] == "event" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "event" + document_id => "event_%{identifier}" + } + } + if [type] == "rm_event" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "event" + document_id => "event_%{identifier}" + } + } if [type] == "experiment" { elasticsearch { hosts => "elasticsearch:9200" @@ -183,6 +307,17 @@ output { document_id => "experiment_%{identifier}" } } + if [type] == "rm_experiment" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "experiment" + document_id => "experiment_%{identifier}" + } + } if [type] == "ml_model" { elasticsearch { hosts => "elasticsearch:9200" @@ -193,14 +328,15 @@ output { document_id => "ml_model_%{identifier}" } } - if [type] == "service" { + if [type] == "rm_ml_model" { elasticsearch { + action => "delete" hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" + index => "ml_model" + document_id => "ml_model_%{identifier}" } } if [type] == "news" { @@ -213,14 +349,36 @@ output { document_id => "news_%{identifier}" } } - if [type] == "event" { + if [type] == "rm_news" { elasticsearch { + action => "delete" hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" + index => "news" + document_id => "news_%{identifier}" + } + } + if [type] == "organisation" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "organisation" + document_id => "organisation_%{identifier}" + } + } + if [type] == "rm_organisation" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "organisation" + document_id => "organisation_%{identifier}" } } if [type] == "project" { @@ -233,14 +391,57 @@ output { document_id => "project_%{identifier}" } } - if [type] == "organisation" { + if [type] == "rm_project" { elasticsearch { + action => "delete" hosts => "elasticsearch:9200" user => "elastic" password => "changeme" ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" + index => "project" + document_id => "project_%{identifier}" + } + } + if [type] == "publication" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "publication" + document_id => "publication_%{identifier}" + } + } + if [type] == "rm_publication" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "publication" + document_id => "publication_%{identifier}" + } + } + if [type] == "service" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "service" + document_id => "service_%{identifier}" + } + } + if [type] == "rm_service" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "service" + document_id => "service_%{identifier}" } } } diff --git a/logstash/pipeline/logstash_config.py b/logstash/pipeline/logstash_config.py index af95e5d6..f0865cdd 100755 --- a/logstash/pipeline/logstash_config.py +++ b/logstash/pipeline/logstash_config.py @@ -48,7 +48,7 @@ jdbc_user => "{0}" jdbc_password => "{1}" use_column_value => true - tracking_column => "datetime_deleted" + tracking_column => "date_deleted" tracking_column_type => "timestamp" schedule => "*/5 * * * * *" statement_filepath => "/usr/share/logstash/sql/rm_{2}.sql" @@ -193,13 +193,13 @@ LEFT JOIN aiod.organisation ON aiod.{0}.{1}=aiod.organisation.identifier""" INIT_CLAUSE = """ -WHERE aiod.{0}.datetime_deleted IS NULL""" +WHERE aiod.{0}.date_deleted IS NULL""" SYNC_CLAUSE = """ -WHERE aiod.{0}.datetime_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" +WHERE aiod.{0}.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" RM_CLAUSE = """ -WHERE aiod.{0}.datetime_deleted IS NOT NULL AND aiod.{0}.datetime_deleted > :sql_last_value""" +WHERE aiod.{0}.date_deleted IS NOT NULL AND aiod.{0}.date_deleted > :sql_last_value""" # DOCUMENTS GENERATION FUNCTIONS # ============================================================================= diff --git a/logstash/pipeline/sql/init_dataset.sql b/logstash/pipeline/sql/init_dataset.sql index 5ebdbcc8..3774e884 100644 --- a/logstash/pipeline/sql/init_dataset.sql +++ b/logstash/pipeline/sql/init_dataset.sql @@ -17,7 +17,7 @@ SELECT dataset.date_published, dataset.version, license.name AS `license`, - -- Dataset + -- Attributes dataset.issn, dataset.measurement_technique, dataset.temporal_coverage, @@ -29,5 +29,6 @@ INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifi LEFT JOIN aiod.license ON aiod.dataset.license_identifier=aiod.license.identifier LEFT JOIN aiod.dataset_application_area_link ON aiod.dataset_application_area_link.from_identifier=aiod.dataset.identifier LEFT JOIN aiod.application_area ON aiod.dataset_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.dataset.date_deleted IS NULL GROUP BY aiod.dataset.identifier ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/init_event.sql b/logstash/pipeline/sql/init_event.sql index 324a3264..61293035 100644 --- a/logstash/pipeline/sql/init_event.sql +++ b/logstash/pipeline/sql/init_event.sql @@ -4,29 +4,36 @@ SELECT event.platform, event.platform_identifier, -- Concept.aiod_entry - event_status.name AS `status`, - event_mode.name AS `mode`, + status.name AS `status`, aiod_entry.date_modified, aiod_entry.date_created, - agent.type AS `organiser_type`, -- Resource event.ai_resource_id AS `resource_identifier`, event.name, event.description, event.same_as, + -- Attributes event.start_date, event.end_date, event.schedule, event.registration_link, event.organiser_identifier, + -- Mode + event_mode.name AS `mode`, + -- Status + event_status.name AS `event_status`, + -- Agent + agent.type AS `organiser_type`, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.event INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier -LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.event_mode ON aiod.event.mode_identifier=aiod.event_mode.identifier +LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier +LEFT JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier LEFT JOIN aiod.event_application_area_link ON aiod.event_application_area_link.from_identifier=aiod.event.identifier LEFT JOIN aiod.application_area ON aiod.event_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.event.date_deleted IS NULL GROUP BY aiod.event.identifier ORDER BY aiod.event.identifier diff --git a/logstash/pipeline/sql/init_experiment.sql b/logstash/pipeline/sql/init_experiment.sql index 25d6afb6..62a26575 100644 --- a/logstash/pipeline/sql/init_experiment.sql +++ b/logstash/pipeline/sql/init_experiment.sql @@ -17,17 +17,18 @@ SELECT experiment.date_published, experiment.version, license.name AS `license`, - -- Experiment + -- Attributes experiment.experimental_workflow, experiment.execution_settings, experiment.reproducibility_explanation, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` -FROM aiod.experiment +FROM aiod.experiment INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.experiment.license_identifier=aiod.license.identifier LEFT JOIN aiod.experiment_application_area_link ON aiod.experiment_application_area_link.from_identifier=aiod.experiment.identifier LEFT JOIN aiod.application_area ON aiod.experiment_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.experiment.date_deleted IS NULL GROUP BY aiod.experiment.identifier ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/init_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql index 598378b4..b5216c09 100644 --- a/logstash/pipeline/sql/init_ml_model.sql +++ b/logstash/pipeline/sql/init_ml_model.sql @@ -17,16 +17,17 @@ SELECT ml_model.date_published, ml_model.version, license.name AS `license`, - -- MLModel + -- Type ml_model_type.name AS `ml_model_type`, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` -FROM aiod.ml_model +FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifier LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier LEFT JOIN aiod.ml_model_application_area_link ON aiod.ml_model_application_area_link.from_identifier=aiod.ml_model.identifier LEFT JOIN aiod.application_area ON aiod.ml_model_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.ml_model.date_deleted IS NULL GROUP BY aiod.ml_model.identifier ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql index 32d30698..30a12659 100644 --- a/logstash/pipeline/sql/init_news.sql +++ b/logstash/pipeline/sql/init_news.sql @@ -12,6 +12,7 @@ SELECT news.name, news.description, news.same_as, + -- Attributes news.headline, news.alternative_headline, -- Application Area @@ -21,5 +22,6 @@ INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.id INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.news_application_area_link ON aiod.news_application_area_link.from_identifier=aiod.news.identifier LEFT JOIN aiod.application_area ON aiod.news_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.news.date_deleted IS NULL GROUP BY aiod.news.identifier ORDER BY aiod.news.identifier diff --git a/logstash/pipeline/sql/init_organisation.sql b/logstash/pipeline/sql/init_organisation.sql index 36d2f798..4a4bfa49 100644 --- a/logstash/pipeline/sql/init_organisation.sql +++ b/logstash/pipeline/sql/init_organisation.sql @@ -7,24 +7,27 @@ SELECT status.name AS `status`, aiod_entry.date_modified, aiod_entry.date_created, - agent.type AS `agent`, -- Resource organisation.ai_resource_id AS `resource_identifier`, organisation.name, organisation.description, organisation.same_as, + -- Attributes organisation.date_founded, organisation.legal_name, - -- Organisation + -- Type organisation_type.name AS `organisation_type`, + -- Agent + agent.type AS `agent`, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.organisation INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -INNER JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier LEFT JOIN aiod.organisation_type ON aiod.organisation.type_identifier=aiod.organisation_type.identifier +LEFT JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier LEFT JOIN aiod.organisation_application_area_link ON aiod.organisation_application_area_link.from_identifier=aiod.organisation.identifier LEFT JOIN aiod.application_area ON aiod.organisation_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.organisation.date_deleted IS NULL GROUP BY aiod.organisation.identifier ORDER BY aiod.organisation.identifier diff --git a/logstash/pipeline/sql/init_project.sql b/logstash/pipeline/sql/init_project.sql index 59f81839..6e9dd176 100644 --- a/logstash/pipeline/sql/init_project.sql +++ b/logstash/pipeline/sql/init_project.sql @@ -12,18 +12,21 @@ SELECT project.name, project.description, project.same_as, + -- Attributes project.start_date, project.end_date, project.total_cost_euro, project.coordinator_identifier, - organisation.name AS coordinator_name, + -- Organisation + organisation.name AS `coordinator_name`, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.project INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -INNER JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier +LEFT JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier LEFT JOIN aiod.project_application_area_link ON aiod.project_application_area_link.from_identifier=aiod.project.identifier LEFT JOIN aiod.application_area ON aiod.project_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.project.date_deleted IS NULL GROUP BY aiod.project.identifier ORDER BY aiod.project.identifier diff --git a/logstash/pipeline/sql/init_publication.sql b/logstash/pipeline/sql/init_publication.sql index 41f3c2b5..d14d8079 100644 --- a/logstash/pipeline/sql/init_publication.sql +++ b/logstash/pipeline/sql/init_publication.sql @@ -17,12 +17,12 @@ SELECT publication.date_published, publication.version, license.name AS `license`, - -- KnowledgeAsset - publication.knowledge_asset_id AS `knowledge_asset_identifier`, - -- Publication + -- Attributes publication.permanent_identifier, publication.isbn, publication.issn, + publication.knowledge_asset_id AS `knowledge_asset_identifier`, + -- Type publication_type.name AS `publication_type`, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` @@ -33,5 +33,6 @@ LEFT JOIN aiod.license ON aiod.publication.license_identifier=aiod.license.ident LEFT JOIN aiod.publication_type ON aiod.publication.type_identifier=aiod.publication_type.identifier LEFT JOIN aiod.publication_application_area_link ON aiod.publication_application_area_link.from_identifier=aiod.publication.identifier LEFT JOIN aiod.application_area ON aiod.publication_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.publication.date_deleted IS NULL GROUP BY aiod.publication.identifier ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/init_service.sql b/logstash/pipeline/sql/init_service.sql index d9b30863..96732d7a 100644 --- a/logstash/pipeline/sql/init_service.sql +++ b/logstash/pipeline/sql/init_service.sql @@ -12,7 +12,7 @@ SELECT service.name, service.description, service.same_as, - -- Service + -- Attributes service.slogan, service.terms_of_service, -- Application Area @@ -22,5 +22,6 @@ INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.service_application_area_link ON aiod.service_application_area_link.from_identifier=aiod.service.identifier LEFT JOIN aiod.application_area ON aiod.service_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.service.date_deleted IS NULL GROUP BY aiod.service.identifier ORDER BY aiod.service.identifier diff --git a/logstash/pipeline/sql/rm_dataset.sql b/logstash/pipeline/sql/rm_dataset.sql new file mode 100644 index 00000000..0c07b87a --- /dev/null +++ b/logstash/pipeline/sql/rm_dataset.sql @@ -0,0 +1,34 @@ +SELECT + -- Concept + dataset.identifier, + dataset.platform, + dataset.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + dataset.ai_resource_id AS `resource_identifier`, + dataset.name, + dataset.description, + dataset.same_as, + -- AIAsset + dataset.ai_asset_id AS `asset_identifier`, + dataset.date_published, + dataset.version, + license.name AS `license`, + -- Attributes + dataset.issn, + dataset.measurement_technique, + dataset.temporal_coverage, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` +FROM aiod.dataset +INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.dataset.license_identifier=aiod.license.identifier +LEFT JOIN aiod.dataset_application_area_link ON aiod.dataset_application_area_link.from_identifier=aiod.dataset.identifier +LEFT JOIN aiod.application_area ON aiod.dataset_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.dataset.date_deleted IS NOT NULL AND aiod.dataset.date_deleted > :sql_last_value +GROUP BY aiod.dataset.identifier +ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/rm_event.sql b/logstash/pipeline/sql/rm_event.sql new file mode 100644 index 00000000..888e6215 --- /dev/null +++ b/logstash/pipeline/sql/rm_event.sql @@ -0,0 +1,39 @@ +SELECT + -- Concept + event.identifier, + event.platform, + event.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + event.ai_resource_id AS `resource_identifier`, + event.name, + event.description, + event.same_as, + -- Attributes + event.start_date, + event.end_date, + event.schedule, + event.registration_link, + event.organiser_identifier, + -- Mode + event_mode.name AS `mode`, + -- Status + event_status.name AS `event_status`, + -- Agent + agent.type AS `organiser_type`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` +FROM aiod.event +INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.event_mode ON aiod.event.mode_identifier=aiod.event_mode.identifier +LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier +LEFT JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier +LEFT JOIN aiod.event_application_area_link ON aiod.event_application_area_link.from_identifier=aiod.event.identifier +LEFT JOIN aiod.application_area ON aiod.event_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.event.date_deleted IS NOT NULL AND aiod.event.date_deleted > :sql_last_value +GROUP BY aiod.event.identifier +ORDER BY aiod.event.identifier diff --git a/logstash/pipeline/sql/rm_experiment.sql b/logstash/pipeline/sql/rm_experiment.sql new file mode 100644 index 00000000..9d75f3cc --- /dev/null +++ b/logstash/pipeline/sql/rm_experiment.sql @@ -0,0 +1,34 @@ +SELECT + -- Concept + experiment.identifier, + experiment.platform, + experiment.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + experiment.ai_resource_id AS `resource_identifier`, + experiment.name, + experiment.description, + experiment.same_as, + -- AIAsset + experiment.ai_asset_id AS `asset_identifier`, + experiment.date_published, + experiment.version, + license.name AS `license`, + -- Attributes + experiment.experimental_workflow, + experiment.execution_settings, + experiment.reproducibility_explanation, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` +FROM aiod.experiment +INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.experiment.license_identifier=aiod.license.identifier +LEFT JOIN aiod.experiment_application_area_link ON aiod.experiment_application_area_link.from_identifier=aiod.experiment.identifier +LEFT JOIN aiod.application_area ON aiod.experiment_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.experiment.date_deleted IS NOT NULL AND aiod.experiment.date_deleted > :sql_last_value +GROUP BY aiod.experiment.identifier +ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/rm_ml_model.sql b/logstash/pipeline/sql/rm_ml_model.sql new file mode 100644 index 00000000..2a146b9f --- /dev/null +++ b/logstash/pipeline/sql/rm_ml_model.sql @@ -0,0 +1,33 @@ +SELECT + -- Concept + ml_model.identifier, + ml_model.platform, + ml_model.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + ml_model.ai_resource_id AS `resource_identifier`, + ml_model.name, + ml_model.description, + ml_model.same_as, + -- AIAsset + ml_model.ai_asset_id AS `asset_identifier`, + ml_model.date_published, + ml_model.version, + license.name AS `license`, + -- Type + ml_model_type.name AS `ml_model_type`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` +FROM aiod.ml_model +INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifier +LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier +LEFT JOIN aiod.ml_model_application_area_link ON aiod.ml_model_application_area_link.from_identifier=aiod.ml_model.identifier +LEFT JOIN aiod.application_area ON aiod.ml_model_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.ml_model.date_deleted IS NOT NULL AND aiod.ml_model.date_deleted > :sql_last_value +GROUP BY aiod.ml_model.identifier +ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/rm_news.sql b/logstash/pipeline/sql/rm_news.sql new file mode 100644 index 00000000..0ee6822a --- /dev/null +++ b/logstash/pipeline/sql/rm_news.sql @@ -0,0 +1,27 @@ +SELECT + -- Concept + news.identifier, + news.platform, + news.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + news.ai_resource_id AS `resource_identifier`, + news.name, + news.description, + news.same_as, + -- Attributes + news.headline, + news.alternative_headline, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` +FROM aiod.news +INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.news_application_area_link ON aiod.news_application_area_link.from_identifier=aiod.news.identifier +LEFT JOIN aiod.application_area ON aiod.news_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.news.date_deleted IS NOT NULL AND aiod.news.date_deleted > :sql_last_value +GROUP BY aiod.news.identifier +ORDER BY aiod.news.identifier diff --git a/logstash/pipeline/sql/rm_organisation.sql b/logstash/pipeline/sql/rm_organisation.sql new file mode 100644 index 00000000..25173ac7 --- /dev/null +++ b/logstash/pipeline/sql/rm_organisation.sql @@ -0,0 +1,33 @@ +SELECT + -- Concept + organisation.identifier, + organisation.platform, + organisation.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + organisation.ai_resource_id AS `resource_identifier`, + organisation.name, + organisation.description, + organisation.same_as, + -- Attributes + organisation.date_founded, + organisation.legal_name, + -- Type + organisation_type.name AS `organisation_type`, + -- Agent + agent.type AS `agent`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` +FROM aiod.organisation +INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.organisation_type ON aiod.organisation.type_identifier=aiod.organisation_type.identifier +LEFT JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier +LEFT JOIN aiod.organisation_application_area_link ON aiod.organisation_application_area_link.from_identifier=aiod.organisation.identifier +LEFT JOIN aiod.application_area ON aiod.organisation_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.organisation.date_deleted IS NOT NULL AND aiod.organisation.date_deleted > :sql_last_value +GROUP BY aiod.organisation.identifier +ORDER BY aiod.organisation.identifier diff --git a/logstash/pipeline/sql/rm_project.sql b/logstash/pipeline/sql/rm_project.sql new file mode 100644 index 00000000..037dc8e9 --- /dev/null +++ b/logstash/pipeline/sql/rm_project.sql @@ -0,0 +1,32 @@ +SELECT + -- Concept + project.identifier, + project.platform, + project.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + project.ai_resource_id AS `resource_identifier`, + project.name, + project.description, + project.same_as, + -- Attributes + project.start_date, + project.end_date, + project.total_cost_euro, + project.coordinator_identifier, + -- Organisation + organisation.name AS `coordinator_name`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` +FROM aiod.project +INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier +LEFT JOIN aiod.project_application_area_link ON aiod.project_application_area_link.from_identifier=aiod.project.identifier +LEFT JOIN aiod.application_area ON aiod.project_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.project.date_deleted IS NOT NULL AND aiod.project.date_deleted > :sql_last_value +GROUP BY aiod.project.identifier +ORDER BY aiod.project.identifier diff --git a/logstash/pipeline/sql/rm_publication.sql b/logstash/pipeline/sql/rm_publication.sql new file mode 100644 index 00000000..706e3bd6 --- /dev/null +++ b/logstash/pipeline/sql/rm_publication.sql @@ -0,0 +1,38 @@ +SELECT + -- Concept + publication.identifier, + publication.platform, + publication.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + publication.ai_resource_id AS `resource_identifier`, + publication.name, + publication.description, + publication.same_as, + -- AIAsset + publication.ai_asset_id AS `asset_identifier`, + publication.date_published, + publication.version, + license.name AS `license`, + -- Attributes + publication.permanent_identifier, + publication.isbn, + publication.issn, + publication.knowledge_asset_id AS `knowledge_asset_identifier`, + -- Type + publication_type.name AS `publication_type`, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` +FROM aiod.publication +INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.license ON aiod.publication.license_identifier=aiod.license.identifier +LEFT JOIN aiod.publication_type ON aiod.publication.type_identifier=aiod.publication_type.identifier +LEFT JOIN aiod.publication_application_area_link ON aiod.publication_application_area_link.from_identifier=aiod.publication.identifier +LEFT JOIN aiod.application_area ON aiod.publication_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.publication.date_deleted IS NOT NULL AND aiod.publication.date_deleted > :sql_last_value +GROUP BY aiod.publication.identifier +ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/rm_service.sql b/logstash/pipeline/sql/rm_service.sql new file mode 100644 index 00000000..d4ad46f0 --- /dev/null +++ b/logstash/pipeline/sql/rm_service.sql @@ -0,0 +1,27 @@ +SELECT + -- Concept + service.identifier, + service.platform, + service.platform_identifier, + -- Concept.aiod_entry + status.name AS `status`, + aiod_entry.date_modified, + aiod_entry.date_created, + -- Resource + service.ai_resource_id AS `resource_identifier`, + service.name, + service.description, + service.same_as, + -- Attributes + service.slogan, + service.terms_of_service, + -- Application Area + GROUP_CONCAT(application_area.name) AS `application_area` +FROM aiod.service +INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier +LEFT JOIN aiod.service_application_area_link ON aiod.service_application_area_link.from_identifier=aiod.service.identifier +LEFT JOIN aiod.application_area ON aiod.service_application_area_link.linked_identifier=aiod.application_area.identifier +WHERE aiod.service.date_deleted IS NOT NULL AND aiod.service.date_deleted > :sql_last_value +GROUP BY aiod.service.identifier +ORDER BY aiod.service.identifier diff --git a/logstash/pipeline/sql/sync_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql index 7d104a3d..c0b30a86 100644 --- a/logstash/pipeline/sql/sync_dataset.sql +++ b/logstash/pipeline/sql/sync_dataset.sql @@ -17,7 +17,7 @@ SELECT dataset.date_published, dataset.version, license.name AS `license`, - -- Dataset + -- Attributes dataset.issn, dataset.measurement_technique, dataset.temporal_coverage, @@ -29,6 +29,6 @@ INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifi LEFT JOIN aiod.license ON aiod.dataset.license_identifier=aiod.license.identifier LEFT JOIN aiod.dataset_application_area_link ON aiod.dataset_application_area_link.from_identifier=aiod.dataset.identifier LEFT JOIN aiod.application_area ON aiod.dataset_application_area_link.linked_identifier=aiod.application_area.identifier -WHERE aiod.aiod_entry.date_modified > :sql_last_value +WHERE aiod.dataset.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value GROUP BY aiod.dataset.identifier ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/sync_event.sql b/logstash/pipeline/sql/sync_event.sql index b840e893..7af694ae 100644 --- a/logstash/pipeline/sql/sync_event.sql +++ b/logstash/pipeline/sql/sync_event.sql @@ -4,30 +4,36 @@ SELECT event.platform, event.platform_identifier, -- Concept.aiod_entry - event_status.name AS `status`, - event_mode.name AS `mode`, + status.name AS `status`, aiod_entry.date_modified, aiod_entry.date_created, - agent.type AS `organiser_type`, -- Resource event.ai_resource_id AS `resource_identifier`, event.name, event.description, event.same_as, + -- Attributes event.start_date, event.end_date, event.schedule, event.registration_link, event.organiser_identifier, + -- Mode + event_mode.name AS `mode`, + -- Status + event_status.name AS `event_status`, + -- Agent + agent.type AS `organiser_type`, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.event INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier -LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier +INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.event_mode ON aiod.event.mode_identifier=aiod.event_mode.identifier +LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier +LEFT JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier LEFT JOIN aiod.event_application_area_link ON aiod.event_application_area_link.from_identifier=aiod.event.identifier LEFT JOIN aiod.application_area ON aiod.event_application_area_link.linked_identifier=aiod.application_area.identifier -WHERE aiod.aiod_entry.date_modified > :sql_last_value +WHERE aiod.event.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value GROUP BY aiod.event.identifier ORDER BY aiod.event.identifier diff --git a/logstash/pipeline/sql/sync_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql index a2837263..198b287d 100644 --- a/logstash/pipeline/sql/sync_experiment.sql +++ b/logstash/pipeline/sql/sync_experiment.sql @@ -17,7 +17,7 @@ SELECT experiment.date_published, experiment.version, license.name AS `license`, - -- Experiment + -- Attributes experiment.experimental_workflow, experiment.execution_settings, experiment.reproducibility_explanation, @@ -29,6 +29,6 @@ INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifi LEFT JOIN aiod.license ON aiod.experiment.license_identifier=aiod.license.identifier LEFT JOIN aiod.experiment_application_area_link ON aiod.experiment_application_area_link.from_identifier=aiod.experiment.identifier LEFT JOIN aiod.application_area ON aiod.experiment_application_area_link.linked_identifier=aiod.application_area.identifier -WHERE aiod.aiod_entry.date_modified > :sql_last_value +WHERE aiod.experiment.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value GROUP BY aiod.experiment.identifier ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/sync_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql index bbbf00c6..4825aad2 100644 --- a/logstash/pipeline/sql/sync_ml_model.sql +++ b/logstash/pipeline/sql/sync_ml_model.sql @@ -17,7 +17,7 @@ SELECT ml_model.date_published, ml_model.version, license.name AS `license`, - -- MLModel + -- Type ml_model_type.name AS `ml_model_type`, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` @@ -28,6 +28,6 @@ LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifi LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier LEFT JOIN aiod.ml_model_application_area_link ON aiod.ml_model_application_area_link.from_identifier=aiod.ml_model.identifier LEFT JOIN aiod.application_area ON aiod.ml_model_application_area_link.linked_identifier=aiod.application_area.identifier -WHERE aiod.aiod_entry.date_modified > :sql_last_value +WHERE aiod.ml_model.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value GROUP BY aiod.ml_model.identifier ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql index 3db5b037..42a6b694 100644 --- a/logstash/pipeline/sql/sync_news.sql +++ b/logstash/pipeline/sql/sync_news.sql @@ -12,6 +12,7 @@ SELECT news.name, news.description, news.same_as, + -- Attributes news.headline, news.alternative_headline, -- Application Area @@ -21,6 +22,6 @@ INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.id INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.news_application_area_link ON aiod.news_application_area_link.from_identifier=aiod.news.identifier LEFT JOIN aiod.application_area ON aiod.news_application_area_link.linked_identifier=aiod.application_area.identifier -WHERE aiod.aiod_entry.date_modified > :sql_last_value +WHERE aiod.news.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value GROUP BY aiod.news.identifier ORDER BY aiod.news.identifier diff --git a/logstash/pipeline/sql/sync_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql index 5d26677d..88cee11b 100644 --- a/logstash/pipeline/sql/sync_organisation.sql +++ b/logstash/pipeline/sql/sync_organisation.sql @@ -7,25 +7,27 @@ SELECT status.name AS `status`, aiod_entry.date_modified, aiod_entry.date_created, - agent.type AS `agent`, -- Resource organisation.ai_resource_id AS `resource_identifier`, organisation.name, organisation.description, organisation.same_as, + -- Attributes organisation.date_founded, organisation.legal_name, - -- Organisation + -- Type organisation_type.name AS `organisation_type`, + -- Agent + agent.type AS `agent`, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.organisation INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -INNER JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier LEFT JOIN aiod.organisation_type ON aiod.organisation.type_identifier=aiod.organisation_type.identifier +LEFT JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier LEFT JOIN aiod.organisation_application_area_link ON aiod.organisation_application_area_link.from_identifier=aiod.organisation.identifier LEFT JOIN aiod.application_area ON aiod.organisation_application_area_link.linked_identifier=aiod.application_area.identifier -WHERE aiod.aiod_entry.date_modified > :sql_last_value +WHERE aiod.organisation.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value GROUP BY aiod.organisation.identifier ORDER BY aiod.organisation.identifier diff --git a/logstash/pipeline/sql/sync_project.sql b/logstash/pipeline/sql/sync_project.sql index b6e02c8f..626fdc30 100644 --- a/logstash/pipeline/sql/sync_project.sql +++ b/logstash/pipeline/sql/sync_project.sql @@ -12,19 +12,21 @@ SELECT project.name, project.description, project.same_as, + -- Attributes project.start_date, project.end_date, project.total_cost_euro, project.coordinator_identifier, - organisation.name AS coordinator_name, + -- Organisation + organisation.name AS `coordinator_name`, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` FROM aiod.project INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -INNER JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier +LEFT JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier LEFT JOIN aiod.project_application_area_link ON aiod.project_application_area_link.from_identifier=aiod.project.identifier LEFT JOIN aiod.application_area ON aiod.project_application_area_link.linked_identifier=aiod.application_area.identifier -WHERE aiod.aiod_entry.date_modified > :sql_last_value +WHERE aiod.project.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value GROUP BY aiod.project.identifier ORDER BY aiod.project.identifier diff --git a/logstash/pipeline/sql/sync_publication.sql b/logstash/pipeline/sql/sync_publication.sql index 699f747d..eb548dc6 100644 --- a/logstash/pipeline/sql/sync_publication.sql +++ b/logstash/pipeline/sql/sync_publication.sql @@ -17,12 +17,12 @@ SELECT publication.date_published, publication.version, license.name AS `license`, - -- KnowledgeAsset - publication.knowledge_asset_id AS `knowledge_asset_identifier`, - -- Publication + -- Attributes publication.permanent_identifier, publication.isbn, publication.issn, + publication.knowledge_asset_id AS `knowledge_asset_identifier`, + -- Type publication_type.name AS `publication_type`, -- Application Area GROUP_CONCAT(application_area.name) AS `application_area` @@ -33,6 +33,6 @@ LEFT JOIN aiod.license ON aiod.publication.license_identifier=aiod.license.ident LEFT JOIN aiod.publication_type ON aiod.publication.type_identifier=aiod.publication_type.identifier LEFT JOIN aiod.publication_application_area_link ON aiod.publication_application_area_link.from_identifier=aiod.publication.identifier LEFT JOIN aiod.application_area ON aiod.publication_application_area_link.linked_identifier=aiod.application_area.identifier -WHERE aiod.aiod_entry.date_modified > :sql_last_value +WHERE aiod.publication.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value GROUP BY aiod.publication.identifier ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/sync_service.sql b/logstash/pipeline/sql/sync_service.sql index 389b541c..96c114aa 100644 --- a/logstash/pipeline/sql/sync_service.sql +++ b/logstash/pipeline/sql/sync_service.sql @@ -12,7 +12,7 @@ SELECT service.name, service.description, service.same_as, - -- Service + -- Attributes service.slogan, service.terms_of_service, -- Application Area @@ -22,6 +22,6 @@ INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier LEFT JOIN aiod.service_application_area_link ON aiod.service_application_area_link.from_identifier=aiod.service.identifier LEFT JOIN aiod.application_area ON aiod.service_application_area_link.linked_identifier=aiod.application_area.identifier -WHERE aiod.aiod_entry.date_modified > :sql_last_value +WHERE aiod.service.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value GROUP BY aiod.service.identifier ORDER BY aiod.service.identifier diff --git a/src/connectors/example/resources/resource/datasets.json b/src/connectors/example/resources/resource/datasets.json index 2c56bfcb..04f6c212 100644 --- a/src/connectors/example/resources/resource/datasets.json +++ b/src/connectors/example/resources/resource/datasets.json @@ -39,11 +39,24 @@ "name": "" } ], -<<<<<<< HEAD - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, + "note": [ + { + "value": "A brief record of points or ideas about this AI resource." + } + ], + "research_area": [ + "Explainable AI", + "Physical AI" + ], + "scientific_domain": [ + "Anomaly Detection", + "Voice Recognition", + "Computer Vision." + ], + "size": { + "unit": "Rows", + "value": 100 + }, "spatial_coverage": {} }, { @@ -60,25 +73,6 @@ "aiod_entry": { "editor": [], "status": "draft" -======= - "note": [ - { - "value": "A brief record of points or ideas about this AI resource." - } - ], - "research_area": [ - "Explainable AI", - "Physical AI" - ], - "scientific_domain": [ - "Anomaly Detection", - "Voice Recognition", - "Computer Vision." - ], - "size": { - "unit": "Rows", - "value": 100 ->>>>>>> develop }, "alternate_name": [], "application_area": [], @@ -6229,4 +6223,4 @@ "size": {}, "spatial_coverage": {} } -] \ No newline at end of file +] diff --git a/src/connectors/example/resources/resource/experiments.json b/src/connectors/example/resources/resource/experiments.json index f5b28058..9dd688c3 100644 --- a/src/connectors/example/resources/resource/experiments.json +++ b/src/connectors/example/resources/resource/experiments.json @@ -1,5 +1,4 @@ [ -<<<<<<< HEAD { "platform": "ai4experiments", "platform_identifier": "366", @@ -1980,100 +1979,4 @@ "research_area": [], "scientific_domain": [] } -======= - { - "platform": "example", - "platform_identifier": "1", - "name": "The name of this experiment", - "description": "A description.", - "same_as": "https://www.example.com/resource/this_resource", - "date_published": "2022-01-01T15:15:00.000", - "version": "1.1.0", - "pid": "https://doi.org/10.1000/182", - "experimental_workflow": "1) Load the dataset 2) run preprocessing code found in ... 3) run the model on the data.", - "execution_settings": "string", - "reproducibility_explanation": "string", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [ - "alias 1", - "alias 2" - ], - "application_area": [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" - ], - "badge": [ - "ACM Artifacts Evaluated - Reusable" - ], - "citation": [], - "contact": [], - "creator": [], - "distribution": [ - { - "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "checksum_algorithm": "sha256", - "copyright": "2010-2020 Example Company. All rights reserved.", - "content_url": "https://www.example.com/experiment.zip", - "content_size_kb": 10000, - "date_published": "2022-01-01T15:15:00.000", - "description": "Description of this file.", - "encoding_format": "application/zip", - "name": "Name of this file.", - "technology_readiness_level": 1, - "installation_script": "./install.sh", - "installation": "Build the Dockerfile", - "installation_time_milliseconds": 100, - "deployment_script": "./run.sh", - "deployment": "You can run the run.py file using python3. See README.md for required arguments.", - "deployment_time_milliseconds": 100, - "os_requirement": "Windows 11.", - "dependency": "Python packages as listed in requirements.txt.", - "hardware_requirement": "4GB RAM; 100MB storage; 1GHz processor with 8 cores." - } - ], - "has_part": [], - "industrial_sector": [ - "Finance", - "eCommerce", - "Healthcare" - ], - "is_part_of": [], - "keyword": [ - "keyword1", - "keyword2" - ], - "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", - "media": [ - { - "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "checksum_algorithm": "sha256", - "copyright": "2010-2020 Example Company. All rights reserved.", - "content_url": "https://www.example.com/image.jpeg", - "content_size_kb": 10000, - "date_published": "2022-01-01T15:15:00.000", - "description": "Description of this file.", - "encoding_format": "image/jpeg", - "name": "Name of this file." - } - ], - "note": [ - { - "value": "A brief record of points or ideas about this AI resource." - } - ], - "research_area": [ - "Explainable AI", - "Physical AI" - ], - "scientific_domain": [ - "Anomaly Detection", - "Voice Recognition", - "Computer Vision." - ] - } ->>>>>>> develop -] \ No newline at end of file +] diff --git a/src/connectors/example/resources/resource/ml_models.json b/src/connectors/example/resources/resource/ml_models.json index afb45fd6..8e239e12 100644 --- a/src/connectors/example/resources/resource/ml_models.json +++ b/src/connectors/example/resources/resource/ml_models.json @@ -1,5 +1,4 @@ [ -<<<<<<< HEAD { "platform": "ai4experiments", "platform_identifier": "1", @@ -8084,96 +8083,4 @@ "scientific_domain": [], "type": "" } -======= - { - "platform": "example", - "platform_identifier": "1", - "name": "The name of this resource", - "description": "A description.", - "same_as": "https://www.example.com/resource/this_resource", - "date_published": "2022-01-01T15:15:00.000", - "version": "1.1.0", - "pid": "https://doi.org/10.1000/182", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [ - "alias 1", - "alias 2" - ], - "application_area": [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" - ], - "citation": [], - "contact": [], - "creator": [], - "distribution": [ - { - "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "checksum_algorithm": "sha256", - "copyright": "2010-2020 Example Company. All rights reserved.", - "content_url": "https://www.example.com/model.zip", - "content_size_kb": 10000, - "date_published": "2022-01-01T15:15:00.000", - "description": "Description of this file.", - "encoding_format": "application/zip", - "name": "Name of this file.", - "technology_readiness_level": 1, - "installation_script": "./install.sh", - "installation": "Build the Dockerfile", - "installation_time_milliseconds": 100, - "deployment_script": "./run.sh", - "deployment": "You can run the run.py file using python3. See README.md for required arguments.", - "deployment_time_milliseconds": 100, - "os_requirement": "Windows 11.", - "dependency": "Python packages as listed in requirements.txt.", - "hardware_requirement": "4GB RAM; 100MB storage; 1GHz processor with 8 cores." - } - ], - "has_part": [], - "industrial_sector": [ - "Finance", - "eCommerce", - "Healthcare" - ], - "is_part_of": [], - "keyword": [ - "keyword1", - "keyword2" - ], - "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", - "media": [ - { - "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "checksum_algorithm": "sha256", - "copyright": "2010-2020 Example Company. All rights reserved.", - "content_url": "https://www.example.com/image.jpeg", - "content_size_kb": 10000, - "date_published": "2022-01-01T15:15:00.000", - "description": "Description of this file.", - "encoding_format": "image/jpeg", - "name": "Name of this file." - } - ], - "note": [ - { - "value": "A brief record of points or ideas about this AI resource." - } - ], - "related_experiment": [], - "research_area": [ - "Explainable AI", - "Physical AI" - ], - "scientific_domain": [ - "Anomaly Detection", - "Voice Recognition", - "Computer Vision." - ], - "type": "Large Language Model" - } ->>>>>>> develop -] \ No newline at end of file +] From 0865424f58e25021089aab71381242dde434b4a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Thu, 9 Nov 2023 13:31:50 +0100 Subject: [PATCH 42/79] Prepared to be merged with develop --- .../example/resources/resource/datasets.json | 6271 +------------ .../example/resources/resource/events.json | 3 +- .../resources/resource/experiments.json | 2076 +---- .../example/resources/resource/ml_models.json | 8176 +---------------- .../resources/resource/organisations.json | 2 +- .../example/resources/resource/projects.json | 3 +- .../elasticsearch/dataset_search.json | 95 +- .../resources/elasticsearch/event_search.json | 55 + .../elasticsearch/experiment_search.json | 95 +- .../elasticsearch/ml_model_search.json | 92 +- .../resources/elasticsearch/news_search.json | 49 + .../elasticsearch/organisation_search.json | 51 + .../elasticsearch/project_search.json | 52 + .../elasticsearch/publication_search.json | 97 +- .../elasticsearch/service_search.json | 83 +- .../test_search_router_datasets.py | 27 +- .../test_search_router_events.py | 48 + .../test_search_router_experiments.py | 25 +- .../test_search_router_ml_model.py | 21 +- .../search_routers/test_search_router_news.py | 45 + .../test_search_router_organisations.py | 45 + .../test_search_router_projects.py | 46 + .../test_search_router_publications.py | 25 +- .../test_search_router_services.py | 17 +- 24 files changed, 951 insertions(+), 16548 deletions(-) create mode 100644 src/tests/resources/elasticsearch/event_search.json create mode 100644 src/tests/resources/elasticsearch/news_search.json create mode 100644 src/tests/resources/elasticsearch/organisation_search.json create mode 100644 src/tests/resources/elasticsearch/project_search.json create mode 100644 src/tests/routers/search_routers/test_search_router_events.py create mode 100644 src/tests/routers/search_routers/test_search_router_news.py create mode 100644 src/tests/routers/search_routers/test_search_router_organisations.py create mode 100644 src/tests/routers/search_routers/test_search_router_projects.py diff --git a/src/connectors/example/resources/resource/datasets.json b/src/connectors/example/resources/resource/datasets.json index 04f6c212..55930d45 100644 --- a/src/connectors/example/resources/resource/datasets.json +++ b/src/connectors/example/resources/resource/datasets.json @@ -1,6226 +1,103 @@ [ { - "platform": "ai4experiments", - "platform_identifier": "5", - "name": "autoUniv-au1-1000", - "description": "https://openml.org \n\n**Author** : Ray. J. Hickey \n \n**Source** : UCI \n \n **Please cite** :\n\n \n\n \n\n * Dataset Title: \n \n\n \n\nAutoUniv Dataset \n \ndata problem: autoUniv-au1-1000\n\n \n\n \n\n * Abstract: \n \n\n \n\nAutoUniv is an advanced data generator for classifications tasks. The aim is\nto reflect the nuances and heterogeneity of real data. Data can be generated\nin .csv, ARFF or C4.5 formats.\n\n \n\n \n\n * Source: \n \n\n \n\nAutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com \nAutoUniv web-site: http://sites.google.com/site/autouniv/.\n\n \n\n \n\n * Data Set Information:\n \n\n \n\nThe user first creates a classification model and then generates classified\nexamples from it. To create a model, the following are specified: the number\nof attributes (up to 1000) and their type (discrete or continuous), the number\nof classes (up to 10), the complexity of the underlying rules and the noise\nlevel. AutoUniv then produces a model through a process of constrained\nrandomised search to satisfy the user's requirements. A model can have up to\n3000 rules. Rare class models can be designed. A sequence of models can be\ndesigned to reflect concept and/or population drift.\n\n \n\nAutoUniv creates three text files for a model: a Prolog specification of the\nmodel used to generate examples (.aupl); a user-friendly statement of the\nclassification rules in an 'if ... then' format (.aurules); a statistical\nsummary of the main properties of the model, including its Bayes rate\n(.auprops).\n\n \n\n \n\n * Attribute Information: \n \n\n \n\nAttributes may be discrete with up to 10 values or continuous. A discrete\nattribute can be nominal with values v1, v2, v3 ... or integer with values 0,\n1, 2 , ... .\n\n \n\n \n\n * Relevant Papers:\n \n\n \n\nMarrs, G, Hickey, RJ and Black, MM (2010)", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=01dd6e8a-f4b1-4c5e-9206-0e40c8031be6&revisionId=39245fff-57fa-45c5-9f6a-49abf71e99b6&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", + "platform": "example", + "platform_identifier": "1", + "name": "The name of this dataset", + "description": "A description.", + "same_as": "https://www.example.com/resource/this_resource", + "date_published": "2022-01-01T15:15:00.000", + "version": "1.1.0", + "issn": "20493630", + "measurement_technique": "mass spectrometry", + "temporal_coverage": "2011/2012", "aiod_entry": { "editor": [], "status": "draft" }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=01dd6e8a-f4b1-4c5e-9206-0e40c8031be6&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/01dd6e8a-f4b1-4c5e-9206-0e40c8031be6/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [ - { - "value": "A brief record of points or ideas about this AI resource." - } - ], - "research_area": [ - "Explainable AI", - "Physical AI" - ], - "scientific_domain": [ - "Anomaly Detection", - "Voice Recognition", - "Computer Vision." - ], - "size": { - "unit": "Rows", - "value": 100 - }, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "6", - "name": "schizo", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\nSchizophrenic Eye-Tracking Data in Rubin and Wu (1997) \nBiometrics. Yingnian Wu (wu@hustat.harvard.edu) [14/Oct/97]\n\n \n\nInformation about the dataset \nCLASSTYPE: nominal \nCLASSINDEX: last\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0247b1c5-2161-4367-96ea-4aa9370b8bb6&revisionId=9c637a1f-22db-49df-ab7d-d0763058e9e9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0247b1c5-2161-4367-96ea-4aa9370b8bb6&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0247b1c5-2161-4367-96ea-4aa9370b8bb6/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "7", - "name": "calendarDOW", - "description": "https://openml.org \n\ncalendarDOW-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=026c2720-4045-48c9-87ec-9791c120bb85&revisionId=b2df4780-1459-41ed-abbb-fb38f2697a04&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=026c2720-4045-48c9-87ec-9791c120bb85&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/026c2720-4045-48c9-87ec-9791c120bb85/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "8", - "name": "GesturePhaseSegmentationProcessed", - "description": "https://openml.org \n\n**Author** : Renata Cristina Barros Madeo (Madeo\",\"R. C. B.) Priscilla Koch\nWagner (Wagner\",\"P. K.) Sarajane Marques Peres (Peres\",\"S. M.)\n{renata.si\",\"priscilla.wagner\",\"sarajane} at usp.br\nhttp://each.uspnet.usp.br/sarajane/ \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/gesture+phase+segmentation) \n \n **Please cite** : Please refer to the [Machine Learning Repository's citation\npolicy](https://archive.ics.uci.edu/ml/citation_policy.html). Additionally,\nthe authors require a citation to one or more publications from those cited as\nrelevant papers.\n\n \n\nCreators: \nRenata Cristina Barros Madeo (Madeo, R. C. B.) \nPriscilla Koch Wagner (Wagner, P. K.) \nSarajane Marques Peres (Peres, S. M.) \n{renata.si, priscilla.wagner, sarajane} at usp.br \nhttp://each.uspnet.usp.br/sarajane/\n\n \n\nDonor: \nUniversity of Sao Paulo - Brazil\n\n \n\nData Set Information:\n\n \n\nThe dataset is composed by features extracted from 7 videos with people\ngesticulating, aiming at studying Gesture Phase Segmentation. \nEach video is represented by two files: a raw file, which contains the\nposition of hands, wrists, head and spine of the user in each frame; and a\nprocessed file, which contains velocity and acceleration of hands and wrists.\nSee the data set description for more information on the dataset.\n\n \n\nAttribute Information:\n\n \n\nRaw files: 18 numeric attributes (double), a timestamp and a class attribute\n(nominal). \nProcessed files: 32 numeric attributes (double) and a class attribute\n(nominal). \nA feature vector with up to 50 numeric attributes can be generated with the\ntwo files mentioned above.\n\n \n\nThis is the processed data set with the following feature description:\n\n \n\nProcessed files:\n\n \n\n \n\n 1. Vectorial velocity of left hand (x coordi", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=03a938ee-181d-4409-a806-199034e5172b&revisionId=73ee8559-e5f5-45c4-b15b-56392843644f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=03a938ee-181d-4409-a806-199034e5172b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/03a938ee-181d-4409-a806-199034e5172b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "9", - "name": "haberman", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - \n \n**Please cite** :\n\n \n\n \n\n 1. \n\nTitle: Haberman's Survival Data\n\n \n\n \n\n 2. \n\nSources: \n(a) Donor: Tjen-Sien Lim (limt@stat.wisc.edu) \n(b) Date: March 4, 1999\n\n \n\n \n\n 3. \n\nPast Usage:\n\n \n \n\n 1. Haberman, S. J. (1976). Generalized Residuals for Log-Linear \nModels, Proceedings of the 9th International Biometrics \nConference, Boston, pp. 104-122.\n\n \n\n 2. Landwehr, J. M., Pregibon, D., and Shoemaker, A. C. (1984), \nGraphical Models for Assessing Logistic Regression Models (with \ndiscussion), Journal of the American Statistical Association 79: \n61-83.\n\n \n\n 3. Lo, W.-D. (1993). Logistic Regression Trees, PhD thesis, \nDepartment of Statistics, University of Wisconsin, Madison, WI.\n\n \n \n\n \n\n 4. \n\nRelevant Information: \nThe dataset contains cases from a study that was conducted between \n1958 and 1970 at the University of Chicago's Billings Hospital on \nthe survival of patients who had undergone surgery for breast \ncancer.\n\n \n\n \n\n 5. \n\nNumber of Instances: 306\n\n \n\n \n\n 6. \n\nNumber of Attributes: 4 (including the class attribute)\n\n \n\n \n\n 7. \n\nAttribute Information:\n\n \n \n\n 1. Age of patient at time of operation (numerical)\n \n\n 2. Patient's year of operation (year - 1900, numerical)\n \n\n 3. Number of positive axillary nodes detected (numerical)\n \n\n 4. Survival status (class attribute) \n1 = the patient survived 5 years or longer \n2 = the patient died within 5 year\n\n \n \n\n \n\n 8. \n\nMissing Attribute Values: None\n\n \n\n \n\n \n\nInformation about the dataset \nCLASSTYPE: nominal \nCLASSINDEX: last\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=03cc6248-6087-45e0-a732-6d34e299934e&revisionId=e36bfd43-b146-47b4-ad8b-f1cca7ef09c0&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=03cc6248-6087-45e0-a732-6d34e299934e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/03cc6248-6087-45e0-a732-6d34e299934e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "13", - "name": "sudoku-tutorial-gui-stream", - "description": " \n\n \n\nThis is the **streaming** version **User Interface** component of the AI4EU\nExperiments **Sudoku Hello World**!\n\n \n\nFor more details, see the corresponding entry in the AI4EU Asset Catalog:\n\n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=06c6909b-7c7d-4a09-8199-e3d647ba144d&revisionId=edc4ecbd-8189-4021-83ca-44e046f41127&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=06c6909b-7c7d-4a09-8199-e3d647ba144d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/06c6909b-7c7d-4a09-8199-e3d647ba144d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "17", - "name": "GAMETES_Epistasis_2-Way_20atts_0.1H_EDM-1_1", - "description": "https://openml.org \n\nGAMETES_Epistasis_2-Way_20atts_0.1H_EDM-1_1-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=08819c99-9458-48de-84e1-83290b73caa7&revisionId=a718624c-d501-459f-8ad6-7628dbcf60a9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=08819c99-9458-48de-84e1-83290b73caa7&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/08819c99-9458-48de-84e1-83290b73caa7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "20", - "name": "mushroom", - "description": "https://openml.org \n\n**Author** : [Jeff Schlimmer](Jeffrey.Schlimmer@a.gp.cs.cmu.edu) \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/mushroom) \\- 1981 \n \n**Please cite** : The Audubon Society Field Guide to North American Mushrooms\n(1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf\n\n \n\n### Description\n\n \n\nThis dataset describes mushrooms in terms of their physical characteristics.\nThey are classified into: poisonous or edible.\n\n \n\n### Source\n\n \n\n``` \n(a) Origin: \nMushroom records are drawn from The Audubon Society Field Guide to North\nAmerican Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf\n\n \n\n(b) Donor: \nJeff Schlimmer (Jeffrey.Schlimmer '@' a.gp.cs.cmu.edu) \n```\n\n \n\n### Dataset description\n\n \n\nThis dataset includes descriptions of hypothetical samples corresponding to 23\nspecies of gilled mushrooms in the Agaricus and Lepiota Family. Each species\nis identified as definitely edible, definitely poisonous, or of unknown\nedibility and not recommended. This latter class was combined with the\npoisonous one. The Guide clearly states that there is no simple rule for\ndetermining the edibility of a mushroom; no rule like ``leaflets three, let it\nbe'' for Poisonous Oak and Ivy.\n\n \n\n### Attributes Information\n\n \n\n`1. cap-shape: bell=b,conical=c,convex=x,flat=f, knobbed=k,sunken=s \n2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s \n3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r, pink=p,purple=u,red=e,white=w,yellow=y \n4. bruises?: bruises=t,no=f \n5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f, musty=m,none=n,pungent=p,spicy=s \n6. gill-attachment: attached=a,descending=d,free=f,notched=n \n7. gill-spacing: close=c,crowded=w,distant=d \n8. gill-size: broad=b,narrow=n \n9. gill-color: black=k,bro", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0a6e6072-441e-4274-bf2a-6216def228bd&revisionId=d6acfed4-6030-4b57-ac62-277a78f4592d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0a6e6072-441e-4274-bf2a-6216def228bd&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0a6e6072-441e-4274-bf2a-6216def228bd/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "21", - "name": "ecoli", - "description": "https://openml.org \n\necoli-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0c052358-19dd-4904-a209-f58f7457623e&revisionId=bbd47da9-e81c-43da-97ba-490c32c80089&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0c052358-19dd-4904-a209-f58f7457623e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0c052358-19dd-4904-a209-f58f7457623e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "22", - "name": "AudioFileBroker", - "description": "This model is used for the beginning of an audio mining pipeline and\ndispachtes the audio files.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0c4d6ad9-c9df-4054-a030-e8d22613afc5&revisionId=b3a2910a-0c19-47e8-9521-8482c203b49f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0c4d6ad9-c9df-4054-a030-e8d22613afc5&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0c4d6ad9-c9df-4054-a030-e8d22613afc5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "23", - "name": "ai4iot-data-source", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&revisionId=016bab07-2b2b-4bbd-8384-fb489403012b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0e228e69-9703-4445-b1f9-e6d1da1446da/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "24", - "name": "ai4iot-data-source", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&revisionId=663f1188-9f5a-4cf0-8d9e-b3f2aaaf863b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0e228e69-9703-4445-b1f9-e6d1da1446da/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "25", - "name": "ai4iot-data-source", - "description": "The Data Source component serves as an aggregator of data incoming from\ndifferent services, and which is useful for the AI4IoT pipeline. In\nparticular, it connects to external APIs and provides data in an unified (and\nstandardized through protobuf message definition) way. The AI4IoT tackles air\nquality in the city of Trondheim, Norway. Therefore, the current version of\nthis component fetches data for this city. The structure can, however, be\nreplicated to any other place by extending the scripts with the given API\ncalls for the place of interest. Currently, available data through this\ncomponent is pollution measurements both from a network of low-cost sensors, a\n(much smaller) network of industrial sensors and meteorological data.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&revisionId=a68fc42c-2e73-4328-97dc-34424eec75c5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.3", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&version=1.0.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0e228e69-9703-4445-b1f9-e6d1da1446da/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "26", - "name": "ai4iot-data-source", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&revisionId=b42fb848-ad2d-408c-897e-b25932fe2b93&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0e228e69-9703-4445-b1f9-e6d1da1446da&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0e228e69-9703-4445-b1f9-e6d1da1446da/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "27", - "name": "wdbc", - "description": "https://openml.org \n\n**Author** : William H. Wolberg, W. Nick Street, Olvi L. Mangasarian \n \n**Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+\\(original\\)),\n[University of Wisconsin](http://pages.cs.wisc.edu/~olvi/uwmp/cancer.html) \\-\n1995 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Breast Cancer Wisconsin (Diagnostic) Data Set (WDBC).** Features are\ncomputed from a digitized image of a fine needle aspirate (FNA) of a breast\nmass. They describe characteristics of the cell nuclei present in the image.\nThe target feature records the prognosis (benign (1) or malignant (2)).\n[Original data available here](ftp://ftp.cs.wisc.edu/math-prog/cpo-\ndataset/machine-learn/cancer/)\n\n \n\nCurrent dataset was adapted to ARFF format from the UCI version. Sample code\nID's were removed.\n\n \n\n! Note that there is also a related Breast Cancer Wisconsin (Original) Data\nSet with a different set of features, better known as\n[breast-w](https://www.openml.org/d/15).\n\n \n\n### Feature description\n\n \n\nTen real-valued features are computed for each of 3 cell nuclei, yielding a\ntotal of 30 descriptive features. See the papers below for more details on how\nthey were computed. The 10 features (in order) are:\n\n \n\na) radius (mean of distances from center to points on the perimeter) \n \nb) texture (standard deviation of gray-scale values) \n \nc) perimeter \n \nd) area \n \ne) smoothness (local variation in radius lengths) \n \nf) compactness (perimeter^2 / area - 1.0) \n \ng) concavity (severity of concave portions of the contour) \n \nh) concave points (number of concave portions of the contour) \n \ni) symmetry \n \nj) fractal dimension (\"coastline approximation\" - 1)\n\n \n\n### Relevant Papers\n\n \n\nW.N. Street, W.H. Wo", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0f467230-f8cf-4e8e-8ef0-1428d5147b29&revisionId=7b0db765-cd12-44cc-b22a-b9b92b31bdf4&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0f467230-f8cf-4e8e-8ef0-1428d5147b29&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0f467230-f8cf-4e8e-8ef0-1428d5147b29/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "29", - "name": "liver-disorders", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1061beb3-646c-458a-bb10-6bea01fce9d7&revisionId=678ca961-e726-4070-9a38-c19602648ecf&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1061beb3-646c-458a-bb10-6bea01fce9d7&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/1061beb3-646c-458a-bb10-6bea01fce9d7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "30", - "name": "liver-disorders", - "description": "https://openml.org \n\n**Author** : BUPA Medical Research Ltd. Donor: Richard S. Forsyth \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Liver+Disorders) \\-\n5/15/1990 \n \n **Please cite** :\n\n \n\n**BUPA liver disorders**\n\n \n\nThe first 5 variables are all blood tests which are thought to be sensitive to\nliver disorders that might arise from excessive alcohol consumption. Each line\nin the dataset constitutes the record of a single male individual.\n\n \n\n**Important note:** The 7th field (selector) has been widely misinterpreted in\nthe past as a dependent variable representing presence or absence of a liver\ndisorder. This is incorrect [1]. The 7th field was created by BUPA researchers\nas a train/test selector. It is not suitable as a dependent variable for\nclassification. The dataset does not contain any variable representing\npresence or absence of a liver disorder. Researchers who wish to use this\ndataset as a classification benchmark should follow the method used in\nexperiments by the donor (Forsyth & Rada, 1986, Machine learning: applications\nin expert systems and information retrieval) and others (e.g. Turney, 1995,\nCost-sensitive classification: Empirical evaluation of a hybrid genetic\ndecision tree induction algorithm), who used the 6th field (drinks), after\ndichotomising, as a dependent variable for classification. Because of\nwidespread misinterpretation in the past, researchers should take care to\nstate their method clearly.\n\n \n\n **Attribute information** \n \n1\\. mcv mean corpuscular volume \n \n2\\. alkphos alkaline phosphotase \n \n3\\. sgpt alanine aminotransferase \n \n4\\. sgot aspartate aminotransferase \n \n5\\. gammagt gamma-glutamyl transpeptidase \n \n6\\. drinks number of half-pint equivalents of alcoholic beverages drunk per\nday \n \n7\\. se", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1061beb3-646c-458a-bb10-6bea01fce9d7&revisionId=94f838e4-944a-401d-84a7-49b5582a540b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1061beb3-646c-458a-bb10-6bea01fce9d7&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/1061beb3-646c-458a-bb10-6bea01fce9d7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "34", - "name": "grpc_hydro_hubeau", - "description": "**Connector** to get French hydrology data.\n\n \n\nThe API makes it possible to interrogate the French hydrometric reference\nsystem (sites and observation stations of the French measurement network) as\nwell as the observations of water level (H) and flow (Q) called \"real time\".\n\n \n\nThe API is updated every 2 minutes over 24 hours deep and maintains a one\nmonth history.\n\n \n\nThe data disseminated is the raw data measured in the field, without expertise\nor improvements made by hydrometers.\n\n \n\nObservations are expressed in the following units:\n\n \n\n * mm for water heights (divide by 1000 to convert to meters);\n * l / s for flow rates (divide by 1000 to convert to m3 / s).\n\nDates are expressed in Coordinated Universal Time (UTC) in ISO 8601 format.\n\n \n\nIn metropolitan France, add 1 hour to UTC time during winter time, and 2 hours\nduring summer time. In Guadeloupe and Martinique, subtract 4 hours from UTC\ntime; In Guyana subtract 3 hours from UTC time; In Mayotte add 3 hours to UTC\ntime; In Reunion, add 4 hours to UTC time.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=11b6681c-d8df-49c0-ba38-480b3ee2f63c&revisionId=ad13c11d-9d68-4101-a325-e9da62142ce0&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.3", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=11b6681c-d8df-49c0-ba38-480b3ee2f63c&version=1.0.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/11b6681c-d8df-49c0-ba38-480b3ee2f63c/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "38", - "name": "wilt", - "description": "https://openml.org \n\n**Author** : Brian Johnson \n \n**Source** : [UCI] (https://archive.ics.uci.edu/ml/datasets/Wilt) \n \n **Please cite** : Johnson, B., Tateishi, R., Hoan, N., 2013. A hybrid\npansharpening approach and multiscale object-based image analysis for mapping\ndiseased pine and oak trees. International Journal of Remote Sensing, 34 (20),\n6969-6982.\n\n \n\n**Changes w.r.t. version 1: renamed variables such that they match\ndescription.**\n\n \n\n### Dataset:\n\n \n\nWilt Data Set\n\n \n\n### Abstract:\n\n \n\nHigh-resolution Remote Sensing data set (Quickbird). Small number of training\nsamples of diseased trees, large number for other land cover. Testing data set\nfrom stratified random sample of image.\n\n \n\n### Source:\n\n \n\nBrian Johnson; \nInstitute for Global Environmental Strategies; \n2108-11 Kamiyamaguchi, Hayama, Kanagawa,240-0115 Japan; \nEmail: Johnson '@' iges.or.jp\n\n \n\n### Data Set Information:\n\n \n\nThis data set contains some training and testing data from a remote sensing\nstudy by Johnson et al. (2013) that involved detecting diseased trees in\nQuickbird imagery. There are few training samples for the 'diseased trees'\nclass (74) and many for 'other land cover' class (4265).\n\n \n\nThe data set consists of image segments, generated by segmenting the\npansharpened image. The segments contain spectral information from the\nQuickbird multispectral image bands and texture information from the\npanchromatic (Pan) image band. The testing data set is for the row with\n\u00e2\u20ac\u0153Segmentation scale 15\u00e2\u20ac\u009d segments and \u00e2\u20ac\u0153original multi-spectral image\u00e2\u20ac\u009d\nSpectral information in Table 2 of the reference (i.e. row 5). Please see the\nreference below for more information on the data set, and please cite the\nreference if you use this data set. Enjoy!\n\n \n\n### Attribute Information:\n\n \n\ncl", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1413584f-738b-4241-9b60-80228e509fb7&revisionId=0bc075c6-0a74-48a0-97fb-b1dd62870920&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1413584f-738b-4241-9b60-80228e509fb7&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/1413584f-738b-4241-9b60-80228e509fb7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "39", - "name": "cjs", - "description": "https://openml.org \n\n**Author** : Dr. Fernando Camacho \n \n **Source** : Unknown - 1995 \n \n **Please cite** : Camacho, F. and Arron, G. (1995) Effects of the regulators\npaclobutrazol and flurprimidol on the growth of terminal sprouts formed on\ntrimmed silver maple trees. Canadian Journal of Statistics 3(23).\n\n \n\nData on tree growth used in the Case Study published in the September, 1995\nissue of the Canadian Journal of Statistics. This data set was been provided\nby Dr. Fernando Camacho, Ontario Hydro Technologies, 800 Kipling Ave, Toronto\nCanada M3Z 5S4. It forms the basis of the Case Study in Data Analysis\npublished in the Canadian Journal of Statistics, September 1995. It can be\nfreely used for noncommercial purposes, as long as proper acknowledgement to\nthe source and to the Canadian Journal of Statistics is made.\n\n \n\nDescription\n\n \n\nThe effects of the Growth Regulators Paclobutrazol (PP 333) \nand Flurprimidol (EL-500) on the Number and Length of Internodes \nin Terminal Sprouts Formed on Trimmed Silver Maple Trees.\n\n \n\nIntroduction:\n\n \n\nThe trimming of trees under distribution lines on city streets and \nin rural areas is a major problem and expense for electrical \nutilities. Such operations are routinely performed at intervals of \none to eight years depending upon the individual species growth rate \nand the amount of clearance required. Ontario Hydro trims about \n500,000 trees per year at a cost of about $25 per tree.\n\n \n\nMuch effort has been spent in developing chemicals for the horticultural \nindustry to retard the growth of woody and herbaceous plants. Recently, \na group of new growth regulators was introduced which was shown to be \neffective in controlling the growth of trees without producing \nnoticeable injury symptoms. In this group are P", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=141de15b-91a7-4dcb-9eb3-4297e217c3de&revisionId=62ae0822-9a5e-4003-afb5-3fef610694cd&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=141de15b-91a7-4dcb-9eb3-4297e217c3de&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/141de15b-91a7-4dcb-9eb3-4297e217c3de/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "40", - "name": "credit-g", - "description": "https://openml.org \n\n**Author** : Dr. Hans Hofmann \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/statlog+\\(german+credit+data\\))\n\\- 1994 \n \n**Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n **German Credit data** \n \nThis dataset classifies people described by a set of attributes as good or bad\ncredit risks.\n\n \n\nThis dataset comes with a cost matrix: \n`Good Bad (predicted) \nGood 0 1 (actual) \nBad 5 0`\n\n \n\nIt is worse to class a customer as good when they are bad (5), than it is to\nclass a customer as bad when they are good (1).\n\n \n\n### Attribute description\n\n \n\n \n\n 1. Status of existing checking account, in Deutsche Mark. \n \n\n 2. Duration in months \n \n\n 3. Credit history (credits taken, paid back duly, delays, critical accounts) \n \n\n 4. Purpose of the credit (car, television,...) \n \n\n 5. Credit amount \n \n\n 6. Status of savings account/bonds, in Deutsche Mark. \n \n\n 7. Present employment, in number of years. \n \n\n 8. Installment rate in percentage of disposable income \n \n\n 9. Personal status (married, single,...) and sex \n \n\n 10. Other debtors / guarantors \n \n\n 11. Present residence since X years \n \n\n 12. Property (e.g. real estate) \n \n\n 13. Age in years \n \n\n 14. Other installment plans (banks, stores) \n \n\n 15. Housing (rent, own,...) \n \n\n 16. Number of existing credits at this bank \n \n\n 17. Job \n \n\n 18. Number of people being liable to provide maintenance for \n \n\n 19. Telephone (yes,no) \n \n\n 20. Foreign worker (yes,no)\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=14578085-5f08-4275-a790-5a9cfbefb412&revisionId=ce377185-b3f0-4f39-8910-d6296ddef03b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=14578085-5f08-4275-a790-5a9cfbefb412&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/14578085-5f08-4275-a790-5a9cfbefb412/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "41", - "name": "GAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_75_EDM-2_001", - "description": "https://openml.org \n\nGAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_75_EDM-2_001-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=14f91a0e-6262-454d-8edf-90e68eb8de15&revisionId=7b852968-64e8-417f-947b-487a4b0ffca8&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=14f91a0e-6262-454d-8edf-90e68eb8de15&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/14f91a0e-6262-454d-8edf-90e68eb8de15/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "44", - "name": "FileUploadDataBroker", - "description": "This is a simple file upload data broker. It can be used as a starting point\nfor pipelines which process files. It offers a web interface with a simple\nfile upload dialog. The uploaded files are saved on a shared volume, then the\ncorresponding paths are sent to the next model in the pipeline. For example,\nthis data broker can be used in connection with the following models:\nMusicDetection, SpeechDection, MusicAnnotation, and ObjectDetection. In the\ncurrent version, only single files are supported.\n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1681c927-ae2c-41f6-9ee4-51ece5e80806&revisionId=f5f3b0cc-2486-45ac-8928-8769b89c8825&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1681c927-ae2c-41f6-9ee4-51ece5e80806&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/1681c927-ae2c-41f6-9ee4-51ece5e80806/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "50", - "name": "led7", - "description": "https://openml.org \n\nled7-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1bb76aa6-45df-4944-b2bf-6c6de92df1cc&revisionId=d6a09a23-a730-4298-93cb-76a00cc4d1ea&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1bb76aa6-45df-4944-b2bf-6c6de92df1cc&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/1bb76aa6-45df-4944-b2bf-6c6de92df1cc/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "55", - "name": "vehicle", - "description": "https://openml.org \n\n**Author** : Dr. Pete Mowforth and Dr. Barry Shepherd \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/Statlog+\\(Vehicle+Silhouettes\\)) \n **Please cite** : Siebert,JP. Turing Institute Research Memorandum\nTIRM-87-018 \"Vehicle Recognition Using Rule Based Methods\" (March 1987)\n\n \n\nNAME \nvehicle silhouettes\n\n \n\nPURPOSE \nto classify a given silhouette as one of four types of vehicle, \nusing a set of features extracted from the silhouette. The \nvehicle may be viewed from one of many different angles.\n\n \n\nPROBLEM TYPE \nclassification\n\n \n\nSOURCE \nDrs.Pete Mowforth and Barry Shepherd \nTuring Institute \nGeorge House \n36 North Hanover St. \nGlasgow \nG1 2AD\n\n \n\nCONTACT \nAlistair Sutherland \nStatistics Dept. \nStrathclyde University \nLivingstone Tower \n26 Richmond St. \nGLASGOW G1 1XH \nGreat Britain\n\n \n\n \n \n Tel: 041 552 4400 x3033 \n \n Fax: 041 552 4711 \n \n e-mail: alistair@uk.ac.strathclyde.stams \n \n\n \n\nHISTORY \nThis data was originally gathered at the TI in 1986-87 by \nJP Siebert. It was partially financed by Barr and Stroud Ltd. \nThe original purpose was to find a method of distinguishing \n3D objects within a 2D image by application of an ensemble of \nshape feature extractors to the 2D silhouettes of the objects. \nMeasures of shape features extracted from example silhouettes \nof objects to be discriminated were used to generate a class- \nification rule tree by means of computer induction. \nThis object recognition strategy was successfully used to \ndiscriminate between silhouettes of model cars, vans and buses \nviewed from constrained elevation but all angles of rotation. \nThe rule tree classification performance compared favourably \nto MDC (Minimum Distance C", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=201367ca-d077-4a98-be44-bff9bee718b6&revisionId=d36b7554-5acf-4f6f-a3c1-702b540faf51&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=201367ca-d077-4a98-be44-bff9bee718b6&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/201367ca-d077-4a98-be44-bff9bee718b6/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "58", - "name": "audio-file-broker", - "description": "Audio File Broker is a Python component that exposes an endpoint to receive an\naudio file (i.e., wav) through a POST endpoint reachable using the command:\n\n \n \n minikube service \\--url audio-file-broker1webui\n \n\nThe output is an audio file with a static ID that can be used for further\nelaboration.\n\n \n\nDetails and source code can be found here: \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=228e7550-ddc8-4774-89c8-e2b9638b72fa&revisionId=0fb523a2-61ea-4348-9b66-1ea7a9c28056&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=228e7550-ddc8-4774-89c8-e2b9638b72fa&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/228e7550-ddc8-4774-89c8-e2b9638b72fa/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "62", - "name": "analcatdata_dmft", - "description": "https://openml.org \n\n**Author** : Unknown \n \n**Source** : [Jeffrey S.\nSimonoff](http://people.stern.nyu.edu/jsimonof/AnalCatData/Data/) \\- 2003 \n \n**Please cite** : Jeffrey S. Simonoff, Analyzing Categorical Data, Springer-\nVerlag, 2003\n\n \n\nOne of the datasets used in the book \"Analyzing Categorical Data,\" \nby Jeffrey S. Simonoff. It contains data on the DMFT Index (Decayed, Missing,\nand Filled Teeth) before and after different prevention strategies. The\nprevention strategy is commonly used as the (categorical) target.\n\n \n\n### Attribute information\n\n \n\n \n\n * DMFT.Begin and DMFT.End: DMFT index before and after the prevention strategy\n \n\n * Gender of the individual\n \n\n * Ethnicity of the individual\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2580db21-2cd8-405b-8912-e9881ada1454&revisionId=49ed507f-ad20-469b-a293-43628d39546c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2580db21-2cd8-405b-8912-e9881ada1454&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2580db21-2cd8-405b-8912-e9881ada1454/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "64", - "name": "ai4eu-robotics-pump-6144-fft-broker", - "description": " \n\n \n\nThe robotic pump demonstrator represents a hydraulic pump that can be mounted\non an industrial robot, for example, to pump liquid paint for spray painting.\nOn this pump, one accelerometer is mounted for vibration monitoring and\nrecording.\n\nThe pump can be controlled in terms of speed (rotations per minute, rpm),\naffecting the throughput of paint and the pressure in and out of the pump.\n\nThe dataset consists of 380 million measurements of several sensor data of the\npump system in 1-second intervals over two months in 2020.\n\n \n\n \n\n \n\n \n\n[The complete dataset & documentation is available on\nZenodo.](https://zenodo.org/record/5729187)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2764acc6-f82f-4b9c-ada8-fcc4edffa180&revisionId=822f9bd2-a5f7-42f7-b39d-01161ad2af1c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2764acc6-f82f-4b9c-ada8-fcc4edffa180&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2764acc6-f82f-4b9c-ada8-fcc4edffa180/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "66", - "name": "parity5_plus_5", - "description": "https://openml.org \n\nparity5_plus_5-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2863f408-3bf5-46e5-a5e8-2c1d49547a73&revisionId=0ce18abf-1767-4bb5-b7fe-351aeaa74102&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2863f408-3bf5-46e5-a5e8-2c1d49547a73&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2863f408-3bf5-46e5-a5e8-2c1d49547a73/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "67", - "name": "profb", - "description": "https://openml.org \n\n**Author** : Hal Stern, Robin Lock \n \n **Source** : [StatLib](http://lib.stat.cmu.edu/datasets/profb) \n \n**Please cite** :\n\n \n\nPRO FOOTBALL SCORES (raw data appears after the description below)\n\n \n\nHow well do the oddsmakers of Las Vegas predict the outcome of \nprofessional football games? Is there really a home field advantage - if \nso how large is it? Are teams that play the Monday Night game at a \ndisadvantage when they play again the following Sunday? Do teams benefit \nfrom having a \"bye\" week off in the current schedule? These questions and \na host of others can be investigated using this data set.\n\n \n\nHal Stern from the Statistics Department at Harvard University has \nmade available his compilation of scores for all National Football League \ngames from the 1989, 1990, and 1991 seasons. Dr. Stern used these data as \npart of his presentation \"Who's Number One?\" in the special \"Best of \nBoston\" session at the 1992 Joint Statistics Meetings.\n\n \n\nSeveral variables in the data are keyed to the oddsmakers \"point \nspread\" for each game. The point spread is a value assigned before each \ngame to serve as a handicap for whichever is perceived to be the better \nteam. Thus, to win against the point spread, the \"favorite\" team must beat \nthe \"underdog\" team by more points than the spread. The underdog \"wins\" \nagainst the spread if it wins the game outright or manages to lose by fewer \npoints than the spread. In theory, the point spread should represent the \n\"expert\" prediction as to the game's outcome. In practice, it more usually \ndenotes a point at which an equal amount of money will be wagered both for \nand against the favored team.\n\n \n\nRaw data below contains 672 cases (all 224 regular season games in \neach season and infor", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2b1bb308-9b5f-4ba9-afa0-0ac42878bf1b&revisionId=5f66eea5-684f-451a-902e-f8a85d3cac02&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2b1bb308-9b5f-4ba9-afa0-0ac42878bf1b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2b1bb308-9b5f-4ba9-afa0-0ac42878bf1b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "75", - "name": "PersistentVolumeProvider", - "description": "The Persistent Volume Provider can be used to provide a common file storage\nfor elements of a pipeline. The name of the node should be the absolute\ndirectory path.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2f20e0ad-bc67-4629-9c8b-89f40a8c12d6&revisionId=4a8e7107-be77-4fd3-b1ec-c00afea2b4e6&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2f20e0ad-bc67-4629-9c8b-89f40a8c12d6&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2f20e0ad-bc67-4629-9c8b-89f40a8c12d6/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "88", - "name": "threeOf9", - "description": "https://openml.org \n\nthreeOf9-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=35271ff6-47b3-488c-9021-b0c5f893abd0&revisionId=c4c2d7bd-c07f-44e0-be8e-9711db0fb44a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=35271ff6-47b3-488c-9021-b0c5f893abd0&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/35271ff6-47b3-488c-9021-b0c5f893abd0/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "89", - "name": "monks-problems-2", - "description": "https://openml.org \n\n**Author** : Sebastian Thrun (Carnegie Mellon University) \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/MONK's+Problems)\n\\- October 1992 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**The Monk's Problems: Problem 2** \n \nOnce upon a time, in July 1991, the monks of Corsendonk Priory were faced with\na school held in their priory, namely the 2nd European Summer School on\nMachine Learning. After listening more than one week to a wide variety of\nlearning algorithms, they felt rather confused: Which algorithm would be\noptimal? And which one to avoid? As a consequence of this dilemma, they\ncreated a simple task on which all learning algorithms ought to be compared:\nthe three MONK's problems.\n\n \n\nThe target concept associated with the 2nd Monk's problem is the binary\noutcome of the logical formula: \n \nMONK-2: EXACTLY TWO of {a1 = 1, a2 = 1, a3 = 1, a4 = 1, a5 = 1, a6 = 1}\n\n \n\nIn this dataset, the original train and test sets were merged to allow other\nsampling procedures. However, the original train-test splits can be found as\none of the OpenML tasks.\n\n \n\n### Attribute information:\n\n \n\n \n\n * attr1: 1, 2, 3 \n \n\n * attr2: 1, 2, 3 \n \n\n * attr3: 1, 2 \n \n\n * attr4: 1, 2, 3 \n \n\n * attr5: 1, 2, 3, 4 \n \n\n * attr6: 1, 2 \n \n\n \n\n### Relevant papers\n\n \n\nThe MONK's Problems - A Performance Comparison of Different Learning\nAlgorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J.\nCheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K.\nKaufman, S. Keller, I. Kononenko, J. Kreuziger, R.S. Michalski, T. Mitchell,\nP. Pachowicz, Y. Reich H. Vafaie, W. Van de Welde, W. Wenzel, J. Wnek, and J.\nZhang. Technical Report CS-CMU-91-197, Carnegie Mellon University, D", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=35915a44-ff16-4bdb-a6d6-fa88df61bf26&revisionId=549f1574-d126-42c0-8197-64ec12cbc567&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=35915a44-ff16-4bdb-a6d6-fa88df61bf26&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/35915a44-ff16-4bdb-a6d6-fa88df61bf26/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "90", - "name": "AI4Agri-frontend", - "description": "GUI and back-end logic for the AI4Agri models\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=35d9681b-c182-466b-9edf-1a9c962d0888&revisionId=6f92c9f4-b497-411d-8a4b-38e2b32251be&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=35d9681b-c182-466b-9edf-1a9c962d0888&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/35d9681b-c182-466b-9edf-1a9c962d0888/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "96", - "name": "mofn-3-7-10", - "description": "https://openml.org \n\n**Author** : Unknown \n \n **Source** : [PMLB](https://github.com/EpistasisLab/penn-ml-\nbenchmarks/tree/master/datasets/classification) Supposedly from UCI\noriginally, but can't find it there. \n \n **Please cite**\n\n \n\nThe origin is not clear, but presumably this is an artificial problem\nrepresenting M-of-N rules. The target is 1 if a certain M 'bits' are '1'?\n(Joaquin Vanschoren)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=39846e3e-27c7-47c4-a613-55469ec5bd39&revisionId=9a0ab46a-219c-43c2-9f7d-464f8fb1da02&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=39846e3e-27c7-47c4-a613-55469ec5bd39&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/39846e3e-27c7-47c4-a613-55469ec5bd39/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "97", - "name": "monks-problems-3", - "description": "https://openml.org \n\n**Author** : Sebastian Thrun (Carnegie Mellon University) \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/MONK's+Problems)\n\\- October 1992 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**The Monk's Problems: Problem 3** \n \nOnce upon a time, in July 1991, the monks of Corsendonk Priory were faced with\na school held in their priory, namely the 2nd European Summer School on\nMachine Learning. After listening more than one week to a wide variety of\nlearning algorithms, they felt rather confused: Which algorithm would be\noptimal? And which one to avoid? As a consequence of this dilemma, they\ncreated a simple task on which all learning algorithms ought to be compared:\nthe three MONK's problems.\n\n \n\nThe target concept associated with the 3rd Monk's problem is the binary\noutcome of the logical formula: \n \nMONK-3: (a5 = 3 and a4 = 1) or (a5 /= 4 and a2 /= 3) \n \nIn addition, 5% class noise was added to the training set\n\n \n\nIn this dataset, the original train and test sets were merged to allow other\nsampling procedures. However, the original train-test splits can be found as\none of the OpenML tasks.\n\n \n\n### Attribute information:\n\n \n\n \n\n * attr1: 1, 2, 3 \n \n\n * attr2: 1, 2, 3 \n \n\n * attr3: 1, 2 \n \n\n * attr4: 1, 2, 3 \n \n\n * attr5: 1, 2, 3, 4 \n \n\n * attr6: 1, 2 \n \n\n \n\n### Relevant papers\n\n \n\nThe MONK's Problems - A Performance Comparison of Different Learning\nAlgorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J.\nCheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K.\nKaufman, S. Keller, I. Kononenko, J. Kreuziger, R.S. Michalski, T. Mitchell,\nP. Pachowicz, Y. Reich H. Vafaie, W. Van de Welde, W. Wenzel, J. Wnek, and J.\nZhang. Technical Report ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3c42202a-5c1f-4ebf-954f-b54ad0fb03e5&revisionId=dd7fa7d1-b185-460a-999e-8e792943ca7e&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=3c42202a-5c1f-4ebf-954f-b54ad0fb03e5&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/3c42202a-5c1f-4ebf-954f-b54ad0fb03e5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "98", - "name": "zoo", - "description": "https://openml.org \n\n**Author** : Richard S. Forsyth \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Zoo) \\- 5/15/1990 \n \n**Please cite** :\n\n \n\n**Zoo database** \n \nA simple database containing 17 Boolean-valued attributes describing animals.\nThe \"type\" attribute appears to be the class attribute.\n\n \n\nNotes: \n \n* I find it unusual that there are 2 instances of \"frog\" and one of \"girl\"! \n* feature 'animal' is an identifier (though not unique) and should be ignored when modeling\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=41098614-571a-4c70-b45d-6a7fbcabdcf8&revisionId=eccea8f4-cc22-4962-934f-1dbf3da9f983&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=41098614-571a-4c70-b45d-6a7fbcabdcf8&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/41098614-571a-4c70-b45d-6a7fbcabdcf8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "100", - "name": "breast-w", - "description": "https://openml.org \n\n**Author** : Dr. William H. Wolberg, University of Wisconsin \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+\\(original\\)),\n[University of Wisconsin](http://pages.cs.wisc.edu/~olvi/uwmp/cancer.html) \\-\n1995 \n \n **Please cite** : See below, plus\n[UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Breast Cancer Wisconsin (Original) Data Set.** Features are computed from a\ndigitized image of a fine needle aspirate (FNA) of a breast mass. They\ndescribe characteristics of the cell nuclei present in the image. The target\nfeature records the prognosis (malignant or benign). [Original data available\nhere](ftp://ftp.cs.wisc.edu/math-prog/cpo-dataset/machine-learn/cancer/)\n\n \n\nCurrent dataset was adapted to ARFF format from the UCI version. Sample code\nID's were removed.\n\n \n\n! Note that there is also a related Breast Cancer Wisconsin (Diagnosis) Data\nSet with a different set of features, better known as\n[wdbc](https://www.openml.org/d/1510).\n\n \n\n### Relevant Papers\n\n \n\nW.N. Street, W.H. Wolberg and O.L. Mangasarian. Nuclear feature extraction for\nbreast tumor diagnosis. IS&T/SPIE 1993 International Symposium on Electronic\nImaging: Science and Technology, volume 1905, pages 861-870, San Jose, CA,\n1993.\n\n \n\nO.L. Mangasarian, W.N. Street and W.H. Wolberg. Breast cancer diagnosis and\nprognosis via linear programming. Operations Research, 43(4), pages 570-577,\nJuly-August 1995.\n\n \n\n### Citation request\n\n \n\nThis breast cancer database was obtained from the University of Wisconsin\nHospitals, Madison from Dr. William H. Wolberg. If you publish results when\nusing this database, then please include this information in your\nacknowledgments. Also, please cite one or more of:\n\n \n\n \n\n 1. \n\nO. L. Mangasa", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=42cec034-786e-4b26-b299-c28e428c7b40&revisionId=3a85905c-0034-4a87-b284-b7eac431cf28&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=42cec034-786e-4b26-b299-c28e428c7b40&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/42cec034-786e-4b26-b299-c28e428c7b40/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "103", - "name": "mux6", - "description": "https://openml.org \n\nmux6-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=45e6dcba-4163-4613-8443-2333d958b9a5&revisionId=aa8d762f-b679-4687-9d96-33b887a3d39c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=45e6dcba-4163-4613-8443-2333d958b9a5&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/45e6dcba-4163-4613-8443-2333d958b9a5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "104", - "name": "MyIris", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\nMyExampleIris\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4787776f-fd13-43cd-afab-eb863338f6e5&revisionId=9c95ba8a-2f03-41a4-8499-6421229acc9a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4787776f-fd13-43cd-afab-eb863338f6e5&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/4787776f-fd13-43cd-afab-eb863338f6e5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "123", - "name": "steel-plates-fault", - "description": "https://openml.org \n\n**Author** : Semeion, Research Center of Sciences of Communication, Rome,\nItaly. \n \n**Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/steel+plates+faults) \n \n**Please cite** : Dataset provided by Semeion, Research Center of Sciences of\nCommunication, Via Sersale 117, 00128, Rome, Italy.\n\n \n\n**Steel Plates Faults Data Set** \n \nA dataset of steel plates' faults, classified into 7 different types. The goal\nwas to train machine learning for automatic pattern recognition.\n\n \n\nThe dataset consists of 27 features describing each fault (location, size,\n...) and 7 binary features indicating the type of fault (on of 7: Pastry,\nZ_Scratch, K_Scatch, Stains, Dirtiness, Bumps, Other_Faults). The latter is\ncommonly used as a binary classification target ('common' or 'other' fault.)\n\n \n\n### Attribute Information\n\n \n\n \n\n * V1: X_Minimum \n \n\n * V2: X_Maximum \n \n\n * V3: Y_Minimum \n \n\n * V4: Y_Maximum \n \n\n * V5: Pixels_Areas \n \n\n * V6: X_Perimeter \n \n\n * V7: Y_Perimeter \n \n\n * V8: Sum_of_Luminosity \n \n\n * V9: Minimum_of_Luminosity \n \n\n * V10: Maximum_of_Luminosity \n \n\n * V11: Length_of_Conveyer \n \n\n * V12: TypeOfSteel_A300 \n \n\n * V13: TypeOfSteel_A400 \n \n\n * V14: Steel_Plate_Thickness \n \n\n * V15: Edges_Index \n \n\n * V16: Empty_Index \n \n\n * V17: Square_Index \n \n\n * V18: Outside_X_Index \n \n\n * V19: Edges_X_Index \n \n\n * V20: Edges_Y_Index \n \n\n * V21: Outside_Global_Index \n \n\n * V22: LogOfAreas \n \n\n * V23: Log_X_Index \n \n\n * V24: Log_Y_Index \n \n\n * V25: Orientation_Index \n \n\n * V26: Luminosity_Index \n \n\n * V27: SigmoidOfAreas \n \n\n * V28: Pastry \n \n\n * V29: Z_Scratch \n \n\n * V30: K_Scatch \n \n\n * V31: Stains \n \n\n * V32: Dirtiness \n \n\n * V33: Bumps \n \n\n * Class: Other_Faults \n \n\n \n\n### Rel", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5636ef7b-50d6-49e1-8e78-5b68f24274c5&revisionId=731152cd-a431-4c78-9e65-02f74b6c5c0a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5636ef7b-50d6-49e1-8e78-5b68f24274c5&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5636ef7b-50d6-49e1-8e78-5b68f24274c5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "124", - "name": "ai4eu-robotics-pump-1024-raw-broker", - "description": "The robotic pump demonstrator represents a hydraulic pump that can be mounted\non an industrial robot, for example, to pump liquid paint for spray painting.\nOn this pump, one accelerometer is mounted for vibration monitoring and\nrecording.\n\nThe pump can be controlled in terms of speed (rotations per minute, rpm),\naffecting the throughput of paint and the pressure in and out of the pump.\n\nThe dataset consists of 380 million measurements of several sensor data of the\npump system in 1-second intervals over two months in 2020.\n\n[The complete dataset & documentation is available on\nZenodo.](https://zenodo.org/record/5729187)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=57617093-3530-44fc-a72e-b5f6f83630cd&revisionId=a25721c1-88bf-4146-b219-3a4db5c00059&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=57617093-3530-44fc-a72e-b5f6f83630cd&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/57617093-3530-44fc-a72e-b5f6f83630cd/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "126", - "name": "irish", - "description": "https://openml.org \n\n**Author** : Vincent Greaney, Thomas Kelleghan (St. Patrick's College, Dublin) \n \n**Source** : [StatLib](http://lib.stat.cmu.edu/datasets/irish.ed) \\- 1984 \n \n **Please cite** : [StatLib](http://lib.stat.cmu.edu/datasets/)\n\n \n\n **Irish Educational Transitions Data** \n \nData on educational transitions for a sample of 500 Irish schoolchildren aged\n11 in 1967. The data were collected by Greaney and Kelleghan (1984), and\nreanalyzed by Raftery and Hout (1985, 1993).\n\n \n\n### Attribute information\n\n \n\n \n\n * Sex: 1=male; 2=female.\n \n\n * DVRT (Drumcondra Verbal Reasoning Test Score).\n \n\n * Educational level attained\n \n\n * Leaving Certificate. 1 if Leaving Certificate not taken; 2 if taken.\n \n\n * Prestige score for father's occupation (calculated by Raftery and Hout, 1985).\n \n\n * Type of school: 1=secondary; 2=vocational; 9=primary terminal leaver.\n \n\n \n\n### Relevant papers\n\n \n\nGreaney, V. and Kelleghan, T. (1984). Equality of Opportunity in Irish \nSchools. Dublin: Educational Company.\n\n \n\nKass, R.E. and Raftery, A.E. (1993). Bayes factors and model uncertainty. \nTechnical Report no. 254, Department of Statistics, University of Washington. \nRevised version to appear in Journal of the American Statistical \nAssociation.\n\n \n\nRaftery, A.E. (1988). Approximate Bayes factors for generalized linear models. \nTechnical Report no. 121, Department of Statistics, University of Washington.\n\n \n\nRaftery, A.E. and Hout, M. (1985). Does Irish education approach the \nmeritocratic ideal? A logistic analysis. \nEconomic and Social Review, 16, 115-140.\n\n \n\nRaftery, A.E. and Hout, M. (1993). Maximally maintained inequality: \nExpansion, reform and opportunity in Irish schools. \nSociology of Education, 66, 41-62.\n\n \n\n### Ownership Statement\n\n ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5a43bbed-a905-4af3-840b-eec565f2165b&revisionId=920c28eb-e743-4ef1-9606-04b382db90c5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5a43bbed-a905-4af3-840b-eec565f2165b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5a43bbed-a905-4af3-840b-eec565f2165b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "128", - "name": "meta", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\n \n\n 1. \n\nTitle: meta-data\n\n \n\n \n\n 2. \n\nSources: \n(a) Creator: \nLIACC - University of Porto \nR.Campo Alegre 823 \n4150 PORTO \n(b) Donor: P.B.Brazdil or J.Gama Tel.: +351 600 1672 \nLIACC, University of Porto Fax.: +351 600 3654 \nRua Campo Alegre 823 Email: statlog-adm@ncc.up.pt \n4150 Porto, Portugal \n(c) Date: March, 1996\n\n \n\n \n\n \n\n(d) Acknowlegements: \nLIACC wishes to thank Commission of European Communities \nfor their support. Also, we wish to thank the following partners \nfor providing the individual test results:\n\n \n\n \n\n * Dept. of Statistics, University of Strathclyde, Glasgow, UK\n \n\n * Dept. of Statistics, University of Leeds, UK\n \n\n * Aston University, Birmingham, UK\n \n\n * Forschungszentrum Ulm, Daimler-Benz AG, Germany\n \n\n * Brainware GmbH, Berlin, Germany\n \n\n * Frauenhofer Gesellschaft IITB-EPO, Berlin, Germany\n \n\n * Institut fuer Kybernetik, Bochum, Germany\n \n\n * ISoft, Gif sur Yvette, France\n \n\n * \n\nDept. of CS and AI, University of Granada, Spain\n\n \n\n \n\n * \n\nPast Usage:\n\n \n\n \n\n \n\nMeta-Data was used in order to give advice about which classification \nmethod is appropriate for a particular dataset. \nThis work is described in:\n\n \n\n-\"Machine Learning, Neural and Statistical Learning\" \nEds. D.Michie,D.J.Spiegelhalter and C.Taylor \nEllis Horwood-1994\n\n \n\n \n\n * \"Characterizing the Applicability of \nClassification Algorithms Using Meta-Level Learning\", \nP. Brazdil, J.Gama and B.Henery: \nin Proc. of Machine Learning - ECML-94, \ned. F.Bergadano and L.de Raedt,LNAI Vol.784 Springer-Verlag.\n\n \n\n \n\n-\"Characterization of Classification Algorithms\" \nJ.Gama, P.Brazdil \nin Proc. of EPIA 95, LNAI Vol.990 \n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5bdd6ed1-601e-482b-904e-886921963a2d&revisionId=eb64f31c-2e72-4bd9-a60d-0598b8e83b33&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5bdd6ed1-601e-482b-904e-886921963a2d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5bdd6ed1-601e-482b-904e-886921963a2d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "130", - "name": "glass", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - \n \n**Please cite** :\n\n \n\n \n\n 1. \n\nTitle: Glass Identification Database\n\n \n\n \n\n 2. \n\nSources: \n(a) Creator: B. German \n\\-- Central Research Establishment \nHome Office Forensic Science Service \nAldermaston, Reading, Berkshire RG7 4PN \n(b) Donor: Vina Spiehler, Ph.D., DABFT \nDiagnostic Products Corporation \n(213) 776-0180 (ext 3014) \n(c) Date: September, 1987\n\n \n\n \n\n 3. \n\nPast Usage: \n\\-- Rule Induction in Forensic Science \n\\-- Ian W. Evett and Ernest J. Spiehler \n\\-- Central Research Establishment \nHome Office Forensic Science Service \nAldermaston, Reading, Berkshire RG7 4PN \n\\-- Unknown technical note number (sorry, not listed here) \n\\-- General Results: nearest neighbor held its own with respect to the \nrule-based system\n\n \n\n \n\n 4. \n\nRelevant Information:n \nVina conducted a comparison test of her rule-based system, BEAGLE, the \nnearest-neighbor algorithm, and discriminant analysis. BEAGLE is \na product available through VRS Consulting, Inc.; 4676 Admiralty Way, \nSuite 206; Marina Del Ray, CA 90292 (213) 827-7890 and FAX: -3189. \nIn determining whether the glass was a type of \"float\" glass or not, \nthe following results were obtained (# incorrect answers):\n\n \n\n \n Type of Sample Beagle NN DA \n Windows that were float processed (87) 10 12 21 \n Windows that were not: (76) 19 16 22 \n \n\n \n\nThe study of classification of types of glass was motivated by \ncriminological investigation. At the scene of the crime, the glass left \ncan be used as evidence...if it is correctly identified!\n\n \n\n \n\n 5. \n\nNumber of Instances: 214\n\n \n\n \n\n 6. \n\nNumber of Attributes:", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5c5599f7-73c7-4874-ace1-4c6e312409c4&revisionId=64523754-bb18-406c-827d-4fe090d0e5e6&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5c5599f7-73c7-4874-ace1-4c6e312409c4&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5c5599f7-73c7-4874-ace1-4c6e312409c4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "131", - "name": "wall-robot-navigation", - "description": "https://openml.org \n\n**Author** : Ananda Freire, Marcus Veloso and Guilherme Barreto \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Wall-\nFollowing+Robot+Navigation+Data) \\- 2010 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n **Wall-Following Robot Navigation Data Data Set** \n \nThe data were collected as the SCITOS G5 robot navigates through the room\nfollowing the wall in a clockwise direction, for 4 rounds, using 24 ultrasound\nsensors arranged circularly around its 'waist'.\n\n \n\nThe data consists of raw values of the measurements of all 24 ultrasound\nsensors and the corresponding class label. Sensor readings are sampled at a\nrate of 9 samples per second.\n\n \n\nThe class labels are: \n \n1\\. Move-Forward, \n \n2\\. Slight-Right-Turn, \n \n3\\. Sharp-Right-Turn, \n \n4\\. Slight-Left-Turn\n\n \n\nIt is worth mentioning that the 24 ultrasound readings and the simplified\ndistances were collected at the same time step, so each file has the same\nnumber of rows (one for each sampling time step).\n\n \n\nThe wall-following task and data gathering were designed to test the\nhypothesis that this apparently simple navigation task is indeed a non-\nlinearly separable classification task. Thus, linear classifiers, such as the\nPerceptron network, are not able to learn the task and command the robot\naround the room without collisions. Nonlinear neural classifiers, such as the\nMLP network, are able to learn the task and command the robot successfully\nwithout collisions.\n\n \n\n### Attribute Information:\n\n \n\n \n\n 1. US1: ultrasound sensor at the front of the robot (reference angle: 180\u00b0) \n \n\n 2. US2: ultrasound reading (reference angle: -165\u00b0)\n \n\n 3. US3: ultrasound reading (reference angle: -150\u00b0)\n \n\n 4. US4: ultrasound reading ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5d6161e5-1cbb-46fc-a005-85607fd7caea&revisionId=7df9f5eb-70a2-4480-901f-7a2f2783520a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5d6161e5-1cbb-46fc-a005-85607fd7caea&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5d6161e5-1cbb-46fc-a005-85607fd7caea/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "132", - "name": "cleve", - "description": "https://openml.org \n\ncleve-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5d938cc4-8cff-4e09-80cf-d8b08461d9c4&revisionId=ef0a6892-61d1-4ef8-9d98-3f29b71c15bf&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5d938cc4-8cff-4e09-80cf-d8b08461d9c4&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5d938cc4-8cff-4e09-80cf-d8b08461d9c4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "133", - "name": "GAMETES_Epistasis_3-Way_20atts_0.2H_EDM-1_1", - "description": "https://openml.org \n\nGAMETES_Epistasis_3-Way_20atts_0.2H_EDM-1_1-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5e840f29-a572-47c2-acdf-c1b8c0b4b8b7&revisionId=3e1cfa13-826c-4672-852e-438ec491a045&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5e840f29-a572-47c2-acdf-c1b8c0b4b8b7&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5e840f29-a572-47c2-acdf-c1b8c0b4b8b7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "136", - "name": "monks-problems-1", - "description": "https://openml.org \n\n**Author** : Sebastian Thrun (Carnegie Mellon University) \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/MONK's+Problems)\n\\- October 1992 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**The Monk's Problems: Problem 1** \n \nOnce upon a time, in July 1991, the monks of Corsendonk Priory were faced with\na school held in their priory, namely the 2nd European Summer School on\nMachine Learning. After listening more than one week to a wide variety of\nlearning algorithms, they felt rather confused: Which algorithm would be\noptimal? And which one to avoid? As a consequence of this dilemma, they\ncreated a simple task on which all learning algorithms ought to be compared:\nthe three MONK's problems.\n\n \n\nThe target concept associated with the 1st Monk's problem is the binary\noutcome of the logical formula: \n \nMONK-1: (a1 == a2) or (a5 == 1)\n\n \n\nIn this dataset, the original train and test sets were merged to allow other\nsampling procedures. However, the original train-test splits can be found as\none of the OpenML tasks.\n\n \n\n### Attribute information:\n\n \n\n \n\n * attr1: 1, 2, 3 \n \n\n * attr2: 1, 2, 3 \n \n\n * attr3: 1, 2 \n \n\n * attr4: 1, 2, 3 \n \n\n * attr5: 1, 2, 3, 4 \n \n\n * attr6: 1, 2 \n \n\n \n\n### Relevant papers\n\n \n\nThe MONK's Problems - A Performance Comparison of Different Learning\nAlgorithms, by S.B. Thrun, J. Bala, E. Bloedorn, I. Bratko, B. Cestnik, J.\nCheng, K. De Jong, S. Dzeroski, S.E. Fahlman, D. Fisher, R. Hamann, K.\nKaufman, S. Keller, I. Kononenko, J. Kreuziger, R.S. Michalski, T. Mitchell,\nP. Pachowicz, Y. Reich H. Vafaie, W. Van de Welde, W. Wenzel, J. Wnek, and J.\nZhang. Technical Report CS-CMU-91-197, Carnegie Mellon University, Dec. 1991.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6265676d-b001-4bd6-949c-05b7db6affae&revisionId=1375bd8f-18ca-4971-9a7b-c7dcb7a27c0c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6265676d-b001-4bd6-949c-05b7db6affae&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6265676d-b001-4bd6-949c-05b7db6affae/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "138", - "name": "hayes-roth", - "description": "https://openml.org \n\n**Author** : Barbara and Frederick Hayes-Roth\n\n \n\n**Source** : [original](https://archive.ics.uci.edu/ml/datasets/Hayes-Roth) \\- \n \n**Please cite** :\n\n \n\nHayes-Roth Database\n\n \n\nThis is a merged version of the separate train and test set which are usually\ndistributed. On OpenML this train-test split can be found as one of the\npossible tasks.\n\n \n\nSource Information: \n(a) Creators: Barbara and Frederick Hayes-Roth \n(b) Donor: David W. Aha (aha@ics.uci.edu) (714) 856-8779 \n \n(c) Date: March, 1989\n\n \n\nAttribute Information: \n\\-- 1. name: distinct for each instance and represented numerically \n\\-- 2. hobby: nominal values ranging between 1 and 3 \n\\-- 3. age: nominal values ranging between 1 and 4 \n\\-- 4. educational level: nominal values ranging between 1 and 4 \n\\-- 5. marital status: nominal values ranging between 1 and 4 \n\\-- 6. class: nominal value between 1 and 3\n\n \n\nDetailed description of the experiment: \n1\\. 3 categories (1, 2, and neither -- which I call 3) \n\\-- some of the instances could be classified in either class 1 or 2, and they\nhave been evenly distributed between the two classes \n2\\. 5 Attributes \n\\-- A. name (a randomly-generated number between 1 and 132) \n\\-- B. hobby (a randomly-generated number between 1 and 3) \n\\-- C. age (a number between 1 and 4) \n\\-- D. education level (a number between 1 and 4) \n\\-- E. marital status (a number between 1 and 4) \n3\\. Classification: \n \n\\-- only attributes C-E are diagnostic; values for A and B are ignored \n\\-- Class Neither: if a 4 occurs for any attribute C-E \n\\-- Class 1: Otherwise, if (# of 1's)>(# of 2's) for attributes C-E \n\\-- Class 2: Otherwise, if (# of 2's)>(# of 1's) for attributes C-E \n\\-- Either 1 or 2: Otherwise, if (# of 2's)=(# of 1's) for attribut", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=62f47eea-85d2-44a6-a3b2-cd3ac3d2ff32&revisionId=6df90024-afec-494b-b59e-724b350d5eab&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=62f47eea-85d2-44a6-a3b2-cd3ac3d2ff32&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/62f47eea-85d2-44a6-a3b2-cd3ac3d2ff32/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "148", - "name": "ai4eu-robotics-wrist-1024-raw-broker", - "description": "The robotic wrist demonstrator represents a mechanical wrist with three axes\nthat can hold tools, e.g. for spray painting in combination with a pump. On\nthis robotic wrist, two accelerometers are mounted for vibration monitoring\nand recording: one in the movable front part of the wrist and one in the\nshaft. The wrist can be controlled through the torque or the designated\nposition of each axis\u2019 motor.\n\nThe dataset consists of 1.8 billion measurements of several sensor data of the\nrobotic wrist in 1-second intervals over six months in 2020.\n\n[The complete dataset & description is available on\nZenodo](https://zenodo.org/record/5729818)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6819ff36-f57d-459d-a5f7-11e1e8e096fe&revisionId=6400e2d0-ed8f-48fd-8aab-50504461c72b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6819ff36-f57d-459d-a5f7-11e1e8e096fe&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6819ff36-f57d-459d-a5f7-11e1e8e096fe/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "152", - "name": "allbp", - "description": "https://openml.org \n\nallbp-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6a6f5d38-3775-485d-a6d6-1b90952daee9&revisionId=35d8f990-459e-41b0-918c-07895c554e3d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6a6f5d38-3775-485d-a6d6-1b90952daee9&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6a6f5d38-3775-485d-a6d6-1b90952daee9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "153", - "name": "xd6", - "description": "https://openml.org \n\n**Author** : Unknown \n \n **Source** : [PMLB](https://github.com/EpistasisLab/penn-ml-\nbenchmarks/tree/master/datasets/classification) \\- Supposedly originates from\nUCI, but can't find it there anymore. \n \n **Please cite:**\n\n \n\n**XD6 Dataset** \nDataset used by Buntine and Niblett (1992). Composed of 10 features, one of\nwhich is irrelevant. The target is a disjunctive normal form formula over the\nnine other attributes, with additional classification noise.\n\n \n\n[More\ninfo](https://books.google.be/books?id=W2bmBwAAQBAJ&pg=PA313&lpg=PA313&dq=dataset+xd6&source=bl&ots=6hYPdz8_Nl&sig=TR1ieOg9D1pCrvNyeKbb-3eKmd8&hl=en&sa=X&ved=0ahUKEwj_tZ_MxozZAhVHa1AKHZVEBBsQ6AEIQjAF#v=onepage&q=dataset\nxd6&f=false).\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6af5c9cf-73bf-406b-a250-5bbf7d0e5e47&revisionId=c3c334c0-d744-4b9a-96aa-d4333c5d3e8a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6af5c9cf-73bf-406b-a250-5bbf7d0e5e47&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6af5c9cf-73bf-406b-a250-5bbf7d0e5e47/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "163", - "name": "ucrsuite-config", - "description": "Data broker for subsequence search in time series\n\nThis data broker offers a web interface for uploading files and setting search\nparameters. It saves the uploaded files on a shared volume and sends the\ncorresponding paths to the next model in the pipeline. It was created to be\nused with ucrsuite-dtw and ucrsuite-ed models, and supports data and query\nfiles in txt format.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6e64762b-97e0-4278-8dad-c9d1513fabb4&revisionId=e41459ef-3143-4ead-a1c0-907b136f6e9a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6e64762b-97e0-4278-8dad-c9d1513fabb4&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6e64762b-97e0-4278-8dad-c9d1513fabb4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "164", - "name": "iris", - "description": "https://openml.org \n\n**Author** : R.A. Fisher \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Iris) \\- 1936 -\nDonated by Michael Marshall \n \n **Please cite** :\n\n \n\n**Iris Plants Database** \n \nThis is perhaps the best known database to be found in the pattern recognition\nliterature. Fisher's paper is a classic in the field and is referenced\nfrequently to this day. (See Duda & Hart, for example.) The data set contains\n3 classes of 50 instances each, where each class refers to a type of iris\nplant. One class is linearly separable from the other 2; the latter are NOT\nlinearly separable from each other.\n\n \n\nPredicted attribute: class of iris plant. \n \nThis is an exceedingly simple domain.\n\n \n\n### Attribute Information:\n\n \n\n \n \n 1. sepal length in cm \n 2. sepal width in cm \n 3. petal length in cm \n 4. petal width in cm \n 5. class: \n -- Iris Setosa \n -- Iris Versicolour \n -- Iris Virginica \n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6e9c598d-8928-437b-9013-d698f3321a37&revisionId=d3cee283-9ba0-40c2-b502-aa7ab4871ecf&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6e9c598d-8928-437b-9013-d698f3321a37&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6e9c598d-8928-437b-9013-d698f3321a37/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "168", - "name": "cars1", - "description": "https://openml.org \n\ncars1-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6feea5d8-c6fb-40d9-99bb-c8ad4ad4d242&revisionId=2699c172-24e4-4d32-aca3-2f74eb6dc968&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6feea5d8-c6fb-40d9-99bb-c8ad4ad4d242&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6feea5d8-c6fb-40d9-99bb-c8ad4ad4d242/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "172", - "name": "ner-databroker", - "description": "This is the databroker component of the NER pipeline.\n\nThrough the Web UI of the ner-databroker, you can provide the text to be\nreceived as an input for the entity recognizer. The language of the text\nshould be German, since the NER model is trained on German data. More than one\nsentence can be given as input.\n\nMake sure to run ner-pipeline, instead of ner-databroker as a standalone\ncomponent. As ner-pipeline is successfully deployed, open the WEB UI and\nfollow the instructions to submit the text.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=73244125-66e5-4087-9fe8-8229a39944c2&revisionId=e586beb7-322e-4a3e-82a7-b96bbbf49464&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=73244125-66e5-4087-9fe8-8229a39944c2&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/73244125-66e5-4087-9fe8-8229a39944c2/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "173", - "name": "corral", - "description": "https://openml.org \n\ncorral-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7337b1db-a3e8-4e24-8ab1-130d86f032c8&revisionId=a9a6ebfb-485b-4678-8f3a-00b27877c492&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7337b1db-a3e8-4e24-8ab1-130d86f032c8&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7337b1db-a3e8-4e24-8ab1-130d86f032c8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "175", - "name": "autoUniv-au7-1100", - "description": "https://openml.org \n\n**Author** : Ray. J. Hickey \n \n**Source** : UCI \n \n **Please cite** :\n\n \n\n \n\n * Dataset Title: \n \n\n \n\nAutoUniv Dataset \n \ndata problem: autoUniv-au7-300-drift-au7-cpd1-800\n\n \n\n \n\n * Abstract: \n \n\n \n\nAutoUniv is an advanced data generator for classifications tasks. The aim is\nto reflect the nuances and heterogeneity of real data. Data can be generated\nin .csv, ARFF or C4.5 formats.\n\n \n\n \n\n * Source: \n \n\n \n\nAutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com \nAutoUniv web-site: http://sites.google.com/site/autouniv/.\n\n \n\n \n\n * Data Set Information:\n \n\n \n\nThe user first creates a classification model and then generates classified\nexamples from it. To create a model, the following are specified: the number\nof attributes (up to 1000) and their type (discrete or continuous), the number\nof classes (up to 10), the complexity of the underlying rules and the noise\nlevel. AutoUniv then produces a model through a process of constrained\nrandomised search to satisfy the user's requirements. A model can have up to\n3000 rules. Rare class models can be designed. A sequence of models can be\ndesigned to reflect concept and/or population drift.\n\n \n\nAutoUniv creates three text files for a model: a Prolog specification of the\nmodel used to generate examples (.aupl); a user-friendly statement of the\nclassification rules in an 'if ... then' format (.aurules); a statistical\nsummary of the main properties of the model, including its Bayes rate\n(.auprops).\n\n \n\n \n\n * Attribute Information: \n \n\n \n\nAttributes may be discrete with up to 10 values or continuous. A discrete\nattribute can be nominal with values v1, v2, v3 ... or integer with values 0,\n1, 2 , ... .\n\n \n\n \n\n * Relevant Papers:\n \n\n \n\nMarrs, G, Hickey, RJ an", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7422c0f9-0fda-41ab-8bc0-91233a3455e1&revisionId=739ac852-a2b4-45fc-84ca-f93ca4c4d17f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7422c0f9-0fda-41ab-8bc0-91233a3455e1&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7422c0f9-0fda-41ab-8bc0-91233a3455e1/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "176", - "name": "ai4eu-robotics-wrist-1024-fft-broker", - "description": "The robotic wrist demonstrator represents a mechanical wrist with three axes\nthat can hold tools, e.g. for spray painting in combination with a pump. On\nthis robotic wrist, two accelerometers are mounted for vibration monitoring\nand recording: one in the movable front part of the wrist and one in the\nshaft. The wrist can be controlled through the torque or the designated\nposition of each axis\u2019 motor.\n\nThe dataset consists of 1.8 billion measurements of several sensor data of the\nrobotic wrist in 1-second intervals over six months in 2020.\n\n[The complete dataset & description is available on\nZenodo](https://zenodo.org/record/5729818)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=74b69064-462f-4176-a8ce-7719638f237a&revisionId=1933cb96-3d47-4700-a73a-09692385ad69&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=74b69064-462f-4176-a8ce-7719638f237a&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/74b69064-462f-4176-a8ce-7719638f237a/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "177", - "name": "grpc_piezo_hubeau", - "description": "Data from the \"Piezometry\" API come from the ADES portal (national portal for\naccess to French groundwater data). They relate to piezometric measurements\n(water level in groundwater tables), throughout France, from all the partners\nof the water information system (see metadata).\n\n \n\nThe updates are integrated daily into the API.\n\n \n\nData is expressed\n\n * in NGF meters for levels (or ratings);\n * in meters in relation to the measurement mark for the depths.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=76fda708-9907-4241-9d35-4d18a406eb35&revisionId=e3ff0320-a93a-4358-b13d-949df627c0b0&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=76fda708-9907-4241-9d35-4d18a406eb35&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/76fda708-9907-4241-9d35-4d18a406eb35/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "180", - "name": "teachingAssistant", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\nDataset from the MLRR repository: http://axon.cs.byu.edu:5000/\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7bc5051d-f852-4547-a317-e1c510f66332&revisionId=5f2ac1b6-7a8f-4762-9c64-82a14dea66b1&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7bc5051d-f852-4547-a317-e1c510f66332&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7bc5051d-f852-4547-a317-e1c510f66332/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "181", - "name": "wine-quality-red", - "description": "https://openml.org \n\nwine-quality-red-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7c5315b5-ca3c-488c-b235-f7f4d0534b16&revisionId=cecbcde7-4870-4ed3-9bb4-af01655e0c27&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7c5315b5-ca3c-488c-b235-f7f4d0534b16&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7c5315b5-ca3c-488c-b235-f7f4d0534b16/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "183", - "name": "diabetes", - "description": "https://openml.org \n\n**Author** : [Vincent Sigillito](vgs@aplcen.apl.jhu.edu)\n\n \n\n**Source** : [Obtained from\nUCI](https://archive.ics.uci.edu/ml/datasets/pima+indians+diabetes)\n\n \n\n**Please cite** : [UCI citation\npolicy](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n \n\n 1. \n\nTitle: Pima Indians Diabetes Database\n\n \n\n \n\n 2. \n\nSources: \n(a) Original owners: National Institute of Diabetes and Digestive and \nKidney Diseases \n(b) Donor of database: Vincent Sigillito (vgs@aplcen.apl.jhu.edu) \nResearch Center, RMI Group Leader \nApplied Physics Laboratory \nThe Johns Hopkins University \nJohns Hopkins Road \nLaurel, MD 20707 \n(301) 953-6231 \n(c) Date received: 9 May 1990\n\n \n\n \n\n 3. \n\nPast Usage:\n\n \n \n\n 1. \n\nSmith,~J.~W., Everhart,~J.~E., Dickson,~W.~C., Knowler,~W.~C., & \nJohannes,~R.~S. (1988). Using the ADAP learning algorithm to forecast \nthe onset of diabetes mellitus. In {it Proceedings of the Symposium \non Computer Applications and Medical Care} (pp. 261--265). IEEE \nComputer Society Press.\n\n \n\nThe diagnostic, binary-valued variable investigated is whether the \npatient shows signs of diabetes according to World Health Organization \ncriteria (i.e., if the 2 hour post-load plasma glucose was at least \n200 mg/dl at any survey examination or if found during routine medical \ncare). The population lives near Phoenix, Arizona, USA.\n\n \n\nResults: Their ADAP algorithm makes a real-valued prediction between \n0 and 1. This was transformed into a binary decision using a cutoff of \n0.448. Using 576 training instances, the sensitivity and specificity \nof their algorithm was 76% on the remaining 192 instances.\n\n \n\n \n \n\n \n\n 4. \n\nRelevant Information: \nSeveral constraints were placed on the selection of these instances from ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7d5de8fb-5f22-4b99-9ce3-9ae00f6c86db&revisionId=72ecabe9-fd16-4c78-954a-c7e86585d15c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7d5de8fb-5f22-4b99-9ce3-9ae00f6c86db&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7d5de8fb-5f22-4b99-9ce3-9ae00f6c86db/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "184", - "name": "dis", - "description": "https://openml.org \n\ndis-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7d5f6906-f781-4b68-93cc-95e733010b75&revisionId=6b69f0c0-9e4f-437c-8563-55b3b177ef2a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7d5f6906-f781-4b68-93cc-95e733010b75&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7d5f6906-f781-4b68-93cc-95e733010b75/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "186", - "name": "lymph", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - \n \n**Please cite** :\n\n \n\nCitation Request: \nThis lymphography domain was obtained from the University Medical Centre, \nInstitute of Oncology, Ljubljana, Yugoslavia. Thanks go to M. Zwitter and \nM. Soklic for providing the data. Please include this citation if you plan \nto use this database.\n\n \n\n \n\n 1. \n\nTitle: Lymphography Domain\n\n \n\n \n\n 2. \n\nSources: \n(a) See Above. \n(b) Donors: Igor Kononenko, \nUniversity E.Kardelj \nFaculty for electrical engineering \nTrzaska 25 \n61000 Ljubljana (tel.: (38)(+61) 265-161\n\n \n\n \n Bojan Cestnik \n Jozef Stefan Institute \n Jamova 39 \n 61000 Ljubljana \n Yugoslavia (tel.: (38)(+61) 214-399 ext.287) \n \n\n \n\n(c) Date: November 1988\n\n \n\n \n\n 3. \n\nPast Usage: (sveral)\n\n \n \n\n 1. Cestnik,G., Konenenko,I, & Bratko,I. (1987). Assistant-86: A \nKnowledge-Elicitation Tool for Sophisticated Users. In I.Bratko \n& N.Lavrac (Eds.) Progress in Machine Learning, 31-45, Sigma Press. \n\\-- Assistant-86: 76% accuracy\n\n \n\n 2. Clark,P. & Niblett,T. (1987). Induction in Noisy Domains. In \nI.Bratko & N.Lavrac (Eds.) Progress in Machine Learning, 11-30, \nSigma Press. \n\\-- Simple Bayes: 83% accuracy \n\\-- CN2 (99% threshold): 82%\n\n \n\n 3. Michalski,R., Mozetic,I. Hong,J., & Lavrac,N. (1986). The Multi-Purpose \nIncremental Learning System AQ15 and its Testing Applications to Three \nMedical Domains. In Proceedings of the Fifth National Conference on \nArtificial Intelligence, 1041-1045. Philadelphia, PA: Morgan Kaufmann. \n\\-- Experts: 85% accuracy (estimate) \n\\-- AQ15: 80-82%\n\n \n \n\n \n\n 4. \n\nRelevant Information: \nThis is one of three domains provided by the Oncology Institute \n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7f5388ed-8ec4-4f00-8230-e5624404ed95&revisionId=306ff0fb-0cee-48f7-ba80-d9567d62f039&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7f5388ed-8ec4-4f00-8230-e5624404ed95&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7f5388ed-8ec4-4f00-8230-e5624404ed95/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "187", - "name": "allrep", - "description": "https://openml.org \n\nallrep-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7f976866-58a9-41a2-a2c4-b66ee2ebb502&revisionId=dd968b72-c353-4de1-9da6-bbaaa6083b6d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7f976866-58a9-41a2-a2c4-b66ee2ebb502&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7f976866-58a9-41a2-a2c4-b66ee2ebb502/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "189", - "name": "ai4eu-robotics-wrist-6144-raw-broker", - "description": "The robotic wrist demonstrator represents a mechanical wrist with three axes\nthat can hold tools, e.g. for spray painting in combination with a pump. On\nthis robotic wrist, two accelerometers are mounted for vibration monitoring\nand recording: one in the movable front part of the wrist and one in the\nshaft. The wrist can be controlled through the torque or the designated\nposition of each axis\u2019 motor.\n\nThe dataset consists of 1.8 billion measurements of several sensor data of the\nrobotic wrist in 1-second intervals over six months in 2020.\n\n[The complete dataset & description is available on\nZenodo](https://zenodo.org/record/5729818)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8521c122-91e5-4748-aacd-c99e0cc7549e&revisionId=de99b386-a460-4eb9-96f0-7d53f01e3801&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8521c122-91e5-4748-aacd-c99e0cc7549e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8521c122-91e5-4748-aacd-c99e0cc7549e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "198", - "name": "JapaneseVowels", - "description": "https://openml.org \n\n**Author** : Mineichi Kudo, Jun Toyama, Masaru Shimbo \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Japanese+Vowels) \n \n**Please cite** :\n\n \n\n**Japanese vowels** \n \nThis dataset records 640 time series of 12 LPC cepstrum coefficients taken\nfrom nine male speakers.\n\n \n\nThe data was collected for examining our newly developed classifier for\nmultidimensional curves (multidimensional time series). Nine male speakers\nuttered two Japanese vowels /ae/ successively. For each utterance, with the\nanalysis parameters described below, we applied 12-degree linear prediction\nanalysis to it to obtain a discrete-time series with 12 LPC cepstrum\ncoefficients. This means that one utterance by a speaker forms a time series\nwhose length is in the range 7-29 and each point of a time series is of 12\nfeatures (12 coefficients).\n\n \n\nSimilar data are available for different utterances /ei/, /iu/, /uo/, /oa/ in\naddition to /ae/. Please contact the donor if you are interested in using this\ndata.\n\n \n\nThe number of the time series is 640 in total. We used one set of 270 time\nseries for training and the other set of 370 time series for testing.\n\n \n\nAnalysis parameters: \n \n* Sampling rate : 10kHz \n* Frame length : 25.6 ms \n* Shift length : 6.4ms \n* Degree of LPC coefficients : 12\n\n \n\nEach line represents 12 LPC coefficients in the increasing order separated by\nspaces. This corresponds to one analysis \nframe. Lines are organized into blocks, which are a set of 7-29 lines\nseparated by blank lines and corresponds to a single speech utterance of /ae/\nwith 7-29 frames.\n\n \n\nEach speaker is a set of consecutive blocks. In ae.train there are 30 blocks\nfor each speaker. Blocks 1-30 represent speaker 1, blocks 31-60 represent\nspeaker 2, and so on up to ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=898883b9-a6b7-47a1-ae2c-cdf9012ceaaf&revisionId=e5a5e2dc-1c77-4853-91a8-f559a2c8346a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=898883b9-a6b7-47a1-ae2c-cdf9012ceaaf&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/898883b9-a6b7-47a1-ae2c-cdf9012ceaaf/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "199", - "name": "badges2", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\nDataset from the MLRR repository: http://axon.cs.byu.edu:5000/\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8abffb54-85d2-40d6-9428-dbd62ffa345d&revisionId=49191518-c230-4f13-81b5-b64ba49d0621&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8abffb54-85d2-40d6-9428-dbd62ffa345d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8abffb54-85d2-40d6-9428-dbd62ffa345d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "200", - "name": "Augmented_data_registry", - "description": "# Description of the solution\n\nThe most important requirement for machine learning based tools is the\npresence of a robust and reliable data pipeline. A data pipeline is a series\nof (possibly automated) data transformations needed before such data can be\nused by any machine learning model.\n\n \n\n![](https://lh4.googleusercontent.com/Cp7fIU1RFvkjFQEecn-\nxwDbmJnc_MKdYyVPHwsGLY_enP84iGr-YaKaG8rvv4OKa3d4tGlTfokOut7NM92sPOOLFYkBBLe-\npIMsXln2lw_qKgYJniZJLiRehA3VsWLw73TSAQAB2)\n\nFigure 1: Example of a typical machine learning data pipeline\n\n \n\nAs Figure 1 shows the main steps to prepare data are 1) Data preparation which\nensures that the raw data collected via different streams is properly cleaned\nand associated with a certain quality. 2) Data processing which transforms\ncleaned data into a format compatible with standard machine learning\nalgorithms.\n\nThe presence of an automated pipeline of this kind makes sure that the same\ndata transformation process can be repeated in time, for example while using\nthe model in real life or when re-training the same model. Data pipelines\nshould be reproducible and reliable and should therefore be properly included\ninside a version control system.\n\nSeveral tools and libraries are being currently developed to improve version\ncontrol in data pipelines. Data Version Control ([DVC](https://dvc.org/)) is\nbecoming one of the most popular solutions as it can be seamlessly integrated\nwith Git based versioning solutions.\n\nWithin the scope of this project we decided to deliver an augmented data\nregistry built on top of DVC. The idea, shown in Figure 2, is to provide data\nengineers and data scientists with a way to automatically generate data\nquality reports and processing pipelines every time a new data entity is\npushed to a given DVC repository.\n\n ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8b133ef7-6353-480e-82e4-5d66dad7ced8&revisionId=fa47a809-eaaf-44ee-9f21-636290983357&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8b133ef7-6353-480e-82e4-5d66dad7ced8&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8b133ef7-6353-480e-82e4-5d66dad7ced8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "202", - "name": "VideoFileBroker", - "description": "The Video file broker feeds video files to video models, typically starting\nwith segmentation.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8eaa811e-46ff-4577-a88d-b203f7757338&revisionId=b102e42d-5a16-4e96-9fa6-fba8dab9616b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8eaa811e-46ff-4577-a88d-b203f7757338&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8eaa811e-46ff-4577-a88d-b203f7757338/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "203", - "name": "autoUniv-au7-500", - "description": "https://openml.org \n\n**Author** : Ray. J. Hickey \n \n**Source** : UCI \n \n **Please cite** :\n\n \n\n \n\n * Dataset Title: \n \n\n \n\nAutoUniv Dataset \n \ndata problem: autoUniv-au7-cpd1-500\n\n \n\n \n\n * Abstract: \n \n\n \n\nAutoUniv is an advanced data generator for classifications tasks. The aim is\nto reflect the nuances and heterogeneity of real data. Data can be generated\nin .csv, ARFF or C4.5 formats.\n\n \n\n \n\n * Source: \n \n\n \n\nAutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com \nAutoUniv web-site: http://sites.google.com/site/autouniv/.\n\n \n\n \n\n * Data Set Information:\n \n\n \n\nThe user first creates a classification model and then generates classified\nexamples from it. To create a model, the following are specified: the number\nof attributes (up to 1000) and their type (discrete or continuous), the number\nof classes (up to 10), the complexity of the underlying rules and the noise\nlevel. AutoUniv then produces a model through a process of constrained\nrandomised search to satisfy the user's requirements. A model can have up to\n3000 rules. Rare class models can be designed. A sequence of models can be\ndesigned to reflect concept and/or population drift.\n\n \n\nAutoUniv creates three text files for a model: a Prolog specification of the\nmodel used to generate examples (.aupl); a user-friendly statement of the\nclassification rules in an 'if ... then' format (.aurules); a statistical\nsummary of the main properties of the model, including its Bayes rate\n(.auprops).\n\n \n\n \n\n * Attribute Information: \n \n\n \n\nAttributes may be discrete with up to 10 values or continuous. A discrete\nattribute can be nominal with values v1, v2, v3 ... or integer with values 0,\n1, 2 , ... .\n\n \n\n \n\n * Relevant Papers:\n \n\n \n\nMarrs, G, Hickey, RJ and Black, MM (2", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8ef7f983-b1d2-4891-b76c-6f4ee2202248&revisionId=66cc456d-3bb0-476f-976a-e96562a3545b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8ef7f983-b1d2-4891-b76c-6f4ee2202248&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8ef7f983-b1d2-4891-b76c-6f4ee2202248/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "204", - "name": "IoTxKG_TEST", - "description": "# **IoTxKG Ontology Analysis Model**\n\n **identify main concepts based on clustering**\n\n \n\nThe Internet of Things (IoT) primary objective is to make a hyper-connected\nworld for various application domains. However, IoT suffers from a lack of\ninteroperability leading to a substantial threat to the predicted economic\nvalue. Schema.org provides semantic interoperability to structure\nheterogeneous data on the Web. An extension of this vocabulary for the IoT\ndomain (iot.schema.org) is an ongoing research effort to address semantic\ninteroperability for the Web of Things (WoT). To design this vocabulary, a\ncentral challenge is to identify the main topics (concepts and properties)\nautomatically from existing knowledge in IoT applications. IoTxKG\nautomatically identifies the most important topics from ontologies of the 4\nKE4WoT challenge domains \u2013 smart building, mobility, energy and weather \u2013\nbased on suitable language models.\n\n \n\nThe following technologies are employed in IoTxKG\n\n * W3C Semantic Web Technologies (e.g. RDF, OWL, SPARQL, SKOS)\n * Deep Learning Models (language models)\n * Clustering Algorithms (e.g. k-means clustering)\n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=900e0378-1f94-4727-b3ba-2907f7cdd818&revisionId=8d0f6c80-b67e-43db-ab6f-3646ed2f57b1&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=900e0378-1f94-4727-b3ba-2907f7cdd818&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/900e0378-1f94-4727-b3ba-2907f7cdd818/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "206", - "name": "dsc-text", - "description": "This is a simple integration of an IDS Data Connector into a AI4EU Model.\n\nThe source code is available in the tutorials repository on Github:\nhttps://github.com/ai4eu/tutorials/tree/master/DSC_Data_Exchange\n\n \n\n \n\nTo configure the what data the Model should download from an DSC one can use\nthe providet REST-Api accessable through the path /api/v1/ of the webui\ncontainer.\n\n \n\n \n\n \n\nThe following Endpoints are provided:\n\n \n\nrecipient (address of the DSC that provides the Data), resourceId, artifactId,\ncontract, customDSC (address of the DSC that should download the Data)\n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=926bd2eb-51b6-4e64-8a76-b6544cce5162&revisionId=d764d260-491b-4e55-8476-29b2a2598aa5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=926bd2eb-51b6-4e64-8a76-b6544cce5162&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/926bd2eb-51b6-4e64-8a76-b6544cce5162/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "209", - "name": "GAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1", - "description": "https://openml.org \n\nGAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=93f29f2f-1fd0-4d24-a057-544397af20bf&revisionId=216e926a-76c7-4c6f-aee9-7c005eb2d6a1&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=93f29f2f-1fd0-4d24-a057-544397af20bf&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/93f29f2f-1fd0-4d24-a057-544397af20bf/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "210", - "name": "ai4eu-security-pilot-databroker", - "description": "This container provides data for Thread Prediction in Network Traffic.\n\nTherefore, this container can deliver test and training data.\n\n \n\nYou can connect the training data output of the ai4eu-security-pilot-\ndatabroker container with the training data input of the ai4eu-security-\npilot.model container. This data will be used to train the model. It only\ncontains benign traffic. To test your model you can connect the prediction\ndata output of the ai4eu-security-pilot-databroker container with the\nprediction data input of the ai4eu-security-pilot.model container. This data\nwill be used to test the model. It contains benign and fraud traffic.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=95c624d4-05ed-40c0-ad1d-a833e35da282&revisionId=653b3402-027c-4fac-96ce-ce8fa0969bce&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=95c624d4-05ed-40c0-ad1d-a833e35da282&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/95c624d4-05ed-40c0-ad1d-a833e35da282/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "214", - "name": "advice-img-databroker", - "description": "advice-img-databroker collects the user's images placed on the shared folder\nand releases them into the pipeline\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9a0944ce-a5d3-4e01-8da0-d44be9b42814&revisionId=c754d039-d083-4997-abb2-6d67b1d6f3f5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9a0944ce-a5d3-4e01-8da0-d44be9b42814&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/9a0944ce-a5d3-4e01-8da0-d44be9b42814/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "215", - "name": "moon_rl", - "description": "This document contains information regarding the developments done within the\nMOON project. Such project took place within the AI4EU Open Call for the Alph\n-D challenge, addressing machining control optimization through Reinforcement\nLearning. The content of the document can be summarized with the following\npoints that hold all the information and are ordered in a logical way, going\nfrom the problem presentation to the solution proposed to face it. The last\npoint contains comments related to problems found within the project, how MOON\nhas adapted to such scenario and possible future steps. See README file for\ndetails on notebook.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9c0ed8db-b9e3-4a8f-8c63-b6350d951337&revisionId=9e63d89f-6525-48dc-8aba-36f6a6b04f81&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9c0ed8db-b9e3-4a8f-8c63-b6350d951337&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/9c0ed8db-b9e3-4a8f-8c63-b6350d951337/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "217", - "name": "vowel", - "description": "https://openml.org \n\n**Author** : Peter Turney (peter@ai.iit.nrc.ca) \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/machine-learning-\ndatabases/undocumented/connectionist-bench/vowel/) \\- date unknown \n \n **Please cite** : [UCI citation\npolicy](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n **Vowel Recognition (Deterding data)** \nSpeaker independent recognition of the eleven steady state vowels of British\nEnglish using a specified training set of lpc derived log area ratios. \nCollected by David Deterding (data and non-connectionist analysis), Mahesan\nNiranjan (first connectionist analysis), Tony Robinson (description, program,\ndata, and results)\n\n \n\nA very comprehensive description including comments by the authors can be\nfound [here](https://archive.ics.uci.edu/ml/machine-learning-\ndatabases/undocumented/connectionist-bench/vowel/vowel.names)\n\n \n\nThe problem is specified by the accompanying data file, \"vowel.data\". This \nconsists of a three dimensional array: voweldata [speaker, vowel, input]. \nThe speakers are indexed by integers 0-89. (Actually, there are fifteen \nindividual speakers, each saying each vowel six times.) The vowels are \nindexed by integers 0-10. For each utterance, there are ten floating-point \ninput values, with array indices 0-9.\n\n \n\nThe problem is to train the network as well as possible using only on data \nfrom \"speakers\" 0-47, and then to test the network on speakers 48-89, \nreporting the number of correct classifications in the test set.\n\n \n\nFor a more detailed explanation of the problem, see the excerpt from Tony \nRobinson's Ph.D. thesis in the COMMENTS section. In Robinson's opinion, \nconnectionist problems fall into two classes, the possible and the \nimpossible. He is interested in the latter, by which h", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9d05f3f0-d155-4dc4-84a7-b7551bcba3e2&revisionId=7295e950-aa23-4a8e-bd1d-075622985ae5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9d05f3f0-d155-4dc4-84a7-b7551bcba3e2&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/9d05f3f0-d155-4dc4-84a7-b7551bcba3e2/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "221", - "name": "NewsDatabroker", - "description": "Overview:\n\nProvides textual data to the news-classifier\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a0588b74-603d-4c6d-bed7-fef41bdaa8eb&revisionId=0cd9b307-60c3-48f4-9308-07108854cf09&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a0588b74-603d-4c6d-bed7-fef41bdaa8eb&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a0588b74-603d-4c6d-bed7-fef41bdaa8eb/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "224", - "name": "rebase-data-broker", - "description": "This data broker can load open datasets from[\n](https://www.rebase.energy/datasets).\nThis will enable access to all upcoming open datasets in the Rebase Platform.\nThe goal of this broker is to make it easy for anyone on the AIOD platform to\naccess additional open energy datasets.\n\nThe broker provides a user interface to download train and validation sets in\na unified way that can quickly be used to evaluate your model. It also exposes\nLoadData rpc method to get data. A demonstration video can be found\n[here](https://drive.google.com/file/d/1xYYv1rZRrQSZT1-A73suNSvji122-GhY/view?usp=sharing).\nPlease refer to this[ ](https://github.com/rebase-energy/ai4eu-\nexperiment/tree/master/dataset)[readme](https://github.com/rebase-\nenergy/ai4eu-experiment/tree/master/dataset) to understand more about how to\nuse and install.\n\nThis project has received funding from the European Union's Horizon 2020\nresearch and innovation programme within the framework of the I-NERGY Project,\nfunded under grant agreement No 101016508\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a514218c-d37f-4c38-a06d-c60a267eda42&revisionId=8ad34ae9-6fd3-4815-b890-99d6f22bf929&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a514218c-d37f-4c38-a06d-c60a267eda42&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a514218c-d37f-4c38-a06d-c60a267eda42/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "225", - "name": "biomed", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - Date unknown \n \n **Please cite** :\n\n \n\nFebruary 23, 1982\n\n \n\nThe 1982 annual meetings of the American Statistical Association (ASA) \nwill be held August 16-19, 1982 in Cincinnati. At that meeting, the ASA \nCommittee on Statistical Graphics plans to sponsor an \"Exposition of \nStatistical Graphics Technology.\" The purpose of this activity is to \nmore fully inform the ASA membership about the capabilities and uses of \ncomputer graphcis in statistical work. This letter is to invite you to \nparticipate in the Exposition.\n\n \n\nAttached is a set of biomedical data containing 209 observations (134 \nfor \"normals\" and 75 for \"carriers\"). Each vendor of provider of \nstatistical graphics software participating in the Exposition is to \nanalyze these data using their software and to prepare tabular, graphical \nand text output illustrating the use of graphics in these analyses and \nsummarizing their conclusions. The tabular and graphical materials must be \ndirect computer output from the statistical graphics software; the \ntextual descriptions and summaries need not be. The total display space \navailable to each participant at the meeting will be a standard poster- \nboard (approximately 4' x 2 1/2'). All entries will be displayed in one \nlocation at the meetings, together with brief written commentary by \nthe committee summarizing the results of this activity.\n\n \n\nReference\n\n \n\nExposition of Statistical Graphics Technology, \nL. H. Cox, M. M. Johnson, K. Kafadar, \nASA Proc Stat. Comp Section, 1982, pp 55-56. \nEnclosures\n\n \n\nTHE DATA\n\n \n\nThe following data arose in a study to develop screening methods to \nidentify carriers of a rare genetic disorder. Four measurements m1, \nm2, m3, m4 were made ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a6b3cc75-5ff7-4293-b1b7-36731c797020&revisionId=d1323bad-7098-462e-b402-6b6c6f77cfce&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a6b3cc75-5ff7-4293-b1b7-36731c797020&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a6b3cc75-5ff7-4293-b1b7-36731c797020/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "226", - "name": "kc2", - "description": "https://openml.org \n\n**Author** : Mike Chapman, NASA \n \n **Source** : [tera-\nPROMISE](http://openscience.us/repo/defect/mccabehalsted/kc2.html) \\- 2004 \n \n **Please cite** : Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE\nRepository of Software Engineering Databases. School of Information Technology\nand Engineering, University of Ottawa, Canada.\n\n \n\n**KC2 Software defect prediction** \n \nOne of the NASA Metrics Data Program defect data sets. Data from software for\nscience data processing. Data comes from McCabe and Halstead features\nextractors of source code. These features were defined in the 70s in an\nattempt to objectively characterize code features that are associated with\nsoftware quality.\n\n \n\n### Attribute Information\n\n \n\n \n\n 1. loc : numeric % McCabe's line count of code\n \n\n 2. v(g) : numeric % McCabe \"cyclomatic complexity\"\n \n\n 3. ev(g) : numeric % McCabe \"essential complexity\"\n \n\n 4. iv(g) : numeric % McCabe \"design complexity\"\n \n\n 5. n : numeric % Halstead total operators + operands\n \n\n 6. v : numeric % Halstead \"volume\"\n \n\n 7. l : numeric % Halstead \"program length\"\n \n\n 8. d : numeric % Halstead \"difficulty\"\n \n\n 9. i : numeric % Halstead \"intelligence\"\n \n\n 10. e : numeric % Halstead \"effort\"\n \n\n 11. b : numeric % Halstead \n \n\n 12. t : numeric % Halstead's time estimator\n \n\n 13. lOCode : numeric % Halstead's line count\n \n\n 14. lOComment : numeric % Halstead's count of lines of comments\n \n\n 15. lOBlank : numeric % Halstead's count of blank lines\n \n\n 16. lOCodeAndComment: numeric\n \n\n 17. uniq_Op : numeric % unique operators\n \n\n 18. uniq_Opnd : numeric % unique operands\n \n\n 19. total_Op : numeric % total operators\n \n\n 20. total_Opnd : numeric % total operands\n \n\n 21. branchCount : numeric % of the flow ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a80a6f2d-b129-4ae0-bfce-22f7631801fe&revisionId=066db903-f64c-4bf9-9118-28ed77006e9a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a80a6f2d-b129-4ae0-bfce-22f7631801fe&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a80a6f2d-b129-4ae0-bfce-22f7631801fe/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "227", - "name": "autoUniv-au7-700", - "description": "https://openml.org \n\n**Author** : Ray. J. Hickey \n \n**Source** : UCI \n \n **Please cite** :\n\n \n\n \n\n * Dataset Title: \n \n\n \n\nAutoUniv Dataset \n \ndata problem: autoUniv-au7-700\n\n \n\n \n\n * Abstract: \n \n\n \n\nAutoUniv is an advanced data generator for classifications tasks. The aim is\nto reflect the nuances and heterogeneity of real data. Data can be generated\nin .csv, ARFF or C4.5 formats.\n\n \n\n \n\n * Source: \n \n\n \n\nAutoUniv was developed by Ray. J. Hickey. Email: ray.j.hickey '@' gmail.com \nAutoUniv web-site: http://sites.google.com/site/autouniv/.\n\n \n\n \n\n * Data Set Information:\n \n\n \n\nThe user first creates a classification model and then generates classified\nexamples from it. To create a model, the following are specified: the number\nof attributes (up to 1000) and their type (discrete or continuous), the number\nof classes (up to 10), the complexity of the underlying rules and the noise\nlevel. AutoUniv then produces a model through a process of constrained\nrandomised search to satisfy the user's requirements. A model can have up to\n3000 rules. Rare class models can be designed. A sequence of models can be\ndesigned to reflect concept and/or population drift.\n\n \n\nAutoUniv creates three text files for a model: a Prolog specification of the\nmodel used to generate examples (.aupl); a user-friendly statement of the\nclassification rules in an 'if ... then' format (.aurules); a statistical\nsummary of the main properties of the model, including its Bayes rate\n(.auprops).\n\n \n\n \n\n * Attribute Information: \n \n\n \n\nAttributes may be discrete with up to 10 values or continuous. A discrete\nattribute can be nominal with values v1, v2, v3 ... or integer with values 0,\n1, 2 , ... .\n\n \n\n \n\n * Relevant Papers:\n \n\n \n\nMarrs, G, Hickey, RJ and Black, MM (2010) ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a853cebc-f57d-4e28-afa8-88b8f7a27e9f&revisionId=45d90a0e-8de7-44a8-b04f-c05c0ec44b32&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a853cebc-f57d-4e28-afa8-88b8f7a27e9f&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a853cebc-f57d-4e28-afa8-88b8f7a27e9f/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "231", - "name": "cleveland-nominal", - "description": "https://openml.org \n\ncleveland-nominal-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ae60613f-f66e-4250-b9ee-92784a85ed89&revisionId=6e9c6eea-42b0-4bd1-8d7d-ecc7c170af17&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ae60613f-f66e-4250-b9ee-92784a85ed89&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ae60613f-f66e-4250-b9ee-92784a85ed89/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "238", - "name": "i-nergy-load-forecasting-databroker", - "description": "This is a Databroker service used for Timeseries. This service is implemented\nin context of the [I-NERGY](https://www.i-nergy.eu/) project. A User Interface\nis included where the users can upload their Timeseries in a csv format. For\nmore information on how to use the solution, please see README.pdf in the\nDocuments section.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b303991f-d5cf-40b0-a941-1d0c0292f4f9&revisionId=fa3adc1a-1cee-40df-aea7-628a4942b01b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b303991f-d5cf-40b0-a941-1d0c0292f4f9&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b303991f-d5cf-40b0-a941-1d0c0292f4f9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "239", - "name": "ai4eu-robotics-pump-1024-fft-broker", - "description": "The robotic pump demonstrator represents a hydraulic pump that can be mounted\non an industrial robot, for example, to pump liquid paint for spray painting.\nOn this pump, one accelerometer is mounted for vibration monitoring and\nrecording.\n\nThe pump can be controlled in terms of speed (rotations per minute, rpm),\naffecting the throughput of paint and the pressure in and out of the pump.\n\nThe dataset consists of 380 million measurements of several sensor data of the\npump system in 1-second intervals over two months in 2020.\n\n[The complete dataset & documentation is available on\nZenodo.](https://zenodo.org/record/5729187)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b3bef42d-b521-4d63-866b-26b6a4b1e053&revisionId=191d8798-2b8b-4ebb-9c4b-9e58caf91bdc&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b3bef42d-b521-4d63-866b-26b6a4b1e053&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b3bef42d-b521-4d63-866b-26b6a4b1e053/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "245", - "name": "car-evaluation", - "description": "https://openml.org \n\ncar-evaluation-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b70b266d-8c03-4f01-b809-668eb6ad4d89&revisionId=61420377-785c-4b22-8344-f04eeda911b7&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b70b266d-8c03-4f01-b809-668eb6ad4d89&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b70b266d-8c03-4f01-b809-668eb6ad4d89/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "248", - "name": "phoneme", - "description": "https://openml.org \n\n**Author** : Dominique Van Cappel, THOMSON-SINTRA \n \n **Source** : [KEEL](http://sci2s.ugr.es/keel/dataset.php?cod=105#sub2),\n[ELENA](https://www.elen.ucl.ac.be/neural-\nnets/Research/Projects/ELENA/databases/REAL/phoneme/) \\- 1993 \n \n **Please cite** : None\n\n \n\nThe aim of this dataset is to distinguish between nasal (class 0) and oral\nsounds (class 1). Five different attributes were chosen to characterize each\nvowel: they are the amplitudes of the five first harmonics AHi, normalised by\nthe total energy Ene (integrated on all the frequencies): AHi/Ene. The\nphonemes are transcribed as follows: sh as in she, dcl as in dark, iy as the\nvowel in she, aa as the vowel in dark, and ao as the first vowel in water.\n\n \n\n### Source\n\n \n\nThe current dataset was formatted by the KEEL repository, but originally\nhosted by the [ELENA Project](https://www.elen.ucl.ac.be/neural-\nnets/Research/Projects/ELENA/elena.htm#stuff). The dataset originates from the\nEuropean ESPRIT 5516 project: ROARS. The aim of this project was the\ndevelopment and the implementation of a real time analytical system for French\nand Spanish speech recognition.\n\n \n\n### Relevant information\n\n \n\nMost of the already existing speech recognition systems are global systems\n(typically Hidden Markov Models and Time Delay Neural Networks) which\nrecognizes signals and do not really use the speech \nspecificities. On the contrary, analytical systems take into account the\narticulatory process leading to the different phonemes of a given language,\nthe idea being to deduce the presence of each of the \nphonetic features from the acoustic observation.\n\n \n\nThe main difficulty of analytical systems is to obtain acoustical parameters\nsufficiantly reliable. These acoustical measurements must :\n\n \n\n \n\n ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b824ceef-6647-4286-999c-6e175cebc886&revisionId=4517efb8-1b0a-485f-9603-1667a3738dc4&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b824ceef-6647-4286-999c-6e175cebc886&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b824ceef-6647-4286-999c-6e175cebc886/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "249", - "name": "tic-tac-toe", - "description": "https://openml.org \n\n**Author** : David W. Aha \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Tic-Tac-\nToe+Endgame) \\- 1991 \n \n**Please cite** : [UCI](http://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n **Tic-Tac-Toe Endgame database** \n \nThis database encodes the complete set of possible board configurations at the\nend of tic-tac-toe games, where \"x\" is assumed to have played first. The\ntarget concept is \"win for x\" (i.e., true when \"x\" has one of 8 possible ways\nto create a \"three-in-a-row\").\n\n \n\n### Attribute Information\n\n \n\n \n \n (x=player x has taken, o=player o has taken, b=blank) \n 1. top-left-square: {x,o,b} \n 2. top-middle-square: {x,o,b} \n 3. top-right-square: {x,o,b} \n 4. middle-left-square: {x,o,b} \n 5. middle-middle-square: {x,o,b} \n 6. middle-right-square: {x,o,b} \n 7. bottom-left-square: {x,o,b} \n 8. bottom-middle-square: {x,o,b} \n 9. bottom-right-square: {x,o,b} \n 10. Class: {positive,negative} \n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b8a104fa-968e-4492-aca6-7ea4b6de9a2d&revisionId=ebb899ed-1abb-4f88-9d7a-f85922b29557&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b8a104fa-968e-4492-aca6-7ea4b6de9a2d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b8a104fa-968e-4492-aca6-7ea4b6de9a2d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "250", - "name": "wine", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - \n \n**Please cite** :\n\n \n\n \n\n 1. \n\nTitle of Database: Wine recognition data \nUpdated Sept 21, 1998 by C.Blake : Added attribute information\n\n \n\n \n\n 2. \n\nSources: \n(a) Forina, M. et al, PARVUS - An Extendible Package for Data \nExploration, Classification and Correlation. Institute of Pharmaceutical \nand Food Analysis and Technologies, Via Brigata Salerno, \n16147 Genoa, Italy.\n\n \n\n(b) Stefan Aeberhard, email: stefan@coral.cs.jcu.edu.au \n(c) July 1991 \n3\\. Past Usage:\n\n \n\n(1) \nS. Aeberhard, D. Coomans and O. de Vel, \nComparison of Classifiers in High Dimensional Settings, \nTech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of \nMathematics and Statistics, James Cook University of North Queensland. \n(Also submitted to Technometrics).\n\n \n\nThe data was used with many others for comparing various \nclassifiers. The classes are separable, though only RDA \nhas achieved 100% correct classification. \n(RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data)) \n(All results using the leave-one-out technique)\n\n \n\nIn a classification context, this is a well posed problem \nwith \"well behaved\" class structures. A good data set \nfor first testing of a new classifier, but not very \nchallenging.\n\n \n\n(2) \nS. Aeberhard, D. Coomans and O. de Vel, \n\"THE CLASSIFICATION PERFORMANCE OF RDA\" \nTech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of \nMathematics and Statistics, James Cook University of North Queensland. \n(Also submitted to Journal of Chemometrics).\n\n \n\nHere, the data was used to illustrate the superior performance of \nthe use of a new appreciation function with RDA.\n\n \n\n \n\n 3. \n\nRelevant Information:\n\n \n\n\\-- These data are the results of a", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b937a775-61e8-4522-8511-09597c6b40c9&revisionId=9adb25dd-4ded-4104-a593-f5aaad1ff3c2&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b937a775-61e8-4522-8511-09597c6b40c9&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b937a775-61e8-4522-8511-09597c6b40c9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "253", - "name": "file-viewer", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=268020c8-c4fb-4137-953a-d5dd59f70e8a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/bb1c9198-b230-4cd5-bda5-866c689fc1b4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "254", - "name": "file-viewer", - "description": "A simple file viewer that lists provided files with download links.\n\nTo connect with other components a link to SharedFolderProvider is needed. The\nviewer will show a list with recent files and their download link. The content\nof last file will be presented if its text or an image.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=811faf16-86aa-41a0-8720-4e4dcc352074&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.3", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&version=1.0.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/bb1c9198-b230-4cd5-bda5-866c689fc1b4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "255", - "name": "file-viewer", - "description": "A simple file viewer that lists provided files with download links.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=aedc2371-cf0d-433a-8878-8b5ab4aec112&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/bb1c9198-b230-4cd5-bda5-866c689fc1b4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "256", - "name": "file-viewer", - "description": "A simple file viewer that lists provided files with download links.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&revisionId=f8389c64-a5e0-4ce4-b97d-ef63de60db19&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bb1c9198-b230-4cd5-bda5-866c689fc1b4&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/bb1c9198-b230-4cd5-bda5-866c689fc1b4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "258", - "name": "recognaize-ui", - "description": "Recognaize UI\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bc867fa6-eb1d-4905-bb76-2ebe413c2e91&revisionId=c7add00b-b4b4-46ee-8594-bd0e067f5665&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bc867fa6-eb1d-4905-bb76-2ebe413c2e91&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/bc867fa6-eb1d-4905-bb76-2ebe413c2e91/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "260", - "name": "autos", - "description": "https://openml.org \n\n**Author** : Jeffrey C. Schlimmer (Jeffrey.Schlimmer@a.gp.cs.cmu.edu) \n \n**Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/Automobile) \\- 1987 \n \n **Please cite** :\n\n \n\n**1985 Auto Imports Database** \n \nThis data set consists of three types of entities: (a) the specification of an\nauto in terms of various characteristics, (b) its assigned insurance risk\nrating, (c) its normalized losses in use as compared to other cars. The second\nrating corresponds to the degree to which the auto is more risky than its\nprice indicates. Cars are initially assigned a risk factor symbol associated\nwith its price. Then, if it is more risky (or less), this symbol is adjusted\nby moving it up (or down) the scale. Actuarians call this process \"symboling\".\nA value of +3 indicates that the auto is risky, -3 that it is probably pretty\nsafe.\n\n \n\nThe third factor is the relative average loss payment per insured vehicle\nyear. This value is normalized for all autos within a particular size\nclassification (two-door small, station wagons, sports/speciality, etc...),\nand represents the average loss per car per year.\n\n \n\nSeveral of the attributes in the database could be used as a \"class\"\nattribute.\n\n \n\nSources: \n \n1) 1985 Model Import Car and Truck Specifications, 1985 Ward's Automotive\nYearbook. \n2) Personal Auto Manuals, Insurance Services Office, 160 Water Street, New\nYork, NY 10038 \n3) Insurance Collision Report, Insurance Institute for Highway Safety,\nWatergate 600, Washington, DC 20037\n\n \n\nPast Usage: \n \nKibler,~D., Aha,~D.~W., & Albert,~M. (1989). Instance-based prediction of\nreal-valued attributes. {it Computational Intelligence}, {it 5}, 51--57.\n\n \n\nAttribute Information:\n\n \n\n> \n>\n>\n> \n>\n> 1. symboling: -3, -2, -1, 0, 1, 2, 3.\n> \n>\n> 2. ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c3822188-8928-4e20-b604-4a274ff34503&revisionId=d1574d67-64d0-4b00-8dfa-7b35d810ddb1&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c3822188-8928-4e20-b604-4a274ff34503&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c3822188-8928-4e20-b604-4a274ff34503/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "277", - "name": "seismic-bumps", - "description": "https://openml.org \n\n**Author** : Sikora M., Wrobel L. \n \n**Source** : UCI \n \n**Please cite** : Sikora M., Wrobel L.: Application of rule induction\nalgorithms for analysis of data collected by seismic hazard monitoring systems\nin coal mines. Archives of Mining Sciences, 55(1), 2010, 91-114.\n\n \n\n \n\n * Title: \n \n\n \n\nseismic-bumps Data Set\n\n \n\n \n\n * Abstract: \n \n\n \n\nThe data describe the problem of high energy (higher than 10^4 J) seismic\nbumps forecasting in a coal mine. Data come from two of longwalls located in a\nPolish coal mine.\n\n \n\n \n\n * Source:\n \n\n \n\nMarek Sikora^{1,2} (marek.sikora '@' polsl.pl), Lukasz Wrobel^{1}\n(lukasz.wrobel '@' polsl.pl) \n(1) Institute of Computer Science, Silesian University of Technology, 44-100\nGliwice, Poland \n(2) Institute of Innovative Technologies EMAG, 40-189 Katowice, Poland\n\n \n\n \n\n * Data Set Information:\n \n\n \n\nMining activity was and is always connected with the occurrence of dangers\nwhich are commonly called mining hazards. A special case of such threat is a\nseismic hazard which frequently occurs in many underground mines. Seismic\nhazard is the hardest detectable and predictable of natural hazards and in\nthis respect it is comparable to an earthquake. More and more advanced seismic\nand seismoacoustic monitoring systems allow a better understanding rock mass\nprocesses and definition of seismic hazard \nprediction methods. Accuracy of so far created methods is however far from\nperfect. Complexity of seismic processes and big disproportion between the\nnumber of low-energy seismic events and the number of high-energy phenomena\n(e.g. > 10^4J) causes the statistical techniques to be insufficient to predict\nseismic hazard. Therefore, it is essential to search for new opportunities of\nbetter hazard prediction, a", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ce2033a8-a389-435d-a64c-90a173e6775f&revisionId=97be56b0-b72d-41cd-8821-99a6a38e7285&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ce2033a8-a389-435d-a64c-90a173e6775f&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ce2033a8-a389-435d-a64c-90a173e6775f/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "278", - "name": "car", - "description": "https://openml.org \n\n**Author** : Marko Bohanec, Blaz Zupan \n \n **Source** : [UCI](https://archive.ics.uci.edu/ml/datasets/car+evaluation) \\-\n1997 \n \n**Please cite** : [UCI](http://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Car Evaluation Database** \n \nThis database was derived from a simple hierarchical decision model originally\ndeveloped for the demonstration of DEX (M. Bohanec, V. Rajkovic: Expert system\nfor decision making. Sistemica 1(1), pp. 145-157, 1990.).\n\n \n\nThe model evaluates cars according to the following concept structure:\n\n \n\n \n \n CAR car acceptability \n . PRICE overall price \n . . buying buying price \n . . maint price of the maintenance \n . TECH technical characteristics \n . . COMFORT comfort \n . . . doors number of doors \n . . . persons capacity in terms of persons to carry \n . . . lug_boot the size of luggage boot \n . . safety estimated safety of the car \n \n\n \n\nInput attributes are printed in lowercase. Besides the target concept (CAR),\nthe model includes three intermediate concepts: PRICE, TECH, COMFORT. Every\nconcept is in the original model related to its lower level descendants by a\nset of examples (for \nthese examples sets see http://www-ai.ijs.si/BlazZupan/car.html).\n\n \n\nThe Car Evaluation Database contains examples with the structural information\nremoved, i.e., directly relates CAR to the six input attributes: buying,\nmaint, doors, persons, lug_boot, safety. Because of known underlying concept\nstructure, this database may be particularly useful for testing constructive\ninduction and structure discovery methods.\n\n \n\n### Changes wit", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cf25ba45-37d5-4548-b3d1-103c5cbbf24c&revisionId=105d6390-095f-4d54-bb6d-5e5c24cc5d88&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=cf25ba45-37d5-4548-b3d1-103c5cbbf24c&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/cf25ba45-37d5-4548-b3d1-103c5cbbf24c/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "279", - "name": "banknote-authentication", - "description": "https://openml.org \n\nAuthor: Volker Lohweg (University of Applied Sciences, Ostwestfalen-Lippe) \n \nSource: [UCI](https://archive.ics.uci.edu/ml/datasets/banknote+authentication)\n\\- 2012 \n \nPlease cite: [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\nDataset about distinguishing genuine and forged banknotes. Data were extracted\nfrom images that were taken from genuine and forged banknote-like specimens.\nFor digitization, an industrial camera usually used for print inspection was\nused. The final images have 400x 400 pixels. Due to the object lens and\ndistance to the investigated object gray-scale pictures with a resolution of\nabout 660 dpi were gained. A Wavelet Transform tool was used to extract\nfeatures from these images.\n\n \n\n### Attribute Information\n\n \n\nV1. variance of Wavelet Transformed image (continuous) \n \nV2. skewness of Wavelet Transformed image (continuous) \n \nV3. curtosis of Wavelet Transformed image (continuous) \n \nV4. entropy of image (continuous)\n\n \n\nClass (target). Presumably 1 for genuine and 2 for forged\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cfd561b4-1973-40a1-a572-b70ffdf4d4a0&revisionId=d507733b-9e93-4bef-9161-01dbd46a505a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=cfd561b4-1973-40a1-a572-b70ffdf4d4a0&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/cfd561b4-1973-40a1-a572-b70ffdf4d4a0/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "282", - "name": "Idiap_BEAT_Databroker_-_M-NIST", - "description": "This data broker provides the public MNIST database as a series of image.The\ndata broker itself is standalone as it relies on the [BOB mnist database\npackage\n](https://www.idiap.ch/software/bob/docs/bob/bob.db.mnist/master/index.html)to\nprovide the data.\n\nThere is no need for any configuration to be done in order to use it.\n\nIt can be used as input to benchmark other Acumos models.There is not image\nprocessing done in it. The output is a two dimensional numpy array that is\nstored as a binary type in order to avoid complex type creation as there's no\nnotion of array size with protobuf.\n\nThe corresponding BEAT experiment can be found on the [BEAT\nplatform](https://www.idiap.ch/software/beat/platform/experiments/amohammadi/amohammadi/mnist_simple/1/mnist1/)\n\n \n\n \n\n.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d1bb94b1-9a27-47d2-a36a-7fceb57a8a9d&revisionId=082d9988-6731-48a9-aa03-22e8ca420541&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d1bb94b1-9a27-47d2-a36a-7fceb57a8a9d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d1bb94b1-9a27-47d2-a36a-7fceb57a8a9d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "284", - "name": "kc1", - "description": "https://openml.org \n\n**Author** : Mike Chapman, NASA \n \n **Source** : [tera-\nPROMISE](http://openscience.us/repo/defect/mccabehalsted/kc1.html) \\- 2004 \n \n **Please cite** : Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE\nRepository of Software Engineering Databases. School of Information Technology\nand Engineering, University of Ottawa, Canada.\n\n \n\n**KC1 Software defect prediction** \n \nOne of the NASA Metrics Data Program defect data sets. Data from software for\nstorage management for receiving and processing ground data. Data comes from\nMcCabe and Halstead features extractors of source code. These features were\ndefined in the 70s in an attempt to objectively characterize code features\nthat are associated with software quality.\n\n \n\n### Attribute Information\n\n \n\n \n\n 1. loc : numeric % McCabe's line count of code\n \n\n 2. v(g) : numeric % McCabe \"cyclomatic complexity\"\n \n\n 3. ev(g) : numeric % McCabe \"essential complexity\"\n \n\n 4. iv(g) : numeric % McCabe \"design complexity\"\n \n\n 5. n : numeric % Halstead total operators + operands\n \n\n 6. v : numeric % Halstead \"volume\"\n \n\n 7. l : numeric % Halstead \"program length\"\n \n\n 8. d : numeric % Halstead \"difficulty\"\n \n\n 9. i : numeric % Halstead \"intelligence\"\n \n\n 10. e : numeric % Halstead \"effort\"\n \n\n 11. b : numeric % Halstead \n \n\n 12. t : numeric % Halstead's time estimator\n \n\n 13. lOCode : numeric % Halstead's line count\n \n\n 14. lOComment : numeric % Halstead's count of lines of comments\n \n\n 15. lOBlank : numeric % Halstead's count of blank lines\n \n\n 16. lOCodeAndComment: numeric\n \n\n 17. uniq_Op : numeric % unique operators\n \n\n 18. uniq_Opnd : numeric % unique operands\n \n\n 19. total_Op : numeric % total operators\n \n\n 20. total_Opnd : numeric % total operands\n \n\n 21. ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d21790e8-a728-4e75-9438-a0644c7c4eb6&revisionId=4201bae5-f63e-416e-bdec-f4c78ed0338f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d21790e8-a728-4e75-9438-a0644c7c4eb6&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d21790e8-a728-4e75-9438-a0644c7c4eb6/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "287", - "name": "trondheim-simulator", - "description": "SUMO/RL implements a pipeline with a traffic simulator of the city of\nTrondheim, Norway, and a reinforcement learning autonomous agent that learns\nand implements traffic control policies with the goal of minimizing the number\nof pollution peaks above a given threshold. Each component can be ran stand\nalone.\n\nThe simulator is a wrapper of the Sumo simulator, that provides more\nfunctionality. The simulator is directly targeted to Trondheim city, with the\ngoal to study the traffic related emissions.\n\nFor a more detailed description check the github repository of the resouce:\nhttps://github.com/tsveiga/AI4EU-RL-Trondheim\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d40fdc2b-fe34-4de3-979d-507b55e96a0f&revisionId=a7ca617c-c274-4500-aff0-3bff21a24298&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d40fdc2b-fe34-4de3-979d-507b55e96a0f&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d40fdc2b-fe34-4de3-979d-507b55e96a0f/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "289", - "name": "edm-env", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&revisionId=44b5ac74-bf4d-42c9-b187-3d827c240553&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5ef8d03-8b7a-471e-b958-4c4a3192eaf8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "290", - "name": "edm-env", - "description": "EDM env component is a numpy based EDM environment that follows the gym API.\nIt emulates the rib machining pattern, with 4 available actions for z-axis\ncontrol: 0 (stay), 1 (lower by 10\u03bcm), 2 (raise by 10\u03bcm), 3 (flush).\nEnvironment returns observed average voltage of the sparks, and the frequency\nof sparking (both are normalized)\n\nThis component exposes a protobuf based control API via 8061 port. Using this\nAPI it can be controlled by the demo EDM agent (edm-agent component in AI4EU\nplatform). For instructions to run the agent and the enviroment together see\nthe component repository at https://github.com/threethirds/edm\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&revisionId=6fe2ae46-9234-4ce6-843b-adbf4e963c63&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.3", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&version=1.0.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5ef8d03-8b7a-471e-b958-4c4a3192eaf8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "291", - "name": "edm-env", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&revisionId=7d04d645-ac32-4751-b953-b461d82e305d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5ef8d03-8b7a-471e-b958-4c4a3192eaf8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "292", - "name": "edm-env", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&revisionId=f7d265c5-821e-4c55-9410-837af3c9d9ab&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5ef8d03-8b7a-471e-b958-4c4a3192eaf8&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5ef8d03-8b7a-471e-b958-4c4a3192eaf8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "294", - "name": "ionosphere", - "description": "https://openml.org \n\n**Author** : Space Physics Group, Applied Physics Laboratory, Johns Hopkins\nUniversity. Donated by Vince Sigillito. \n \n **Source** : [UCI Machine Learning\nRepository](https://archive.ics.uci.edu/ml/datasets/ionosphere) \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Johns Hopkins University Ionosphere database** \n \nThis radar data was collected by a system in Goose Bay, Labrador. This system\nconsists of a phased array of 16 high-frequency antennas with a total\ntransmitted power on the order of 6.4 kilowatts. See the paper for more\ndetails.\n\n \n\n### Attribute information\n\n \n\nReceived signals were processed using an autocorrelation function whose\narguments are the time of a pulse and the pulse number. There were 17 pulse\nnumbers for the Goose Bay system. Instances in this database are described by\n2 attributes per pulse number, corresponding to the complex values returned by\nthe function resulting from the complex electromagnetic signal.\n\n \n\nThe targets were free electrons in the ionosphere. \"Good\" (g) radar returns\nare those showing evidence of some type of structure in the ionosphere. \"Bad\"\n(b) returns are those that do not; their signals pass through the ionosphere.\n\n \n\n### Relevant papers\n\n \n\nSigillito, V. G., Wing, S. P., Hutton, L. V., & Baker, K. B. (1989).\nClassification of radar returns from the ionosphere using neural networks.\nJohns Hopkins APL Technical Digest, 10, 262-266.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d6b959e6-59c4-4311-a0b2-550b9a1bd407&revisionId=48b02822-24ca-4e2e-9e05-f606db3b6be2&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d6b959e6-59c4-4311-a0b2-550b9a1bd407&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d6b959e6-59c4-4311-a0b2-550b9a1bd407/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "304", - "name": "credit-approval", - "description": "https://openml.org \n\n**Author** : Confidential - Donated by Ross Quinlan \n \n**Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/credit+approval) \\-\n1987 \n \n **Please cite** : [UCI](http://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Credit Approval** \nThis file concerns credit card applications. All attribute names and values\nhave been changed to meaningless symbols to protect the confidentiality of the\ndata.\n\n \n\nThis dataset is interesting because there is a good mix of attributes --\ncontinuous, nominal with small numbers of values, and nominal with larger\nnumbers of values. There are also a few missing values.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dc36f026-d89b-4017-943e-560012105d3d&revisionId=9238fdfe-0824-45cf-933d-d51cb54deb54&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=dc36f026-d89b-4017-943e-560012105d3d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/dc36f026-d89b-4017-943e-560012105d3d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "309", - "name": "segment", - "description": "https://openml.org \n\n**Author** : University of Massachusetts Vision Group, Carla Brodley \n \n **Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/image+segmentation)\n\\- 1990 \n \n **Please cite** : [UCI](http://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Image Segmentation Data Set** \nThe instances were drawn randomly from a database of 7 outdoor images. The\nimages were hand-segmented to create a classification for every pixel. Each\ninstance is a 3x3 region.\n\n \n\n### Attribute Information\n\n \n\n \n\n 1. region-centroid-col: the column of the center pixel of the region.\n \n\n 2. region-centroid-row: the row of the center pixel of the region.\n \n\n 3. region-pixel-count: the number of pixels in a region = 9.\n \n\n 4. short-line-density-5: the results of a line extractoin algorithm that \ncounts how many lines of length 5 (any orientation) with \nlow contrast, less than or equal to 5, go through the region.\n\n \n\n 5. short-line-density-2: same as short-line-density-5 but counts lines \nof high contrast, greater than 5.\n\n \n\n 6. vedge-mean: measure the contrast of horizontally \nadjacent pixels in the region. There are 6, the mean and \nstandard deviation are given. This attribute is used as \na vertical edge detector.\n\n \n\n 7. vegde-sd: (see 6)\n \n\n 8. hedge-mean: measures the contrast of vertically adjacent \npixels. Used for horizontal line detection.\n\n \n\n 9. hedge-sd: (see 8).\n \n\n 10. intensity-mean: the average over the region of (R + G + B)/3\n \n\n 11. rawred-mean: the average over the region of the R value.\n \n\n 12. rawblue-mean: the average over the region of the B value.\n \n\n 13. rawgreen-mean: the average over the region of the G value.\n \n\n 14. exred-mean: measure the excess red: (2R - (G + B))\n \n\n 15. exblue-mean: measure the exces", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df660739-9aee-423a-a44e-df9b637cfe1b&revisionId=6f1d61b0-1028-44ee-ac03-ce7b562550c3&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df660739-9aee-423a-a44e-df9b637cfe1b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/df660739-9aee-423a-a44e-df9b637cfe1b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "310", - "name": "segment", - "description": "https://openml.org \n\n**Author** : University of Massachusetts Vision Group, Carla Brodley \n \n **Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/image+segmentation)\n\\- 1990 \n \n **Please cite** : [UCI](http://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Image Segmentation Data Set** \nThe instances were drawn randomly from a database of 7 outdoor images. The\nimages were hand-segmented to create a classification for every pixel. Each\ninstance is a 3x3 region.\n\n \n\n **Major changes w.r.t. version 2: ignored first two variables as they do not\nfit the classification task (they reflect the location of the sample in the\noriginal image). The 3rd is constant, so should also be ignored.**\n\n \n\n### Attribute Information\n\n \n\n \n\n 1. short-line-density-5: the results of a line extractoin algorithm that \ncounts how many lines of length 5 (any orientation) with \nlow contrast, less than or equal to 5, go through the region.\n\n \n\n 2. short-line-density-2: same as short-line-density-5 but counts lines \nof high contrast, greater than 5.\n\n \n\n 3. vedge-mean: measure the contrast of horizontally \nadjacent pixels in the region. There are 6, the mean and \nstandard deviation are given. This attribute is used as \na vertical edge detector.\n\n \n\n 4. vegde-sd: (see 6)\n \n\n 5. hedge-mean: measures the contrast of vertically adjacent \npixels. Used for horizontal line detection.\n\n \n\n 6. hedge-sd: (see 8).\n \n\n 7. intensity-mean: the average over the region of (R + G + B)/3\n \n\n 8. rawred-mean: the average over the region of the R value.\n \n\n 9. rawblue-mean: the average over the region of the B value.\n \n\n 10. rawgreen-mean: the average over the region of the G value.\n \n\n 11. exred-mean: measure the excess red: (2R - (G + B))\n \n\n 12. exblue-mean: measure th", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df660739-9aee-423a-a44e-df9b637cfe1b&revisionId=a0a9b64d-774e-438d-b13c-c9c20e220da0&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df660739-9aee-423a-a44e-df9b637cfe1b&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/df660739-9aee-423a-a44e-df9b637cfe1b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "313", - "name": "pc1", - "description": "https://openml.org \n\n**Author** : Mike Chapman, NASA \n \n **Source** : [tera-\nPROMISE](http://openscience.us/repo/defect/mccabehalsted/pc1.html) \\- 2004 \n \n **Please cite** : Sayyad Shirabad, J. and Menzies, T.J. (2005) The PROMISE\nRepository of Software Engineering Databases. School of Information Technology\nand Engineering, University of Ottawa, Canada.\n\n \n\n**PC1 Software defect prediction** \n \nOne of the NASA Metrics Data Program defect data sets. Data from flight\nsoftware for earth orbiting satellite. Data comes from McCabe and Halstead\nfeatures extractors of source code. These features were defined in the 70s in\nan attempt to objectively characterize code features that are associated with\nsoftware quality.\n\n \n\n### Attribute Information\n\n \n\n \n\n 1. loc : numeric % McCabe's line count of code\n \n\n 2. v(g) : numeric % McCabe \"cyclomatic complexity\"\n \n\n 3. ev(g) : numeric % McCabe \"essential complexity\"\n \n\n 4. iv(g) : numeric % McCabe \"design complexity\"\n \n\n 5. n : numeric % Halstead total operators + operands\n \n\n 6. v : numeric % Halstead \"volume\"\n \n\n 7. l : numeric % Halstead \"program length\"\n \n\n 8. d : numeric % Halstead \"difficulty\"\n \n\n 9. i : numeric % Halstead \"intelligence\"\n \n\n 10. e : numeric % Halstead \"effort\"\n \n\n 11. b : numeric % Halstead \n \n\n 12. t : numeric % Halstead's time estimator\n \n\n 13. lOCode : numeric % Halstead's line count\n \n\n 14. lOComment : numeric % Halstead's count of lines of comments\n \n\n 15. lOBlank : numeric % Halstead's count of blank lines\n \n\n 16. lOCodeAndComment: numeric\n \n\n 17. uniq_Op : numeric % unique operators\n \n\n 18. uniq_Opnd : numeric % unique operands\n \n\n 19. total_Op : numeric % total operators\n \n\n 20. total_Opnd : numeric % total operands\n \n\n 21. branchCount : numeric % of t", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e20b38c6-c46c-4cf6-96cf-c3ce14285c88&revisionId=c63c438d-ba5f-4544-94f2-8be84fb8e252&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e20b38c6-c46c-4cf6-96cf-c3ce14285c88&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e20b38c6-c46c-4cf6-96cf-c3ce14285c88/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "318", - "name": "SensorThings_API_connector", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&revisionId=5d3aee4b-03e3-4e99-8fe4-80193da4a04e&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e4e10524-917f-4515-860f-46d7d90106e5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "319", - "name": "SensorThings_API_connector", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&revisionId=a8705f9d-18cd-4d6f-b1a6-ed9a5dfa2d54&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e4e10524-917f-4515-860f-46d7d90106e5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "320", - "name": "SensorThings_API_connector", - "description": "This is a generic connector for the SensorThings API. It will allow you to\nconnect to any SensorThings API in the world and therefore potentially recover\ndata on any domain. For example, this would facilitate the retrieval of public\nCovid19 data, harvested from various sources including Johns Hopkins and RKI,\nor from near-real-time air quality across Europe, from both national sources\n(harvested from AT SOS and WFS) and Europe (EEA).\n\nTo illustrate the potential uses of these different SensorThings API (with a\nsingle connector), one can take a look at these different applications: a\nvisualisation tool[[1]](about:blank) bringing together French and German flow\ndata, a covid-19 dashboard[[2]](about:blank) and the [Windy Web\nsite](https://www.windy.com/fr/-NO2-no2?camsEu,no2,47.905,1.908,5)[[3]](about:blank)\nfocused on the weather forecast.\n\n \n\n[[1]](about:blank) \n\n \n\n[[2]](about:blank) \n\n \n\n[[3]](about:blank)\n\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&revisionId=baf4c46b-673b-48d0-ac27-1fa2a87ba625&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.3", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&version=1.0.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e4e10524-917f-4515-860f-46d7d90106e5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "321", - "name": "SensorThings_API_connector", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&revisionId=f7bd8ec8-795c-471b-b4d8-5339f907b241&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e4e10524-917f-4515-860f-46d7d90106e5&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e4e10524-917f-4515-860f-46d7d90106e5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "327", - "name": "IoTxKG", - "description": "The Internet of Things (IoT) primary objective is to make a hyper-connected\nworld for various application domains. However, IoT suffers from a lack of\ninteroperability leading to a substantial threat to the predicted economic\nvalue. Schema.org provides semantic interoperability to structure\nheterogeneous data on the Web. An extension of this vocabulary for the IoT\ndomain (iot.schema.org) is an ongoing research effort to address semantic\ninteroperability for the Web of Things (WoT). To design this vocabulary, a\ncentral challenge is to identify the main topics (concepts and properties)\nautomatically from existing knowledge in IoT applications. IoTxKG\nautomatically 1) identifies the most important topics from existing ontologies\nof the 4 KE4WoT challenge domains \u2013 smart building, mobility, energy and\nweather \u2013 based on suitable language models and 2) visualises the topics using\nboth wordclouds and interactive graph-based word clouds.\n\n \n\nThe following technologies are employed in IoTxKG\n\n * W3C Semantic Web Technologies (e.g. RDF, OWL, SPARQL, SKOS)\n * Deep Learning Language Models (Word2vec, BERT, ERNIE, GPT)\n * Clustering Algorithms (e.g. k-means clustering)\n * Graph-based Visualization\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e55074a8-d66b-4e83-84c9-e0cd4371c79b&revisionId=75ce6a2f-1762-4907-8b94-a12ec9607f23&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e55074a8-d66b-4e83-84c9-e0cd4371c79b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e55074a8-d66b-4e83-84c9-e0cd4371c79b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "328", - "name": "ai4eu-robotics-pump-6144-raw-broker", - "description": "The robotic pump demonstrator represents a hydraulic pump that can be mounted\non an industrial robot, for example, to pump liquid paint for spray painting.\nOn this pump, one accelerometer is mounted for vibration monitoring and\nrecording.\n\nThe pump can be controlled in terms of speed (rotations per minute, rpm),\naffecting the throughput of paint and the pressure in and out of the pump.\n\nThe dataset consists of 380 million measurements of several sensor data of the\npump system in 1-second intervals over two months in 2020.\n\n[The complete dataset & documentation is available on\nZenodo.](https://zenodo.org/record/5729187)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e5be6960-bea7-4d62-8301-be494ab1ac46&revisionId=2626b3dc-d3a3-4f3c-b7b9-e523758dd5b5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e5be6960-bea7-4d62-8301-be494ab1ac46&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e5be6960-bea7-4d62-8301-be494ab1ac46/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "329", - "name": "ai4eu-robotics-pump-6144-raw-broker", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e5be6960-bea7-4d62-8301-be494ab1ac46&revisionId=65423d0c-1238-47a8-94fb-98d39df1d460&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e5be6960-bea7-4d62-8301-be494ab1ac46&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e5be6960-bea7-4d62-8301-be494ab1ac46/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "330", - "name": "TEK_THOR_DATA_CURATION", - "description": "**AI4EU - THOR **THOR solution consists in a Hybrid optimization solution to\nmake the right decision on the amount spare parts in stock, considering past\nsales and forecasts. The purchase decision considers as input information\ncurrent stock status, production needs, production forecast, sales forecast,\nvariability Price of stock material and several restriction parameters.\n\n **Data-Curation**. Different datasets extracted from company ERP are analyzed\nand normalized by a \u2018Quality\u2019 module, which uses different statistical\ntechniques to calculate quality metrics and fix missing values.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e6d5038c-424a-44ce-9415-34fa129bf9a5&revisionId=bf98fd1e-fdf2-4ada-9a9a-c30fb1a90fea&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e6d5038c-424a-44ce-9415-34fa129bf9a5&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e6d5038c-424a-44ce-9415-34fa129bf9a5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "331", - "name": "breast-cancer", - "description": "https://openml.org \n\n**Author** : \n \n**Source** : Unknown - \n \n**Please cite** :\n\n \n\nCitation Request: \nThis breast cancer domain was obtained from the University Medical Centre, \nInstitute of Oncology, Ljubljana, Yugoslavia. Thanks go to M. Zwitter and \nM. Soklic for providing the data. Please include this citation if you plan \nto use this database.\n\n \n\n \n\n 1. \n\nTitle: Breast cancer data (Michalski has used this)\n\n \n\n \n\n 2. \n\nSources: \n\\-- Matjaz Zwitter & Milan Soklic (physicians) \nInstitute of Oncology \nUniversity Medical Center \nLjubljana, Yugoslavia \n\\-- Donors: Ming Tan and Jeff Schlimmer (Jeffrey.Schlimmer@a.gp.cs.cmu.edu) \n\\-- Date: 11 July 1988\n\n \n\n \n\n 3. \n\nPast Usage: (Several: here are some) \n\\-- Michalski,R.S., Mozetic,I., Hong,J., & Lavrac,N. (1986). The \nMulti-Purpose Incremental Learning System AQ15 and its Testing \nApplication to Three Medical Domains. In Proceedings of the \nFifth National Conference on Artificial Intelligence, 1041-1045, \nPhiladelphia, PA: Morgan Kaufmann. \n\\-- accuracy range: 66%-72% \n\\-- Clark,P. & Niblett,T. (1987). Induction in Noisy Domains. In \nProgress in Machine Learning (from the Proceedings of the 2nd \nEuropean Working Session on Learning), 11-30, Bled, \nYugoslavia: Sigma Press. \n\\-- 8 test results given: 65%-72% accuracy range \n\\-- Tan, M., & Eshelman, L. (1988). Using weighted networks to \nrepresent classification knowledge in noisy domains. Proceedings \nof the Fifth International Conference on Machine Learning, 121-134, \nAnn Arbor, MI. \n\\-- 4 systems tested: accuracy range was 68%-73.5% \n\\-- Cestnik,G., Konenenko,I, & Bratko,I. (1987). Assistant-86: A \nKnowledge-Elicitation Tool for Sophisticated Users. In I.Bratko \n& N.Lavrac (Eds.) Progress in Machine Learning, 31-45, Sig", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e76a359c-ad44-48f2-a5be-f969434c0079&revisionId=62a3f013-f8ae-46b8-9887-aadd4b079659&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e76a359c-ad44-48f2-a5be-f969434c0079&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e76a359c-ad44-48f2-a5be-f969434c0079/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "335", - "name": "GAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_50_EDM-2_001", - "description": "https://openml.org \n\nGAMETES_Heterogeneity_20atts_1600_Het_0.4_0.2_50_EDM-2_001-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ea98c298-5fcb-4b37-8262-828d3605cfaf&revisionId=70f884e0-9a7e-458b-bdf0-ad3bba0667dc&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ea98c298-5fcb-4b37-8262-828d3605cfaf&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ea98c298-5fcb-4b37-8262-828d3605cfaf/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "347", - "name": "thyroid-new", - "description": "https://openml.org \n\nnew-thyroid-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=edc15172-70fb-489b-bff1-c1c28c61ce6b&revisionId=96529752-d961-4e5e-8f0f-b104c3e1b603&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=edc15172-70fb-489b-bff1-c1c28c61ce6b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/edc15172-70fb-489b-bff1-c1c28c61ce6b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "348", - "name": "churn", - "description": "https://openml.org \n\n**Author** : Unknown \n \n **Source** : [PMLB](https://github.com/EpistasisLab/penn-ml-\nbenchmarks/tree/master/datasets/classification),\n[BigML](https://bigml.com/user/francisco/gallery/dataset/5163ad540c0b5e5b22000383),\nSupposedly from UCI but I can't find it there. \n \n **Please cite** :\n\n \n\nA dataset relating characteristics of telephony account features and usage and\nwhether or not the customer churned. Originally used in [Discovering Knowledge\nin Data: An Introduction to Data Mining](http://secs.ac.in/wp-\ncontent/CSE_PORTAL/DataMining_Daniel.pdf).\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ee42788e-0ec5-45a9-97e4-6a0634ac84e2&revisionId=8cf5e565-aff3-41fd-ac89-c428b59a0a21&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ee42788e-0ec5-45a9-97e4-6a0634ac84e2&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ee42788e-0ec5-45a9-97e4-6a0634ac84e2/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "349", - "name": "blood-transfusion-service-center", - "description": "https://openml.org \n\n**Author** : Prof. I-Cheng Yeh \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/Blood+Transfusion+Service+Center) \n \n **Please cite** : Yeh, I-Cheng, Yang, King-Jang, and Ting, Tao-Ming,\n\"Knowledge discovery on RFM model using Bernoulli sequence\", Expert Systems\nwith Applications, 2008.\n\n \n\n**Blood Transfusion Service Center Data Set** \n \nData taken from the Blood Transfusion Service Center in Hsin-Chu City in\nTaiwan -- this is a classification problem.\n\n \n\nTo demonstrate the RFMTC marketing model (a modified version of RFM), this\nstudy adopted the donor database of Blood Transfusion Service Center in Hsin-\nChu City in Taiwan. The center passes their blood transfusion service bus to\none university in Hsin-Chu City to gather blood donated about every three\nmonths. To build an FRMTC model, we selected 748 donors at random from the\ndonor database.\n\n \n\n### Attribute Information\n\n \n\n \n\n * V1: Recency - months since last donation\n \n\n * V2: Frequency - total number of donation\n \n\n * V3: Monetary - total blood donated in c.c.\n \n\n * V4: Time - months since first donation), and a binary variable representing whether he/she donated blood in March 2007 (1 stand for donating blood; 0 stands for not donating blood).\n \n\n \n\nThe target attribute is a binary variable representing whether he/she donated\nblood in March 2007 (2 stands for donating blood; 1 stands for not donating\nblood).\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eea25848-33cf-4b43-9677-1e932d8e710a&revisionId=9b1bfbf7-438a-45a7-99b0-c3c470a2551c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eea25848-33cf-4b43-9677-1e932d8e710a&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/eea25848-33cf-4b43-9677-1e932d8e710a/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "351", - "name": "edm_aad_data_node_cl", - "description": "**EDM RL Controller predictions (Solution Provider: Artificialy SA)**\n\n \n\nReinforcement learning applied to Electrical discharge machining (EDM) control\nfor the AI4EU project with Agie Charmilles SA.\n\n \n\nThe solution consist of two nodes: `data_node` server which streams a\nDataFrame of observations (EDM machine states) read from the path provided by\nthe client (`infile`); and an `agent_node` server which predicts control\nactions based on the agent / controller specified by the client. Output\npredictions are stored inside the `./data_predictions/` folder of the\n`agent_node` Docker container.\n\n \n\nTo use this solution, please use the Docker container and the additional files\n(which are in the Documents tap of the model in the marketplace) from both the\n`data_node` and `agent_node`. They are both in the AI4EU platform market place\nnamed as `edm_aad_agent_node_cl` and `edm_aad_data_node_cl`\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f257ab28-e846-4d05-8fc1-9e53cddab23a&revisionId=0672b76d-0046-4ff5-afc1-5e7a64554451&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f257ab28-e846-4d05-8fc1-9e53cddab23a&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f257ab28-e846-4d05-8fc1-9e53cddab23a/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } + "alternate_name": [ + "alias 1", + "alias 2" ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "352", - "name": "led24", - "description": "https://openml.org \n\nled24-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f7d84667-d8e6-4dc3-af68-0845d7e984e2&revisionId=27254760-7bc4-4b93-b466-3e5c93490461&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f7d84667-d8e6-4dc3-af68-0845d7e984e2&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f7d84667-d8e6-4dc3-af68-0845d7e984e2/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } + "application_area": [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "354", - "name": "ilpd", - "description": "https://openml.org \n\n**Author** : Bendi Venkata Ramana, M. Surendra Prasad Babu, N. B.\nVenkateswarlu \n \n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/ILPD+\\(Indian+Liver+Patient+Dataset\\))\n\\- 2012 \n \n **Please cite** : [UCI](https://archive.ics.uci.edu/ml/citation_policy.html)\n\n \n\n**Indian Liver Patient Dataset** \n \nThis data set contains 416 liver patient records and 167 non liver patient\nrecords.The data set was collected from north east of Andhra Pradesh, India.\nThe class label divides the patients into 2 groups (liver patient or not).\nThis data set contains 441 male patient records and 142 female patient\nrecords.\n\n \n\n### Attribute Information\n\n \n\nV1. Age of the patient. Any patient whose age exceeded 89 is listed as being\nof age \"90\". \n \nV2. Gender of the patient \n \nV3. Total Bilirubin \n \nV4. Direct Bilirubin \n \nV5. Alkphos Alkaline Phosphatase \n \nV6. Sgpt Alanine Aminotransferase \n \nV7. Sgot Aspartate Aminotransferase \n \nV8. Total Proteins \n \nV9. Albumin \n \nV10. A/G Ratio Albumin and Globulin Ratio\n\n \n\nA feature indicating a train-test split has been removed.\n\n \n\n### Relevant Papers\n\n \n\n \n\n 1. Bendi Venkata Ramana, Prof. M. S. Prasad Babu and Prof. N. B. Venkateswarlu, A Critical Comparative Study of Liver Patients from USA and INDIA: An Exploratory Analysis\u009d, International Journal of Computer Science Issues, ISSN:1694-0784, May 2012. \n \n\n 2. Bendi Venkata Ramana, Prof. M. S. Prasad Babu and Prof. N. B. Venkateswarlu, A Critical Study of Selected Classification Algorithms for Liver Disease Diagnosis, International Journal of Database Management Systems (IJDMS), Vol.3, No.2, ISSN : 0975-5705, PP 101-114, May 2011.\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8016853-8e2c-45f3-8326-bd38387351e7&revisionId=050f2f0a-629d-4f41-a381-14220bd76465&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], "citation": [], "contact": [], "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f8016853-8e2c-45f3-8326-bd38387351e7&version=1.0.0", - "media": [ + "distribution": [ { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f8016853-8e2c-45f3-8326-bd38387351e7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/dataset/file.csv", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "text/csv", + "name": "Name of this file.", + "technology_readiness_level": 1 } ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "355", - "name": "ModelInitializer", - "description": "The Model Initializer is an infrastructure node that can provide initial\nconfig parameters to a model.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f888ec3c-1076-4e57-b56a-05f055aa4760&revisionId=76c80c0b-1883-4cb3-8f6c-4857c77ac4d5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], "funder": [], "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f888ec3c-1076-4e57-b56a-05f055aa4760&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f888ec3c-1076-4e57-b56a-05f055aa4760/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } + "industrial_sector": [ + "Finance", + "eCommerce", + "Healthcare" ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "356", - "name": "solar-flare", - "description": "https://openml.org \n\nflare-pmlb\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&revisionId=276fb3d2-00a8-4695-abdc-bbcc8d8ed604&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } + "keyword": [ + "keyword1", + "keyword2" ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "357", - "name": "solar-flare", - "description": "https://openml.org \n\n**Author** : Gary Bradshaw \n \n**Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/solar+flare) \n \n**Please cite** :\n\n \n\n**Solar Flare database** \nRelevant Information: \n\\-- The database contains 3 potential classes, one for the number of times a \ncertain type of solar flare occured in a 24 hour period. \n\\-- Each instance represents captured features for 1 active region on the \nsun. \n\\-- The data are divided into two sections. The second section (flare.data2) \nhas had much more error correction applied to the it, and has \nconsequently been treated as more reliable.\n\n \n\nNumber of Instances: flare.data1: 323, flare.data2: 1066\n\n \n\nNumber of attributes: 13 (includes 3 class attributes)\n\n \n\n### Attribute Information\n\n \n\n \n \n 1. Code for class (modified Zurich class) (A,B,C,D,E,F,H) \n 2. Code for largest spot size (X,R,S,A,H,K) \n 3. Code for spot distribution (X,O,I,C) \n 4. Activity (1 = reduced, 2 = unchanged) \n 5. Evolution (1 = decay, 2 = no growth, \n 3 = growth) \n 6. Previous 24 hour flare activity code (1 = nothing as big as an M1, \n 2 = one M1, \n 3 = more activity than one M1) \n 7. Historically-complex (1 = Yes, 2 = No) \n 8. Did region become historically complex (1 = yes, 2 = no) \n on this pass across the sun's disk \n 9. Area (1 = small, 2 = large) \n \n\n \n\n \n\n 1. Area of the largest spot (1 = <=5, 2 = >5)\n \n\n \n\nFrom all these predictors three classes of flares are predicted, which a", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&revisionId=91ca0a1e-60e1-45ce-a2c0-7c3f79498739&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&version=1.0.0", + "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", "media": [ { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/image.jpeg", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "image/jpeg", + "name": "Name of this file." } ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "358", - "name": "solar-flare", - "description": "https://openml.org \n\n**Author** : Gary Bradshaw \n \n**Source** : [UCI](http://archive.ics.uci.edu/ml/datasets/solar+flare) \n \n**Please cite** :\n\n \n\n**Solar Flare database** \nRelevant Information: \n\\-- The database contains 3 potential classes, one for the number of times a \ncertain type of solar flare occured in a 24 hour period. \n\\-- Each instance represents captured features for 1 active region on the \nsun. \n\\-- The data are divided into two sections. The second section (flare.data2) \nhas had much more error correction applied to the it, and has \nconsequently been treated as more reliable.\n\n \n\nNumber of Instances: flare.data1: 323, flare.data2: 1066\n\n \n\nNumber of attributes: 13 (includes 3 class attributes)\n\n \n\n### Attribute Information\n\n \n\n \n \n 1. Code for class (modified Zurich class) (A,B,C,D,E,F,H) \n 2. Code for largest spot size (X,R,S,A,H,K) \n 3. Code for spot distribution (X,O,I,C) \n 4. Activity (1 = reduced, 2 = unchanged) \n 5. Evolution (1 = decay, 2 = no growth, \n 3 = growth) \n 6. Previous 24 hour flare activity code (1 = nothing as big as an M1, \n 2 = one M1, \n 3 = more activity than one M1) \n 7. Historically-complex (1 = Yes, 2 = No) \n 8. Did region become historically complex (1 = yes, 2 = no) \n on this pass across the sun's disk \n 9. Area (1 = small, 2 = large) \n \n\n \n\n \n\n 1. Area of the largest spot (1 = <=5, 2 = >5)\n \n\n \n\nFrom all these predictors three classes of flares are predicted, which a", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&revisionId=f333bc3c-b87a-42b8-a5e9-5290036cc520&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc&version=1.0.1", - "media": [ + "note": [ { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f8bc6336-a607-4bfc-8f3a-5ae4a8e878cc/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" + "value": "A brief record of points or ideas about this AI resource." } ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "360", - "name": "ai4eu-robotics-wrist-6144-fft-broker", - "description": "The robotic wrist demonstrator represents a mechanical wrist with three axes\nthat can hold tools, e.g. for spray painting in combination with a pump. On\nthis robotic wrist, two accelerometers are mounted for vibration monitoring\nand recording: one in the movable front part of the wrist and one in the\nshaft. The wrist can be controlled through the torque or the designated\nposition of each axis\u2019 motor.\n\nThe dataset consists of 1.8 billion measurements of several sensor data of the\nrobotic wrist in 1-second intervals over six months in 2020.\n\n[The complete dataset & description is available on\nZenodo](https://zenodo.org/record/5729818)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f9425d71-4d33-4af5-b4ba-25d6fa8aa3c4&revisionId=2399eb3e-67fb-419f-a630-df48c3cf138a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f9425d71-4d33-4af5-b4ba-25d6fa8aa3c4&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f9425d71-4d33-4af5-b4ba-25d6fa8aa3c4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } + "research_area": [ + "Explainable AI", + "Physical AI" ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} - }, - { - "platform": "ai4experiments", - "platform_identifier": "363", - "name": "climate-model-simulation-crashes", - "description": "https://openml.org \n\n**Author** : D. Lucas, R. Klein, J. Tannahill, D. Ivanova, S. Brandon, D.\nDomyancic, Y. Zhang.\n\n \n\n **Source** :\n[UCI](https://archive.ics.uci.edu/ml/datasets/climate+model+simulation+crashes)\n\n \n\n **Please Cite** : \nLucas, D. D., Klein, R., Tannahill, J., Ivanova, D., Brandon, S., Domyancic,\nD., and Zhang, Y.: Failure analysis of parameter-induced simulation crashes in\nclimate models, Geosci. Model Dev. Discuss., 6, 585-623, [Web\nLink](http://www.geosci-model-dev-\ndiscuss.net/6/585/2013/gmdd-6-585-2013.html), 2013.\n\n \n\nSource:\n\n \n\nD. Lucas (ddlucas .at. alum.mit.edu), Lawrence Livermore National Laboratory;\nR. Klein (rklein .at. astron.berkeley.edu), Lawrence Livermore National\nLaboratory & U.C. Berkeley; J. Tannahill (tannahill1 .at. llnl.gov), Lawrence\nLivermore National Laboratory; D. Ivanova (ivanova2 .at. llnl.gov), Lawrence\nLivermore National Laboratory; S. Brandon (brandon1 .at. llnl.gov), Lawrence\nLivermore National Laboratory; D. Domyancic (domyancic1 .at. llnl.gov),\nLawrence Livermore National Laboratory; Y. Zhang (zhang24 .at. llnl.gov),\nLawrence Livermore National Laboratory .\n\n \n\nThis data was constructed using LLNL's UQ Pipeline, was created under the\nauspices of the US Department of Energy by Lawrence Livermore National\nLaboratory under Contract DE-AC52-07NA27344, was funded by LLNL's Uncertainty\nQuantification Strategic Initiative Laboratory Directed Research and\nDevelopment Project under tracking code 10-SI-013, and is released under UCRL\nnumber LLNL-MISC-633994.\n\n \n\nData Set Information:\n\n \n\nThis dataset contains records of simulation crashes encountered during climate\nmodel uncertainty quantification (UQ) ensembles. Ensemble members were\nconstructed using a Latin hypercube method in LLNL's UQ Pipeline software\nsystem to", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fba9e526-edb4-4fb0-9cb1-31ea29f07a2f&revisionId=6b6905e7-2855-43c9-a344-c01991e4efca&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "issn": "00000000", - "measurement_technique": "", - "temporal_coverage": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "funder": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fba9e526-edb4-4fb0-9cb1-31ea29f07a2f&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/fba9e526-edb4-4fb0-9cb1-31ea29f07a2f/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } + "scientific_domain": [ + "Anomaly Detection", + "Voice Recognition", + "Computer Vision." ], - "note": [], - "research_area": [], - "scientific_domain": [], - "size": {}, - "spatial_coverage": {} + "size": { + "unit": "Rows", + "value": 100 + }, + "spatial_coverage": { + "address": { + "region": "California", + "locality": "Paris", + "street": "Wetstraat 170", + "postal_code": "1040 AA", + "address": "Wetstraat 170, 1040 Brussel", + "country": "BEL" + }, + "geo": { + "latitude": 37.42242, + "longitude": -122.08585, + "elevation_millimeters": 0 + } + } } -] +] \ No newline at end of file diff --git a/src/connectors/example/resources/resource/events.json b/src/connectors/example/resources/resource/events.json index 2c2fbf61..65c16791 100644 --- a/src/connectors/example/resources/resource/events.json +++ b/src/connectors/example/resources/resource/events.json @@ -2,13 +2,12 @@ { "platform": "example", "platform_identifier": "1", - "name": "The name of the Event", + "name": "Name of the Event", "description": "A description.", "same_as": "https://www.example.com/resource/this_resource", "date_published": "2022-01-01T15:15:00.000", "version": "1.1.0", "pid": "https://doi.org/10.1000/182", - "organiser": 2, "aiod_entry": { "editor": [], "status": "draft" diff --git a/src/connectors/example/resources/resource/experiments.json b/src/connectors/example/resources/resource/experiments.json index 9dd688c3..7b3acb48 100644 --- a/src/connectors/example/resources/resource/experiments.json +++ b/src/connectors/example/resources/resource/experiments.json @@ -1,1982 +1,96 @@ [ - { - "platform": "ai4experiments", - "platform_identifier": "366", - "name": "Sudoku Tutorial", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=00aff3ab-94cb-4969-93c3-a95be53c05d2&revisionId=2f7c7ef1-262c-4a73-8393-aef1ded7cad3&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "2", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=00aff3ab-94cb-4969-93c3-a95be53c05d2&version=2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/00aff3ab-94cb-4969-93c3-a95be53c05d2/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "367", - "name": "Sudoku Tutorial", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=00aff3ab-94cb-4969-93c3-a95be53c05d2&revisionId=2f7c7ef1-262c-4a73-8393-aef1ded7cad3&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=00aff3ab-94cb-4969-93c3-a95be53c05d2&version=1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/00aff3ab-94cb-4969-93c3-a95be53c05d2/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "368", - "name": "AI4IoT-Calibration-Solution", - "description": "This solution implements a pipeline to air quality sensor calibration in the\ncontext of the AI4IoT pilot, consisting of three modules which, together,\noffer a solution for calibration of low-cost air quality sensors in the city\nof Trondheim, Norway. The modules are: a `data source` which fetches data from\nseveral external APIs and concatenates them, a `calibration` which predicts\nthe true value at the sensor location and a simple `visualization` module\nwhich implements a web interface to analyze the output of the calibration\nprocedure.\n\n \n\nMore info on the pipeline can be found at the github repository:\nhttps://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&revisionId=478028bb-1c58-4641-9bc0-eba716119aec&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.2", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&version=1.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0fbc7cc0-843a-489b-bab1-40e4d2700680/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "369", - "name": "AI4IoT-Calibration-Solution", - "description": "This solution implements a pipeline to air quality sensor calibration in the\ncontext of the AI4IoT pilot, consisting of three modules which, together,\noffer a solution for calibration of low-cost air quality sensors in the city\nof Trondheim, Norway. The modules are: a `data source` which fetches data from\nseveral external APIs and concatenates them, a `calibration` which predicts\nthe true value at the sensor location and a simple `visualization` module\nwhich implements a web interface to analyze the output of the calibration\nprocedure.\n\n \n\nMore info on the pipeline can be found at the github repository:\nhttps://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&revisionId=478028bb-1c58-4641-9bc0-eba716119aec&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0fbc7cc0-843a-489b-bab1-40e4d2700680/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "370", - "name": "AI4IoT-Calibration-Solution", - "description": "This solution implements a pipeline to air quality sensor calibration in the\ncontext of the AI4IoT pilot, consisting of three modules which, together,\noffer a solution for calibration of low-cost air quality sensors in the city\nof Trondheim, Norway. The modules are: a `data source` which fetches data from\nseveral external APIs and concatenates them, a `calibration` which predicts\nthe true value at the sensor location and a simple `visualization` module\nwhich implements a web interface to analyze the output of the calibration\nprocedure.\n\n \n\nMore info on the pipeline can be found at the github repository:\nhttps://github.com/ntnu-ai-lab/ai4iot-calib-pipeline.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&revisionId=478028bb-1c58-4641-9bc0-eba716119aec&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0fbc7cc0-843a-489b-bab1-40e4d2700680&version=1.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0fbc7cc0-843a-489b-bab1-40e4d2700680/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "371", - "name": "MusicDetection-pipeline", - "description": "This simple pipeline automatically analyzes audio content with the\nMusicDetection model and annotates music attributes like genre and tempo.\n\nContent to be analyzed can be provided via file upload, detection results will\nbe presented in WebUI and can be downloaded.\n\n \n\nRemark: Since MusicDetection model is not publicly accessible, for the\ndeployment of this pipeline it is necessary to acquire access credentials.\nPlease send you requests to[ ai-assets@idmt.fraunhofer.de](mailto:ai-\nassets@idmt.fraunhofer.de??subject=Music-Detection-pipeline)\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fc0b6dc-46e5-468b-9adf-841d9b062e51&revisionId=1b067b23-4730-4dc1-95aa-0bfc78b0a6ce&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "0.9.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0fc0b6dc-46e5-468b-9adf-841d9b062e51&version=0.9.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0fc0b6dc-46e5-468b-9adf-841d9b062e51/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "372", - "name": "clinical_evida_text_classifier", - "description": "This model let us to classify clinical text related to colon cancer or non-\ncolon cancer texts based on ICD10 categories. The main objective is to get a\nlabel (1 or 0) depending if the input text belongs to C18 ICD category, which\ncorresponds to Colon Cancer Category. The model is based on distilBERT\ntransformer model and was trained using CodiEsp dataset. The input is a plain\ntext and the output will be a number label.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=13648d7f-5002-4fd8-98f7-27d50d2d964e&revisionId=65657060-5fac-48d5-bdf8-e75dab26ae23&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=13648d7f-5002-4fd8-98f7-27d50d2d964e&version=1.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/13648d7f-5002-4fd8-98f7-27d50d2d964e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "373", - "name": "sentiment-analysis-pipeline", - "description": "Sentiment analysis pipeline.\n\n \n\n \n\nIt takes the query text from the user and connects to the prediction model.\nThe results can then be viewed on the Prediction model's UI.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=24269432-3dcf-42a8-a04e-463ed0c59757&revisionId=a951dffc-98f8-4914-a1d5-0fa79cb76640&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=24269432-3dcf-42a8-a04e-463ed0c59757&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/24269432-3dcf-42a8-a04e-463ed0c59757/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "374", - "name": "TrainingPipeline", - "description": "**Overview:**\n\nThe training pipeline for the news_training example consists of 4 main nodes,\n\n1) News-Classifier - The core of the pipeline\n\n2) trainer-model - Facilitates the training process\n\n3) Tensorboard - Provides diagnostics preview\n\n4) News-Databroker - Starting point for data feed\n\n\n\nNote:\n\nApart from demonstrating a training scenario, this example also shows the use\nof a shared folder for common file access for pipeline nodes.\n\nEach of the 4 mentioned nodes are also available as independent models here.\n\n\n\nRepository link:\n\nPlease refer the following link for the code that represents the training\npipeline in the Eclipse Graphene platform -\n\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2d4b9dff-c822-4fb6-9b5f-06f761fcbe2c&revisionId=7e95c907-2bdf-405d-8da4-4961e785514b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "v2", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2d4b9dff-c822-4fb6-9b5f-06f761fcbe2c&version=v2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2d4b9dff-c822-4fb6-9b5f-06f761fcbe2c/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "375", - "name": "ucrsuite-dtw-pip", - "description": "## Overview\n\nThe UCR Suite DTW pipeline **ucrsuite-dtw-pip** implements fast nearest-\nneighbor retrieval under the dynamic time warping (DTW)\n\n **ucrsuite-config** data broker is a starting point for the pipeline that\nprocess files and parameters to perform subsequence search in time series.\n**ucrsuite-dtw** calculates the nearest neighbor of a times series in a larger\ntime series expressed as location and distance, using the UCR suite DTW\nalgorithm.\n\n## Usage\n\nTo use the **ucrsuite-dtw-pip** solution, you can either download it from the\nMarketplace or run it on the Playground. Once the solution is deployed in the\nPlayground, open the Web-UI of the **ucrsuite-config** model and enter the\nfollowing information:\n\n \n\n* **Data file:** The path to the file containing the long time series.\n\n* **Query file:** The path to the file containing the query time series.\n\n* **R:** The size of the warping window. The value is in range 0-1, e.g., R=0.05 means windows of size +/-5%.\n\n \n\nThen, **Run** the solution. The distance calculation will start in the\nbackground.\n\nThe result of calculation, expressed as location and distance, will be stored\nin the shared folder as a `dtw_distance.txt` file.\n\nDetailed result also available in the logs of the **ucrsuite-dtw** model in\nthe following format:\n\n\\------------------------\n\nLocation: 756562\n\nDistance: 3.77562\n\nData Scanned: 1000000\n\nTotal Execution Time: 1.584 sec\n\n \n\nPruned by LB_Kim: 67.97%\n\nPruned by LB_Keogh: 22.25%\n\nPruned by LB_Keogh2: 9.32%\n\nDTW Calculation: 0.46%\n\n \n\n\\------------------------\n\nThe `Location` field specifies the starting location of the nearest neighbor\nof the given query, of size M, in the data file. Note that location starts\nfrom 0.\n\nThe `Distance` field specifies the distance between the nearest neighbor and\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=32685df0-a7b2-4dd4-adae-7e426db9fff2&revisionId=f3592359-962b-42cc-a5c7-3c0849882775&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=32685df0-a7b2-4dd4-adae-7e426db9fff2&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/32685df0-a7b2-4dd4-adae-7e426db9fff2/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "376", - "name": "AI4IndustryPilot", - "description": "This is the assembled Solution of the [AI4Industry\nPilot](https://www.ai4europe.eu/node/106) of the [AI4EU\nproject](www.ai4europe.eu). To run the solution, please use \"Deploy to local\"\nin the [AI4EU Experiments Platform](https://acumos-int-fhg.ai4eu.eu/) on this\nsolution and follow the readme in the package or the [YouTube Tutorial (Deploy\nand Run)](https://www.youtube.com/watch?v=gM-HRMNOi4w).\n\n \n\nThis solution is the result of a collaboration between\n\n * Siemens Germany - Ivan Gocev\n * Fraunhofer IAIS - Raoul Blankertz, Nico H\u00f6ft\n * Technische Universit\u00e4t Wien - Peter Sch\u00fcller\n\n \n\n \n\n \n\n \n\nContact:\n\n[Peter Sch\u00fcller](mailto:peter@peterschueller.com)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=33b984f4-fa6e-42e3-9af7-8cb3464ae10b&revisionId=1f58db48-282b-4629-a596-c379c9550f66&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "2", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=33b984f4-fa6e-42e3-9af7-8cb3464ae10b&version=2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/33b984f4-fa6e-42e3-9af7-8cb3464ae10b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "377", - "name": "AI4IndustryPilot", - "description": "This is the assembled Solution of the [AI4Industry\nPilot](https://www.ai4europe.eu/node/106) of the [AI4EU\nproject](www.ai4europe.eu). To run the solution, please use \"Deploy to local\"\nin the [AI4EU Experiments Platform](https://acumos-int-fhg.ai4eu.eu/) on this\nsolution and follow the readme in the package or the [YouTube Tutorial (Deploy\nand Run)](https://www.youtube.com/watch?v=gM-HRMNOi4w).\n\n \n\nThis solution is the result of a collaboration between\n\n * Siemens Germany - Ivan Gocev\n * Fraunhofer IAIS - Raoul Blankertz, Nico H\u00f6ft\n * Technische Universit\u00e4t Wien - Peter Sch\u00fcller\n\n \n\n \n\n \n\n \n\nContact:\n\n[Peter Sch\u00fcller](mailto:peter@peterschueller.com)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=33b984f4-fa6e-42e3-9af7-8cb3464ae10b&revisionId=1f58db48-282b-4629-a596-c379c9550f66&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=33b984f4-fa6e-42e3-9af7-8cb3464ae10b&version=1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/33b984f4-fa6e-42e3-9af7-8cb3464ae10b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "378", - "name": "house-prices-pipeline", - "description": "**Overview description**\n\nThe House Prices Pipeline is a simple example pipeline that predicts house\nprices. The pipeline illustrates how the price development is predicted by\nentering relevant parameters that provide information about the status of a\nproperty.\n\n ** **\n\n **Use case example**\n\nAs an interested house owner, an estimate can be made based on the AI\nforecast, how much the property will increase in value or not.\n\n ** **\n\n **Usage**\n\nSelect the \"houseprice-pipeline\" solution in the Marketplace or in the Design\nStudio. It is possible to download the solution or to run it on the Playground\nfor testing purposes. When the solution is deployed in the Playground, select\nthe Web-UI of the databroker and fill in the parameters. Then go back to the\nPlayground and run the solution once and open the Web-UI (interface) of the\nmodel. In the second interface you will get the prediction based on your\ninput.\n\n\n\n **Support**\n\nThe solution is part of the tutorials with developer documentation and source\ncode available. For further construction feel free to reach out to the AI.Lab\nteam ai-lab@iais.fraunhofer.de or directly with the developer of the\ntechnology. The developer teams are generally open for feedback and happy\nabout co-creation opportunities.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=42bdc41c-6144-4c7b-88b6-4509999bff6d&revisionId=ec4a4a98-d37a-49c5-aaa1-97437d8a5a31&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=42bdc41c-6144-4c7b-88b6-4509999bff6d&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/42bdc41c-6144-4c7b-88b6-4509999bff6d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "379", - "name": "Sudoku-Tutorial-Stream", - "description": "This is the **streaming** version of the deployable **Solution** of the\n**AI4EU Experiments Sudoku Hello World**!\n\nIt is a Proof of Concept for a Sudoku design assistant based on ASP, gRPC, and\nProtobuf, deployable in the AI4EU Experiments Platform.\n\nThe Git repository holding this component of the Hello World is publicly\navailable here: \n\nA Tutorial video about this \"Sudoku Hello World\" can be found here:\n\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=516d1afa-44ae-4315-be0a-88232698778d&revisionId=72489923-f34e-454a-85ef-2a0b8a54ed54&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=516d1afa-44ae-4315-be0a-88232698778d&version=1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/516d1afa-44ae-4315-be0a-88232698778d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "380", - "name": "Hubeau_Piezo_Stations", - "description": "This is an example of solution to access data of the French groundwater level\nstations (piezometer sensor).\n\nData from the \"Piezometry\" API come from the ADES portal (national portal for\naccess to French groundwater data). They relate to piezometric measurements\n(water level in groundwater tables), throughout France, from all the partners\nof the water information system (see metadata).\n\n \n\nThe updates are integrated daily into the API.\n\n \n\nData is expressed\n\n \n\nin NGF meters for levels (or ratings);\n\nin meters in relation to the measurement mark for the depths.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5a56cb42-bfc5-48c6-a92b-92bb06a2b308&revisionId=780ab7bd-c541-4e36-9493-f80dcd67f743&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5a56cb42-bfc5-48c6-a92b-92bb06a2b308&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5a56cb42-bfc5-48c6-a92b-92bb06a2b308/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "381", - "name": "Iris_Pipeline", - "description": "Iris Pipeline: Made use of generic data broker to connect to iris dataset.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5b367470-e405-44de-b930-4c1e5f3e7161&revisionId=8b2b253f-3bd1-4719-8d0a-9f1084bf15bf&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5b367470-e405-44de-b930-4c1e5f3e7161&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5b367470-e405-44de-b930-4c1e5f3e7161/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "382", - "name": "ner-pipeline", - "description": "This is the ner-pipeline, which represents a deep learning Entity Recognizer\nin German.\n\nAfter successfully deploying ner-pipeline in the KI.NRW Playground, submit the\ndesired text via ner-databroker's Web UI first (1), then RUN the pipeline (2)\nand go to the Web UI of the ner-model (3). You will see a list of processed\ntexts, with the most recent provided text on top of the list.\n\n \n\nFor each new NER request to the deployed ner-pipeline, repeat the steps from 1\nto 3.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=612a819c-66fe-4ac4-86ae-b04e95ef4624&revisionId=a63bc9db-1691-45ca-a022-98e89ff43fd5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=612a819c-66fe-4ac4-86ae-b04e95ef4624&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/612a819c-66fe-4ac4-86ae-b04e95ef4624/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "383", - "name": "advice-inference-pipeline", - "description": "The process is divided into two independent workflows, the first one is the\nprediction, and includes the advice-img-databroker, advice-road-crop and\nadvice-yolo nodes, which will perform the whole process of label prediction.\nOn the other hand, the advice-label-assitant node allows the user to perform\nthe relabelling task while the inference process is performed in the\nbackground\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=659ee5a9-0fbe-4676-8b1f-bb27d8379c30&revisionId=bae9c467-8208-47cc-b46f-ba6c97e9930d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "st3", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=659ee5a9-0fbe-4676-8b1f-bb27d8379c30&version=st3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/659ee5a9-0fbe-4676-8b1f-bb27d8379c30/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "384", - "name": "ADVICE", - "description": "**A** I-base **D** predicti **V** e road ma **I** ntenan **C** e for a safer\n**E** urope (ADVICE) consist of a two stages pipeline for pothole detection,\npothole size estimation and pothole formation forecasting. The pipeline is\nexpected to evolve into a hybrid solution of edge and cloud computing.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6a58218e-ae25-446e-96b0-ebbb954f76e9&revisionId=5487352a-0934-465d-a9bd-feb927033a82&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "0.0.1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6a58218e-ae25-446e-96b0-ebbb954f76e9&version=0.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6a58218e-ae25-446e-96b0-ebbb954f76e9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "385", - "name": "Hubeau_Piezo_Chroniques", - "description": "This is an example of **solution** to access data of the French groundwater\nlevel observations timeseries (from piezometer sensor).\n\nData from the \"Piezometry\" API come from the ADES portal (national portal for\naccess to French groundwater data). They relate to piezometric measurements\n(water level in groundwater tables), throughout France, from all the partners\nof the water information system (see metadata).\n\n \n\nThe updates are integrated daily into the API.\n\n \n\nData is expressed\n\n \n\nin NGF meters for levels (or ratings);\n\nin meters in relation to the measurement mark for the depths.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7694139a-dabf-4aa3-98ba-40ffe4c5fcad&revisionId=19527676-2736-419a-be52-0fa6895b2c50&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7694139a-dabf-4aa3-98ba-40ffe4c5fcad&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7694139a-dabf-4aa3-98ba-40ffe4c5fcad/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "386", - "name": "AI_REGIO_NLP_DSS", - "description": "AI Regio Pipeline structured to receive natural language text from a mic\nclient over internet, transforming audio into text and using the produced text\nto help an operator in manufacturing domain.\n\nNLP is coupled with a self-learning DSS system that updates probability tables\nbased on past answers given by the operator. It is able to understand short\nanswers, like yes / no / don't know. NLP module, instead, maps a full sentence\ninto well-know problem, allowing the system to ask the right first question\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8556ecaf-35ef-4b40-91bb-699165f89d71&revisionId=41386cb6-d281-429a-9415-b9c20c0cc9cb&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8556ecaf-35ef-4b40-91bb-699165f89d71&version=1.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8556ecaf-35ef-4b40-91bb-699165f89d71/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "387", - "name": "AI_REGIO_NLP_DSS", - "description": "AI Regio Pipeline structured to receive natural language text from a mic\nclient over internet, transforming audio into text and using the produced text\nto help an operator in manufacturing domain.\n\nNLP is coupled with a self-learning DSS system that updates probability tables\nbased on past answers given by the operator. It is able to understand short\nanswers, like yes / no / don't know. NLP module, instead, maps a full sentence\ninto well-know problem, allowing the system to ask the right first question\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8556ecaf-35ef-4b40-91bb-699165f89d71&revisionId=41386cb6-d281-429a-9415-b9c20c0cc9cb&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8556ecaf-35ef-4b40-91bb-699165f89d71&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8556ecaf-35ef-4b40-91bb-699165f89d71/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "388", - "name": "Video-Pipeline", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&revisionId=e91bdfc4-0c68-464c-831f-4970ab155386&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "V1.1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&version=V1.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/92a8e46f-d80a-4e68-a485-5ef5a74efa17/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "389", - "name": "Video-Pipeline", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&revisionId=e91bdfc4-0c68-464c-831f-4970ab155386&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "V1.3", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&version=V1.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/92a8e46f-d80a-4e68-a485-5ef5a74efa17/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "390", - "name": "Video-Pipeline", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&revisionId=e91bdfc4-0c68-464c-831f-4970ab155386&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "V1.2", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=92a8e46f-d80a-4e68-a485-5ef5a74efa17&version=V1.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/92a8e46f-d80a-4e68-a485-5ef5a74efa17/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "391", - "name": "Standard_STA_Flow", - "description": "This is an example of **solution** exploiting the generic connector for the\nSensorThings API. This connector allows to connect to any SensorThings API in\nthe world and therefore potentially recover data on any domain. For example,\nthis would facilitate the retrieval of public Covid19 data, harvested from\nvarious sources including Johns Hopkins and RKI, or from near-real-time air\nquality across Europe, from both national sources (harvested from AT SOS and\nWFS) and Europe (EEA).\n\nTo illustrate the potential uses of these different SensorThings API (with a\nsingle connector), one can take a look at these different applications: a\nvisualisation tool[[1]](about:blank) bringing together French and German flow\ndata, a covid-19 dashboard[[2]](about:blank) and the [Windy Web\nsite](https://www.windy.com/fr/-NO2-no2?camsEu,no2,47.905,1.908,5)[[3]](about:blank)\nfocused on the weather forecast.\n\n \n\n[[1]](about:blank) \n\n \n\n[[2]](about:blank) \n\n \n\n[[3]](about:blank)\n\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a5a36ff2-f9f7-4272-abde-b81cf4cbbb80&revisionId=8caf7a53-d01e-4ea7-8c43-fc5dc27fcbc3&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a5a36ff2-f9f7-4272-abde-b81cf4cbbb80&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a5a36ff2-f9f7-4272-abde-b81cf4cbbb80/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "392", - "name": "AI4Media Demo", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a87cb119-168c-45b0-9a3e-6963396c1acf&revisionId=ca6125ff-b507-4c9a-b223-5440316a15d4&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a87cb119-168c-45b0-9a3e-6963396c1acf&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a87cb119-168c-45b0-9a3e-6963396c1acf/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "393", - "name": "AI4Media Demo", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a87cb119-168c-45b0-9a3e-6963396c1acf&revisionId=ca6125ff-b507-4c9a-b223-5440316a15d4&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a87cb119-168c-45b0-9a3e-6963396c1acf&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a87cb119-168c-45b0-9a3e-6963396c1acf/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "394", - "name": "aiplan4eu-demo", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ad53fc7d-7110-4b45-a4ed-b79324fa44e1&revisionId=3a9591b8-a644-4343-83ae-a765e88b7109&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "v1.1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ad53fc7d-7110-4b45-a4ed-b79324fa44e1&version=v1.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ad53fc7d-7110-4b45-a4ed-b79324fa44e1/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "395", - "name": "aiplan4eu-demo", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ad53fc7d-7110-4b45-a4ed-b79324fa44e1&revisionId=3a9591b8-a644-4343-83ae-a765e88b7109&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "v1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ad53fc7d-7110-4b45-a4ed-b79324fa44e1&version=v1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ad53fc7d-7110-4b45-a4ed-b79324fa44e1/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "396", - "name": "ObjectDetectionP", - "description": "This is a simple pipeline wrapping the object detection model.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b08401ec-f24a-452b-bf42-c57cb91b21e8&revisionId=490b5ed8-b498-4ddb-a99b-0cb1662f533c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b08401ec-f24a-452b-bf42-c57cb91b21e8&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b08401ec-f24a-452b-bf42-c57cb91b21e8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "397", - "name": "aqpredvisualize", - "description": "Air Quality Prediction and Visualization Pipeline for the area of Trondheim.\nThe pipeline consists of 3 modules, a databroker module, a prediction module\nbased on a pre-trained machine learning model and a visualization module with\na web interface. More information and instructions can be found in the github\nrepository: \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c4d4ec8b-1e43-4bf7-941e-8d81612cb71e&revisionId=3d63a545-e260-46a1-a743-298902fb2818&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c4d4ec8b-1e43-4bf7-941e-8d81612cb71e&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c4d4ec8b-1e43-4bf7-941e-8d81612cb71e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "398", - "name": "ObjectDetectionPipeline", - "description": "This is a simple pipeline wrapping the object detection model. The underlying\nobject detection model in this pipeline is a public image.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cd303086-6599-41cf-b89b-66f31f7c4f44&revisionId=0d4d73db-e069-447f-949f-2eb1bc9e98e5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=cd303086-6599-41cf-b89b-66f31f7c4f44&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/cd303086-6599-41cf-b89b-66f31f7c4f44/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "399", - "name": "Hubeau_Hydro_Observations", - "description": "Example of **solution** to retrieve French hydrology observations data using\nthe \"Grpc hydro hubeau\" component.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d95fa687-97d9-45b4-bda6-cadddebb6343&revisionId=1ee16b73-9874-413d-ba66-33502c2bb689&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d95fa687-97d9-45b4-bda6-cadddebb6343&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d95fa687-97d9-45b4-bda6-cadddebb6343/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "400", - "name": "audio-pipeline", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&revisionId=8b7d0433-56c8-48bf-8654-3ac87eb630e9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "4.1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&version=4.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/df1f1286-0071-4df8-afd7-fe5dd20f9cd4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "401", - "name": "audio-pipeline", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&revisionId=8b7d0433-56c8-48bf-8654-3ac87eb630e9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "2.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&version=2.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/df1f1286-0071-4df8-afd7-fe5dd20f9cd4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "402", - "name": "audio-pipeline", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&revisionId=8b7d0433-56c8-48bf-8654-3ac87eb630e9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "3.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&version=3.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/df1f1286-0071-4df8-afd7-fe5dd20f9cd4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "403", - "name": "audio-pipeline", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&revisionId=8b7d0433-56c8-48bf-8654-3ac87eb630e9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "5.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&version=5.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/df1f1286-0071-4df8-afd7-fe5dd20f9cd4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "404", - "name": "audio-pipeline", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&revisionId=8b7d0433-56c8-48bf-8654-3ac87eb630e9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "4.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df1f1286-0071-4df8-afd7-fe5dd20f9cd4&version=4.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/df1f1286-0071-4df8-afd7-fe5dd20f9cd4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "405", - "name": "MusicDetectionPL", - "description": "This pipeline is designed to use the MusicDetection model for the analysis of\nsingle audio files that are provided by file upload. Results of the\nMusicDetection are provided via WebUI.\n\nSince MusicDetection model is not publicly accessible, for the deployment of\nthis pipeline it is necessary to acquire access credentials from the provider\nof the MusicDetection model. NB: Access can not be provided from the publisher\nof this pipeline.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eea265e1-f1b8-4f5d-8694-299b37fc3d0d&revisionId=a44f39bb-56b2-4d5e-b72c-f36cd24a9992&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eea265e1-f1b8-4f5d-8694-299b37fc3d0d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/eea265e1-f1b8-4f5d-8694-299b37fc3d0d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "406", - "name": "Hubeau_Hydro_Stations", - "description": "Example of **solution** to retrieve French hydrology stations data using the\n\"Grpc hydro hubeau\" component.\n\nThis service makes it possible to query the stations in the French hydrometric\nreference system. A station can carry height and / or flow observations\n(directly measured or calculated from a rating curve).\n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f476f311-e38c-4c60-a550-605a8b7c5af0&revisionId=4ae0dfe8-95c8-47ae-877d-b9247a249e77&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f476f311-e38c-4c60-a550-605a8b7c5af0&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f476f311-e38c-4c60-a550-605a8b7c5af0/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "407", - "name": "ucrsuite-ed-pip", - "description": "## Overview\n\nThe UCR Suite ED pipeline **ucrsuite-ed-pip** implements fast nearest-neighbor\nretrieval under the Euclidean Distance (ED)\n\n **ucrsuite-config** data broker is a starting point for the pipeline that\nprocess files and parameters to perform subsequence search in time series.\n**ucrsuite-ed** calculates the nearest neighbor of a times series in a larger\ntime series expressed as location and distance, using the UCR suite ED\nalgorithm.\n\n \n\n## Usage\n\nTo use the **ucrsuite-ed-pip** solution, you can either download it from the\nMarketplace or run it on the Playground. Once the solution is deployed in the\nPlayground, open the Web-UI of the **ucrsuite-config** model and enter the\nfollowing information:\n\n* **Data file:** The path to the file containing the long time series.\n\n* **Query file:** The path to the file containing the query time series.\n\nThen, **Run** the solution. The distance calculation will start in the\nbackground.\n\nThe result of calculation, expressed as location and distance, will be stored\nin the shared folder as a `ed_distance.txt` file.\n\nDetailed result also available in the logs of the **ucrsuite-ed** model in the\nfollowing format:\n\n\\----------------------------------------------------\n\nLocation : 347236\n\nDistance : 7.03705\n\nData Scanned : 1000000\n\nTotal Execution Time : 1.029 sec\n\n\\----------------------------------------------------\n\nThe `Location` field specifies the starting location of the nearest neighbor\nof the given query, of size M, in the data file. Note that location starts\nfrom 0.\n\nThe `Distance` field specifies the distance between the nearest neighbor and\nthe query.\n\nThe `Data Scanned` field specifies the number of data points in the input data\nfile.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f87058b4-a1f1-4e0e-a944-ece53adcf8b3&revisionId=20402b92-1b2e-4547-b1e0-e2866c439645&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f87058b4-a1f1-4e0e-a944-ece53adcf8b3&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f87058b4-a1f1-4e0e-a944-ece53adcf8b3/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "408", - "name": "RecognaizePipeline", - "description": "**The RecognAIze** pipeline coverts images to text including layout detection\nand table handling and consists of our microservices:\n\nDatabroker with UI, Preprocessing, Segmentation and OCR.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fca70f4f-d6b7-4fed-a98a-8800b7831ef8&revisionId=c7b3cfaf-7960-472b-91e3-03b930dca96a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.1.1", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fca70f4f-d6b7-4fed-a98a-8800b7831ef8&version=1.1.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/fca70f4f-d6b7-4fed-a98a-8800b7831ef8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - }, - { - "platform": "ai4experiments", - "platform_identifier": "409", - "name": "ai4eu-sec-pilot", - "description": "This simulation can detect threads in network traffic. To train the model\nconnect the model with the training data interface from the databroker\ncontainer. The train data are made with benign traffic and does not contain\nany fraud because the model should lern how benign traffic looks like.\n\nTo predict traffic connect the prediction data output from the databroker\ncontainer with the prediction interface. The traffic to predict includes\nbenign and fraud traffic. The output will be a number between 0 and 1. You can\nset the threshold according to your data. The best threshold cna be found in\nthe model validation folder insider the model container.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ff236ff3-f08e-40d1-9b76-a42f7e792b96&revisionId=bd6920a5-6998-470b-a4d0-cb0ed9ea73ec&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "0.0.2", - "pid": "", - "experimental_workflow": "", - "execution_settings": "", - "reproducibility_explanation": "", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "alternate_name": [], - "application_area": [], - "badge": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ff236ff3-f08e-40d1-9b76-a42f7e792b96&version=0.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ff236ff3-f08e-40d1-9b76-a42f7e792b96/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "research_area": [], - "scientific_domain": [] - } -] + { + "platform": "example", + "platform_identifier": "1", + "name": "The name of this experiment", + "description": "A description.", + "same_as": "https://www.example.com/resource/this_resource", + "date_published": "2022-01-01T15:15:00.000", + "version": "1.1.0", + "pid": "https://doi.org/10.1000/182", + "experimental_workflow": "1) Load the dataset 2) run preprocessing code found in ... 3) run the model on the data.", + "execution_settings": "string", + "reproducibility_explanation": "string", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [ + "alias 1", + "alias 2" + ], + "application_area": [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "badge": [ + "ACM Artifacts Evaluated - Reusable" + ], + "citation": [], + "contact": [], + "creator": [], + "distribution": [ + { + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/experiment.zip", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "application/zip", + "name": "Name of this file.", + "technology_readiness_level": 1, + "installation_script": "./install.sh", + "installation": "Build the Dockerfile", + "installation_time_milliseconds": 100, + "deployment_script": "./run.sh", + "deployment": "You can run the run.py file using python3. See README.md for required arguments.", + "deployment_time_milliseconds": 100, + "os_requirement": "Windows 11.", + "dependency": "Python packages as listed in requirements.txt.", + "hardware_requirement": "4GB RAM; 100MB storage; 1GHz processor with 8 cores." + } + ], + "has_part": [], + "industrial_sector": [ + "Finance", + "eCommerce", + "Healthcare" + ], + "is_part_of": [], + "keyword": [ + "keyword1", + "keyword2" + ], + "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", + "media": [ + { + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/image.jpeg", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "image/jpeg", + "name": "Name of this file." + } + ], + "note": [ + { + "value": "A brief record of points or ideas about this AI resource." + } + ], + "research_area": [ + "Explainable AI", + "Physical AI" + ], + "scientific_domain": [ + "Anomaly Detection", + "Voice Recognition", + "Computer Vision." + ] + } +] \ No newline at end of file diff --git a/src/connectors/example/resources/resource/ml_models.json b/src/connectors/example/resources/resource/ml_models.json index 8e239e12..2cb871df 100644 --- a/src/connectors/example/resources/resource/ml_models.json +++ b/src/connectors/example/resources/resource/ml_models.json @@ -1,8086 +1,92 @@ [ - { - "platform": "ai4experiments", - "platform_identifier": "1", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AIM4PS", - "description": "AIM4PS employs state-of-the-art AI methodologies for intaking and processing\npublic procurement data, taking as a reference the specific production- and\nproduct-related information collected from manufacturing EISs.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0146cfdb-7853-48aa-b4b2-76183a3f3c14&revisionId=7c089fc1-a981-4c93-9137-dfef1bc19bd8&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0146cfdb-7853-48aa-b4b2-76183a3f3c14&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0146cfdb-7853-48aa-b4b2-76183a3f3c14/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "2", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AudioSpeechToTextGerman", - "description": "This model converts an audio segment to German text.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=015a16fd-8fea-495a-ae94-1fc92384d2b3&revisionId=0e5ad85f-29df-4d60-9b7d-178c1382abe0&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=015a16fd-8fea-495a-ae94-1fc92384d2b3&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/015a16fd-8fea-495a-ae94-1fc92384d2b3/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "3", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Doc2Answer", - "description": "The model implements 2 main tasks of the AI4EU call. It is able to parse and\nextract information from 2 type of INPS documents: \"O7\" and \"SocialCard\".\n\nThe first type it locates cells and extract the content as text (i.e. numbers,\ndates).\n\nThe second type locates stamps and classify them.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=01742dd8-cc32-4332-93ca-a181be3853e7&revisionId=d5cab0b1-4827-4b75-b270-8b11a2e08b99&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=01742dd8-cc32-4332-93ca-a181be3853e7&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/01742dd8-cc32-4332-93ca-a181be3853e7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "4", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "CODE_V2", - "description": "The main objective of the challenge is to develop an method for automatic\nclassification of clinical narratives to ICD-10 codes.\n\nOur approach for semantic text classification has three core components: (1)\nFormalization of domain knowledge of medical information and techniques of\nsemantic data fusion; (2) Multilingual NLP techniques for document\npreprocessing including all or some of: data cleaning, data normalization,\ndata augmentation, transitive connections analysis, data balancing, expert\u2019s\nheuristics. For medical data based on our expertise on DOID, ICD-O, ICD-9,\nICD-10, MESH, MONDO, UMLS, Orphanet, SNOMED classification, data augmentation\nincluding typos simulation and synonym replacement will be used; (3)\nMultilingual deep learning methods for supervised classification of disease\ninto its corresponding class from the ICD-10. We are fine tuning pretrained\nBERT family models (bioBERT, clinicalBERT, MultilingualBERT, PubMedBERT, etc.)\nwith domain specific terminology for the target language. Additional corpora\ngenerated from public documents and linked open data is used for fine-tuning\nof the deep learning classification model for the specific ICD-10\nclassification.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=01d95f4f-3bb4-4807-b6af-eb2d35d352cf&revisionId=2dc164ec-b92a-4413-a78e-70efc6643bc5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=01d95f4f-3bb4-4807-b6af-eb2d35d352cf&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/01d95f4f-3bb4-4807-b6af-eb2d35d352cf/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "10", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "i-nergy-load-forecasting-nbeats", - "description": "This is a time series forecasting service for predicting of the Portuguese\naggregated electricity load series (15-min resolution, 24hr forecasting\nhorizon). This service is based on an NBEATS model trained in the context of\n[I-NERGY](https://www.i-nergy.eu/) project. The model has been trained on the\nPortuguese timeseries from 2013 to 2019 validated on year 2020 and tested on\n2021 with Mean Absolute Percentage Error (MAPE) = 2.35%. No time covariates or\nexternal variables have been included in the model. The lookback window of the\nmodel is 10 days. The model can be used to produce forecasts for periods from\n2022 and later for Portugal. Other transmission system operators may use it as\nwell, however expecting lower performance in general. No external variables\nhave been considered. Please keep in mind that the effects of the pandemic on\nnational loads can negatively affect the model\u2019s performance. For more\ninformation please go to ReadME.md in the Documents section.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0440778a-74e8-4d7f-950f-e6e1ce6bc29e&revisionId=3622c8ba-999d-4ce3-b711-b2bf4b43fa88&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0440778a-74e8-4d7f-950f-e6e1ce6bc29e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0440778a-74e8-4d7f-950f-e6e1ce6bc29e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "14", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "road-damage-detector", - "description": "# AI4EU Pluto two-stage detector\n\n \n\nThe model is a two stage detector based on\n[YOLOv5](https://github.com/ultralytics/yolov5).\n\n \n\nThe object detector will detect objects of the following classes:\n\n \\- Rutting\n\n \\- Pothole\n\n \\- Manhole\n\n \\- Gully\n\n \\- EdgeDeterioration\n\n \\- Cracking\n\n \n\nThe second stage classifier will, for `Potholes`, also classify the depth as 1\nof 4 discrete values:\n\n \n\n \\- lt2\n\n \\- 2to5\n\n \\- 5to10\n\n \\- gt10\n\n \n\n \n\n# Example client\n\n \n\n```python\n\nimport os\n\nimport grpc\n\nimport model_pb2\n\nimport model_pb2_grpc\n\n \n\n## Setup\n\nport_addr = 'localhost:8061'\n\n \n\n# open a gRPC channel\n\nchannel_opt = [('grpc.max_send_message_length', 512 * 1024 * 1024),\n\n ('grpc.max_receive_message_length', 512 * 1024 * 1024)]\n\nchannel = grpc.insecure_channel(port_addr, options = channel_opt)\n\nstub = model_pb2_grpc.PredictStub(channel)\n\n \n\n \n\n## Make prediction\n\nfilepath = \"assets/test.png\"\n\n \n\nwith open(filepath, 'rb') as f:\n\n content = f.read()\n\n \n\nresponsePrediction = stub.make_prediction(requestPrediction)\n\n \n\n \n\n## Interpret result\n\nfor annotation in responsePrediction.annotations:\n\n print(f\"Detections: {annotation}\")\n\n \n\n```\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=075252b1-3ff7-424d-ab6d-19ca2d90f0f0&revisionId=8297b2b4-2260-42ec-bb89-072918b7c843&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=075252b1-3ff7-424d-ab6d-19ca2d90f0f0&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/075252b1-3ff7-424d-ab6d-19ca2d90f0f0/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "18", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "iOCR", - "description": " \n\n**iOCR** can easily convert scanned or photographed documents into digital\ntext using its underlying **Deep Learning** technologies in order to\nautomatically localize and recognize the text inside of these images.\n\nWith our innovative product you will reduce the amount of effort required to\ndigitize your data as iOCR ensures the data is not lost and correctly\ndigitized. The need for specialized scanners or high manual effort will\ndecrease as iOCR aims to improve and scale with your business returning the\ncosts required for this kind of effort back to you, offering you more\nopportunities to extend your company.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=08be83e3-f261-428d-846a-99f2fb0d46fb&revisionId=e74c2c19-130d-451f-a095-86c01e6739a6&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=08be83e3-f261-428d-846a-99f2fb0d46fb&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/08be83e3-f261-428d-846a-99f2fb0d46fb/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "19", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Text2ImageSearch", - "description": "This model implements a text-to-image search engine: it searches images in a\npublicly available database (MIRFlickr100K) using natural language sentences\nas a query.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=090281fe-4884-4ff8-80e1-fb87a41aa327&revisionId=cbe08f0a-9266-498a-a4ca-ab4f1edf5462&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=090281fe-4884-4ff8-80e1-fb87a41aa327&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/090281fe-4884-4ff8-80e1-fb87a41aa327/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "28", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AWDrugsModel", - "description": " \n\n \n\n \n\n \n\n \n\nThe first draft of the drug decision support system (ANN model) determines a\nstatus of candidate drug molecules as approved or withdrawn categories by\nmeans of molecular descriptors. The dataset has 44 features for analyzing the\ndrugs and contains 220 drugs having 110 approved and 110 withdrawn drugs. We\ncalculated molecular descriptors (760 descriptors) for all molecules in the\ndrug datasets and selected the most effective attributes (44 features) to\nreduce the dimensionality of data on the drug dataset.\n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=0fd660e7-7a8c-4616-98af-75a866065b40&revisionId=1c0d6691-fc28-4fd4-bb27-8ad6c3b69bf6&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=0fd660e7-7a8c-4616-98af-75a866065b40&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/0fd660e7-7a8c-4616-98af-75a866065b40/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "32", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "FH_SWF_SAT", - "description": "# Documentation of the concept for Reinforcement learning based machine tool\ncontrol\n\nThe following description is part of the submitted solution approach of the\nAI4EU Challenge and explains the interrelationships of the submitted\ndocuments. As part of the challenge, sample data was generated, which are\nsimilar to the described input and output data. Some of the approaches\ndeveloped contained detailed explanations and implementations as well as\nsecret solutions that were re-elaborated as pseudo-code. If our solution is\namong the finalists, the secret solutions will be explained further.\n\n## Structuring the system solution as Docker container\n\nAn important aspect of the challenge is modularity and flexibility. For this\nreason, the developed solution approach is implemented as Docker container.\nThe developed solution is connected via port 8061 with 8 inputs (float -\nmachine parameter) and generates 1 output (float - threshold). The designed\nsolution based on an artificial intelligence reinforcement learner. The\ndeveloped solution is a reinforcement agent. These generates on the basis of\nthe trained knowledge an action (threshold) which is given as parameter to the\nenvironment (rib and surface machine). From the environment the current reward\n(KPI) and state (8 Inputs) are feedback to the agent (developed solution).\n\n## Included documents in the Docker container\n\nFor the realisation of the solution approach different python files and data\nprotocols are realised. An overview of the generated files can be seen in the\nfollowing listing.\n\nactor.pth - data.csv - network.py - README.md - define_threshold.py -\nlicense.jason - model.proto - model_pb2.py - model_pb2_grpc.py -\nrequirements.txt - threshold_generator_client.py -\nthreshold_generator_server.py\n\nThe python file network.", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=108c1bd1-a0f7-4ada-8d39-a72b1b56fe2e&revisionId=fc31a182-5bfd-48fc-b5ea-a55034a70c41&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=108c1bd1-a0f7-4ada-8d39-a72b1b56fe2e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/108c1bd1-a0f7-4ada-8d39-a72b1b56fe2e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "35", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "DSS4TB-IMECH", - "description": "A modification on the module \"AI REGIO DSS4TB\" an intelligent troubleshooting\nsystem that able to identify the component that is most probably faulty after\na series of closed-ended questions answered by the operator.\n\nThe system works on a probabilistic model that selects the most suitable\nquestion to ask the operator on the basis of:\n\n 1. Information matrix established by an expert\n 2. Previous answers\n 3. Description given by the user (interpreted by the NLP-IMECH module)\n\nOperator knowledge is made available to the algorithm in the form of csv files\nthat contain dynamic information matrices that are updated after every\ntroubleshooting session. The use of these files means the system can quickly\nbe adapted to a different contexts by simply that switching out the\ninformation matrix.\n\nResponding to the questions asked with YES, NO or DON'T KNOW the operator can\nquickly arrive at the identification of the fault. The system demonstrates a\nlevel of resilience in its ability to arrive at the correct diagnosis despite\na some errors and uncertainty in the answers given.\n\nThe module is intended for use in conjunction with the following AI4EU\nmodules:\n\n 1. NLP-IMECH\n 2. AudioFileBroker\n 3. ConvertAudioToTextEng\n 4. FileViewer\n 5. SharedFolderProvider\n 6. 4 x FileUploadDataBroker\n\n![](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAqMAAAFcCAYAAAD8s7c5AAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAMsAAADLAAShkWtsAAP+lSURBVHhe7J0HnBRF9sfXeN6dd15Wz/uf3nl35gwqgoo5iygomBVFEBMIGEEJgoqYQTGgiOScc84555xzZoFlgffvby2F5Wzv7Guo2Vl3p/z83KH7dXX96r3q+U2lTjsQJEkl2b17t2zcuFG2bdsWF5s3b5ZNmzaFnnOBHQg754K8tmzZEnrOBWXbunVr6DkXGg7ko+FAubR2Prli55Orxk7L1bdfNVwtB42dhkOyuMJBa/dL5xqlTjRcfcYwdhquifC/L78mwv8aDlqu2OVnrnntV6Dl4DPWtVyj+F/DNVHteseOHQcVS8FK6NC0lBjNSikxmh3aBo+dT67Y+eQatcHHg2+/arhaDho", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=12db8681-b3e9-4868-ac11-3475fbe6ffb8&revisionId=a0004d6a-28d4-4775-a9f7-be9fd05cfdc2&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=12db8681-b3e9-4868-ac11-3475fbe6ffb8&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/12db8681-b3e9-4868-ac11-3475fbe6ffb8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "36", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "NewsTrainer", - "description": "**Overview:**\n\nThe NewsTrainer module facilitates the training process by specifying the\nclassifier node with the required hyperparameters. The number of epochs, batch\nsize, validation ratio and model filename are the different parameters\navailable in the web-UI.\n\n **Repository link:**\n\nPlease refer the following link for the code that represents the trainer\nmodule in the Eclipse Graphene platform -\n\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=13416c8e-ae15-488a-b1f3-db33b799eb1a&revisionId=cda82f21-469f-4101-a82f-d1c34b819b74&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=13416c8e-ae15-488a-b1f3-db33b799eb1a&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/13416c8e-ae15-488a-b1f3-db33b799eb1a/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "37", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Fraunhofer-uncertainty-metrics-for-classification-tasks", - "description": "# Uncertainty Metric for Classification tasks\n\n \n\nImplements uncertainty estimation metrics for classification tasks.\n\n## Input\n\nThe input to the metric computation module is a prediction from multiple\nforward passes of Monte Carlo Dropout or the models in an ensemble. The\nprediction is expected as a single data point, so the shape is N x C where N\nis the number of forward passes, and C is the number of classes.\n\n## Metrics\n\nThe metrics used to quantify uncertainty in the predictions are entropy,\nmutual information and variance.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=13f5a196-0775-4730-88a0-a62f911ddb3a&revisionId=a549ad83-c0b9-48cb-a43e-0c5be7f4f9fd&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=13f5a196-0775-4730-88a0-a62f911ddb3a&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/13f5a196-0775-4730-88a0-a62f911ddb3a/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "43", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "tensorflow-iris-model", - "description": "Classify Iris Blossoms with a tensorflow model\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=15a3f457-401e-466e-9b85-1e25d8ae0b69&revisionId=42f38ede-7feb-4ebe-ba7c-2a6912aad332&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=15a3f457-401e-466e-9b85-1e25d8ae0b69&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/15a3f457-401e-466e-9b85-1e25d8ae0b69/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "45", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "INERGY_Cold_Decision", - "description": "This service is based on a decision support system (DSS) implemented in\ncontext of I-NERGY project. The overall vision of I-NERGY is to promote AI in\nthe energy sector by delivering:\n\n * An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.\n * Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.\n\nThis is a DSS service for for help in the decision on which energy source (for\ncold generation) use in a Spanish Hospital in hourly basis. The data was\nprovided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital\ncomplex have a district heating network. The layout of this district heating\nnetwork is a ring system composed by two independent rings for heating and\ncooling. This ring just provides energy for heating and Domestic Hot Water\n(DHW).\n\nApart from being a district heating network, this system is complex due to the\ndifferent production sources used for heating and cooling. In this facility\nheat, cold and steam are produced by using different sources.\n\nFor more information on how to use the service, please see Documents section.\n\n _The project leading to this service has received funding from the European\nUnion\u2019s Horizon 2020 research and innovation programme under grant agreement\nNo 101016508_\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=169c308d-3451-4bb9-9fe1-84316863c18b&revisionId=68550ad2-0036-4e2d-a29c-99dc940cb235&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=169c308d-3451-4bb9-9fe1-84316863c18b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/169c308d-3451-4bb9-9fe1-84316863c18b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "46", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "i-nergy-load-forecasting-ren-hourly-lstm-2018-2019", - "description": "This is a forecasting service for predicting the aggregated hourly net\nelectrical load of the Portuguese transmission system operator (REN). The core\nof the service is a totally recurrent LSTM deep neural network. The model has\nbeen trained on the REN load time series for the years 2018 and 2019 (except\nDecember 2019). The service is served as a docker container and a client\nscript is also provided to help the user form their inference requests. The\nmodel is totally configurable in terms of:\n\n 1. **Provided ground truth data points:** The client can update the existing model with the desired length of new data points that have been observed. The provided input should follow the format of the csv file history_sample.csv.\n 2. **Forecast horizons:** The client can request a forecast horizon of their preference. It should be noted that large forecast horizons lead to worse results due to the error propagation caused by the LSTM recurrence.\n\nThis model has been developed within [I-NERGY EU](https://i-nergy.eu/)\nproject.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=16d39167-1650-487a-ab25-29eee8eb838f&revisionId=b2c1b964-aab1-4002-bbe7-d4d5ae438e61&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=16d39167-1650-487a-ab25-29eee8eb838f&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/16d39167-1650-487a-ab25-29eee8eb838f/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "48", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AI4agriNDVI", - "description": "AI4AGRI model for correcting NDVI information from satellite images\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=178e0fdf-05ec-42ad-9e0a-da5f147de7fd&revisionId=af75387e-635b-46d1-a442-a47b993b061b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=178e0fdf-05ec-42ad-9e0a-da5f147de7fd&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/178e0fdf-05ec-42ad-9e0a-da5f147de7fd/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "49", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "SAPI_wheat_model_v0", - "description": "SAPI machine learning regression model based on satellite productivity maps is\na powerful tool for predicting crop yields in agriculture. By utilizing\nadvanced algorithms, this model analyzes data from satellite imagery to\nestimate the expected yield of wheat. The output from the model is predicted\nyield for particular parcel. The model learns from past data to establish\npatterns and relationships between the satellite imagery and crop yields. It\nthen applies this knowledge to make predictions for the test parcel. This\nregression model provides a non-invasive and cost-effective method for yield\nprediction, as it eliminates the need for manual data collection or extensive\nfield visits.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=195181e4-090f-45e9-91cc-5919718ad0d9&revisionId=ac253be9-81ee-43f2-8a24-79369b10a45c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=195181e4-090f-45e9-91cc-5919718ad0d9&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/195181e4-090f-45e9-91cc-5919718ad0d9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "51", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ObjectDetection", - "description": "Detection of physical objects in still images or videos\n\n \n\nThe object detection mining service allows to detect one or more physical\nobjects to be found in images and videos.\n\n \n\nInput: Image file or video file. You can specify which frames are to be\nprocessed for a video.\n\n \n\nOutput: A set of detected objects will be returned for the image or each\nprocessed frame. For each detected object an axially parallel bounding box, an\nobject category and a rating are returned. The rating indicates the certainty\nof the model regarding the category of the identified object within a bounding\nbox.\n\nIn addition, an automatically generated ID is assigned to each detected object\nto allow the unambiguous identification of all detected objects in one media\nfile. This ID has no relation to the category of the detected Object.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&revisionId=6efaddee-cb74-4995-a8c3-9bc8e3f9c29b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/1c97e098-d7c7-4fb5-83ca-a5202efc5e90/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "52", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ObjectDetection", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&revisionId=85536789-c619-4003-87c2-868e8971a597&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/1c97e098-d7c7-4fb5-83ca-a5202efc5e90/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "53", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ObjectDetection", - "description": "Detection of physical objects in still images or videos\n\n \n\nThe object detection mining service allows to detect one or more physical\nobjects to be found in images and videos.\n\n \n\nInput: Image file or video file. You can specify which frames are to be\nprocessed for a video.\n\n \n\nOutput: A set of detected objects will be returned for the image or each\nprocessed frame. For each detected object an axially parallel bounding box, an\nobject category and a rating are returned. The rating indicates the certainty\nof the model regarding the category of the identified object within a bounding\nbox.\n\nIn addition, an automatically generated ID is assigned to each detected object\nto allow the unambiguous identification of all detected objects in one media\nfile. This ID has no relation to the category of the detected Object.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&revisionId=f85ede77-a094-46e4-9147-fb9e595f2b91&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1c97e098-d7c7-4fb5-83ca-a5202efc5e90&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/1c97e098-d7c7-4fb5-83ca-a5202efc5e90/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "54", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "QRUL", - "description": "The model processes quality test data results and estimate the Remaining\nUseful Life (RUL) of a produced pump from the Pfeiffer company. The provided\nsolution offers 2 classification techniques estimating whether a pump is going\nto fail in the first year of operation or not, or estimating the time range\nthat the pump will fail.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=1ee174ca-e7c4-405e-8137-27611cb0b6bc&revisionId=6dc27e5f-72b7-406e-a5fb-6db99737b816&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=1ee174ca-e7c4-405e-8137-27611cb0b6bc&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/1ee174ca-e7c4-405e-8137-27611cb0b6bc/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "56", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "VideoSegmentation", - "description": "The Video Segmentation model splits the incoming video into scene segments\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=21459f4b-ed64-455b-93ae-5e345f046148&revisionId=9113a839-bfa1-470a-b4c2-7714be30a03c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=21459f4b-ed64-455b-93ae-5e345f046148&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/21459f4b-ed64-455b-93ae-5e345f046148/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "57", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "SAPI_maize_model_v0", - "description": "SAPI machine learning regression model based on satellite productivity maps is\na powerful tool for predicting crop yields in agriculture. By utilizing\nadvanced algorithms, this model analyzes data from satellite imagery to\nestimate the expected yield of maize. The output from the model is predicted\nyield for particular parcel. The model learns from past data to establish\npatterns and relationships between the satellite imagery and crop yields. It\nthen applies this knowledge to make predictions for the test parcel. This\nregression model provides a non-invasive and cost-effective method for yield\nprediction, as it eliminates the need for manual data collection or extensive\nfield visits.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=21a28a9d-bc8b-490e-85e5-e1452ad74e3e&revisionId=b11fdff7-5654-48de-bd4e-70d3f1131703&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=21a28a9d-bc8b-490e-85e5-e1452ad74e3e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/21a28a9d-bc8b-490e-85e5-e1452ad74e3e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "60", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "polaris_mep_ai", - "description": "Polaris MEP AI module is an addon for Polaris MEP, an execution planner to\noptimize production plannings using restrictions-based solvers. The new AI\nmodule adds features to predict and forecast the inputs of the planner. So\nproduction demand and resource availability can be predicted with AI and\noptimized with OR. Regression methods Linear Regressi\u00f3n, Lasso, Gradient\nBoosting, Random Forest, and K-NN are included. Autoregressive methods ARIMA,\nSARIMA, VARMA, LSTM, and Fuzzy NN are included.\n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=24f4722f-9c82-489c-b9b0-359976eb792f&revisionId=76dbff09-04b5-4ec6-af32-8a3e82b60ded&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=24f4722f-9c82-489c-b9b0-359976eb792f&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/24f4722f-9c82-489c-b9b0-359976eb792f/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "61", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "dummy-environment-clarspy", - "description": "Dummy model for 1st Call for Solutions\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2532264a-b2aa-4cf4-8a90-8eb5f0546b9f&revisionId=558d248e-bd5d-4e53-a360-8bdc95dc8cc0&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2532264a-b2aa-4cf4-8a90-8eb5f0546b9f&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2532264a-b2aa-4cf4-8a90-8eb5f0546b9f/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "63", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AI_REGIO_CUSUM_RLS_filter", - "description": "**CUSUM RLS filter** contains a change detection algorithm for multiple\nsensors, using the Recursive Least Squares (RLS) and Cumulative Sum (CUSUM)\nmethods [F. Gustafsson. _Adaptive Filtering and Change Detection_. John Willey\n& Sons, LTD 2000].\n\nAs an AI resource the \u201c _CUSUMRLSfilter_ \" asset is currently implemented as\nOpen Source Solution whose main aim is to detect abrupt changes on the\nmeasurements recorded by a set of sensors.The asset was implemented as part of\none of the experiment of the AI REGIO project, and subsequently adapted for\ngeneral use.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=259afff9-66a4-47e7-b55c-4f19b2d75b8d&revisionId=f3b61e6d-904c-48ab-9930-72eedd3eb62c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=259afff9-66a4-47e7-b55c-4f19b2d75b8d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/259afff9-66a4-47e7-b55c-4f19b2d75b8d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "65", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ner-model", - "description": "This is the ner-model component of the ner-pipeline.\n\n \n\nThrough the Web UI of the ner-model, you can access the results of the entity\nrecognition task on a given text. The most recent result will show on top of\nthe results. An entity is defined within \"|\", followed by its type and\nconfidence score in round brackets.\n\nMake sure to run ner-pipeline, instead of ner-model as a standalone component.\nAs ner-pipeline is successfully deployed, first submit the text via ner-\ndatabroker, then RUN the pipeline and go to the Web UI of the ner-model. You\nwill see a list of processed texts, with the most recent provided text on top\nof the list.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=27e777bc-2968-427c-9df5-9f5593613475&revisionId=77f58af9-73d4-48b8-9237-7c6e1d3cdb97&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=27e777bc-2968-427c-9df5-9f5593613475&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/27e777bc-2968-427c-9df5-9f5593613475/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "68", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "cnext_decision_intelligence", - "description": "The published model is a result of the AI4EU challenge \u201cDecision Intelligence\nfor Healthcare\u201d, and is focused on delivering data-driven decision support on\nthe question \u201cwhat is the next step in handling patient test/diagnoses related\nto suspected COVID infection. \n\nAs part of this challenge, we needed to validate a Machine Learning Model \u2013\npublished on the AI4EU marketplace \u2013 using GRPC (protobuf) as inference\nendpoint and docker container image as packaging model could act as a decision\nbrick and as such be plugged in into our Decision Intelligence Platform.\n\n \n\nMore information about the solution can be found in the accompanying\nAI4EU_Cnext.pdf file.\n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2b3f75d9-a480-4589-9992-457b0863b7b5&revisionId=cb074874-ee6b-458c-a825-e5d129ca4635&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.6", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2b3f75d9-a480-4589-9992-457b0863b7b5&version=1.0.6", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2b3f75d9-a480-4589-9992-457b0863b7b5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "72", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Vibration_analysis", - "description": "This model allows the analysis of vibration of rotating machines. It is based\non vibration measurements in the three spatial directions, on strategic\nmeasurement points: MDE (Motor driven end) and MNDE (Motor non driven end).\nIt allows to detect if a machine presents a faulty behaviour and to establish\nthe cause of this problem and to evaluate its intensity on a scale from 1 to\n3.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4&revisionId=42a20377-3b6f-41c5-88b2-76b07993aa0b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "73", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Vibration_analysis", - "description": "This model allows from acceleration mesurements in the three directions on the\nmeasurement points mde (motor driven end) and mnde (motor non driven end), to\ndetect a machine malfunction and to establish its nature. The type of failure\ndetected in this version are unbalance issue and bearing issue . Other types\nof failure will be supported in the next versions, stay tuned.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4&revisionId=533fbe3c-2b51-48ef-89bd-fe9ee96cf13a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2e60e141-c4eb-42a0-8fcf-3c8fe4a989b4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "74", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "INERGY_Cold_Demand_Prediction", - "description": "This service is based on a Random Forest model implemented in context of\nI-NERGY project. The overall vision of I-NERGY is to promote AI in the energy\nsector by delivering:\n\n * An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.\n * Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.\n\nThis is a forecasting service for predicting thermal load (cold energy) of a\nSpanish Hospital in hourly basis. The data was provided by VEOLIA, from the\nhospital complex in C\u00f3rdoba (Spain). The hospital complex have a district\nheating network. The layout of this district heating network is a ring system\ncomposed by two independent rings for heating and cooling. This ring just\nprovides energy for heating and Domestic Hot Water (DHW).\n\nApart from being a district heating network, this system is complex due to the\ndifferent production sources used for heating and cooling. In this facility\nheat, cold and steam are produced by using different sources.\n\nFor more information on how to use the service, please see Documents section.\n\n _The project leading to this service has received funding from the European\nUnion\u2019s Horizon 2020 research and innovation programme under grant agreement\nNo 101016508_\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=2ef3e3fb-afe9-422a-b037-88168d219a80&revisionId=8fc73f14-3456-4eda-af0a-68af28faada0&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=2ef3e3fb-afe9-422a-b037-88168d219a80&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/2ef3e3fb-afe9-422a-b037-88168d219a80/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "81", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "O7_information_extractor", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&revisionId=00a8cf50-c886-440f-8326-2381b54f7778&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.4", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&version=1.0.4", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/31b07091-8a96-4caf-8149-5d8316c3b314/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "82", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "O7_information_extractor", - "description": "This model is implemented to extract **O7** information from Italian social\nworkers' cards.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&revisionId=b4d4ea0c-c723-4dca-9066-5af00f2d9133&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.5", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&version=1.0.5", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/31b07091-8a96-4caf-8149-5d8316c3b314/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "83", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "O7_information_extractor", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&revisionId=e5afb24c-c035-4853-9ede-7b4b6b5ef5c8&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=31b07091-8a96-4caf-8149-5d8316c3b314&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/31b07091-8a96-4caf-8149-5d8316c3b314/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "84", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "advice-yolo", - "description": "advice-yolo is the implementation of YOLOv4 deep learning model. The model is\nalready trained for detecting road defects\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3332868c-0248-4f2c-8401-1464faf56166&revisionId=3cc90b52-2567-4432-b6bb-6368ab68ad6f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=3332868c-0248-4f2c-8401-1464faf56166&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/3332868c-0248-4f2c-8401-1464faf56166/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "85", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "advice-yolo", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3332868c-0248-4f2c-8401-1464faf56166&revisionId=ca4b9849-5e73-45d6-8e47-c512183f55cd&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=3332868c-0248-4f2c-8401-1464faf56166&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/3332868c-0248-4f2c-8401-1464faf56166/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "87", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Drug-Attrition-Oracle", - "description": "Drug Attrition Oracle is a deep neural network model, based on the chemical\nstructure of the compounds, which can predict the probability of withdrawal\nfrom the market for compounds that have passed initial trials. The model\nprovides an interpretable layer which can find chemical substructures that are\nmost influential for making the prediction as well as additional drug and\nmolecular properties which can influence the probability of withdrawal. The\nmodel takes as an input only the SMILES string of the molecule and outputs a\n[conformal prediction](http://alrw.net/articles/06.pdf) whether the molecule\nis approved or withdrawn along with a confidence score. The explanation for a\nprediction is given using the [GNN\nExplainer](http://snap.stanford.edu/gnnexplainer/). To improve the GCN model\npredictions we trained additional graph neural network models for predicting\nmolecular properties: Bioavailability, Clearance Hepatocyte, CYP2C9 Substrate\nand Toxicity (nr-ppar-gamma). These predictions are used with the base GCN\nmodel for predicting the withdrawal in an XGBoost model which uses [SHAP\nvalues](https://shap.readthedocs.io/en/latest/index.html) for interpretation.\ufeff\n\nCode is available on [Github](https://github.com/dionizijefa/Drug-Attrition-\nOracle)\n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=33de7b45-cc1e-4ff4-b01a-7eb08c5859e9&revisionId=b8f10760-6b7d-4b6c-aea9-74a7851e2027&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=33de7b45-cc1e-4ff4-b01a-7eb08c5859e9&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/33de7b45-cc1e-4ff4-b01a-7eb08c5859e9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "91", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "innerpageanalysis", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3664c82c-39e1-4fd8-bf0a-ee7c7e745068&revisionId=288d9558-641d-4101-8a6c-548ce3acc69f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=3664c82c-39e1-4fd8-bf0a-ee7c7e745068&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/3664c82c-39e1-4fd8-bf0a-ee7c7e745068/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "92", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "innerpageanalysis", - "description": "Advanced deep learning models are trained separately and applied for each type\nof information, and then put together in **Inner Page Analysis** pipeline. The\npipeline extracts the information from historical data from Italian workers'\nsocial security cards.\n\nAnalysis of stamps data and extraction of their key informations is the main\ngoal of this project.\n\ninput and output of this project will be like below:\n\n1\\. input is a full page of stamps in both raw scanned files or ordinary\nimages in .png or .jpg format. file name will be like 11831_2b.\n\n2\\. output will be a .csv file that contains below information for each stamp\nas columns:\n\n * filename,ID,xb,stamp_id,stamp_class,price,face,color\n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=3664c82c-39e1-4fd8-bf0a-ee7c7e745068&revisionId=b29ec7cf-9cdc-4cc3-9864-d2c607bab121&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=3664c82c-39e1-4fd8-bf0a-ee7c7e745068&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/3664c82c-39e1-4fd8-bf0a-ee7c7e745068/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "93", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "OHDSI_PLP_PILOT", - "description": "Pilot for Patient level Prediction for the AI4EU challenge.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=367469d8-cbd1-42c9-b3e9-ecd670e95ce8&revisionId=c2da9001-caf3-4594-9fe9-cccd84aa4181&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=367469d8-cbd1-42c9-b3e9-ecd670e95ce8&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/367469d8-cbd1-42c9-b3e9-ecd670e95ce8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "94", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "OpenWIDE", - "description": "# OpenWIDE\n\n# \n\n###### Trustworthy Detection of mouthes for automated swab robot.\n\n \n\nThe service finds a mouth in the image securely by cascading 3 detectors,\nperson->face->mouth and evaluate how open the mouth is.It will only give one\nmouth per image, which is the dominant mouth.The result is given as a DICT\nwhere the most relevant information is:\n\n \n\n 1. mouthbox: bbox of mouth in format x1,y1,x2,y2\n 2. facebox: bbox of face in format x1,y1,x2,y2\n 3. personbox: bbox of person in format x1,y1,x2,y2\n 4. Score: Collective score of the three models\n 5. Open: A measure of openness. >0.8 tends to be WIDE open.\n 6. H: How centered is the mouth horizontally. ~0 = looking straight into the camera.\n 7. V: How centered is the mouth vertically. ~-.3 = looking straight into the camera.\n\n \n\n## Cloud host\n\n \n\nIt is hosted as a RPC service in Azure\n\n* openwide.northeurope.azurecontainer.io:8061\n\n \n\n## Dockerhub\n\n \n\n* dtivisionboxcloud/openwide:v1.1\n\n \n\n## Test\n\nIncluded is a test image and a test script.\n\n \n\nJust run :\n\n \n\n* python testRPCService.py\n\n \n\nand you should receive a dict with information about the mouth.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=36ae858b-6486-46ae-8e8c-01d644b93d4d&revisionId=515a1a44-4ad1-4b29-b4f4-efadfa665dee&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=36ae858b-6486-46ae-8e8c-01d644b93d4d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/36ae858b-6486-46ae-8e8c-01d644b93d4d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "95", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "divis_pump_lifetime_classification", - "description": "The image provides a model for the classifiction on vacuum pumps into the\ncategories \"short living\" (less than one year) and \"long living\". The data\nneeded is specific to the format of a challenge owner of the AI4EU project.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=36e5b789-fdb8-4016-84d6-829423b58ffc&revisionId=ca6c26a5-9252-4fa0-81c3-aea31d26dca8&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=36e5b789-fdb8-4016-84d6-829423b58ffc&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/36e5b789-fdb8-4016-84d6-829423b58ffc/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "99", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "edm_aad_agent_node_cl", - "description": "**EDM RL Controller predictions (Solution Provider: Artificialy SA)**\n\n \n\nReinforcement learning applied to Electrical discharge machining (EDM) control\nfor the AI4EU project with Agie Charmilles SA. _For in Depth instructions of\nhow to use this model, please follow the README.pdf which is placed in the\nDocuments tab._\n\n \n\n \n\n \n\n \n\n \n\n \n\nThe solution consists of two nodes: `data_node` server which streams a\nDataFrame of observations (EDM machine states) read from the path provided by\nthe client (`infile`); and an `agent_node` server which predicts control\nactions based on the agent / controller specified by the client. Output\npredictions are stored inside the `./data_predictions/` folder of the\n`agent_node` Docker container.\n\n \n\nTo use this solution, please use the Docker container and the additional files\n(which are in the Documents tap of the model in the marketplace) from both the\n`data_node` and `agent_node`. They are both in the AI4EU platform market place\nnamed as `edm_aad_agent_node_cl` and `edm_aad_data_node_cl`:\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=414791ed-55f9-457d-b377-f790161e2cd6&revisionId=7622a8e4-d52f-4288-9bc6-88d64da6f7f6&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=414791ed-55f9-457d-b377-f790161e2cd6&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/414791ed-55f9-457d-b377-f790161e2cd6/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "101", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ICD-10-CM-classifier", - "description": "# ICD-10-CM classifier\n\n \n\nThe ICD-10-CM classifier is docker image containing two neural classifier\nmodels contained within a gRPC server that allows for classification of\nmedical texts in Spanish or English.\n\nFine-tuned on the CodiEsp dataset, the models for both languages are built\nupon the Bert architecture. The Spanish model achieves a 0.5980 MAP score\nacross the test set of the CodiEsp-Diagnostic dataset, whereas the English\nversion achieves a 0.5249 MAP score.\n\nThis module may provide help for researchers or other data-science enthusiasts\nthat are looking into building tools to automatically diagnose medical\ndescriptions.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4356534c-aec1-4271-8eda-f125cb08909b&revisionId=ee4f05c5-b86d-423c-b1d6-21b24b14be4d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4356534c-aec1-4271-8eda-f125cb08909b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/4356534c-aec1-4271-8eda-f125cb08909b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "102", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AudioSegmentation", - "description": "This model splits an audio file into segments like one speaker and removes\nsilence.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4366dce4-cd87-4a51-bd39-2dbfe5fd5b6c&revisionId=4a4c3771-6c63-46b6-aad6-d5cf78e1a03f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4366dce4-cd87-4a51-bd39-2dbfe5fd5b6c&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/4366dce4-cd87-4a51-bd39-2dbfe5fd5b6c/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "105", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "CODE", - "description": "The main objective of the challenge is to develop an method for automatic\nclassification of clinical narratives to ICD-10 codes.\n\nOur approach for semantic text classification has three core components: (1)\nFormalization of domain knowledge of medical information and techniques of\nsemantic data fusion; (2) Multilingual NLP techniques for document\npreprocessing including all or some of: data cleaning, data normalization,\ndata augmentation, transitive connections analysis, data balancing, expert\u2019s\nheuristics. For medical data based on our expertise on DOID, ICD-O, ICD-9,\nICD-10, MESH, MONDO, UMLS, Orphanet, SNOMED classification, data augmentation\nincluding typos simulation and synonym replacement will be used; (3)\nMultilingual deep learning methods for supervised classification of disease\ninto its corresponding class from the ICD-10. We are fine tuning pretrained\nBERT family models (bioBERT, clinicalBERT, MultilingualBERT, PubMedBERT, etc.)\nwith domain specific terminology for the target language. Additional corpora\ngenerated from public documents and linked open data is used for fine-tuning\nof the deep learning classification model for the specific ICD-10\nclassification.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=47920b57-7ab9-4abe-9881-f77d57144944&revisionId=6fdf671b-38d8-4995-b924-30ef638df116&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=47920b57-7ab9-4abe-9881-f77d57144944&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/47920b57-7ab9-4abe-9881-f77d57144944/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "106", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "aquila-ai-service", - "description": "The experiment aims to compare the design of an electronic product,\nrepresented by a CAD file, with the picture of a real artifact of the product.\n\nThe proposed solution consists of two main phases. First, the system\nestablishes a machine learning flow that utilizes a neural architecture to\naddress the issue of component recognition (Object Detection) in panel images.\nSecond, the system exploits Answer Set Programming (ASP) to compare the\nreconstructed scheme from the image with the original patterns to detect any\nmisalignments or errors.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=48053444-5100-4476-a8c3-53db3108dcdb&revisionId=94d411e7-3383-47e5-a923-581e7a6f5a1f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=48053444-5100-4476-a8c3-53db3108dcdb&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/48053444-5100-4476-a8c3-53db3108dcdb/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "108", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "CDSICD10", - "description": "1st Call for Solutions, ICD10 classification using medical trained BERT and\nQA. \u201cOur solution combines two different approaches: one to identify the\nrelevant disease (ICD-10 category) and the other one to determine the\nsubcategory (the digits after the period).\n\nThe \u201ccategory-classifier\u201d is based on Spanish BERT (BETO) fine-tuned on\nSpanish clinical text (CodiEsp corpus).\n\nIn order to determine the subcategories of each ICD-10 category, we will use a\nquestion-answering approach based on a structured version of the ICD-10\ndictionary created be NER.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4af0b85d-6d3e-4678-a991-865366ce4152&revisionId=b7ed24a9-c8fa-42cf-8f72-58acbb6f9435&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4af0b85d-6d3e-4678-a991-865366ce4152&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/4af0b85d-6d3e-4678-a991-865366ce4152/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "110", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "aipanel_repurposing", - "description": "**_Goal:_**\n\nTo design a model that allows repurposing of already approved drugs i.e., the\nmodel predicts if a drug can be used to fight another disease or protein\ntarget that relates to the disease.\n\n \n\n ** _Approach:_**\n\nTo achieve this goal, another Deep Convolutional Neural Network (D-CNN) has\nbeen implemented on molecular descriptors obtained for the drugs and Protein\nDescriptors obtained for targets, to develop a prediction model which predicts\nthe IC50 value where IC50 refers to Half-maximal inhibitory concentration, the\nmost widely used and informative measure of a drug's efficacy.\n\n \n\nTo prepare the dataset, following drugs, targets and their combined activities\nwere obtained from specific databases:\n\n 1. 1651 Approved Drugs from CHEMBL Database with IC50 Bio-Activities\n 2. 1975 Targets from CHEMBL Database\n\n \n\nApprox. 40000 activities were obtained for above mentioned drugs and targets,\nwhere the activities belonged to phase 4 studies. Phase 4 refers to the Stage\nwhere a drug is accepted since it shows desired results towards a specific\nTarget. Around 53% of activities consisted of IC50 values less than 1000 nM.\nTherefore, activities were divided into two classes, active interaction (IC50\n<= 1000 nM) and inactive interaction (IC50 > 1000 nM). This allowed us to\ndevelop a binary classification model. Active refers to a positive response of\na drug towards a target.\n\n \n\nFor the obtained Drugs, their SMILES were extracted from CHEMBL. SMILES are\nnotations for describing the structure of chemical species using short ASCII\nstrings. The SMILES were further used to extract 881 PUBCHEM Molecular\nDescriptors using PaDEL, a software to calculate molecular descriptors and\nfingerprints. To obtain features for target proteins, a python based library\npropy3 was used w", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4c32c784-dd97-466c-b533-e4e8e541b80a&revisionId=fd42128d-cd93-4b30-89b7-4c1f756da6b2&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4c32c784-dd97-466c-b533-e4e8e541b80a&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/4c32c784-dd97-466c-b533-e4e8e541b80a/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "111", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Tag-my-outfit", - "description": "The **Tag My Outfit** service predicts the category and the attributes of a\npiece of clothing viewed in a given image. The prediction model is the\n[_Visual Semantic Attention Model_\n(VSAM)](http://openaccess.thecvf.com/content_ICCVW_2019/papers/CVFAD/Ferreira_Pose_Guided_Attention_for_Multi-\nLabel_Fashion_Image_Classification_ICCVW_2019_paper.pdf), and is supervised by\nautomatic pose extraction creating a discriminative feature space. This\nparticular classifier was trained with the open source DeepFashion dataset.\nFor further detail see\n\n\n \n\nThe model accepts an image as input and outputs the labels corresponding to\ncategory (e.g. dress), subcategory (Evening Dress) and attributes ( short,\nlong sleeve, round neckline)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4c8cf4f7-670c-4ee8-9c20-f0fff3dc2b1d&revisionId=bb44d189-da04-4eea-9d55-7d2b5518a3e3&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4c8cf4f7-670c-4ee8-9c20-f0fff3dc2b1d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/4c8cf4f7-670c-4ee8-9c20-f0fff3dc2b1d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "112", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Urban4Cast", - "description": "Docker Image for Parking Predictions. It allows you to obtain parking\npredictions, with various levels of spacial granularity. It uses gRPC and\nprotobuf as interfaces to the developed model. Please see the README of the\nproject in order to understand how to use it.\n\n \n\nThe inputs of the model define the spacial granularity (None, Neighborhood,\nStreet, Sensor). Apart from that, you can define the temporal granularity (15\nminutes, 1 hour, 1 day) and how many steps in the future you want to predict.\nThe results are the predictions for these steps, including the upper and lower\nbounds of the prediciton.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4d22b7a8-240f-4e3b-a359-018819d779b3&revisionId=09c477af-508f-4cdc-806e-ce0462ae07cd&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4d22b7a8-240f-4e3b-a359-018819d779b3&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/4d22b7a8-240f-4e3b-a359-018819d779b3/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "113", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AudioSpeakerRecognition", - "description": "This model add speaker recognition to audio mining pipelines.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=4f57c704-10c2-43ec-93ae-d2183b3180f1&revisionId=374b55ac-3579-4ee1-8f7b-c1f6f5779e7e&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=4f57c704-10c2-43ec-93ae-d2183b3180f1&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/4f57c704-10c2-43ec-93ae-d2183b3180f1/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "114", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Generic-CODE", - "description": "The proposed solution is based on fine-tuned with Spanish medical texts of the\npre-trained BERT family language models (transformers clinicalBERT and\nmultilingualBERT). The designed text-based classification service predicts\nICD-10 codes for clinical text in Spanish for the Colorectal cancer (CRC) and\nassociated diagnoses. The service output contains the ICD-10\n\u201csubclassification\u201d (4 sign) codes that gives additional information about\nmanifestation, severity and location of the injury or disease for a wider\nrange of disease (4227) ICD-10 codes. The prediction models for ICD-10 codes\nare with high accuracy: clinicalBERT: 0.949 AUC ROC score and\nmultilingualBERT: 0.950 AUC ROC score. The service allows the user to switch\nbetween two models (clinicalBERT and multilingualBERT) and to set the\nparameter N for top N diagnoses according to the specific needs.\n\n![](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABOAAAAFECAYAAACODkEvAAAACXBIWXMAAAsTAAALEwEAmpwYAAAF8WlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS42LWMxNDggNzkuMTY0MDM2LCAyMDE5LzA4LzEzLTAxOjA2OjU3ICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6cGhvdG9zaG9wPSJodHRwOi8vbnMuYWRvYmUuY29tL3Bob3Rvc2hvcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgMjEuMCAoV2luZG93cykiIHhtcDpDcmVhdGVEYXRlPSIyMDIxLTExLTI2VDE3OjIwOjI4KzAyOjAw", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=50a5b4f8-a36b-45c1-81f7-a067232731c3&revisionId=0218b427-2fa9-4d92-b5e7-d331582765f9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=50a5b4f8-a36b-45c1-81f7-a067232731c3&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/50a5b4f8-a36b-45c1-81f7-a067232731c3/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "115", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ucrsuite-ed", - "description": "This module implements fast nearest-neighbor retrieval of a times series in a\nlarger time series expressed as location and distance using the UCR suite\nEuclidean Distance (ED) algorithm.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=50ebce0a-f91f-46eb-be32-b36574a1e068&revisionId=7b642559-fd32-41d5-ae18-753d03f5014a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=50ebce0a-f91f-46eb-be32-b36574a1e068&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/50ebce0a-f91f-46eb-be32-b36574a1e068/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "117", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "SmartRiver", - "description": "**The Digital Twin solution for AI-driven hydropower energy forecasting**\n\n ** **\n\n ** **\n\n ** **\n\n ** **\n\n **River discharge** rules energy production for Hydropower plants.\n\nPrediction of water resources for the next day, month, season, challenges\nevery energy producer and trader.\n\nSuch knowledge supports optimal energy production, avoiding wastes\n(underestimation) or empty reservoirs (overestimation).\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=52471527-6ec1-4233-8c8e-e8d412b300b7&revisionId=7391c733-e008-4467-9965-c905c536ffba&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=52471527-6ec1-4233-8c8e-e8d412b300b7&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/52471527-6ec1-4233-8c8e-e8d412b300b7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "118", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Aquila", - "description": "The experiment aims to compare the design of an electronic product,\nrepresented by a CAD file, with the picture of a real artifact of the product.\n\nThe proposed solution consists of two main phases. First, the system\nestablishes a machine learning flow that utilizes a neural architecture to\naddress the issue of component recognition (Object Detection) in panel images.\nSecond, the system exploits Answer Set Programming (ASP) to compare the\nreconstructed scheme from the image with the original patterns to detect any\nmisalignments or errors.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5357697a-114b-4649-8065-3c2108652ab3&revisionId=66f1c27a-797a-458e-9da2-c837e9e0402d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5357697a-114b-4649-8065-3c2108652ab3&version=1.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5357697a-114b-4649-8065-3c2108652ab3/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "120", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4opti", - "description": "This model is for production line prediction. More specifically based on the\nhistorical data the model is able to predict if the production will be late or\non time.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=54c201d7-caf2-4803-8321-6d5ab1ecf2ea&revisionId=10aface4-cf1c-4123-84dc-f91746ef6232&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=54c201d7-caf2-4803-8321-6d5ab1ecf2ea&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/54c201d7-caf2-4803-8321-6d5ab1ecf2ea/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "121", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "aquila-webapp", - "description": "The experiment aims to compare the design of an electronic product,\nrepresented by a CAD file, with the picture of a real artifact of the product.\n\nThe proposed solution consists of two main phases. First, the system\nestablishes a machine learning flow that utilizes a neural architecture to\naddress the issue of component recognition (Object Detection) in panel images.\nSecond, the system exploits Answer Set Programming (ASP) to compare the\nreconstructed scheme from the image with the original patterns to detect any\nmisalignments or errors.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5613118f-b66c-4cd7-b925-ea537d5a9c6c&revisionId=985597a7-a6e9-4a3f-a0b6-5fc0f90065c2&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5613118f-b66c-4cd7-b925-ea537d5a9c6c&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5613118f-b66c-4cd7-b925-ea537d5a9c6c/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "122", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "VideoShotDetection", - "description": "The shot detection system will detect the boundaries between video shots by\ndetecting a change between visual scenes.\n\n * Input: A video file. For a more accurate result, all frames need to be assessed. \n * Output: Detection result will be a file where each row contains the start and the end frames of each shot in the video\n\n **Model** : The underlying model for the shot detection is a deep learning-\nbased model called TransNetV2. This model has been trained on datasets with\ncombination of real (15%) and synthetic (85%) shot transitions (cuts) created\nfrom two datasets IACC.3 and ClipShots.\n\n**Evaluation** : This model achieves the F1 score of 0.898 on TRECVID 2007\ndataset. Annotations are provided by TRECVID and downloaded from their\n[website](https://www-\nnlpir.nist.gov/projects/tv2007/pastdata/master.shot.reference/). It appears\nthat the ground truth annotations differ about 2 frames from the actual cuts.\nAs a result, a tolerance of 2 frames is considered when applying the\nevaluation.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=56258e93-1bdf-4640-93f5-b3786e591acc&revisionId=91d5c71f-e984-4bb0-9c2b-aa2b15bea5e5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=56258e93-1bdf-4640-93f5-b3786e591acc&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/56258e93-1bdf-4640-93f5-b3786e591acc/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "125", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Idiap_BEAT_Face_Recognition_-_FaceNET", - "description": "A face recognition algorithm to compare one probe image against a set of\ntemplate images.\n\nThe images must be gray-scale and should contain the face region\nonly.Internally, the images are resized to 160x160 pixels.\n\nThis algorithm expects the pre-trained FaceNet model to be provided as input\nas well.\n\nThe model can be downloaded from\nhttps://drive.google.com/file/d/0B5MzpY9kBtDVZ2RpVDYwWmxoSUkwhich was made\navailable in\nhttps://github.com/davidsandberg/facenet/tree/b95c9c3290455cabc425dc3f9435650679a74c50\n\nReference experiment on the BEAT platform is\n[amohammadi/amohammadi/atnt_eigenfaces/1/atnt1](https://www.idiap.ch/software/beat/platform/experiments/amohammadi/amohammadi/atnt_eigenfaces/1/atnt1/).\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5889ec5c-8f7b-44b0-bb6b-164a8fa98fd9&revisionId=09d2cbe8-7eeb-4214-8826-b4665f4ebb8c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5889ec5c-8f7b-44b0-bb6b-164a8fa98fd9&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5889ec5c-8f7b-44b0-bb6b-164a8fa98fd9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "127", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "RoadDefectsDetection", - "description": "The model detects common road defects as well as gullies and manhole covers.\nIt is trained on image from the UK.\n\nFurthermore, it exposes a classfification model for pothole depths.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5a5ab3be-eddf-4956-829c-acb1934b7ead&revisionId=2a788999-6aec-4e2e-b1b6-30c9d1b39d78&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5a5ab3be-eddf-4956-829c-acb1934b7ead&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5a5ab3be-eddf-4956-829c-acb1934b7ead/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "129", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "cso", - "description": "AI-service to optimize stock management of components based on forecasting\nmodels and historical data analysis\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=5c2fbf7d-4417-49da-8714-7e37b925d81b&revisionId=a8e9a9ea-aa80-40e7-91b3-fb2a0fdc1504&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.6", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=5c2fbf7d-4417-49da-8714-7e37b925d81b&version=1.0.6", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/5c2fbf7d-4417-49da-8714-7e37b925d81b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "134", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "lexatexer-ai4hydro-proxy", - "description": "LexaTexer provides an Enterprise AI platform to support the energy value chain\nwith prebuilt, configurable AI applications addressing CAPEX intense hydro\nassets like Pelton and Francis turbines and pumps. In this project we combine\nour Enterprise AI platform and existing operational data to model the\nremaining useful life (RUL) of Pelton turbines based on real-world operational\nand environmental data. Thus, increasing RUL, efficiency and availability\nsignificantly. AI4Hydro plans to extent the remaining useful life of hydro\nturbines by up to 30%.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=600e7b34-68eb-4cff-892a-42b77eb71fbb&revisionId=8abc36f4-23a4-44bf-9d79-ad18f2d65dc9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=600e7b34-68eb-4cff-892a-42b77eb71fbb&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/600e7b34-68eb-4cff-892a-42b77eb71fbb/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "135", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "mytestmodel", - "description": "Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor\nincidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis\nnostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=61134a6f-511f-4144-ba26-1ae017bffa36&revisionId=6c316365-742b-43d9-96e4-54d4aa962d48&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=61134a6f-511f-4144-ba26-1ae017bffa36&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/61134a6f-511f-4144-ba26-1ae017bffa36/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "137", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "coverpageanalysis", - "description": "\n Key information extraction from document images is of paramount importance in office automation. \n Each cover card includes many words that are not required to be extracted. To extract the crucial key information, this repository works in three-folds:\n \n 1. Text detection with YOLOv5 \n 2. Text recognition with TRBA \n 3. Text recognition enhancement with natural language processing\n \n For more information, feel free to contact info@cogniteye.com\n \n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6297165d-d2f9-4617-90c5-d6586d34c84a&revisionId=b301cf36-fb4e-46cf-9425-a6dd1495d58c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6297165d-d2f9-4617-90c5-d6586d34c84a&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6297165d-d2f9-4617-90c5-d6586d34c84a/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "139", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "flask-model", - "description": "The initial model\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=63a30a14-770e-43d1-a929-1e1f1759af69&revisionId=ddc8368d-6dda-42c6-985a-66b7551e970b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=63a30a14-770e-43d1-a929-1e1f1759af69&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/63a30a14-770e-43d1-a929-1e1f1759af69/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "140", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "pumplife-prediction", - "description": "This repository contains the implementation of a service that performs a\nprediction on the expected running time of a pump. The prediction is made\nusing a series of parameters recorded during the pump's testing, that happens\nbefore the pump is sent to the customer.\n\n## Model description\n\nA series of different models have been tested and evaluated during the model\nselection phase. A Random Forest resulted to be the best performing model\nacross the validation set, and was thus implemented in the API in this\nrepository.\n\nThe input data is the csv file output of the test bench performed on the\npumps. The csv should contain a specific set of parameters, that are listed in\nthe Readme in this repository.\n\nThe model classifies the expected running time of the pump into 5 classes:\n\n * `[min,180]` ~ \"< 6 months\",\n * `(180,365]` ~ \"6 months ~ 1 year\",\n * `(365,730]` ~ \"1 year ~ 2 years\",\n * `(730,1e+03]` ~ \"2 years ~ 3 years\",\n * `(1e+03,max]` ~ \"> 3 years\".\n\nThe prediction output of the Random Forest is than binarized to obtain the\nclassification between the two classes `[< 1 year, > 1 year]`. The final\noutput of the model is one of this two classes.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=63bfe768-8f18-4265-89fc-18b77b10b4e5&revisionId=9358a7a6-141a-4b36-aabf-8e8ec6f3d6e9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=63bfe768-8f18-4265-89fc-18b77b10b4e5&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/63bfe768-8f18-4265-89fc-18b77b10b4e5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "141", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AI4EU-AgriCounting", - "description": "This model is part of the AI4EU Agriculture Pilot, where academia researchers,\nIT partners and smart agriculture companies showcase the opportunities of the\nAI4EU environment for unlikely stakeholders, like rural partners.\n\nCollectively, this consortium has produced a set of tools that exploit\nsatellite image, UAVs technologies, robotics and the latest trends in IA to\nhelp manage and predict the quality and productivity of vineyards.\n\nThis models deal with detection of cluster of grapes of a minimum quality and\nmaturation in an image, informing of the visual metrics of the detected\nregions.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6436a5d2-81d6-440d-9703-25eeede9ca73&revisionId=650ef51a-7c3b-404f-98e5-c85f7c2e1a30&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6436a5d2-81d6-440d-9703-25eeede9ca73&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6436a5d2-81d6-440d-9703-25eeede9ca73/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "143", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "advice-road-crop", - "description": "advice-road-crop is a semantic segmentation model that detects the region of\ninterest (ROI) of the image and crops this area to speed up the inference\nprocess. In the context of this project, the region of interest consists of\nthe road\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=64d59631-44f5-4179-9b2f-9b6b4fce0fff&revisionId=848cb306-75ee-4a5c-98c7-c9857b5f2afd&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=64d59631-44f5-4179-9b2f-9b6b4fce0fff&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/64d59631-44f5-4179-9b2f-9b6b4fce0fff/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "144", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "INERGY_Heat_Demand_Prediction", - "description": "This service is based on a Random Forest model implemented in context of\nI-NERGY project. The overall vision of I-NERGY is to promote AI in the energy\nsector by delivering:\n\n * An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.\n * Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.\n\nThis is a forecasting service for predicting thermal load (heat demand) of a\nSpanish Hospital in hourly basis. The data was provided by VEOLIA, from the\nhospital complex in C\u00f3rdoba (Spain). The hospital complex have a district\nheating network. The layout of this district heating network is a ring system\ncomposed by two independent rings for heating and cooling. This ring just\nprovides energy for heating and Domestic Hot Water (DHW).\n\nApart from being a district heating network, this system is complex due to the\ndifferent production sources used for heating and cooling. In this facility\nheat, cold and steam are produced by using different sources.\n\nFor more information on how to use the service, please see Documents section.\n\n _The project leading to this service has received funding from the European\nUnion\u2019s Horizon 2020 research and innovation programme under grant agreement\nNo 101016508_\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=64d9f84f-bd62-4da3-8571-756c79f9451e&revisionId=33554300-4673-481f-8203-3c37ec015440&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=64d9f84f-bd62-4da3-8571-756c79f9451e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/64d9f84f-bd62-4da3-8571-756c79f9451e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "145", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "SpeechRecognition", - "description": "Speech recognition reliably translates spoken information into digital text.\n\n# Main characteristics:\n\n * highly reliable speech recognition\n * robust against noise, e.g. in an industrial setting\n * can be combined with automatic speaker recognition\n * language models available for German and English\n * word and phoneme output to subsequent systems\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=65f43abe-ea13-45d1-9078-ce7fbbcb0d07&revisionId=3057c3ee-99e6-42f8-b398-05290d643917&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=65f43abe-ea13-45d1-9078-ce7fbbcb0d07&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/65f43abe-ea13-45d1-9078-ce7fbbcb0d07/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "146", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "SpeechRecognition", - "description": "Speech recognition reliably translates spoken information into digital text.\n\n# Main characteristics:\n\n * highly reliable speech recognition\n * robust against noise, e.g. in an industrial setting\n * can be combined with automatic speaker recognition\n * language models available for German and English\n * word and phoneme output to subsequent systems\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=65f43abe-ea13-45d1-9078-ce7fbbcb0d07&revisionId=9d6dadf1-ee95-4b9c-8f7b-ade96563bd64&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=65f43abe-ea13-45d1-9078-ce7fbbcb0d07&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/65f43abe-ea13-45d1-9078-ce7fbbcb0d07/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "147", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "rebase-model", - "description": "This is a LightGBM time-series forecasting model. LightGBM is a gradient\nboosting decision tree framework developed by Microsoft. It works by\nrecursively partitioning the feature-space into hyperrectangles and utilising\nthe mean (or median) of the target in the specific hyperrectangle as\nprediction. Every one step recursion is made to reduce the prediction errors\nof the previous model iteration. One of the advantages with LightGBM over\nother gradient boosting decision tree frameworks is its efficiency and the\nability to predict quantile distributions.\n\nThe asset provides a user interface where you can upload a train set and a set\nto predict on. The prediction is then displayed in a chart and can be\ndownloaded from the user-interface. It also exposes the rpc Predict() to be\nable to be called from another service. Here is a video\n[demonstration](https://drive.google.com/file/d/1GpD9hEg498Ic2H76Vh4uGzF_k4EVKa2j/view?usp=sharing).\nPlease refer to this[ ](https://github.com/rebase-energy/ai4eu-\nexperiment/tree/master/model)[readme](https://github.com/rebase-energy/ai4eu-\nexperiment/tree/master/model) for more information about how to use and\ninstall.\n\nThis project has received funding from the European Union's Horizon 2020\nresearch and innovation programme within the framework of the I-NERGY Project,\nfunded under grant agreement No 101016508\n\nWebsite:[ https://www.rebase.energy/](https://www.rebase.energy/)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6662fc35-2e6c-4f48-8e26-f7b677acbb62&revisionId=97313833-7e70-47b1-8524-139c2dc26a78&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6662fc35-2e6c-4f48-8e26-f7b677acbb62&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6662fc35-2e6c-4f48-8e26-f7b677acbb62/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "149", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "i-nergy-load-forecasting-lightgbm", - "description": "This is a forecasting service for predicting of the Portuguese aggregated\nelectricity load time series (15-min resolution, 24hr forecasting horizon).\nThis service is based on a LightGBM model implemented in the context of\n[I-NERGY](https://www.i-nergy.eu/) project. For more information on how to use\nthe solution, please see README.pdf in the Documents section.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=693e5d71-2141-4078-9bf8-0b8b0a9d28fd&revisionId=dccbd07e-3522-4aca-a479-62581058c352&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=693e5d71-2141-4078-9bf8-0b8b0a9d28fd&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/693e5d71-2141-4078-9bf8-0b8b0a9d28fd/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "150", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "SSC-Demo", - "description": "Model for finding stamps in the image and determining their value.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=69e1bd04-c689-44e0-8cb6-e7c45ba4d5c6&revisionId=2fbe123c-09ac-4fdb-9af7-c610a541d709&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=69e1bd04-c689-44e0-8cb6-e7c45ba4d5c6&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/69e1bd04-c689-44e0-8cb6-e7c45ba4d5c6/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "154", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "covid_predict", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=0ea72cd0-290e-49ad-9800-16fd365980a7&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.5", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.5", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "155", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "covid_predict", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=1fa906b5-1a75-4834-9cda-35120d2aa458&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "156", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "covid_predict", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=4d52b360-8cbc-48d3-9741-f921efea9963&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "157", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "covid_predict", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=5b01c8e7-44df-4103-8348-e64133b1377e&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.3", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "158", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "covid_predict", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=63e5e93b-1e72-4ea3-8f8a-b375f9748e3f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.4", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.4", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "159", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "covid_predict", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=6c56ef46-c70e-4ab7-ab49-c0e7ea856a60&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.7", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.7", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "160", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "covid_predict", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=79d17341-4421-4ad0-bc08-62d349621182&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.6", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.6", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "161", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "covid_predict", - "description": "Two trained Convolutional networks with capabilities to determine\nautomatically if a patient has pneumonia based on computer tomography (CT)\nscans or x-ray images. The raining phase is hidden to end users. It is a\nconstant process based on gathering open or anonymized clinical images.\n\nThe end users will be supplied with a docker. The communication with which is\nbased on grpc proto buffer. End users will supply a link to X-ray or CT image\nand will obtain diagnosis and it\u2019s probability.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=ba5f9197-f3dd-469c-ae3f-0fec081ac81a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.8", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.8", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "162", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "covid_predict", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&revisionId=db665028-c6b1-4e4f-beef-bfcbd14597ec&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6c463b35-6fb8-45ee-a52a-846110947c3b&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6c463b35-6fb8-45ee-a52a-846110947c3b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "165", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "rp4pl-classification", - "description": "rp4pl-classification (Reliable Prediction for Pump Lifetime) is a\nclassification model used to predict pump failures within a year of\ninstallation. The model input is the final quality test data from the pump\nmanufacturing process and the output is the failure prediction (whether the\npump is predicted to fail within a year installation - yes - or to not fail\nwithin a year of installation - no). The model pipeline included data\ntransformation and feature inference. Additionally, it includes a feature\nselection step to select the most relevant features from the input data.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6f0368f1-77c2-4bfe-b632-98ecd9c87bd9&revisionId=151771e8-422b-4a7b-9d87-8edbadfa6def&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6f0368f1-77c2-4bfe-b632-98ecd9c87bd9&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6f0368f1-77c2-4bfe-b632-98ecd9c87bd9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "166", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "rp4pl-classification", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6f0368f1-77c2-4bfe-b632-98ecd9c87bd9&revisionId=7352557e-d807-4ece-af4d-de5f3faa3956&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6f0368f1-77c2-4bfe-b632-98ecd9c87bd9&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6f0368f1-77c2-4bfe-b632-98ecd9c87bd9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "167", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "adios-apply", - "description": "I-NERGY - TTP1 - ADIOS APPLY MODEL\n\n \n\nApplies anomaly detection for electric power grids model to a full dataset. In\nthis phase, we use the previously trained models to label the unknown alarms.\nScikit-learn allows to save trained models to binary files on disk, so in this\nphase we first load our pretrained model and then we load also the one-hot\nencoder in case we are willing to use categorical data, or the text processing\nmodule if we want to use the text-based classification. Once the pre-trained\nmodel is loaded, it can be used to predict the labels of unknown alarms.\n\nAIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-apply-\nmodel\n\nAttribution\n\nThis project has received funding from the European Union's Horizon 2020\nresearch and innovation programme within the framework of the I-NERGY Project,\nfunded under grant agreement No 101016508\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=6f212625-4d1c-4f13-9f0b-fcfcd6bca65c&revisionId=4888be04-de9c-48b3-b9b4-3e45102956f1&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=6f212625-4d1c-4f13-9f0b-fcfcd6bca65c&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/6f212625-4d1c-4f13-9f0b-fcfcd6bca65c/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "169", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ML_Assistant_for_Vibration_Monitoring", - "description": "The models deployed to AI4EU experiment platform are based on the data\nprovided by HAT Analytics as part of the AI4EU challenge entitled \"ML\nassistant for vibration monitoring\".\n\n \n\nThree models have been developed corresponding to three different asset types:\n\n 1. Direct fans\n 2. Feet-mounted fans\n 3. Flange-mounted fans\n\n \n\nThe measurements are gathered from different measurement points namely\n\n 1. **FAN** : Fan casing\n 2. **MDE** : Motor-Drive End \n 3. **MNDE** : Motor-Non-Drive End\n\nNote that: Not all asset types provide data from all 3 measurement points.\n\n \n\nMeasurements from each measurement point can be provided from three axes Axial\n( **A** ), vertical ( **V** ), and Horizontal ( **H** )\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7264d5a0-ee24-497a-853d-acdf6b8bdd51&revisionId=23318740-fcef-4e42-8f59-c56ab7b8e72f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7264d5a0-ee24-497a-853d-acdf6b8bdd51&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7264d5a0-ee24-497a-853d-acdf6b8bdd51/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "170", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "aipanel_approvedvswithdrawn", - "description": "**_Goal:_**\n\nTo design a model that is able to predict whether a drug compound is approved\nor has potential tendencies to be withdrawn.\n\n ** _Approach:_**\n\nTo achieve this goal, a Deep Convolutional Neural Network (D-CNN) has been\nimplemented on molecular descriptors obtained for the drugs, to develop a\n2-class predictive model where the classes are 0: Approved, 1: Withdrawn.\n\nTo prepare the dataset, following drugs were obtained from specific databases:\n\n 1. 270 Withdrawn Drugs from Charite Database\n 2. 2800 Approved Drugs from CHEMBL Database\n\nDue to the imbalanced ratio of withdrawn and approved drugs, certain steps\nwere taken during data preparation to help the model learn a better\nrepresentation from the dataset. These steps are discussed in the later\nslides.\n\nFor the obtained Drugs, their SMILES were extracted from CHEMBL. SMILES are\nnotations for describing the structure of chemical species using short ASCII\nstrings. The SMILES were further used to extract 881 PUBCHEM Molecular\nDescriptors using PaDEL, a software to calculate molecular descriptors and\nfingerprints. Furthermore, 729 Chemotype features were also extracted for all\ndrugs, where a chemotype describes the subspecies of a drug using its\nmolecular structure. In total 1610 features were\nprepared.![](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABCAAAAL9CAYAAAD6nBeuAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAP+lSURBVHhe7N0FnBz1+cfx53K5XNzd3UNcCBbc3f5YIVCktIUCpUWKtlDc2kKRFofiboEQJBAS4k7c3eWS3OVu//v53fySyWb3crbJXe775rXkdnZ8ZmfneX4yKZEoExERERERERFJonLBvyIiIiIiIiIiSaMEhIiIiIiIiIgknRIQIiIiIiIiIpJ0SkCIiIiIiIiISNIpASEiIiIiIiIiSacEhIiIiIiIiIgknRIQIiIiIiIiIpJ0SkCIiIiIiIiISNIpASEiIiIiIiIiSacEhIiIiIiIiIgknRIQIiIiIiIiIpJ0SkCIiIiIiIiISNIpASEiIiIiIiIiSacEhIiIiIiIiIgknRIQIiIiIiIiIpJ0SkCIiIiIiI", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=72bbafe5-031c-4a8c-ad21-42d1388b00fd&revisionId=8b6967d7-fd07-4a8d-b6e6-f66ed2a360ad&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=72bbafe5-031c-4a8c-ad21-42d1388b00fd&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/72bbafe5-031c-4a8c-ad21-42d1388b00fd/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "171", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "TEK_THOR_SIMULATION", - "description": "**AI4EU - THOR **THOR solution consists in a Hybrid optimization solution to\nmake the right decision on the amount spare parts in stock, considering past\nsales and forecasts. The purchase decision considers as input information\ncurrent stock status, production needs, production forecast, sales forecast,\nvariability Price of stock material and several restriction parameters.\n\n **Cash-Flow Simulation**. A probabilistic Monte Carlo simulator of cash-flow,\ntaking into account existing datasets and forecasts.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=731f99e5-8aef-4375-832f-8d5ababf21b3&revisionId=999f0664-c19c-4492-8520-cf467abc4b14&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=731f99e5-8aef-4375-832f-8d5ababf21b3&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/731f99e5-8aef-4375-832f-8d5ababf21b3/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "174", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "pddl-planners-ffi", - "description": "An ACUMOS component which, acting as a gRPC server, is able to call a number\nof PDDL action planners (`ff, fd, popf` and `optic` for now).\n\nAsset produced by the [AIPlan4EU](https://aiplan4eu.fbk.eu/) project.\n\n \n\n \n\n \n\nThis project contains an ACUMOS component which, acting as a gRPC server, is\nable to call a number of PDDL action planners (`ff`, `fd`, `popf` and `optic`\nfor now).\n\nThis is more a proof of concept on how to integrate PDDL Planner within a\ndocker made available for ACUMOS Hybrid Pipelines.\n\nIf you want to run the server locally, each of these planners needs to be\ninstalled separately and have to be available in your PATH. Otherwise, you can\nuse the Dockerize version (see [Docker\nversion](https://github.com/aiplan4eu/acumos-planners#Docker_version) on this\npage which contains all of them), still you will need the client.\n\n \n\nThe supported planners for now are:\n\n \n\n * `ff` is pretty straighforward to install [FF homepage](https://fai.cs.uni-saarland.de/hoffmann/ff.html)\n * `fd` fast downward is easy to install too [Fast Downward homepage](http://www.fast-downward.org/HomePage)\n * `popf`, I would not know, I grabbed the binary from the ROSPlan distribution (bad bad\u2026\u200b), but here is the [POPF homepage](https://nms.kcl.ac.uk/planning/software/popf.html)\n * `optic` is a pain to install, the Cmake files are broken\u2026\u200b Check [OPTIC homepage](https://nms.kcl.ac.uk/planning/software/optic.html), you may find the proper binary for you\u2026\u200b\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=73a6170b-47a0-4f99-bf95-af01798f693b&revisionId=e72ada49-fffb-45d3-9ef9-9e2b749cbd19&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=73a6170b-47a0-4f99-bf95-af01798f693b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/73a6170b-47a0-4f99-bf95-af01798f693b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "178", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "trondheim-rl-agent", - "description": "SUMO/RL implements a pipeline with a traffic simulator of the city of\nTrondheim, Norway, and a reinforcement learning autonomous agent that learns\nand implements traffic control policies with the goal of minimizing the number\nof pollution peaks above a given threshold. Each component can be ran stand\nalone.\n\nThis resource contains a trained Reinforcement Learning agent to interact with\nthe 'trondheim-simulator' traffic simulator with the goal of reducing\npollution peaks.\n\nFor a more detailed description check the github repository of the resource:\nhttps://github.com/tsveiga/AI4EU-RL-Trondheim\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=78591f43-c83a-45bb-b5fe-1d79d15cfdde&revisionId=bf5bcfff-4c70-4ca3-bf20-0c6d88f352f7&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=78591f43-c83a-45bb-b5fe-1d79d15cfdde&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/78591f43-c83a-45bb-b5fe-1d79d15cfdde/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "179", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Molecule-Trainer", - "description": "Molecule Trainer is a modelling pipeline for optimization, training and\ndeployment of models for molecular single prediction tasks. Molecule Trainer\noptimizes and trains a graph neural network based on Efficient Graph\nConvolution with fully connected layers at the end, which can produce accurate\nmodels with lower memory consumption and latency. As input it requires only a\nSMILES string of the molecules along with a binary or continuous target\nvariable. The pipeline automatically checks if the task is classification or\nregression and optimizes the classification or regression metrics accordingly.\nMolecule Trainer offers methods for optimization, training and prediction. The\ndescription of these methods is given in the user guide.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7a343bda-ecb5-4c6d-8a17-88c8d9139f50&revisionId=1626f215-66ff-4dbe-b4a1-17e3f74b64c5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7a343bda-ecb5-4c6d-8a17-88c8d9139f50&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7a343bda-ecb5-4c6d-8a17-88c8d9139f50/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "182", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ucrsuite-dtw", - "description": "This module implements fast nearest-neighbor retrieval of a times series in a\nlarger time series expressed as location and distance using the UCR suite\nDynamic Time Wrapping (DTW) algorithm.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7cc68464-54e3-4a57-9e36-afdd04af7b74&revisionId=aeafd55f-59f5-4191-a34a-16ad0f7433d6&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7cc68464-54e3-4a57-9e36-afdd04af7b74&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7cc68464-54e3-4a57-9e36-afdd04af7b74/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "185", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "atranscribe", - "description": "ATransCribe is a speech to text service. It uses whisper model for\ntranscription. Whisper is a general-purpose speech recognition model. It is\ntrained on a large dataset of diverse audio and is also a multi-task model\nthat can perform multilingual speech recognition as well as speech translation\nand language identification.Also using its underlying deep learning technology\nit process soundclips and removes background noises etc. for better\nresults.The app is developed and used for the H2020 project AI-PROFICIENT.\n\n \n\n \n\n \n\n \n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=7ed5c850-a7a4-4f71-bf97-c07be436424f&revisionId=b5057270-26f1-49da-b650-610d88fd6df1&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=7ed5c850-a7a4-4f71-bf97-c07be436424f&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/7ed5c850-a7a4-4f71-bf97-c07be436424f/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "190", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "entity_extractor", - "description": "Extracts personally identifiable information from documents of different\nformats. Entities detected include names, addresses, or faces.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=86b14065-b351-4e37-a394-a401a997c542&revisionId=fd34ef22-937c-4bec-9a02-f4af848e0c3b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=86b14065-b351-4e37-a394-a401a997c542&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/86b14065-b351-4e37-a394-a401a997c542/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "191", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "critical-part-classifier", - "description": "This is a composite pipeline consisting of the Tensorflow model created for\ncritical part prediction along with a generic data broken block that is used\nto match the contents of the CSV input file to the expected input features of\nthe model. Given a set of features that describe the production line\ncharacteristics or factory conditions, the model we have built predicts\nwhether a particular component part is critical or not to the supply chain.\nThe end goal is the optimization of the stock management.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=88e79675-8008-4b48-bbac-67e7b5c519ed&revisionId=f6e7ad03-637f-490e-babb-36eb7544cf59&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=88e79675-8008-4b48-bbac-67e7b5c519ed&version=0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/88e79675-8008-4b48-bbac-67e7b5c519ed/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "192", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4iot-calibration", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=047d727b-a7a2-43b3-bfca-c93cc1400095&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8949117e-d8a2-49a6-8bd8-359b3d5f1436/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "193", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4iot-calibration", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=14302825-5469-4de8-a0d1-105ff5b66388&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.3", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&version=1.0.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8949117e-d8a2-49a6-8bd8-359b3d5f1436/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "194", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4iot-calibration", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=4738bd06-fe95-4a25-9a68-825f107ffa4d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8949117e-d8a2-49a6-8bd8-359b3d5f1436/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "195", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4iot-calibration", - "description": "The Calibration component is part of the AI4IoT Calibration pipeline. It\nincludes a machine learning model that predicts the calibrated values of raw\ndata coming from low-cost sensors, such that the output is as close as\npossible to reference values. The component is deployed with a pre-trained\nmodel and outputs the calibrated values for PM2.5 and PM10 measurements.\nInputs are PM measurements from the sensor and meteorological data.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=4afa4cfa-ee5d-4ffa-b114-1f9f093a2ac6&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.4", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&version=1.0.4", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8949117e-d8a2-49a6-8bd8-359b3d5f1436/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "196", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4iot-calibration", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&revisionId=672734a5-ce48-47fc-81d6-b06b923fa3eb&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8949117e-d8a2-49a6-8bd8-359b3d5f1436&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8949117e-d8a2-49a6-8bd8-359b3d5f1436/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "201", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "SmartProc", - "description": "Based on the given and read-in data of a time series, the algorithm calculates\na forecast of how the data will develop further in a freely definable time\nhorizon. Trends are recognised and taken into account in the forecast, as are\nseasonalities and similar dependencies that are contained in the input data\nand are recognised by the algorithm. The algorithm can be used for all types\nof data where a forecast makes sense, such as sales figures for a product or\nparts requirements for purchasing from a supplier. It must be said, however,\nthat extraordinary events such as corona or disasters cannot be predicted by\nany AI-based algorithm - and it is true that it is only a prediction that does\nnot necessarily reflect reality. The readme.txt file contains an example of a\nclient script that addresses the algorithm and displays the result of the\nalgorithm in a browser.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=8c424961-1218-492f-b041-2653a84817a4&revisionId=e4572dcc-8e52-4207-91f3-897f17cd7861&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=8c424961-1218-492f-b041-2653a84817a4&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/8c424961-1218-492f-b041-2653a84817a4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "208", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "NLP-IMECH", - "description": "The module uses Natural Language Processing (cosine difference) to compare\ninput text with a list of sentences contained in a csv file and returns the\nmost similar description from the csv file along with its index in the csv\nfile.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=93301148-af5f-4647-bd0c-51180d6d3688&revisionId=23be4e3a-e8e5-4066-b668-5590f78e5f20&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=93301148-af5f-4647-bd0c-51180d6d3688&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/93301148-af5f-4647-bd0c-51180d6d3688/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "211", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "IoTConceptExtraction", - "description": "We developed an AI-based tool that automatically extracts knowledge from IoT\nontologies to support the construction of a unified ontology for Web of\nThings. The following technologies are used: W3C semantic web technologies\n(such as RDF, OWL, SPARQL, SKOS), Deep learning model (Word2vec) and\nunsupervised clustering algorithms (K-means).\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9629027c-1030-446d-80ad-dec86ddeadeb&revisionId=8daafca8-0c5d-4266-a25b-6c0aa4af0a79&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9629027c-1030-446d-80ad-dec86ddeadeb&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/9629027c-1030-446d-80ad-dec86ddeadeb/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "212", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "traffic-scene-segmentation-deeplab-xception65-cityscapes", - "description": "This module provides a semantic segmentation using the\n`xception65_cityscapes_trainfine` model from the tensorflow model zoo1.\n\nHere is the table of the cityscapes train classes with their id and their RGB\ncolor values used by the model and output of the module.\n\n \n \n | class name | ID | R | G | B |\n | ------------- | ---: | ---: | ---: | ---: |\n | ROAD | 0 | 128 | 64 | 128 |\n | SIDEWALK | 1 | 244 | 35 | 232 |\n | BUILDING | 2 | 70 | 70 | 70 |\n | WALL | 3 | 102 | 102 | 156 |\n | FENCE | 4 | 190 | 153 | 153 |\n | POLE | 5 | 153 | 153 | 153 |\n | TRAFFIC LIGHT | 6 | 250 | 170 | 30 |\n | TRAFFIC SIGN | 7 | 220 | 220 | 0 |\n | VEGETATION | 8 | 107 | 142 | 35 |\n | TERRAIN | 9 | 152 | 251 | 152 |\n | SKY | 10 | 70 | 130 | 180 |\n | PERSON | 11 | 220 | 20 | 60 |\n | RIDER | 12 | 255 | 0 | 0 |\n | CAR | 13 | 0 | 0 | 142 |\n | TRUCK | 14 | 0 | 0 | 70 |\n | BUS | 15 | 0 | 60 | 100 |\n | TRAIN | 16 | 0 | 80 | 100 |\n | MOTOCYCLE | 17 | 0 | 0 | 230 |\n | BICYCLE | 18 | 119 | 11 | 32 |\n \n\nThe resolution of images is restricted by the model. The maximum width is 2049\nand maximum height is 1025.\n\nCommunication of image data happens via filepaths that specfiy the location\nrelative to a docker volume mount path. Docker volume mount is expected to be\ncommuncated via a environment variable `SHARED_FOLDER_PATH`.\n\nThere are two outputs of the model. The first is a paletted image, where the\npalette index is the class and the color the cityscapes dataset colo", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=98febe4e-ce6d-4f33-90b1-7a87c6c1638b&revisionId=7a8c8f00-2f8d-47dc-91e6-7f02536c2498&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=98febe4e-ce6d-4f33-90b1-7a87c6c1638b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/98febe4e-ce6d-4f33-90b1-7a87c6c1638b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "216", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "advice-converter-pipeline", - "description": "In this pipeline, the label format converter node reads the annotations from\nthe shared folder and converts from one standard format to another\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9c590181-fcdd-4f08-afdb-d00cc8ae094c&revisionId=e37153fb-c912-4fe8-a95c-8dbcd52b94e5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "st3", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9c590181-fcdd-4f08-afdb-d00cc8ae094c&version=st3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/9c590181-fcdd-4f08-afdb-d00cc8ae094c/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "218", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ithermai-quality-check-service", - "description": "This is an AI model for classification of normal and faulty products of the\ninjection molding process. It uses RGBT camera frames as input and labels them\nas faulty and normal products.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9ddecdf3-9be6-4b4e-a74b-eccfe1c1a6e8&revisionId=21f1a7b0-3e82-492f-95bc-7b3e78d7cf36&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9ddecdf3-9be6-4b4e-a74b-eccfe1c1a6e8&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/9ddecdf3-9be6-4b4e-a74b-eccfe1c1a6e8/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "219", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "i-nergy-load-forecasting", - "description": "This is a forecasting service for predicting electrical load of a boiler room\nin a large District Heating Network in hourly basis.\n\nThis service is based on a Seasonal ARIMA model implemented in context of\n[I-NERGY](https://www.i-nergy.eu/) project.\n\nFor more information on how to use the solution, please see README.pdf in\nDocuments section.\n\n \n\n \n\n \n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=9fc0357c-2b50-4733-8225-44f78a9d5421&revisionId=ae6bd423-aa37-411f-a8f1-40aeb6b0bd4d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=9fc0357c-2b50-4733-8225-44f78a9d5421&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/9fc0357c-2b50-4733-8225-44f78a9d5421/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "220", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "house-prices-databroker", - "description": "Databroker of the House Price Prediction Pipeline.\n\n The databroker is responsible for the transfer of house-price dataset to the\nmodel. The features are selected based on higher correlation coeffecient. It\nhas a WebUI that can be used to feed new/ unseen input to the model that\npredicts the sales price of a house.\n\n **Repository:**\n\nPlease refer the following link for the houseprice-prediction code in the\nEclipse Graphene platform -\n\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a03e571c-634f-4da5-83cd-1cd069e304e0&revisionId=b577c72a-0f61-4d72-b04c-823ed54f4fa8&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a03e571c-634f-4da5-83cd-1cd069e304e0&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a03e571c-634f-4da5-83cd-1cd069e304e0/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "222", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "VideoObjectRecognition", - "description": "The video object recognition model detects and classifies objects in a video\nsegment\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a2dd4a73-eae7-4c03-9e10-d07de158d040&revisionId=e2e04665-c00e-4363-9d29-837af49a370d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a2dd4a73-eae7-4c03-9e10-d07de158d040&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a2dd4a73-eae7-4c03-9e10-d07de158d040/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "223", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "keras-iris-model", - "description": "Classify Iris blossoms with a keras model\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a48fdedd-0ba3-49a4-befe-046467110a6e&revisionId=988e80a4-0629-48d4-8805-ce3cc7f71429&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a48fdedd-0ba3-49a4-befe-046467110a6e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a48fdedd-0ba3-49a4-befe-046467110a6e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "228", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "predictive-maintenance", - "description": "Neural network trained in a Federated Learning way for predicting the failure\nof motors based on a set of features. The network is trained in an experiment\nin the DIH4AI project in a collaboration of the South Netherlands DIH and\nFortiss. The federated learning process was executed on an International Data\nSpaces architecture with the whole process being recorded by the Evidencia\nplugin, of which the factsheet is uploaded as document.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=a90b4145-51ec-4345-be5f-21d2c8e9a214&revisionId=c4624a34-affb-417b-b004-d30809697b49&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=a90b4145-51ec-4345-be5f-21d2c8e9a214&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/a90b4145-51ec-4345-be5f-21d2c8e9a214/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "230", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Healthymity", - "description": "Complex natural language processing model based on cognitive linguistics and\nsemi-supervised learning using neural networks. The model is used to predict\nICD code through medical notes text.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=acb05f2a-d6ed-491d-9d70-bea6b8092ca9&revisionId=73b36c23-5849-4ac1-95f1-753070175bd3&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=acb05f2a-d6ed-491d-9d70-bea6b8092ca9&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/acb05f2a-d6ed-491d-9d70-bea6b8092ca9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "232", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "forWoT", - "description": "![](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAlgCWAAD/2wBDAAYEBQYFBAYGBQYHBwYIChAKCgkJChQODwwQFxQYGBcUFhYaHSUfGhsjHBYWICwgIyYnKSopGR8tMC0oMCUoKSj/2wBDAQcHBwoIChMKChMoGhYaKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCj/wAARCAUHBkADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD6pooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArzz4/E/8Kr1cK7puaFCUYqcGZARkeoJr0OvPPj9/wAkt1X/AK6W/wD6PSgDCk+EHgZfs8UOgqZWiErvLfXAUD8JO5qeb4RfDu2WMz6HJ8wBLpd3LKPx8yvQZNPe4htpoXRXEIjYOu5WUj0qtNoMkkSRfaAUESx4Zc4x3HOBmsG6ibsZNzTdjif+FP8Aw6+1/ZhoczSDAJW6uSFJGRk7+Kr2vwg8DXF7LENCiRI3KlWv7nzMD+LHmYwa7/VxHZ3UF5d3UdvH5iqCqHe5wflyDznHpVdr2zvL+Mm+t5PKHnoIkO4rtzjd34IJHpT/AHj2Qe++hxi/Cb4bMWA0aQbQzAm7ucMB1x8/NLH8JvhtIjsNGlVUTzPmurkZX1Hz8101q+nReVCbq3DXEIaIbCWAccc5xzyOgzWppTQ6pard2Uu6PyTbjzIyMkEckH6UXq9UH7zqjh/+FRfDkW5lbQp1GQAGurkEk9MDzOaQ/CT4brAJTosgBbZt+13OQ3pjzM138WkSJBt88CRZBImFO1COwBPT8akj0xl2s8oaTzhMxAwD7ClzVOw05nDW/wAFfAE0QcaDKoPZru5U/l5lR3/wb+HllbPPNojhEHa8uCT6ADzK9RXhcVk69d6ckP2XUrlIfNGVy2DweordeY5S5Y3uedaZ8KfhxqBjWLQpkkdC4SS7uAcA45/eda0v+FIeAP8AoBt/4G3H/wAcrc8MjTIdTMVley30xjLb3YHYuenAHU11tNrsTRm5xu9zzOb4", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b1205bf1-4377-48ed-bab3-6d0c6838fe29&revisionId=0b698990-d86f-44e7-b63a-06cca83b3a86&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b1205bf1-4377-48ed-bab3-6d0c6838fe29&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b1205bf1-4377-48ed-bab3-6d0c6838fe29/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "233", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "forWoT", - "description": " \n\n![](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/4Re+RXhpZgAASUkqAAgAAAAHABIBAwABAAAAAQAAABoBBQABAAAAYgAAABsBBQABAAAAagAAACgBAwABAAAAAgAAADEBAgANAAAAcgAAADIBAgAUAAAAgAAAAGmHBAABAAAAlAAAAKYAAABgAAAAAQAAAGAAAAABAAAAR0lNUCAyLjEwLjI4AAAyMDIxOjEyOjAxIDE3OjQ5OjIzAAEAAaADAAEAAAABAAAAAAAAAAkA/gAEAAEAAAABAAAAAAEEAAEAAAAAAQAAAQEEAAEAAACKAAAAAgEDAAMAAAAYAQAAAwEDAAEAAAAGAAAABgEDAAEAAAAGAAAAFQEDAAEAAAADAAAAAQIEAAEAAAAeAQAAAgIEAAEAAACYFgAAAAAAAAgACAAIAP/Y/+AAEEpGSUYAAQEAAAEAAQAA/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8AAEQgAigEAAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIBAgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEXGBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX29/j5+v/aAAwDAQACEQMRAD8A9/ooooAKKKKACiiigDh9Ti2alK8cUUjFz/rOg+8T79hSNFPFgCGFgx/hTJHAx/F6k/lT9TVVvbwFQ2A0jNs5UEMoGc+xOKqOWMzoqNhXOcKvyljng56/L196Hd/ZTLlRoySvLX0JVSU+YXhhRjkKiMfmbOFGQaR4m2b5IxgYB2kYXpnvk/xfpVSKeJ54I1DBSVYEouQMqcj5uM7hng9Kns5Io0ZhC5KKw3EY2j5fmGT6EdOtUpTt8KCUKahdSu/Q1NBR49R2ssflnlGGAScc8du9dbXL6NI0mpD91hAqsrE5Jzn/AArqKm7e6sZQvbUKKKKCgooooAa4DRsDnBBHFRWcSw2kUaFiqjALDB/KpXz5bYODg4PpUdoHW1jEkgkcDlwcg0ATUUUUAFUNQ1rTtKdEvblYWcZUFScj8BV+ue8Ract3PDJ9mMrqpAIlZMDPsaumouXvbEyvbQkbxn4eU4bU4gfQq3+FCeMfD7nCalGx9Arf4VgP4YsbvEl1pzGTGOZn", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b1205bf1-4377-48ed-bab3-6d0c6838fe29&revisionId=21064c59-9d23-48e2-ba3b-c23f42e48603&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.3", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b1205bf1-4377-48ed-bab3-6d0c6838fe29&version=1.0.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b1205bf1-4377-48ed-bab3-6d0c6838fe29/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "234", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "forWoT", - "description": "![](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAlgCWAAD/2wBDAAYEBQYFBAYGBQYHBwYIChAKCgkJChQODwwQFxQYGBcUFhYaHSUfGhsjHBYWICwgIyYnKSopGR8tMC0oMCUoKSj/2wBDAQcHBwoIChMKChMoGhYaKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCj/wAARCAUHBkADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD6pooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArzz4/E/8Kr1cK7puaFCUYqcGZARkeoJr0OvPPj9/wAkt1X/AK6W/wD6PSgDCk+EHgZfs8UOgqZWiErvLfXAUD8JO5qeb4RfDu2WMz6HJ8wBLpd3LKPx8yvQZNPe4htpoXRXEIjYOu5WUj0qtNoMkkSRfaAUESx4Zc4x3HOBmsG6ibsZNzTdjif+FP8Aw6+1/ZhoczSDAJW6uSFJGRk7+Kr2vwg8DXF7LENCiRI3KlWv7nzMD+LHmYwa7/VxHZ3UF5d3UdvH5iqCqHe5wflyDznHpVdr2zvL+Mm+t5PKHnoIkO4rtzjd34IJHpT/AHj2Qe++hxi/Cb4bMWA0aQbQzAm7ucMB1x8/NLH8JvhtIjsNGlVUTzPmurkZX1Hz8101q+nReVCbq3DXEIaIbCWAccc5xzyOgzWppTQ6pard2Uu6PyTbjzIyMkEckH6UXq9UH7zqjh/+FRfDkW5lbQp1GQAGurkEk9MDzOaQ/CT4brAJTosgBbZt+13OQ3pjzM138WkSJBt88CRZBImFO1COwBPT8akj0xl2s8oaTzhMxAwD7ClzVOw05nDW/wAFfAE0QcaDKoPZru5U/l5lR3/wb+HllbPPNojhEHa8uCT6ADzK9RXhcVk69d6ckP2XUrlIfNGVy2DweordeY5S5Y3uedaZ8KfhxqBjWLQpkkdC4SS7uAcA45/eda0v+FIeAP8AoBt/4G3H/wAcrc8MjTIdTMVley30xjLb3YHYuenAHU11tNrsTRm5xu9zzOb4", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b1205bf1-4377-48ed-bab3-6d0c6838fe29&revisionId=7584565a-c2bd-4b52-a3a5-2f7c92ca7efb&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b1205bf1-4377-48ed-bab3-6d0c6838fe29&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b1205bf1-4377-48ed-bab3-6d0c6838fe29/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "235", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "forWoT", - "description": "![](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAlgCWAAD/2wBDAAYEBQYFBAYGBQYHBwYIChAKCgkJChQODwwQFxQYGBcUFhYaHSUfGhsjHBYWICwgIyYnKSopGR8tMC0oMCUoKSj/2wBDAQcHBwoIChMKChMoGhYaKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCj/wAARCAUHBkADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD6pooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArzz4/E/8Kr1cK7puaFCUYqcGZARkeoJr0OvPPj9/wAkt1X/AK6W/wD6PSgDCk+EHgZfs8UOgqZWiErvLfXAUD8JO5qeb4RfDu2WMz6HJ8wBLpd3LKPx8yvQZNPe4htpoXRXEIjYOu5WUj0qtNoMkkSRfaAUESx4Zc4x3HOBmsG6ibsZNzTdjif+FP8Aw6+1/ZhoczSDAJW6uSFJGRk7+Kr2vwg8DXF7LENCiRI3KlWv7nzMD+LHmYwa7/VxHZ3UF5d3UdvH5iqCqHe5wflyDznHpVdr2zvL+Mm+t5PKHnoIkO4rtzjd34IJHpT/AHj2Qe++hxi/Cb4bMWA0aQbQzAm7ucMB1x8/NLH8JvhtIjsNGlVUTzPmurkZX1Hz8101q+nReVCbq3DXEIaIbCWAccc5xzyOgzWppTQ6pard2Uu6PyTbjzIyMkEckH6UXq9UH7zqjh/+FRfDkW5lbQp1GQAGurkEk9MDzOaQ/CT4brAJTosgBbZt+13OQ3pjzM138WkSJBt88CRZBImFO1COwBPT8akj0xl2s8oaTzhMxAwD7ClzVOw05nDW/wAFfAE0QcaDKoPZru5U/l5lR3/wb+HllbPPNojhEHa8uCT6ADzK9RXhcVk69d6ckP2XUrlIfNGVy2DweordeY5S5Y3uedaZ8KfhxqBjWLQpkkdC4SS7uAcA45/eda0v+FIeAP8AoBt/4G3H/wAcrc8MjTIdTMVley30xjLb3YHYuenAHU11tNrsTRm5xu9zzOb4", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b1205bf1-4377-48ed-bab3-6d0c6838fe29&revisionId=7917e8bf-98ac-49e1-a49d-9238f61fba22&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b1205bf1-4377-48ed-bab3-6d0c6838fe29&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b1205bf1-4377-48ed-bab3-6d0c6838fe29/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "236", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "WorkerCard_Preprocessing", - "description": "The model is built to preprocess digitized worker cards. The model crops the\nworkercard in the image and performs morphological transformations to remove\nocculusions.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0&revisionId=88630572-3464-444b-9ed5-86bf4dde7c56&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "237", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "WorkerCard_Preprocessing", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0&revisionId=d0d8cf7e-7696-4d64-b064-744f26ba9f33&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b1dad9d4-2ccf-41a6-9a66-8b85fec80ba0/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "240", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "adios-train", - "description": " \n\nI-NERGY - TTP1 - ADIOS TRAIN MODEL\n\nTrains model for anomaly detection in power grid SCADA output. Given the alarm\nlabelled set, which can be extended using the labelling system described\nabove, we train a machine learning model to predict its category. The\navailable alarms are randomly split in half, and the first part is used as a\ntraining set and the latter as a test set, on which we evaluate the\nperformance.\n\nAIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-\ntraining-model\n\nAttribution\n\nThis project has received funding from the European Union's Horizon 2020\nresearch and innovation programme within the framework of the I-NERGY Project,\nfunded under grant agreement No 101016508\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b5664ace-53a0-4739-bf3d-8f549091f871&revisionId=0010242a-25ea-4ba2-b3fd-46f938004671&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b5664ace-53a0-4739-bf3d-8f549091f871&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b5664ace-53a0-4739-bf3d-8f549091f871/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "241", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "house-prices-model", - "description": "Prediction model of the House Price Prediction Pipeline.\n\nThe houseprice-prediction model trains with the dataset from the databroker.\nOnce trained, the model can then predict the sales price of houses for new\nunseen input data. It has a WebUI that displays the predicted sale price of\nthe house for corresponding inputs from the user.\n\n **Repository link: **\n\nPlease refer the following link for the houseprice-prediction code in the\nEclipse Graphene platform -\n\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b59939e2-76ef-4d82-b869-e96b89e6e175&revisionId=ae1f9926-f865-4467-8d56-b5e9a33fb193&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b59939e2-76ef-4d82-b869-e96b89e6e175&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b59939e2-76ef-4d82-b869-e96b89e6e175/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "244", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "sentiment-analysis-databroker", - "description": "The model is the databroker for the Sentiment Analysis pipeline.\n\nIt has a user interface(UI) that takes the query text from the user and\nconnects to the prediction model. The results can then be viewed on the\nPrediction model's UI.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b6adb7b2-d8f6-47c6-9702-d8a16338a8e1&revisionId=86d03e8a-619f-4f79-8759-10566671f01d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b6adb7b2-d8f6-47c6-9702-d8a16338a8e1&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b6adb7b2-d8f6-47c6-9702-d8a16338a8e1/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "246", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "SwabAI", - "description": "![](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAAHgCAIAAAC6s0uzAAAgAElEQVR4AezBC24c14JEwQztf89njGvUoBrNpkhJlD/PEb59+7at2lsq7Kbaa6i2odpPQ7UPQLUPQ7VHqFDtR6HahmpPUO0Jqh2o9jeAau/Ctmobqr0F1TZUewuqXVCh2hNUu8G2ahsqVNuwrdoTVNtQ7YIK1T4M1S6otmFbhWoHKlTbsK3aBdUOVNtQodqGXSpU2FFtQ7UD26pt2KXahgrbql2wrUKFahu2VdhWYUeFChWqbah2oNqBCrtUqLCt2gUVKlTYVm1DtQPVNmyrUKFCtQPbql2wrUKFXaod2FahQrUN1TZUqLZhl2obqm2oUKFCtQ3VDlSosK1CtRtU2LsqVKi2YZcK+1fDPgYV9hbsgh2O6tu3b9iGHb59+7abar9CtQ3VDuyo9gjVNmyrdqDahgrVnqDaE1TYVu2HoMKOCjsq7KhQ7QmqXVDtXaj2BNU+CRUqVPswVDtQ7UCFCtVuUGFHhW3VDuxSbUO1DduqbahQ7QbVNuxSbUO1DdsqVNil2oZdqh3YpdqGbRUqVNuwS4XdVKiwo8J+VIU9qrZhR4Ud1TbsXRX2t1dhR4V9T4V9XoW9pcL+firs90KFHdUOVDtQYU+qbah2wT6gwn5UhW2oUG1DtQPbqh3YE+wJtmE32OFSbfOnaj+h2oZqGypUO1DtXaj2AdhW7Qmqbaiwo9qBbdUuqPYW7FGFfUyFChUqVLvBtmoXVPsroNqBCtUOVHuCahdsq3ZBhV2qHdhR7UCFChV2U+1AhQrVLtilQrUbbKuwo0K1DbupsKPCtgo7qm2oUGFHhWob9pYKe1Jhlwq7qbAXKlTYkwp7rcI+o8I+psK2CvuACvs5Ffaowt5VYV+jwn6RCvtpqLah2luwo9qBaht2VNhWYVu1DdUO7FGF3VTYuyrsJ6Dahj2psAN7hF2wC7Y5Kmzzh11Q7V0Vdqmwm2oXVDtQodqGahuqbai2odqGCtU+DzsqVNtQbUOFao9QbcO+QIVqB7ZV2FFhW7UnqHag2hNUO1DtEaodqLah2mvYVqHCjmoHtlWotqHagWoXVDuwo9oNqm2oUO3AjgrbKuyotqHagQrbKmyrtmGXChV2VNhNtQ07KlTbUGFbhW3VNlTYpcIeVdiTCvt1KuyfrMI+r8L+ySrsV0C1CypUO1BhW4UK2yrsUu1AtQ07KuxSocKOChX2lgr7eqiwD8CeOCrswA7HDn/Yhmo/p9qGahuq3aDagR3VbrCt2oYKFao9QbUbVLug2oZqG6q9BdU2VNhfpEKFbdWeoNojVLvBtmrfg20VKlTbUG1DhQrbqm3Yowrbqm2osEu1G+xSbcMuFXZU27BH1Q7sptqBHRW2VahQbcOOCntSYTcVtlXYVmFfo8K+XoX9bVTYWyrsV6iwrcL+IhX2lgr7pVBhNxV2VNijCjuqbdjHVKiwrdqGPamwo9qGPaqwXw0VKlTYC9gNdmAX7MA2f9oPqbCj2tdDhWofg2oXVDtQ7S2osK3ahm0Vdqmw36Lau1DtM1DtCaod2KMK26pt2FFtQ7UD26pt2FZhjyrsUqHCLhV2VNhRYVuFPaq2ocKOagd2VKh2wW6qHai2ocK2CrtUO7BLhd1U2Lsq7IdU2A+psJsK2yrspsK+p8K+TIV9TIX9w1XYT0O1DduqbdhRoUK1DdU2VNtQYVu1CyrsqFDtwLZqG3apsA+osNcq7DOwrdoT7F3YBRV2YIdjhz/thQoVK", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b70ca418-5cdc-4fc2-b166-5f48fd44605e&revisionId=0d853d81-62ad-4081-9750-3c57dbcb6c3c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b70ca418-5cdc-4fc2-b166-5f48fd44605e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b70ca418-5cdc-4fc2-b166-5f48fd44605e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "247", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "OptiRL", - "description": "Adaptive optimization model for Electric Discharge Machining.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b810ae05-a50e-4dd6-80ff-02384d56ca04&revisionId=257af2ec-9e0f-405d-852e-a6c7b8f73532&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b810ae05-a50e-4dd6-80ff-02384d56ca04&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b810ae05-a50e-4dd6-80ff-02384d56ca04/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "251", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "adios-label-extend", - "description": "I-NERGY - TTP1 - ADIOS LABEL EXTEND MODEL\n\nThis model extends train model dataset labels using one hot encoding and\nclosest distance matching.\n\nThe label extension mechanism uses similarity between alarms to associate each\nunknown alarm with its most similar known one. We pick a reduced portion of\nthe overall dataset (50k alarms) to extend the training set. The features of\nthe dataset are mainly string fields, except for the Priority file, which is\nnumerical. The similarity between each two alarms is measured in terms of the\nnumber of different features that they present.\n\nAIOD link: https://www.ai4europe.eu/research/ai-catalog/adios-i-nergy-label-\nextend\n\nAttribution\n\nThis project has received funding from the European Union's Horizon 2020\nresearch and innovation programme within the framework of the I-NERGY Project,\nfunded under grant agreement No 101016508\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b9748aa3-9340-4f27-a7b9-59cea5d80d3c&revisionId=4613434d-2ef5-4e60-9fb9-26382dafb97c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b9748aa3-9340-4f27-a7b9-59cea5d80d3c&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b9748aa3-9340-4f27-a7b9-59cea5d80d3c/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "252", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Multimodal_AI", - "description": "The proposed model is a draft solution for the challenge titled \"Enhancing\nClinical AI workflow\". The model is based on multi-modality which takes in\nmulti modal data features after translating, co-aligning and fusion. The main\nobjective is to integrate the model into the clinical decision support system\n.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=b97a16cd-e475-4f5f-83e5-f1d042a3772a&revisionId=34816a52-7ba9-4890-8203-c0a6dd5fe270&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=b97a16cd-e475-4f5f-83e5-f1d042a3772a&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/b97a16cd-e475-4f5f-83e5-f1d042a3772a/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "257", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "rp4pl-rul", - "description": "This model was developed part of the AI4EU program. RP4PL - RUL (Reliable\nPrediction for Pump Lifetime - Remaining useful lifetime) is used to predict\nthe remaining useful lifetime for manufactured pumps. It takes as input final\nquality test data from the pump manufacturing process and outputs a lifetime\nprediction. The model pipeline contains data transformation and feature\ninference. It is constructed using a random forest regression algorithm, along\nwith a feature selection step to reduce the set of features to a smaller\nsubset.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=bb45c963-205b-4d3b-aad4-9968dce77ee5&revisionId=cd27d33d-3a04-4cb1-be7c-b36522d0f8e1&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=bb45c963-205b-4d3b-aad4-9968dce77ee5&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/bb45c963-205b-4d3b-aad4-9968dce77ee5/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "259", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "SWE_predictor", - "description": "A regression ML model that provides a Snow Water Equivalent indicator from\nEarth Observation data and data from climate models (ERA5) in river watersheds\nin the Alpine area. This model has been developed by Amigo srl\n([https://amigoclimate.com](https://amigoclimate.com/)) for SnowPower, an\ninnovative Software-as-a-Service to assist hydropower operators that is part\nof the[ I-NERGY 1st Open call](https://www.ai4europe.eu/ai-\ncommunity/projects/i-nergy). In particular, this model is at the core of the\nSnow module of SnowPower.Details about data input requirements and model\nperformance are provided in the related entry in the AIOD Catalog\n([HERE](https://www.ai4europe.eu/research/ai-catalog/description-and-setup-ml-\nmodels-estimation-snow-water-equivalent-swe-and-runoff)).\n\n \n\n \n\n \n\n \n\n \n\nImage: flaticon.com\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=be8d1f46-c311-4578-a6cd-69dc8d3fa33b&revisionId=c310c554-9bfa-4146-9e21-6fff647f5abe&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=be8d1f46-c311-4578-a6cd-69dc8d3fa33b&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/be8d1f46-c311-4578-a6cd-69dc8d3fa33b/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "261", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4eu-kewot", - "description": "The main objective of this work is to deal with the semantic interoperability\nchallenge, where several entities exist in cross-domain ontologies describing\nthe same concept. Our main contributions can be summarized as follow:\n\n\u00b7 A thorough analysis of several ontologies belonging to two domains\n(city and mobility) was conducted. The ontological entities were enriched with\nGoogle embeddings and plotted in 2-dimensions, revealing concepts of high\nsimilarity, not only in terms of semantic but also of syntactic similarity. \n\n\u00b7 An AI approach was followed in order to automatically extract the\ntopics existing in ontologies of different domains. A detailed evaluation of\nthe AI method was performed, showing qualitative and promising results. A\nvisualization tool was deployed for easier exploration and contrast of the\ntopics.\n\n\u00b7 A Search Mechanism was prepared which takes as input the detected (or\nany other provided) topics T and an ontology O and returns as output a concept\no \\in O which is the most similar to a topic t \\in T\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c4cfb4bb-4ac6-4303-acd2-8eb3664c4138&revisionId=8676b4dc-21c3-4d65-b13b-8089ecbb33fc&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c4cfb4bb-4ac6-4303-acd2-8eb3664c4138&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c4cfb4bb-4ac6-4303-acd2-8eb3664c4138/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "262", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4eu-kewot", - "description": "The main objective of this work is to deal with the semantic interoperability\nchallenge, where several entities exist in cross-domain ontologies describing\nthe same concept. Our main contributions can be summarized as follow:\n\n\u00b7 A thorough analysis of several ontologies belonging to two domains\n(city and mobility) was conducted. The ontological entities were enriched with\nGoogle embeddings and plotted in 2-dimensions, revealing concepts of high\nsimilarity, not only in terms of semantic but also of syntactic similarity. \n\n\u00b7 An AI approach was followed in order to automatically extract the\ntopics existing in ontologies of different domains. A detailed evaluation of\nthe AI method was performed, showing qualitative and promising results. A\nvisualization tool was deployed for easier exploration and contrast of the\ntopics.\n\n\u00b7 A Search Mechanism was prepared which takes as input the detected (or\nany other provided) topics T and an ontology O and returns as output a concept\no \\in O which is the most similar to a topic t \\in T\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c4cfb4bb-4ac6-4303-acd2-8eb3664c4138&revisionId=d97c57a2-7f37-40ec-8ffd-b45f2f69c297&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c4cfb4bb-4ac6-4303-acd2-8eb3664c4138&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c4cfb4bb-4ac6-4303-acd2-8eb3664c4138/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "263", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "DAISY", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e&revisionId=004cf7bb-105e-48fb-9bf6-781fce08919c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "264", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "DAISY", - "description": "Combination of our expertise in vibration analysis with AI models that will\ncontribute to the diagnosis of rotating machinery\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e&revisionId=ee78edad-6fa5-456f-8bd1-6cc82fcffb33&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c556e9b0-7b9e-4aff-9e8c-baaa2a52bb8e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "265", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "CODE-CRC", - "description": "The proposed solution is based on fine-tuned with Spanish medical texts of the\npre-trained BERT family language models (transformers clinicalBERT and\nmultilingualBERT). The designed text-based classification service predicts\nICD-10 codes for clinical text in Spanish for the Colorectal cancer (CRC) and\nassociated diagnoses. The service output contains the ICD-10 \u201ccategory\u201d (3\nsign) codes that describe the basic manifestations of injury or sickness for\n158 types of diseases related to CRC. The prediction models for ICD-10 codes\nare with high accuracy: clinicalBERT: 0.794 AUC ROC score and\nmultilingualBERT: 0.806 AUC ROC score. The service allows the user to switch\nbetween two models (clinicalBERT and multilingualBERT) and to set the\nparameter N for top N diagnoses according to the specific needs.\n\n![](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABOAAAAFECAYAAACODkEvAAAACXBIWXMAAAsTAAALEwEAmpwYAAAF8WlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS42LWMxNDggNzkuMTY0MDM2LCAyMDE5LzA4LzEzLTAxOjA2OjU3ICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6cGhvdG9zaG9wPSJodHRwOi8vbnMuYWRvYmUuY29tL3Bob3Rvc2hvcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgMjEuMCAoV2luZG93cykiIHhtcDpDcmVhdGVEYXRlPSIyMDIxLTExLTI2VDE3OjIwOjI4KzAyOjAwIiB4bXA6TW9kaWZ5RGF0ZT0iMjAyMS0xMS0yNlQxNzoyNDoxNSswMjowMC", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c592ffcf-afda-47c7-a675-4316ecf09afe&revisionId=b585179c-c483-49aa-b975-eb5c52b91930&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c592ffcf-afda-47c7-a675-4316ecf09afe&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c592ffcf-afda-47c7-a675-4316ecf09afe/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "266", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AudioTopicExtraction", - "description": "This model extracts the topics from an audio segment.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c8b8a888-ae48-41d4-8476-f2ca6851daa7&revisionId=5eacd881-de83-42f4-bf5a-6ca728f4f082&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c8b8a888-ae48-41d4-8476-f2ca6851daa7&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c8b8a888-ae48-41d4-8476-f2ca6851daa7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "267", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "FaceAI", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=2624aea0-1d51-48c8-9043-e64a928267a5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.3", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "268", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "FaceAI", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=6ede87b0-4f0c-4711-9aeb-9125806f7d7f&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "269", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "FaceAI", - "description": "This model provides a solution for swab robot finding the position of the\nmouth, considering the MDR safety regulations. The position finding alogithm\nis based on deep learning and AI.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=93e8df6b-6226-4674-9963-6d0aa6ddcc3c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "270", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "FaceAI", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=b3116c34-ce39-46c3-9cc7-119c13a85ebf&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "271", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "FaceAI", - "description": " \n\n \n\n \n\n \n\n \n\n![](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAAHgCAIAAAC6s0uzAAAgAElEQVR4AezBC24c14JEwQztf89njGvUoBrNpkhJlD/PEb59+7at2lsq7Kbaa6i2odpPQ7UPQLUPQ7VHqFDtR6HahmpPUO0Jqh2o9jeAau/Ctmobqr0F1TZUewuqXVCh2hNUu8G2ahsqVNuwrdoTVNtQ7YIK1T4M1S6otmFbhWoHKlTbsK3aBdUOVNtQodqGXSpU2FFtQ7UD26pt2KXahgrbql2wrUKFahu2VdhWYUeFChWqbah2oNqBCrtUqLCt2gUVKlTYVm1DtQPVNmyrUKFCtQPbql2wrUKFXaod2FahQrUN1TZUqLZhl2obqm2oUKFCtQ3VDlSosK1CtRtU2LsqVKi2YZcK+1fDPgYV9hbsgh2O6tu3b9iGHb59+7abar9CtQ3VDuyo9gjVNmyrdqDahgrVnqDaE1TYVu2HoMKOCjsq7KhQ7QmqXVDtXaj2BNU+CRUqVPswVDtQ7UCFCtVuUGFHhW3VDuxSbUO1DduqbahQ7QbVNuxSbUO1DdsqVNil2oZdqh3YpdqGbRUqVNuwS4XdVKiwo8J+VIU9qrZhR4Ud1TbsXRX2t1dhR4V9T4V9XoW9pcL+firs90KFHdUOVDtQYU+qbah2wT6gwn5UhW2oUG1DtQPbqh3YE+wJtmE32OFSbfOnaj+h2oZqGypUO1DtXaj2AdhW7Qmqbaiwo9qBbdUuqPYW7FGFfUyFChUqVLvBtmoXVPsroNqBCtUOVHuCahdsq3ZBhV2qHdhR7UCFChV2U+1AhQrVLtilQrUbbKuwo0K1DbupsKPCtgo7qm2oUGFHhWob9pYKe1Jhlwq7qbAXKlTYkwp7rcI+o8I+psK2CvuACvs5Ffaowt5VYV+jwn6RCvtpqLah2luwo9qBaht2VNhWYVu1DdUO7FGF3VTYuyrsJ6Dahj2psAN7hF2wC7Y5Kmzzh11Q7V0Vdqmwm2oXVDtQodqGahuqbai2odqGCtU+DzsqVNtQbUOFao9QbcO+QIVqB7ZV2FFhW7UnqHag2hNUO1DtEaodqLah2mvYVqHCjmoHtlWotqHagWoXVDuwo9oNqm2oUO3AjgrbKuyotqHagQrbKmyrtmGXChV2VNhNtQ07KlTbUGFbhW3VNlTYpcIeVdiTCvt1KuyfrMI+r8L+ySrsV0C1CypUO1BhW4UK2yrsUu1AtQ07KuxSocKOChX2lgr7eqiwD8CeOCrswA7HDn/Yhmo/p9qGahuq3aDagR3VbrCt2oYKFao9QbUbVLug2oZqG6q9BdU2VNhfpEKFbdWeoNojVLvBtmrfg20VKlTbUG1DhQrbqm3Yowrbqm2osEu1G+xSbcMuFXZU27BH1Q7sptqBHRW2VahQbcOOCntSYTcVtlXYVmFfo8K+XoX9bVTYWyrsV6iwrcL+IhX2lgr7pVBhNxV2VNijCjuqbdjHVKiwrdqGPamwo9qGPaqwXw0VKlTYC9gNdmAX7MA2f9oPqbCj2tdDhWofg2oXVDtQ7S2osK3ahm0Vdqmw36Lau1DtM1DtCaod2KMK26pt2FFtQ7UD26pt2FZhjyrsUqHCLhV2VNhRYVuFPaq2ocKOagd2VKh2wW6qHai2ocK2CrtUO7BLhd1U2Lsq7IdU2A+psJsK2yrspsK+p8K+TIV9TIX9w1XYT0O1DduqbdhRoUK1DdU2VNtQYVu1CyrsqFDtwLZqG3apsA+osNcq7DOwrdoT7", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=cf6e5a3c-21ec-4abf-a584-bd354eab5fa5&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.5", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.5", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "272", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "FaceAI", - "description": "This model provides a solution for swab robot finding the position of the\nmouth, considering the MDR safety regulations. The position finding alogithm\nis based on deep learning and AI.\n\nThe docker image is based on Python 3.9 slim buster. Scikit-learn and pandas\nare installed.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&revisionId=e0e410f4-12d8-4fec-9113-3b01be44ad62&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.4", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=c90fd53d-8e1e-4700-9b20-4b8318dd6497&version=1.0.4", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/c90fd53d-8e1e-4700-9b20-4b8318dd6497/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "274", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "iSolutions", - "description": "The proposed model is used detection of the teeth and the lips to\nidentification position the mouth detects. The model is including a decision-\nmaking process for robots in a medical context.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cb4a33c5-a9a6-4432-bd49-10336956b6b0&revisionId=8e9f567a-8231-4f59-9aef-bfa8e6b79fc0&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=cb4a33c5-a9a6-4432-bd49-10336956b6b0&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/cb4a33c5-a9a6-4432-bd49-10336956b6b0/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "275", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "MusicDetection", - "description": "With the software tool for automatic detection of music in combination with\ndetection of speech sequences, Fraunhofer IDMT offers a highly effective\nsolution to determine the exact amount of music and speech in radio and TV\nprograms. The tools can be used to optimize broadcasting programs or provide\naccurate accounting for copyright agencies.\n\nLess work: Using Fraunhofer IDMT\u2019s software tools, the amount of music and\nspeech in radio and TV programs no longer needs to be determined by means of\ntedious manual work (typically personnel reading through audio content lists).\nThe tool is able to detect and measure general audio categories (music,\nspeech, music and speech combined, other content) both in live streams and in\nstored digital audio files.\n\nEasy integration: The tools are scalable and can easily be integrated with\nstandard workflows and components. It can be used in production and live\nstreaming environments, both online and offline.\n\nEasy data export: The tools easily integrate with content management systems.\nFor data output, users may choose between XML files, cue sheets, or other\nstandard data export formats.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=cd6d1c1b-896e-4c62-9312-14416d5d411f&revisionId=b836fd7f-e5bf-4879-8d1a-c4ff5df393a9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=cd6d1c1b-896e-4c62-9312-14416d5d411f&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/cd6d1c1b-896e-4c62-9312-14416d5d411f/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "276", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Idiap_BEAT_Handwritten_Digit_Recognition_-_Multiclass_Logistic_Regressor_trained_on_M-NIST", - "description": "This algorithm contains a logistic regression model trained on the MNIST\ndatabase.It takes as input images of digits and outputs the classification\nlabel of images.\n\nTo test drive it, the MNIST data broker can be used.This model does not\nrequire any configuration and thus can be used as is.\n\nThe reference experiment on the BEAT platform is\n[amohammadi/amohammadi/mnist_simple/1/mnist1](https://www.idiap.ch/software/beat/platform/experiments/amohammadi/amohammadi/mnist_simple/1/mnist1/)\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ce1b6792-889d-46cf-9529-3215802f729c&revisionId=eb3669aa-0889-42e9-a89f-7dab1b12baf1&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ce1b6792-889d-46cf-9529-3215802f729c&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ce1b6792-889d-46cf-9529-3215802f729c/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "280", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "news-classifier", - "description": "**Overview:**\n\nThe classifier module is the core of the entire news training pipeline. It is\nresponsible for the following activities,\n\n1.Training process: Upon receiving the training parameters from the trainer\nnode, the classifier node starts the training process.\n\n2.Saving the trained models: Upon successful training, the models are saved in\nboth the h5 and onnx format available in the shared folder.\n\n3.Classifying the results: The Reuters dataset newswires are labeled over 46\ntopics. The test sequences are thereupon classified based on these topics.\n\n **Repository link:**\n\nPlease refer the following link for the code that represents the trainer\nmodule in the Eclipse Graphene platform -\n\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d05e44a5-d3e6-4730-b8ab-dd7a23fd52d4&revisionId=28719994-c987-4ce9-b88f-4f9d5e4129fc&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d05e44a5-d3e6-4730-b8ab-dd7a23fd52d4&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d05e44a5-d3e6-4730-b8ab-dd7a23fd52d4/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "281", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "viume-pic2text", - "description": "The proposed model will address the detection of stamps and named entities\nfrom the images. To do that, the structure is split into two main modules.\n1)Extractor 2)Analyzer. As Extractor, different models based on convolutional\nand recurrent neural networks will be trained to detect the stamps,\nsignatures, and text. As Analyzer,\n\nThe trained NLP model will crop the document and use a custom trained model to\nextract the relevant information and all the relations inside the document.\nThe extracted information from the document will be assigned with a unique ID\nand the corresponding columns will be filled with the extracted data.\n\n \n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d0882778-0ca2-4028-b90c-6c91da657817&revisionId=c2fe1abd-af13-4e90-a176-a44fdc5e4912&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d0882778-0ca2-4028-b90c-6c91da657817&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d0882778-0ca2-4028-b90c-6c91da657817/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "283", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AI_REGIO_DSS4TB", - "description": "This module is an intelligent troubleshooting system that is able to identify\nthe component that is most probably damaged after a series of closed-ended\nquestions answered by the operator. Such systems are built upon a given\nknowledge, which is an information matrix that represents the relationship\nbetween the possible symptoms and the failure components. The probability\nevolution is mainly based on the Bayes theorem which can elaborate the\nconditional probability. It consists of computing the likelihood of a general\nevent occurrence based on the prior probability and the new information\nprovided by each answer. More specifically, each answer allows updating the\nprobability associated with each failure, based on which the next question\nwill be selected.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d1ce6215-8102-4b46-b495-5907bea57ba1&revisionId=d43ac2fe-3d60-4198-a664-7eed1ef2d152&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d1ce6215-8102-4b46-b495-5907bea57ba1&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d1ce6215-8102-4b46-b495-5907bea57ba1/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "285", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "sentiment-analysis-model", - "description": "The model is part of the Sentiment Analysis pipeline.\n\nIt analysis the sentiment of the query text sent by the databroker and returns\nthe prediction. This prediction can also be viewed on the user interface.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d2cfc140-0d61-41fb-86ef-fbe2f192c4d2&revisionId=cfec1423-8627-4669-92a1-ca5497743b70&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d2cfc140-0d61-41fb-86ef-fbe2f192c4d2&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d2cfc140-0d61-41fb-86ef-fbe2f192c4d2/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "286", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "INERGY_Heat_Decision", - "description": "This service is based on a decision support system (DSS) implemented in\ncontext of I-NERGY project. The overall vision of I-NERGY is to promote AI in\nthe energy sector by delivering:\n\n * An open modular framework for supporting AI-on-Demand in the energy sector by capitalising on state-of-the-art AI, IoT, semantics, federated learning, analytics tools.\n * Financing support through Open Calls to third party SMEs for new energy use cases and technology building blocks validation, as well as for new AI-based energy services development, fully aligning to AIoD requirements.\n\nThis is a DSS service for for help in the decision on which energy source (for\nheat generation) use in a Spanish Hospital in hourly basis. The data was\nprovided by VEOLIA, from the hospital complex in C\u00f3rdoba (Spain). The hospital\ncomplex have a district heating network. The layout of this district heating\nnetwork is a ring system composed by two independent rings for heating and\ncooling. This ring just provides energy for heating and Domestic Hot Water\n(DHW).\n\nApart from being a district heating network, this system is complex due to the\ndifferent production sources used for heating and cooling. In this facility\nheat, cold and steam are produced by using different sources.\n\nFor more information on how to use the service, please see Documents section.\n\n _The project leading to this service has received funding from the European\nUnion\u2019s Horizon 2020 research and innovation programme under grant agreement\nNo 101016508_\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d30263f9-9902-407d-b0c8-f389b541e98d&revisionId=97e3b739-a584-4b83-a25b-43e6a0bfaf39&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d30263f9-9902-407d-b0c8-f389b541e98d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d30263f9-9902-407d-b0c8-f389b541e98d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "288", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AudioSpeechToTextEnglish", - "description": "This model converts an audio segment to english text.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5418d91-8eda-42ff-9348-570e5ba0a110&revisionId=ef9a485f-d31d-4f1b-be03-205d112a6b59&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5418d91-8eda-42ff-9348-570e5ba0a110&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5418d91-8eda-42ff-9348-570e5ba0a110/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "293", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "PII_Detector", - "description": "PII Detector automatically detects personally identifiable information in\nunstructured files (documents or images).The face detection model analyzes an\nimage file to find faces. The method returns a list of items, each of which\ncontains the coordinates of a face that was detected in the file.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d5ffc67f-b5ef-42c6-a97b-238546af935a&revisionId=b4adbc99-9aec-4ec1-bb58-abbd40f5b75b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d5ffc67f-b5ef-42c6-a97b-238546af935a&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d5ffc67f-b5ef-42c6-a97b-238546af935a/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "295", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "openpose", - "description": "Openpose is a Real-time multi-person keypoint detection model for body, face,\nhands, and foot estimation, originally developed by CMU but includes several\nupdates.\n\nOpenpose-AI4EU is a component that uses an improved version (Mobilenet v2) and\ncan be included in pipelines built with AI4EU experiments or can run\nstandalone as a dockerized grpc service. For that we include test scripts. The\ncomponent input is one image and outputs the parameters of all body keypoints\ndetected (index of the skeleton keypoin, x and y coordinates in the image and\nthe confidence score).\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d7e0ffc8-afcd-42a3-8d8a-01ea395d1303&revisionId=2beda89e-c87e-416c-980e-fe4908f8c87d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d7e0ffc8-afcd-42a3-8d8a-01ea395d1303&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d7e0ffc8-afcd-42a3-8d8a-01ea395d1303/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "296", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "TEK_THOR_OPTIMIZATION", - "description": "**AI4EU - THOR **THOR solution consists in a Hybrid optimization solution to\nmake the right decision on the amount spare parts in stock, considering past\nsales and forecasts. The purchase decision considers as input information\ncurrent stock status, production needs, production forecast, sales forecast,\nvariability Price of stock material and several restriction parameters.\n\n **Optimization model**. EDA and Genetic search have been implemented to\nminimizing the total cost of spare parts procurement as well as covering cash-\nflow restrictions and production needs. This optimization provides as a result\nthe procurement plan\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d8745d72-f4c0-49f6-8d20-514e8ad74f86&revisionId=644482dc-abd6-4805-b46a-4cd98192ae1c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d8745d72-f4c0-49f6-8d20-514e8ad74f86&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d8745d72-f4c0-49f6-8d20-514e8ad74f86/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "297", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Daisy_software", - "description": "Even though maintenance of rotating machines, such as motors, pumps and fans\nthrough Vibration Monitoring (VM) has been a proven process, it requires an\nexperienced 3rd party service engineer to attend the vessel onboard for\nvibration data collection and onshore vibration analysis and machinery\ncondition reporting and that attendance onboard in many cases is not feasible.\n\nTo give a response to this problem, Daisy allows to apply AI to the large\namount of mechanical vibration data of different assets, in order to build\ncomputational models that help in the classification and early detection of\nthe faults that the rotating machinery of ships could have.\n\nWith this software, the user can load vibration data, apply signal processing\ntechniques and train machine learning (ML) models with no prior programming\nexperience on artificial intelligence (AI) and signal processing knowledge.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=d89b25d2-fcf8-48ae-9858-3f32cf047d8d&revisionId=ec81a5a2-0f51-4254-94a7-b80e92c6560a&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=d89b25d2-fcf8-48ae-9858-3f32cf047d8d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/d89b25d2-fcf8-48ae-9858-3f32cf047d8d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "299", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Time-prediction-for-example-manufacturing", - "description": "This module provides a manufacturing time prediction for an example\nmanufacturing process.\n\nThe example manufacturing process is a conveyor belt machine, that takes a\nnumber of inputs to process. There are 2 processing stations and depending on\nthe input item only either or both of these stations can be used to process\nthe item.\n\nThe model was trained on simulated data.\n\n## Input\n\nThe input is a list of products characterized by a type (3 different colors)\nand the time of entering the process.\n\n## Model\n\nFor the prediction a stacked LSTM model with spatial dropout is used.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&revisionId=6385bf2f-ef0a-4481-a13c-35ef3859a82e&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "300", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Time-prediction-for-example-manufacturing", - "description": "This module provides a manufacturing time prediction for an example\nmanufacturing process.\n\nThe example manufacturing process is a conveyor belt machine, that takes a\nnumber of inputs to process. There are 2 processing stations and depending on\nthe input item only either or both of these stations can be used to process\nthe item.\n\nThe model was trained on simulated data.\n\n## Input\n\nThe input is a list of products characterized by a type (3 different colors)\nand the time of entering the process.\n\n## Model\n\nFor the prediction a stacked LSTM model with spatial dropout is used.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&revisionId=6faffcf2-e451-4973-b768-cfa4bf01469b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "301", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Time-prediction-for-example-manufacturing", - "description": "This module provides a manufacturing time prediction for an example\nmanufacturing process.\n\nThe example manufacturing process is a conveyor belt machine, that takes a\nnumber of inputs to process. There are 2 processing stations and depending on\nthe input item only either or both of these stations can be used to process\nthe item.\n\nThe model was trained on simulated data.\n\n## Input\n\nThe input is a list of products characterized by a type (3 different colors)\nand the time of entering the process.\n\n## Model\n\nFor the prediction a stacked LSTM model with spatial dropout is used.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&revisionId=d42e8f33-0bb7-407b-b72d-9fde9a276bd7&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/db5eb7cf-db87-4f86-bf8c-9cbc82d1b3ac/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "303", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "lane-detector", - "description": "# AI4EU Pluto lane-detector\n\nThe model runs a lane detector over an image.\n\n \n\nThe image can be send as bytes, and the result will be the corresponding bytes\nfor the keypoints detected along with the shape of the keypoints array. This\nhelps reconstruct the multidimensional array from the bytes returned. The same\nfor the `results` which is the original image overlayed with the detected\nkeypoints, the `results_shape` provides the shape to reconstruct the array.\n\n \n\n### Example\n\n \n\n```python\n\n \n\nimport grpc\n\nfrom PIL import Image\n\nimport numpy as np\n\n \n\nstart_ch = timer()\n\nport_addr = 'localhost:8061'\n\n \n\n# open a gRPC channel\n\nchannel = grpc.insecure_channel(port_addr)\n\n \n\nfilepath = \"assets/test.png\"\n\n \n\nwith open(filepath, 'rb') as f:\n\n content = f.read()\n\n \n\nrequestPrediction = model_pb2.Features(img=content)\n\n \n\nresponsePrediction = stub.make_prediction(requestPrediction)\n\n \n\nprint('The prediction is :', responsePrediction.results)\n\n \n\n \n\n# Recreate image:\n\n \n\nimg_shape = [*responsePrediction.results_shape]\n\nnp_img = np.frombuffer(responsePrediction.results,\ndtype=np.uint8).reshape(img_shape)\n\n \n\nimage = Image.fromarray(np_img).convert('RGB'))\n\n```\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dc34b9b5-3990-41fb-93b7-1a56cf1016cc&revisionId=23c1693f-08f7-4175-9b72-f2d999b24a98&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=dc34b9b5-3990-41fb-93b7-1a56cf1016cc&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/dc34b9b5-3990-41fb-93b7-1a56cf1016cc/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "305", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4eu-competences", - "description": "This tool computes the match between text and concepts from ESCO based on the\nESCO model itself and the FastText computation model. Trustworthy is ensured\nin part by these models and their developers. Given a free text description,\nand the weight parameters, the service produces a set of matches that\nrepresent the corresponding ESCO competence (text and URI) and the similarity\nmeasure.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dc67374a-0a1c-4477-86b2-9db8f0a1faed&revisionId=977872e8-b343-4fa4-b5fe-31afc77c9e05&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=dc67374a-0a1c-4477-86b2-9db8f0a1faed&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/dc67374a-0a1c-4477-86b2-9db8f0a1faed/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "306", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "medical_notes_classification", - "description": "Our solution is a NLP classification model fine-tuned on Spanish free text\n(medical notes) to predict IDC-10 codes. We will start from a transformer\nmodel trained on a Spanish corpus, such as BETO, and fine-tune it on general\nSpanish medical corpus (research paper or anonymized data delivered by Amadix\nand its partners), with pre-training tasks such as Masked Language Modeling.\nWe will then fine-tune it on free-text data provided by AMADIX (medical notes)\nin order to predict the target ICD-10 codes.\n\nWe will also create a prediction explanation module to our product, in order\nfor the end user to be able to understand the model prediction by visualizing\nthe words in the input free text that push the model toward the predicted\nICD-10 code. In order to do that, we will use SHAP values, which have\ndemonstrated their performance for such tasks.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=dd0d9853-2060-44d3-94c7-208a0423609d&revisionId=19a88d46-b9df-47e2-bb53-38ac4fe02eec&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=dd0d9853-2060-44d3-94c7-208a0423609d&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/dd0d9853-2060-44d3-94c7-208a0423609d/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "307", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AudioPunctuationEnglish", - "description": "This model add english punctuation to an audio segment.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ddf8de5f-5391-48be-a457-bce86757f8ba&revisionId=1846bb25-f697-4091-ba13-79f0ebb3147c&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ddf8de5f-5391-48be-a457-bce86757f8ba&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ddf8de5f-5391-48be-a457-bce86757f8ba/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "308", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AI4IMS", - "description": "AI-based Inventory Management System, AI4IMS, integrates simulation,\noptimization and search algorithms in an advanced inventory management\napproach for an adaptive and dynamic response.\n\nFirstly, we acquire and cleanse the required data to obtain a reliable dataset\nincluding product prices and demand forecasting. As a result of the\nforecasting, the uncertainty associated with material resources prices and\ndemand is also characterized.\n\nSecondly, we capture the production plant and procurement system in a\nsimulation environment.\n\nThirdly, a direct randomized sampling method generates alternative scenarios\nfor handling the uncertainty characterized during the forecasting step.\n\nNext, a simulation-based optimization system finds an improved procurement\npolicy within the solution space.\n\nFinally, a variability analysis generates alternative solutions, which are\nprovided for decision-maker support.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=df50bc0b-e499-4249-b468-b94c0a1cf9fc&revisionId=ee9a1418-2876-414f-982f-84960e811a6d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=df50bc0b-e499-4249-b468-b94c0a1cf9fc&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/df50bc0b-e499-4249-b468-b94c0a1cf9fc/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "311", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AI-Panel", - "description": "The following notebook shows a sample model highlighting a preliminary step\ntaken towards supporting pharmaceutical & nutraceutical drug discovery based\non qualitative compound properties and customer requirements. The goal is to\ncreate a sophisticated predictive model capable of providing\nsuggestions/predictions regarding compounds that have specific therapeutic\nadvantages as well as their interaction with other compounds. The current\nmodel utilizes an exemplary dataset that contains for each\n**substance/compound** , a set of quantitative features describing the\ncompound's efficacy. It is envisaged that the dataset will comprise multi-\nmodal features such as physiochemical parameters, drug status, regulatory &\nsafety data, and company-internal data. This **numeric,** **textual, and image\ndata** is extracted and consolidated from open access chemical\ndataspaces/databases. This diversity of data will facilitate the design of a\npredictive model that filters drugs and related compounds based on product\ndevelopment and customer needs.\n\n![](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAASwAAAD9CAQAAACsYYLAAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAJiS0dEAP+Hj8y/AAAACXBIWXMAAABIAAAASABGyWs+AAAib0lEQVR42u3deXhdVbmA8d85mZOmTdo0TToPtIUOtIAgk4LMQ0UGkasiKoqAyFUEQVEURVC4clGRQRzulSsCgiAgMg9lHsvYlpaWQtt0SJs2bZJmOMlZ948eY1ILTW3Scb19nj45Jyd7ne/b71577bX3WisRGmSLRLqX1mzZcmMeIt1NMqYgEsWKRLEiUaxIJIoV2Tb4t7oa0lZr0WyJWRqt0aIWC6xs/0SWoYrl6C1XgVLjFctWpGirTkZag0bNar2uyRrNVkmrsUjIfCJhoH6S+shTKN+u+slWoGgrP0IbNGhV5w2rNGqxWspq86XbP9HPIJTIVajAWBXy5Or9b8fVRbEaLbHa2xabodoqi7Rp06BeUgJptLWnf61aCQlJaUFCH3mSCg1WaJQRhhusVD85WzjhzaqtNs9CsyxUp0qLNo1WS0hm4gra1olL++96y5cl12DFhhhtqGF6K5e3heNKqbHSfPPNM1e9Ko3Smq0S2vdK57gSsjJxBWm9FMmSZZDeyo030CglyhV0+RskQ", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e1806340-8d4e-499f-9dd6-0bbfec2d9c2e&revisionId=9b7f8343-b181-4b7b-af8b-df5133e97005&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e1806340-8d4e-499f-9dd6-0bbfec2d9c2e&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e1806340-8d4e-499f-9dd6-0bbfec2d9c2e/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "312", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Tensorflow-Critical-Part-Classifier", - "description": "This is a Tensorflow model created for critical part prediction. Given a set\nof features that describe the production line characteristics or factory\nconditions, the model we have built predicts whether a particular component\npart is critical or not to the supply chain. The end goal is the optimization\nof the stock management.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e1a73166-03a3-4b93-a785-28d0591d7271&revisionId=3ef45d82-30f5-4f98-b9c8-44afe80b44a9&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e1a73166-03a3-4b93-a785-28d0591d7271&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e1a73166-03a3-4b93-a785-28d0591d7271/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "314", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "EntityRecognizer", - "description": "The entity recognizer is a deep learning-based solution that takes a text\ndocument as input and returns a list of instances of pre-defined entities\n(Person, Location, Organization, Miscellaneous).\n\n \n\nIt uses bidirectional LSTM networks to generate informative word\nrepresentations that capture the contextual dependencies between words in a\nsentence. Additionally, a CRF layer is added on top for a higher tagging\naccuracy. The models have been built using FlairNLP, a PyTorch-based NLP\nframework.\n\n \n\nThis tool includes a multilingual NER model supporting English, German and\nDutch.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&revisionId=41df686d-9fa3-4104-996f-fa926332adbb&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e3794e16-0225-4bf1-a99c-b99638a22232/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "315", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "EntityRecognizer", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&revisionId=7220ac2a-a908-46df-a58d-bad87bbbad23&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e3794e16-0225-4bf1-a99c-b99638a22232/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "316", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "EntityRecognizer", - "description": "The entity recognizer is a deep learning-based solution that takes a text\ndocument as input and returns a list of instances of pre-defined entities\n(Person, Location, Organization, Miscellaneous).\n\n \n\nIt uses bidirectional LSTM networks to generate informative word\nrepresentations that capture the contextual dependencies between words in a\nsentence. Additionally, a CRF layer is added on top for a higher tagging\naccuracy. The models have been built using FlairNLP, a PyTorch-based NLP\nframework.\n\n \n\nThis tool includes a multilingual NER model supporting English, German and\nDutch.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&revisionId=f7447500-0c8d-4ca7-be7e-24ce3fefd144&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e3794e16-0225-4bf1-a99c-b99638a22232&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e3794e16-0225-4bf1-a99c-b99638a22232/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "317", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Explanations4AdultClassification", - "description": "This tool provides predictions and explanations for the classification of\ninstances of [Adult Census](https://archive.ics.uci.edu/ml/datasets/adult)\ndataset. The explanation method is called LionForests, while the prediction is\nbased on a random forests model. The corresponding paper supporting this\ntechnique can be found here: http://ceur-ws.org/Vol-2659/mollas.pdf in\nProceedings of the First International Workshop on New Foundations for Human-\nCentered AI (NeHuAI) co-located with 24th European Conference on Artificial\nIntelligence (ECAI 2020).\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e4208fe5-3b5c-4fe0-9cff-c28b828db530&revisionId=5d31e250-36f3-4033-9ab9-17a9213f96ae&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e4208fe5-3b5c-4fe0-9cff-c28b828db530&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e4208fe5-3b5c-4fe0-9cff-c28b828db530/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "322", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "edm-agent", - "description": "EDM agent component is an RL based agent that controls the EDM environment\n(AI4EU component edm-env) based on the observed voltage and frequencies. It is\nbased on the PPO algorithm and was trained using the `train.py` script that is\navailable in the github repository of the component:\nhttps://github.com/threethirds/edm.\n\nThis component has a user interface via 8062 port which can be used to run a\nsmall demo control scenario. It also has a protobuf API via 8061 port in order\nto connect to the EDM environment.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&revisionId=05ea3f80-92b1-4ffc-b1ab-1b3bb38cee7b&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e50055a9-56ad-478a-9f57-c6553202f2a9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "323", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "edm-agent", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&revisionId=459c7f89-264e-4321-8b5c-c9ab7e9dab3d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e50055a9-56ad-478a-9f57-c6553202f2a9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "324", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "edm-agent", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&revisionId=731df380-b367-4945-a59f-5b145e8c6de1&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e50055a9-56ad-478a-9f57-c6553202f2a9&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e50055a9-56ad-478a-9f57-c6553202f2a9/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "333", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4eu-security-pilot-model", - "description": "This container provides a model for Thread Prediction in Network Traffic.\n\nTherefore, this container can detect malicious traffic.\n\n \n\nThis container can be trained with the training interface and predict traffic\nwith the prediction interface.\n\nThis container provides two inputs and one output.\n\nThe training input is to provide training data. You can connect this input\nwith the ai4eu-security-databroker training output. After starting the\ntraining the data will be transfered to train the model.\n\nThe second input is the prediction input. You can connect this input with the\nai4eu-security-databroker prediction output. After starting the model you can\nsee the prediction results in the prediction output. There, you get a number\nbetween 0 and 1. According to your data you have to set a threshold to specify\nif the data are fraud or benign. The threshold can be found in the evaluation\ncontainer of the model.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=e8c82055-1afc-444c-9c21-3d64ea601b28&revisionId=0b99f79d-5e7c-4b0f-850f-bae2b6e710ce&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=e8c82055-1afc-444c-9c21-3d64ea601b28&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/e8c82055-1afc-444c-9c21-3d64ea601b28/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "336", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "TransformersGraphAlgorithmsAgainstColonCancer", - "description": "Automatic classification of ICD-10 codes from free text and medical records\nbased on BERT model. The application of NLP (textual information extraction)\ntasks in combination with other numerical biomarkers will involve that the\nmodel will improve in accuracy and a greater number of cancer patients will be\ndetected earlier, improving the future healthcare system.\n\nMoreover, the automatic identification or classification in ICD-10 codes from\nfree text not only helps to improve the predictive model but also avoids the\nmanual assigning codes that is expensive, time consuming and error prone.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=0859679b-e1ba-4d89-8093-2313212216af&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.1", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.1", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "337", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "TransformersGraphAlgorithmsAgainstColonCancer", - "description": "Automatic classification of ICD-10 codes from medical records based on\nTransformers. The application of NLP tasks in combination with other numerical\nbiomarkers will involve that the model will improve in accuracy and a greater\nnumber of cancer patients will be detected earlier, improving the future\nhealthcare system.\n\nMoreover, the automatic identification of ICD-10 codes from free text not only\nhelps to improve the predictive model but also avoids the manual assigning\ncodes that is expensive, time consuming and error prone.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=50c071a6-aba1-4f90-97a3-2ab1108a0d22&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.5", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.5", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "338", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "TransformersGraphAlgorithmsAgainstColonCancer", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=6bef6d39-cb13-4a6d-b1c3-dc6df07cff05&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "339", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "TransformersGraphAlgorithmsAgainstColonCancer", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=7fce648b-ae3c-4514-84f1-d599a2b9e54d&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.3", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.3", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "340", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "TransformersGraphAlgorithmsAgainstColonCancer", - "description": "Automatic classification of ICD-10 codes from free text and medical records\nbased on BERT model. The application of NLP (textual information extraction)\ntasks in combination with other numerical biomarkers will involve that the\nmodel will improve in accuracy and a greater number of cancer patients will be\ndetected earlier, improving the future healthcare system.\n\nMoreover, the automatic identification or classification in ICD-10 codes from\nfree text not only helps to improve the predictive model but also avoids the\nmanual assigning codes that is expensive, time consuming and error prone.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=94e9caf4-0568-4125-b2e2-03872507d1d0&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "341", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "TransformersGraphAlgorithmsAgainstColonCancer", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&revisionId=f85a7339-8bf4-442f-82fa-36eee8214057&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.4", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=eae81a4a-c2e5-4fff-abf3-71ecca4ab829&version=1.0.4", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/eae81a4a-c2e5-4fff-abf3-71ecca4ab829/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "344", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "TEK_THOR_FORECAST", - "description": "**AI4EU - THOR **THOR solution consists in a Hybrid optimization solution to\nmake the right decision on the amount spare parts in stock, considering past\nsales and forecasts. The purchase decision considers as input information\ncurrent stock status, production needs, production forecast, sales forecast,\nvariability Price of stock material and several restriction parameters.\n\n **Forecast**. An auto-adjustable predictive model forecasts the short-term\nexpected sales of end products as well as the expected price evolution of\nspared parts.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ebcb6fba-d8f4-4010-a6b2-8386040c9030&revisionId=afc31a74-dcad-4b4e-a691-b31750478365&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.4", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ebcb6fba-d8f4-4010-a6b2-8386040c9030&version=1.0.4", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ebcb6fba-d8f4-4010-a6b2-8386040c9030/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "345", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "Idiap_BEAT_Face_Recognition_-_Eigenfaces_trained_on_ATNT", - "description": "A biometrics algorithm that compares a probe image to a set of template images\nand outputs a comparison score.\n\nThis algorithm was trained on the ATNT database and reproduces the EigenFaces\nface recognition baseline.\n\nThe input images must be gray-scale and of the size of 92x92. The training\ndata comes from the [BOB atnt database\n](https://www.idiap.ch/software/bob/docs/bob/bob.db.atnt/master/index.html)package.\n\nReference experiment on the BEAT platform is\n[amohammadi/amohammadi/atnt_eigenfaces/1/atnt1](https://www.idiap.ch/software/beat/platform/experiments/amohammadi/amohammadi/atnt_eigenfaces/1/atnt1/).\n\n \n\n \n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ec640afb-9d7a-499d-977c-bceb435acff7&revisionId=2043f0e1-b332-499d-8472-c946faccd8c2&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ec640afb-9d7a-499d-977c-bceb435acff7&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ec640afb-9d7a-499d-977c-bceb435acff7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "346", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AudioPunctuationGerman", - "description": "This model adds German punctuation to an audio mining pipeline.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=ed1a8947-f102-4786-9dbb-412568317a3f&revisionId=ace9dada-2a60-4530-b264-f4edb8511ca8&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=ed1a8947-f102-4786-9dbb-412568317a3f&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/ed1a8947-f102-4786-9dbb-412568317a3f/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "350", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "atcrecognize", - "description": "Atcrecognize extracts text from images that contain label tags. Using its\nunderlying deep learning technology, atcrecognize enhances the image, removes\nthe unnecessary parts of the image, and feeds into the ocr model that extracts\nthe text with more precision. The app is developed and used for the H2020\nproject AI-PROFICIENT.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f24a9c17-5f4f-4bec-b0f5-5fd20e4669a7&revisionId=af42ba9b-ec9e-4f37-8a46-e581c9f3d811&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f24a9c17-5f4f-4bec-b0f5-5fd20e4669a7&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f24a9c17-5f4f-4bec-b0f5-5fd20e4669a7/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "353", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "pomdp-ir", - "description": "Partially Observable Markov Decision Processes with Information Rewards\n(POMDP-IR) is a framework to compute policies for autonomous agents with the\ngoal of gathering information about particular features on the environment.\nSymbolicPerseus-IR extends one of the most knowns POMDP solvers to include\nInformation Rewards. It lets you compute and test policies for a given input\nenvironment model.\n\n \n\nCheck the github repository of the resource for a more detailed overview:\nhttps://github.com/tsveiga/ai4eu-pomdp-ir\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=f7fc6fdd-3bc0-4443-a28d-dc08109d0ffd&revisionId=d5743c46-5b96-4d8a-90be-fdeb5e248f45&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=f7fc6fdd-3bc0-4443-a28d-dc08109d0ffd&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/f7fc6fdd-3bc0-4443-a28d-dc08109d0ffd/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "361", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AI4Agri-qualitypredictor", - "description": "", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fb06bc01-1ac9-4a7b-bcdc-cae78e970796&revisionId=6f5ef2c7-3b29-46e9-881b-7adf1191df62&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.2", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fb06bc01-1ac9-4a7b-bcdc-cae78e970796&version=1.0.2", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/fb06bc01-1ac9-4a7b-bcdc-cae78e970796/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "362", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "AI4Agri-qualitypredictor", - "description": "This component generates a set of models in order to predict one grape yield\nand three different grape quality indicators related to the [AI4EU agriculture\npilot](https://www.ai4eu.eu/ai4agriculture).\n\n \n\n \n\n \n\nTo do that, the current component connects to the AI4EU agriculture pilot\nKnowledge graph and retrieves all the required data (according to the dates\nand parcel information provided in the prediction request and the target\nvariable requested) to generate different models that will be evaluated and\nused to provide the best prediction possible.\n\n \n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fb06bc01-1ac9-4a7b-bcdc-cae78e970796&revisionId=d34ae15a-d648-4c34-ae31-7f5ca2abc7a2&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fb06bc01-1ac9-4a7b-bcdc-cae78e970796&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/fb06bc01-1ac9-4a7b-bcdc-cae78e970796/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "364", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "ai4eu-lexatexer-pump-rul", - "description": "Provides access to a REST API which consumes a pumps quality assurance data\nand delivers failure probabilities and MTTF densities.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fbbe4dff-5eaa-4171-b15a-d8035a79a035&revisionId=7bcfcec0-10e7-4c4f-af17-be65b435c5b3&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fbbe4dff-5eaa-4171-b15a-d8035a79a035&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/fbbe4dff-5eaa-4171-b15a-d8035a79a035/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - }, - { - "platform": "ai4experiments", - "platform_identifier": "365", - "aiod_entry": { - "editor": [], - "status": "draft" - }, - "name": "pira-analyzer", - "description": "This component leverages AI technologies for information extraction to\nidentify semantically-relevant structured information from semi-/un-structured\ndocuments. This information is classified as personally identifiable\ninformation (PII) entities or not by leveraging named entity recognition.\nIdentified PII entities are further classified into different categories\ndepending on their nature.\n\n", - "same_as": "https://aiexp.ai4europe.eu/#/marketSolutions?solutionId=fe6bca3a-9583-4f6c-993a-ec104226a679&revisionId=353595da-df92-4d10-8690-d6e1665040af&parentUrl=marketplace#md-model-detail-template", - "date_published": "2023-09-01T15:15:00.000", - "version": "1.0.0", - "pid": "", - "alternate_name": [], - "application_area": [], - "citation": [], - "contact": [], - "creator": [], - "distribution": [], - "has_part": [], - "industrial_sector": [], - "is_part_of": [], - "keyword": [], - "license": "https://aiexp.ai4europe.eu/api/getLicenseFile?solutionId=fe6bca3a-9583-4f6c-993a-ec104226a679&version=1.0.0", - "media": [ - { - "checksum": "", - "checksum_algorithm": "", - "copyright": "", - "content_url": "https://aiexp.ai4europe.eu/api/solutions/fe6bca3a-9583-4f6c-993a-ec104226a679/picture", - "content_size_kb": 0, - "date_published": "2023-09-01T15:15:00.000", - "description": "", - "encoding_format": "", - "name": "" - } - ], - "note": [], - "related_experiment": [], - "research_area": [], - "scientific_domain": [], - "type": "" - } -] + { + "platform": "example", + "platform_identifier": "1", + "name": "The name of this resource", + "description": "A description.", + "same_as": "https://www.example.com/resource/this_resource", + "date_published": "2022-01-01T15:15:00.000", + "version": "1.1.0", + "pid": "https://doi.org/10.1000/182", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [ + "alias 1", + "alias 2" + ], + "application_area": [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "citation": [], + "contact": [], + "creator": [], + "distribution": [ + { + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/model.zip", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "application/zip", + "name": "Name of this file.", + "technology_readiness_level": 1, + "installation_script": "./install.sh", + "installation": "Build the Dockerfile", + "installation_time_milliseconds": 100, + "deployment_script": "./run.sh", + "deployment": "You can run the run.py file using python3. See README.md for required arguments.", + "deployment_time_milliseconds": 100, + "os_requirement": "Windows 11.", + "dependency": "Python packages as listed in requirements.txt.", + "hardware_requirement": "4GB RAM; 100MB storage; 1GHz processor with 8 cores." + } + ], + "has_part": [], + "industrial_sector": [ + "Finance", + "eCommerce", + "Healthcare" + ], + "is_part_of": [], + "keyword": [ + "keyword1", + "keyword2" + ], + "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", + "media": [ + { + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/image.jpeg", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "image/jpeg", + "name": "Name of this file." + } + ], + "note": [ + { + "value": "A brief record of points or ideas about this AI resource." + } + ], + "related_experiment": [], + "research_area": [ + "Explainable AI", + "Physical AI" + ], + "scientific_domain": [ + "Anomaly Detection", + "Voice Recognition", + "Computer Vision." + ], + "type": "Large Language Model" + } +] \ No newline at end of file diff --git a/src/connectors/example/resources/resource/organisations.json b/src/connectors/example/resources/resource/organisations.json index 939359f0..0e809e0f 100644 --- a/src/connectors/example/resources/resource/organisations.json +++ b/src/connectors/example/resources/resource/organisations.json @@ -71,4 +71,4 @@ ], "type": "Research Institution" } -] +] \ No newline at end of file diff --git a/src/connectors/example/resources/resource/projects.json b/src/connectors/example/resources/resource/projects.json index 561bca97..61c17652 100644 --- a/src/connectors/example/resources/resource/projects.json +++ b/src/connectors/example/resources/resource/projects.json @@ -8,7 +8,6 @@ "date_published": "2022-01-01T15:15:00.000", "version": "1.1.0", "pid": "https://doi.org/10.1000/182", - "coordinator": 1, "aiod_entry": { "editor": [], "status": "draft" @@ -82,4 +81,4 @@ "end_date": "2021-02-03T15:15:00", "total_cost_euro": 10000000 } -] +] \ No newline at end of file diff --git a/src/tests/resources/elasticsearch/dataset_search.json b/src/tests/resources/elasticsearch/dataset_search.json index 73493b2f..4433362d 100644 --- a/src/tests/resources/elasticsearch/dataset_search.json +++ b/src/tests/resources/elasticsearch/dataset_search.json @@ -1,52 +1,53 @@ { - "took" : 2, - "timed_out" : false, - "_shards" : { - "total" : 1, - "successful" : 1, - "skipped" : 0, - "failed" : 0 + "took" : 4, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 83, + "relation" : "eq" }, - "hits" : { - "total" : { - "value" : 1, - "relation" : "eq" + "max_score" : 0.74700636, + "hits" : [ + { + "_index" : "dataset", + "_id" : "dataset_104", + "_score" : 0.74700636, + "_ignored" : [ + "description.keyword" + ], + "_source" : { + "version" : "1.0.1", + "same_as" : "https://www.example.com/resource/this_resource", + "asset_identifier" : 106, + "identifier" : 104, + "resource_identifier" : 106, + "description" : "A description.", + "temporal_coverage" : "", + "type" : "dataset", + "platform_identifier" : "287", + "date_published" : "2023-09-01T00:00:00.000Z", + "date_modified" : "2023-09-01T00:00:00.000Z", + "application_area" : [ ], + "name" : "trondheim-simulator", + "measurement_technique" : "", + "license" : "https://www.example.com/resource/this_resource", + "issn" : "00000000", + "date_created" : "2023-09-01T00:00:00.000Z", + "@version" : "1", + "platform" : "ai4experiments", + "status" : "draft", + "@timestamp" : "2023-09-01T00:00:00.000Z" }, - "max_score" : 6.3386726, - "hits" : [ - { - "_index" : "dataset", - "_id" : "dataset_2", - "_score" : 6.3386726, - "_ignored" : [ - "description.keyword" - ], - "_source" : { - "license" : "https://test_resource.test", - "date_created" : "2023-09-01T00:00:00.000Z", - "date_published" : "2023-09-01T00:00:00.000Z", - "version" : "1.0.0", - "measurement_technique" : "", - "temporal_coverage" : "", - "issn" : "00000000", - "type" : "dataset", - "platform_identifier" : "6", - "resource_identifier" : 4, - "date_modified" : "2023-09-01T00:00:00.000Z", - "asset_identifier" : 4, - "@version" : "1", - "status" : "draft", - "description" : "A description", - "platform" : "example", - "name" : "A name", - "same_as" : "https://test_resource.test", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "identifier" : 2 - }, - "sort": [ - 1 - ] - } + "sort" : [ + 1 ] - } + } + ] + } } diff --git a/src/tests/resources/elasticsearch/event_search.json b/src/tests/resources/elasticsearch/event_search.json new file mode 100644 index 00000000..cc350fa0 --- /dev/null +++ b/src/tests/resources/elasticsearch/event_search.json @@ -0,0 +1,55 @@ +{ + "took" : 2, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 0.2876821, + "hits" : [ + { + "_index" : "event", + "_id" : "event_1", + "_score" : 0.2876821, + "_source" : { + "type" : "event", + "end_date" : "2023-09-01T00:00:00.000Z", + "mode" : "offline", + "description" : "A description.", + "platform_identifier" : "1", + "application_area" : [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "registration_link" : "https://example.com/registration-form", + "name" : "The name of the Event", + "@version" : "1", + "event_status" : "scheduled", + "schedule" : "10:00-10:30: Opening. 10:30-11:00 ...", + "platform" : "example", + "organiser_type" : "person", + "start_date" : "2023-09-01T00:00:00.000Z", + "same_as" : "https://www.example.com/resource/this_resource", + "identifier" : 1, + "resource_identifier" : 374, + "date_modified" : "2023-09-01T00:00:00.000Z", + "organiser_identifier" : 2, + "date_created" : "2023-09-01T00:00:00.000Z", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "status" : "draft" + }, + "sort" : [ + 1 + ] + } + ] + } +} diff --git a/src/tests/resources/elasticsearch/experiment_search.json b/src/tests/resources/elasticsearch/experiment_search.json index 6fb41161..a58679e3 100644 --- a/src/tests/resources/elasticsearch/experiment_search.json +++ b/src/tests/resources/elasticsearch/experiment_search.json @@ -1,52 +1,53 @@ { - "took" : 1, - "timed_out" : false, - "_shards" : { - "total" : 1, - "successful" : 1, - "skipped" : 0, - "failed" : 0 + "took" : 4, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 21, + "relation" : "eq" }, - "hits" : { - "total" : { - "value" : 1, - "relation" : "eq" + "max_score" : 0.6878389, + "hits" : [ + { + "_index" : "experiment", + "_id" : "experiment_32", + "_score" : 0.6878389, + "_ignored" : [ + "description.keyword" + ], + "_source" : { + "version" : "1.0", + "same_as" : "https://www.example.com/resource/this_resource", + "asset_identifier" : 169, + "identifier" : 32, + "resource_identifier" : 170, + "description" : "A description.", + "reproducibility_explanation" : "", + "type" : "experiment", + "platform_identifier" : "397", + "date_published" : "2023-09-01T00:00:00.000Z", + "experimental_workflow" : "", + "date_modified" : "2023-09-01T00:00:00.000Z", + "execution_settings" : "", + "application_area" : [ ], + "name" : "aqpredvisualize", + "@version" : "1", + "license" : "https://www.example.com/resource/this_resource", + "date_created" : "2023-09-01T00:00:00.000Z", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "platform" : "ai4experiments", + "status" : "draft" }, - "max_score" : 4.3783627, - "hits" : [ - { - "_index" : "experiment", - "_id" : "experiment_40", - "_score" : 4.3783627, - "_ignored" : [ - "description.keyword" - ], - "_source" : { - "license" : "https://test_resource.test", - "date_created" : "2023-09-01T00:00:00.000Z", - "date_published" : "2023-09-01T00:00:00.000Z", - "version" : "1.0.0", - "reproducibility_explanation" : "", - "type" : "experiment", - "experimental_workflow" : "", - "platform_identifier" : "405", - "resource_identifier" : 179, - "date_modified" : "2023-09-01T00:00:00.000Z", - "asset_identifier" : 177, - "@version" : "1", - "status" : "draft", - "description" : "A description", - "platform" : "example", - "name" : "A name", - "same_as" : "https://test_resource.test", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "identifier" : 40, - "execution_settings" : "" - }, - "sort": [ - 1 - ] - } + "sort" : [ + 1 ] - } + } + ] + } } diff --git a/src/tests/resources/elasticsearch/ml_model_search.json b/src/tests/resources/elasticsearch/ml_model_search.json index c47dfa44..1094dbcf 100644 --- a/src/tests/resources/elasticsearch/ml_model_search.json +++ b/src/tests/resources/elasticsearch/ml_model_search.json @@ -1,51 +1,51 @@ { - "took" : 4, - "timed_out" : false, - "_shards" : { - "total" : 1, - "successful" : 1, - "skipped" : 0, - "failed" : 0 + "took" : 24, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 110, + "relation" : "eq" }, - "hits" : { - "total" : { - "value" : 1, - "relation" : "eq" + "max_score" : 0.6961925, + "hits" : [ + { + "_index" : "ml_model", + "_id" : "ml_model_168", + "_score" : 0.6961925, + "_ignored" : [ + "description.keyword" + ], + "_source" : { + "ml_model_type" : "", + "version" : "1.0.0", + "same_as" : "https://www.example.com/resource/this_resource", + "asset_identifier" : 349, + "identifier" : 168, + "resource_identifier" : 350, + "description" : "A description.", + "type" : "ml_model", + "platform_identifier" : "316", + "date_published" : "2023-09-01T00:00:00.000Z", + "date_modified" : "2023-09-01T00:00:00.000Z", + "application_area" : [ ], + "name" : "EntityRecognizer", + "@version" : "1", + "license" : "https://www.example.com/resource/this_resource", + "date_created" : "2023-09-01T00:00:00.000Z", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "platform" : "ai4experiments", + "status" : "draft" }, - "max_score" : 6.245174, - "hits" : [ - { - "_index" : "ml_model", - "_id" : "ml_model_3", - "_score" : 6.245174, - "_ignored" : [ - "description.keyword" - ], - "_source" : { - "license" : "https://test_resource.test", - "date_created" : "2023-09-01T00:00:00.000Z", - "date_published" : "2023-09-01T00:00:00.000Z", - "version" : "1.0.1", - "type" : "ml_model", - "platform_identifier" : "3", - "resource_identifier" : 186, - "date_modified" : "2023-09-01T00:00:00.000Z", - "asset_identifier" : 184, - "@version" : "1", - "status" : "draft", - "description" : "A description", - "platform" : "example", - "name" : "A name", - "same_as" : "https://test_resource.test", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "identifier" : 3, - "ml_model_type" : "" - }, - "sort": [ - 1 - ] - } + "sort" : [ + 1 ] - } + } + ] + } } - diff --git a/src/tests/resources/elasticsearch/news_search.json b/src/tests/resources/elasticsearch/news_search.json new file mode 100644 index 00000000..e75a6bcf --- /dev/null +++ b/src/tests/resources/elasticsearch/news_search.json @@ -0,0 +1,49 @@ +{ + "took" : 4, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 0.2876821, + "hits" : [ + { + "_index" : "news", + "_id" : "news_1", + "_score" : 0.2876821, + "_source" : { + "same_as" : "https://www.example.com/resource/this_resource", + "type" : "news", + "identifier" : 1, + "resource_identifier" : 371, + "description" : "A description.", + "platform_identifier" : "1", + "date_modified" : "2023-09-01T00:00:00.000Z", + "application_area" : [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "name" : "Name of the News item", + "@version" : "1", + "headline" : "A headline to show on top of the page.", + "alternative_headline" : "An alternative headline.", + "date_created" : "2023-09-01T00:00:00.000Z", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "platform" : "example", + "status" : "draft" + }, + "sort" : [ + 1 + ] + } + ] + } +} diff --git a/src/tests/resources/elasticsearch/organisation_search.json b/src/tests/resources/elasticsearch/organisation_search.json new file mode 100644 index 00000000..573df1b7 --- /dev/null +++ b/src/tests/resources/elasticsearch/organisation_search.json @@ -0,0 +1,51 @@ +{ + "took" : 2, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 0.2876821, + "hits" : [ + { + "_index" : "organisation", + "_id" : "organisation_1", + "_score" : 0.2876821, + "_source" : { + "agent" : "organisation", + "same_as" : "https://www.example.com/resource/this_resource", + "date_founded" : "2022-01-01", + "identifier" : 1, + "resource_identifier" : 372, + "description" : "A description.", + "legal_name" : "The legal Organisation Name", + "type" : "organisation", + "platform_identifier" : "1", + "organisation_type" : "Research Institution", + "date_modified" : "2023-09-01T00:00:00.000Z", + "application_area" : [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "name" : "The name of this organisation", + "@version" : "1", + "date_created" : "2023-09-01T00:00:00.000Z", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "platform" : "example", + "status" : "draft" + }, + "sort" : [ + 1 + ] + } + ] + } +} diff --git a/src/tests/resources/elasticsearch/project_search.json b/src/tests/resources/elasticsearch/project_search.json new file mode 100644 index 00000000..98927eb1 --- /dev/null +++ b/src/tests/resources/elasticsearch/project_search.json @@ -0,0 +1,52 @@ +{ + "took" : 4, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 0.2876821, + "hits" : [ + { + "_index" : "project", + "_id" : "project_1", + "_score" : 0.2876821, + "_source" : { + "coordinator_identifier" : 1, + "start_date" : "2023-09-01T00:00:00.000Z", + "same_as" : "https://www.example.com/resource/this_resource", + "end_date" : "2023-09-01T00:00:00.000Z", + "identifier" : 1, + "resource_identifier" : 375, + "description" : "A description.", + "type" : "project", + "platform_identifier" : "1", + "date_modified" : "2023-09-01T00:00:00.000Z", + "total_cost_euro" : 1.0E7, + "application_area" : [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "name" : "Name of the Project", + "coordinator_name" : "The name of this organisation", + "@version" : "1", + "date_created" : "2023-09-01T00:00:00.000Z", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "platform" : "example", + "status" : "draft" + }, + "sort" : [ + 1 + ] + } + ] + } +} diff --git a/src/tests/resources/elasticsearch/publication_search.json b/src/tests/resources/elasticsearch/publication_search.json index a3a45f78..67adb2b5 100644 --- a/src/tests/resources/elasticsearch/publication_search.json +++ b/src/tests/resources/elasticsearch/publication_search.json @@ -1,51 +1,56 @@ { - "took" : 2, - "timed_out" : false, - "_shards" : { - "total" : 1, - "successful" : 1, - "skipped" : 0, - "failed" : 0 + "took" : 1, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" }, - "hits" : { - "total" : { - "value" : 1, - "relation" : "eq" + "max_score" : 0.2876821, + "hits" : [ + { + "_index" : "publication", + "_id" : "publication_1", + "_score" : 0.2876821, + "_source" : { + "type" : "publication", + "description" : "A description.", + "platform_identifier" : "1", + "date_published" : "2023-09-01T00:00:00.000Z", + "publication_type" : "journal", + "application_area" : [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "name" : "The name of this publication", + "isbn" : "9783161484100", + "issn" : "20493630", + "@version" : "1", + "platform" : "example", + "knowledge_asset_identifier" : null, + "version" : "1.1.0", + "same_as" : "https://www.example.com/resource/this_resource", + "asset_identifier" : 370, + "identifier" : 1, + "resource_identifier" : 376, + "date_modified" : "2023-09-01T00:00:00.000Z", + "license" : "https://www.example.com/resource/this_resource", + "permanent_identifier" : "http://dx.doi.org/10.1093/ajae/aaq063", + "date_created" : "2023-09-01T00:00:00.000Z", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "status" : "draft" }, - "max_score" : 1.0, - "hits" : [ - { - "_index" : "publication", - "_id" : "publication_1", - "_score" : 1.0, - "_source" : { - "date_created" : "2023-09-01T00:00:00.000Z", - "date_published" : "2023-09-01T00:00:00.000Z", - "type" : "publication", - "isbn" : "0000000000000", - "@version" : "1", - "date_modified" : "2023-09-01T00:00:00.000Z", - "asset_identifier" : 370, - "status" : "draft", - "name" : "A name", - "permanent_identifier" : "https://test_resource.test", - "identifier" : 1, - "license" : "https://test_resource.test", - "knowledge_asset_identifier" : null, - "version" : "1.0.0", - "issn" : "00000000", - "publication_type" : "journal", - "platform_identifier" : "1", - "resource_identifier" : 376, - "description" : "A description", - "platform" : "example", - "same_as" : "https://test_resource.test", - "@timestamp" : "2023-09-01T00:00:00.000Z" - }, - "sort": [ - 1 - ] - } + "sort" : [ + 1 ] - } + } + ] + } } diff --git a/src/tests/resources/elasticsearch/service_search.json b/src/tests/resources/elasticsearch/service_search.json index bbe3149e..d889facf 100644 --- a/src/tests/resources/elasticsearch/service_search.json +++ b/src/tests/resources/elasticsearch/service_search.json @@ -1,44 +1,49 @@ { - "took" : 1, - "timed_out" : false, - "_shards" : { - "total" : 1, - "successful" : 1, - "skipped" : 0, - "failed" : 0 + "took" : 1, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" }, - "hits" : { - "total" : { - "value" : 1, - "relation" : "eq" + "max_score" : 0.2876821, + "hits" : [ + { + "_index" : "service", + "_id" : "service_1", + "_score" : 0.2876821, + "_source" : { + "same_as" : "https://www.example.com/resource/this_resource", + "type" : "service", + "identifier" : 1, + "resource_identifier" : 377, + "description" : "A description.", + "platform_identifier" : "1", + "date_modified" : "2023-09-01T00:00:00.000Z", + "slogan" : "Making your Smart Paradigm Shifts more Disruptive", + "application_area" : [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "name" : "The name of this service", + "@version" : "1", + "date_created" : "2023-09-01T00:00:00.000Z", + "@timestamp" : "2023-09-01T00:00:00.000Z", + "platform" : "example", + "status" : "draft", + "terms_of_service" : "Your use of this service is subject to the following terms: [...]." }, - "max_score" : 1.0, - "hits" : [ - { - "_index" : "service", - "_id" : "service_1", - "_score" : 1.0, - "_source" : { - "date_created" : "2023-09-01T00:00:00.000Z", - "type" : "service", - "terms_of_service" : "Some terms", - "slogan" : "A slogan", - "platform_identifier" : "1", - "resource_identifier" : 377, - "date_modified" : "2023-09-01T00:00:00.000Z", - "@version" : "1", - "status" : "draft", - "description" : "A description", - "platform" : "example", - "name" : "A name", - "same_as" : "https://test_resource.test", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "identifier" : 1 - }, - "sort": [ - 1 - ] - } + "sort" : [ + 1 ] - } + } + ] + } } diff --git a/src/tests/routers/search_routers/test_search_router_datasets.py b/src/tests/routers/search_routers/test_search_router_datasets.py index 09a2703b..ff76ee16 100644 --- a/src/tests/routers/search_routers/test_search_router_datasets.py +++ b/src/tests/routers/search_routers/test_search_router_datasets.py @@ -31,18 +31,19 @@ def test_search_happy_path(client: TestClient): resource = response.json()['resources'][0] # Test the response - assert resource['identifier'] == 2 - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['aiod_entry']['status'] == "draft" - assert resource['name'] == "A name" - assert resource['description'] == "A description" - assert resource['version'] == "1.0.0" - assert resource['issn'] == "00000000" - assert resource['platform'] == "example" - assert resource['platform_identifier'] == "6" - assert resource['license'] == "https://test_resource.test" + assert resource['version'] == "1.0.1" + assert resource['same_as'] == "https://www.example.com/resource/this_resource" + assert resource['identifier'] == 104 + assert resource['description'] == "A description." + assert resource['temporal_coverage'] == "" + assert resource['platform_identifier'] == "287" assert resource['date_published'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['application_area'] == [ ] + assert resource['name'] == "trondheim-simulator" assert resource['measurement_technique'] == "" - assert resource['temporal_coverage'] == "" - assert resource['same_as'] == "https://test_resource.test" + assert resource['license'] == "https://www.example.com/resource/this_resource" + assert resource['issn'] == "00000000" + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['platform'] == "ai4experiments" + assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_events.py b/src/tests/routers/search_routers/test_search_router_events.py new file mode 100644 index 00000000..a3998aad --- /dev/null +++ b/src/tests/routers/search_routers/test_search_router_events.py @@ -0,0 +1,48 @@ +import os +import json + +from unittest.mock import Mock +from starlette.testclient import TestClient +from tests.testutils.paths import path_test_resources +from routers.search_routers import SearchRouterEvents, router_list + +def test_search_happy_path(client: TestClient): + """Tests the Events search""" + + # Get the correspondent router instance from the search routers list + search_router = None + for router_instance in router_list: + if isinstance(router_instance, SearchRouterEvents): + search_router = router_instance + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + mocked_file = os.path.join(resources_path, "event_search.json") + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + response = client.get("/search/events/v1", + params={'search_query': "description"}) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()['resources'][0] + + # Test the response + assert resource['end_date'] == "2023-09-01T00:00:00+00:00" + assert resource['mode'] == "offline" + assert resource['description'] == "A description." + assert resource['platform_identifier'] == "1" + assert resource['application_area'] == ["Fraud Prevention", "Voice Assistance", "Disease Classification"] + assert resource['registration_link'] == "https://example.com/registration-form" + assert resource['name'] == "The name of the Event" + assert resource['schedule'] == "10:00-10:30: Opening. 10:30-11:00 ..." + assert resource['platform'] == "example" + assert resource['start_date'] == "2023-09-01T00:00:00+00:00" + assert resource['same_as'] == "https://www.example.com/resource/this_resource" + assert resource['identifier'] == 1 + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_experiments.py b/src/tests/routers/search_routers/test_search_router_experiments.py index bef1aca9..1f4f3ebc 100644 --- a/src/tests/routers/search_routers/test_search_router_experiments.py +++ b/src/tests/routers/search_routers/test_search_router_experiments.py @@ -31,18 +31,19 @@ def test_search_happy_path(client: TestClient): resource = response.json()['resources'][0] # Test the response - assert resource['identifier'] == 40 - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['aiod_entry']['status'] == "draft" - assert resource['name'] == "A name" - assert resource['description'] == "A description" - assert resource['version'] == "1.0.0" - assert resource['platform'] == "example" - assert resource['platform_identifier'] == "405" - assert resource['license'] == "https://test_resource.test" - assert resource['date_published'] == "2023-09-01T00:00:00+00:00" + assert resource['version'] == "1.0" + assert resource['same_as'] == "https://www.example.com/resource/this_resource" + assert resource['identifier'] == 32 + assert resource['description'] == "A description." assert resource['reproducibility_explanation'] == "" + assert resource['platform_identifier'] == "397" + assert resource['date_published'] == "2023-09-01T00:00:00+00:00" assert resource['experimental_workflow'] == "" + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" assert resource['execution_settings'] == "" - assert resource['same_as'] == "https://test_resource.test" + assert resource['application_area'] == [ ] + assert resource['name'] == "aqpredvisualize" + assert resource['license'] == "https://www.example.com/resource/this_resource" + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['platform'] == "ai4experiments" + assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_ml_model.py b/src/tests/routers/search_routers/test_search_router_ml_model.py index 58a4cafb..27b497a3 100644 --- a/src/tests/routers/search_routers/test_search_router_ml_model.py +++ b/src/tests/routers/search_routers/test_search_router_ml_model.py @@ -31,15 +31,16 @@ def test_search_happy_path(client: TestClient): resource = response.json()['resources'][0] # Test the response - assert resource['identifier'] == 3 - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['version'] == "1.0.0" + assert resource['same_as'] == "https://www.example.com/resource/this_resource" + assert resource['identifier'] == 168 + assert resource['description'] == "A description." + assert resource['platform_identifier'] == "316" + assert resource['date_published'] == "2023-09-01T00:00:00+00:00" assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['application_area'] == [ ] + assert resource['name'] == "EntityRecognizer" + assert resource['license'] == "https://www.example.com/resource/this_resource" + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['platform'] == "ai4experiments" assert resource['aiod_entry']['status'] == "draft" - assert resource['name'] == "A name" - assert resource['description'] == "A description" - assert resource['version'] == "1.0.1" - assert resource['platform'] == "example" - assert resource['platform_identifier'] == "3" - assert resource['license'] == "https://test_resource.test" - assert resource['date_published'] == "2023-09-01T00:00:00+00:00" - assert resource['same_as'] == "https://test_resource.test" diff --git a/src/tests/routers/search_routers/test_search_router_news.py b/src/tests/routers/search_routers/test_search_router_news.py new file mode 100644 index 00000000..b9665dc2 --- /dev/null +++ b/src/tests/routers/search_routers/test_search_router_news.py @@ -0,0 +1,45 @@ +import os +import json + +from unittest.mock import Mock +from starlette.testclient import TestClient +from tests.testutils.paths import path_test_resources +from routers.search_routers import SearchRouterNews, router_list + +def test_search_happy_path(client: TestClient): + """Tests the News search""" + + # Get the correspondent router instance from the search routers list + search_router = None + for router_instance in router_list: + if isinstance(router_instance, SearchRouterNews): + search_router = router_instance + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + mocked_file = os.path.join(resources_path, "news_search.json") + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + response = client.get("/search/news/v1", + params={'search_query': "description"}) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()['resources'][0] + + # Test the response + assert resource['same_as'] == "https://www.example.com/resource/this_resource" + assert resource['identifier'] == 1 + assert resource['description'] == "A description." + assert resource['platform_identifier'] == "1" + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['application_area'] == ["Fraud Prevention", "Voice Assistance", "Disease Classification"] + assert resource['name'] == "Name of the News item" + assert resource['headline'] == "A headline to show on top of the page." + assert resource['alternative_headline'] == "An alternative headline." + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['platform'] == "example" + assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_organisations.py b/src/tests/routers/search_routers/test_search_router_organisations.py new file mode 100644 index 00000000..4ae5b351 --- /dev/null +++ b/src/tests/routers/search_routers/test_search_router_organisations.py @@ -0,0 +1,45 @@ +import os +import json + +from unittest.mock import Mock +from starlette.testclient import TestClient +from tests.testutils.paths import path_test_resources +from routers.search_routers import SearchRouterOrganisations, router_list + +def test_search_happy_path(client: TestClient): + """Tests the Organisations search""" + + # Get the correspondent router instance from the search routers list + search_router = None + for router_instance in router_list: + if isinstance(router_instance, SearchRouterOrganisations): + search_router = router_instance + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + mocked_file = os.path.join(resources_path, "organisation_search.json") + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + response = client.get("/search/organisations/v1", + params={'search_query': "description"}) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()['resources'][0] + + # Test the response + assert resource['same_as'] == "https://www.example.com/resource/this_resource" + assert resource['date_founded'] == "2022-01-01" + assert resource['identifier'] == 1 + assert resource['description'] == "A description." + assert resource['legal_name'] == "The legal Organisation Name" + assert resource['platform_identifier'] == "1" + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['application_area'] == ["Fraud Prevention", "Voice Assistance", "Disease Classification"] + assert resource['name'] == "The name of this organisation" + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['platform'] == "example" + assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_projects.py b/src/tests/routers/search_routers/test_search_router_projects.py new file mode 100644 index 00000000..4a3251c4 --- /dev/null +++ b/src/tests/routers/search_routers/test_search_router_projects.py @@ -0,0 +1,46 @@ +import os +import json + +from unittest.mock import Mock +from starlette.testclient import TestClient +from tests.testutils.paths import path_test_resources +from routers.search_routers import SearchRouterProjects, router_list + +def test_search_happy_path(client: TestClient): + """Tests the Projects search""" + + # Get the correspondent router instance from the search routers list + search_router = None + for router_instance in router_list: + if isinstance(router_instance, SearchRouterProjects): + search_router = router_instance + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + mocked_file = os.path.join(resources_path, "project_search.json") + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + response = client.get("/search/projects/v1", + params={'search_query': "description"}) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()['resources'][0] + + # Test the response + assert resource['start_date'] == "2023-09-01T00:00:00+00:00" + assert resource['same_as'] == "https://www.example.com/resource/this_resource" + assert resource['end_date'] == "2023-09-01T00:00:00+00:00" + assert resource['identifier'] == 1 + assert resource['description'] == "A description." + assert resource['platform_identifier'] == "1" + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['total_cost_euro'] == 1.0E7 + assert resource['application_area'] == [ "Fraud Prevention", "Voice Assistance", "Disease Classification"] + assert resource['name'] == "Name of the Project" + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['platform'] == "example" + assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_publications.py b/src/tests/routers/search_routers/test_search_router_publications.py index 5793f7bb..b7c0bf9f 100644 --- a/src/tests/routers/search_routers/test_search_router_publications.py +++ b/src/tests/routers/search_routers/test_search_router_publications.py @@ -31,18 +31,19 @@ def test_search_happy_path(client: TestClient): resource = response.json()['resources'][0] # Test the response + assert resource['description'] == "A description." + assert resource['platform_identifier'] == "1" + assert resource['date_published'] == "2023-09-01T00:00:00+00:00" + assert resource['application_area'] == ["Fraud Prevention", "Voice Assistance", "Disease Classification"] + assert resource['name'] == "The name of this publication" + assert resource['isbn'] == "9783161484100" + assert resource['issn'] == "20493630" + assert resource['platform'] == "example" + assert resource['version'] == "1.1.0" + assert resource['same_as'] == "https://www.example.com/resource/this_resource" assert resource['identifier'] == 1 - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + assert resource['license'] == "https://www.example.com/resource/this_resource" + assert resource['permanent_identifier'] == "http://dx.doi.org/10.1093/ajae/aaq063" + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" assert resource['aiod_entry']['status'] == "draft" - assert resource['name'] == "A name" - assert resource['description'] == "A description" - assert resource['version'] == "1.0.0" - assert resource['platform'] == "example" - assert resource['platform_identifier'] == "1" - assert resource['date_published'] == "2023-09-01T00:00:00+00:00" - assert resource['issn'] == "00000000" - assert resource['isbn'] == "0000000000000" - assert resource['permanent_identifier'] == "https://test_resource.test" - assert resource['license'] == "https://test_resource.test" - assert resource['same_as'] == "https://test_resource.test" diff --git a/src/tests/routers/search_routers/test_search_router_services.py b/src/tests/routers/search_routers/test_search_router_services.py index e4ff9223..90b615b0 100644 --- a/src/tests/routers/search_routers/test_search_router_services.py +++ b/src/tests/routers/search_routers/test_search_router_services.py @@ -31,14 +31,15 @@ def test_search_happy_path(client: TestClient): resource = response.json()['resources'][0] # Test the response + assert resource['same_as'] == "https://www.example.com/resource/this_resource" assert resource['identifier'] == 1 - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" + assert resource['description'] == "A description." + assert resource['platform_identifier'] == "1" assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['aiod_entry']['status'] == "draft" - assert resource['name'] == "A name" - assert resource['slogan'] == "A slogan" - assert resource['description'] == "A description" + assert resource['slogan'] == "Making your Smart Paradigm Shifts more Disruptive" + assert resource['application_area'] == ["Fraud Prevention", "Voice Assistance", "Disease Classification"] + assert resource['name'] == "The name of this service" + assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" assert resource['platform'] == "example" - assert resource['platform_identifier'] == "1" - assert resource['terms_of_service'] == "Some terms" - assert resource['same_as'] == "https://test_resource.test" + assert resource['aiod_entry']['status'] == "draft" + assert resource['terms_of_service'] == "Your use of this service is subject to the following terms: [...]." From 92038e75ab80cc22ba28537a67de64c7fc103234 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Fri, 10 Nov 2023 14:05:34 +0100 Subject: [PATCH 43/79] pull request modifications --- .env | 2 +- Dockerfile | 1 - README.md | 5 + connectors/fill-examples.sh | 4 - docker-compose.yaml | 6 +- es/setup/{curl_dockerfile => Dockerfile} | 0 es/setup/curl.sh | 18 +- pyproject.toml | 1 + src/routers/global_search_router.py | 181 ++++++++++++++++++ .../search_routers/search_router_generic.py | 20 ++ 10 files changed, 221 insertions(+), 17 deletions(-) rename es/setup/{curl_dockerfile => Dockerfile} (100%) create mode 100644 src/routers/global_search_router.py create mode 100644 src/routers/search_routers/search_router_generic.py diff --git a/.env b/.env index e6cc40b2..8c6792ae 100644 --- a/.env +++ b/.env @@ -13,10 +13,10 @@ POST_LOGOUT_REDIRECT_URIS=http://${HOSTNAME}/aiod-auth/realms/aiod/protocol/open #ELASTICSEARCH ES_USER=elastic -ES_JAVA_OPTS="-Xmx256m -Xms256m" ES_PASSWORD=changeme ES_DISCOVERY_TYPE=single-node ES_ROLE="edit_aiod_resources" +ES_JAVA_OPTS="-Xmx256m -Xms256m" #LOGSTASH LS_JAVA_OPTS="-Xmx256m -Xms256m" diff --git a/Dockerfile b/Dockerfile index 2bbc2266..d91d001b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,7 +14,6 @@ ENV PATH="${PATH}:/home/apprunner/.local/bin" # Install python packages globally, so that it can also be used from cron dockers (running as root) COPY ./pyproject.toml /app/pyproject.toml RUN pip install . -RUN pip install elasticsearch # This can be overwritten by a live volume, to support live code changes COPY ./src /app diff --git a/README.md b/README.md index 261a1177..0bd49d80 100644 --- a/README.md +++ b/README.md @@ -58,6 +58,11 @@ For development: - Additional 'mysqlclient' dependencies. Please have a look at [their installation instructions] (https://github.com/PyMySQL/mysqlclient#install). +## Production environment + +For production environments elasticsearch recomends -Xss4G and -Xmx8G for the JVM settings.\ +This parameters can be defined in the .env file. +See the [elasticsearch guide](https://www.elastic.co/guide/en/logstash/current/jvm-settings.html). ## Installation diff --git a/connectors/fill-examples.sh b/connectors/fill-examples.sh index 0be8d742..e9b8727d 100755 --- a/connectors/fill-examples.sh +++ b/connectors/fill-examples.sh @@ -8,10 +8,6 @@ python3 connectors/synchronization.py \ -c connectors.example.example.ExampleComputationalAssetConnector \ -w /opt/connectors/data/example/computational_asset -python3 connectors/synchronization.py \ - -c connectors.example.example.ExampleDatasetConnector \ - -w /opt/connectors/data/example/datasset - python3 connectors/synchronization.py \ -c connectors.example.example.ExampleEducationalResourceConnector \ -w /opt/connectors/data/example/educational_resource diff --git a/docker-compose.yaml b/docker-compose.yaml index 8ed7e4ac..6eb536ff 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -170,6 +170,7 @@ services: env_file: .env environment: - ES_JAVA_OPTS=$ES_JAVA_OPTS + - ELASTIC_USER=$ES_USER - ELASTIC_PASSWORD=$ES_PASSWORD - discovery.type=$ES_DISCOVERY_TYPE ports: @@ -182,7 +183,7 @@ services: read_only: true - ./data/elasticsearch:/usr/share/elasticsearch/data healthcheck: - test: ["CMD-SHELL", "curl -u elastic:changeme http://localhost:9200/_cat/health | grep -q -E 'green|yellow'"] + test: ["CMD-SHELL", "curl -u $ES_USER:$ES_PASSWORD --silent --fail localhost:9200/_cluster/health || exit 1"] interval: 5s timeout: 30s retries: 30 @@ -190,9 +191,10 @@ services: elasticsearch_setup: build: context: es/setup/ - dockerfile: curl_dockerfile + dockerfile: Dockerfile env_file: .env environment: + - ES_USER=$ES_USER - ES_PASSWORD=$ES_PASSWORD restart: "no" depends_on: diff --git a/es/setup/curl_dockerfile b/es/setup/Dockerfile similarity index 100% rename from es/setup/curl_dockerfile rename to es/setup/Dockerfile diff --git a/es/setup/curl.sh b/es/setup/curl.sh index f3ab7e91..cb0cb43a 100755 --- a/es/setup/curl.sh +++ b/es/setup/curl.sh @@ -1,9 +1,9 @@ -curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/dataset?pretty -H 'Content-Type: application/json' -d @/dataset.json -curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/event?pretty -H 'Content-Type: application/json' -d @/event.json -curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/experiment?pretty -H 'Content-Type: application/json' -d @/experiment.json -curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/ml_model?pretty -H 'Content-Type: application/json' -d @/ml_model.json -curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/news?pretty -H 'Content-Type: application/json' -d @/news.json -curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/organisation?pretty -H 'Content-Type: application/json' -d @/organisation.json -curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/project?pretty -H 'Content-Type: application/json' -d @/project.json -curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/publication?pretty -H 'Content-Type: application/json' -d @/publication.json -curl -u elastic:${ES_PASSWORD} -X PUT elasticsearch:9200/service?pretty -H 'Content-Type: application/json' -d @/service.json +curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/dataset?pretty -H 'Content-Type: application/json' -d @/dataset.json +curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/event?pretty -H 'Content-Type: application/json' -d @/event.json +curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/experiment?pretty -H 'Content-Type: application/json' -d @/experiment.json +curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/ml_model?pretty -H 'Content-Type: application/json' -d @/ml_model.json +curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/news?pretty -H 'Content-Type: application/json' -d @/news.json +curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/organisation?pretty -H 'Content-Type: application/json' -d @/organisation.json +curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/project?pretty -H 'Content-Type: application/json' -d @/project.json +curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/publication?pretty -H 'Content-Type: application/json' -d @/publication.json +curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/service?pretty -H 'Content-Type: application/json' -d @/service.json diff --git a/pyproject.toml b/pyproject.toml index 0982eb37..e6320df5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,6 +31,7 @@ dependencies = [ "xmltodict==0.13.0", "python-multipart==0.0.6", "mysql-connector-python==8.2.0", + "elasticsearch==8.10.1", ] readme = "README.md" diff --git a/src/routers/global_search_router.py b/src/routers/global_search_router.py new file mode 100644 index 00000000..93e2cbc3 --- /dev/null +++ b/src/routers/global_search_router.py @@ -0,0 +1,181 @@ +import abc +import os +from typing import TypeVar, Generic, Any, Type, Annotated + +from elasticsearch import Elasticsearch +from fastapi import APIRouter, Depends, HTTPException, Query +from pydantic import BaseModel +from sqlalchemy.engine import Engine +from starlette import status + +from authentication import get_current_user#, has_role +from database.model.concept.aiod_entry import AIoDEntryRead +from database.model.resource_read_and_create import resource_read + +SORT = {"identifier": "asc"} +LIMIT_MAX = 1000 + +RESOURCE = TypeVar("RESOURCE") + + +class SearchResult(BaseModel, Generic[RESOURCE]): + total_hits: int + resources: list[RESOURCE] + next_offset: list | None + current_page: int + page_size: int + +class SearchRouter(Generic[RESOURCE], abc.ABC): + """ + Providing search functionality in ElasticSearch + """ + + def __init__(self, client: Elasticsearch): + self.client: Elasticsearch = client + + @property + @abc.abstractmethod + def es_index(self) -> str: + """The name of the elasticsearch index""" + + @property + @abc.abstractmethod + def resource_name_plural(self) -> str: + """The name of the resource (plural)""" + + @property + def key_translations(self) -> dict[str, str]: + """If an attribute is called differently in elasticsearch than in our + metadata model, you can define a translation dictionary here. The key + should be the name in elasticsearch, the value the name in our data + model.""" + return {} + + @property + @abc.abstractmethod + def resource_class(self) -> RESOURCE: + """The resource class""" + + def create(self, engine: Engine, url_prefix: str) -> APIRouter: + router = APIRouter() + read_class = resource_read(self.resource_class) # type: ignore + + # TODO: check parameters correctness + @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", + tags=["search"]) + def search( + platforms: Annotated[list[str] | None, Query()] = None, + search_query: str = "", + search_fields: Annotated[list[str] | None, Query()] = None, + limit: int = 10, + page: int = 1 +# offset: Annotated[list[str] | None, Query()] = None + ) -> SearchResult[read_class]: # type: ignore + f""" + Search for {self.resource_name_plural}. + """ + + if limit > LIMIT_MAX: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"The limit should be maximum {LIMIT_MAX}. " + f"If you want more results, use pagination." + ) + + # Prepare query + # ----------------------------------------------------------------- + + # Matches of the search concept for each field + if search_fields: + + # The selected fields must be present in the match fields + if not set(search_fields).issubset(set(self.match_fields)): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"The available search fields for this entity " + f"are:{self.match_fields}" + ) + + # Search in specific search fields + query_matches = [{'match': {f: search_query}} + for f in search_fields] + + else: + + # Search in any match field + query_matches = [{'match': {f: search_query}} + for f in self.match_fields] + + if platforms: + + # Matches of the platform field for each selected platform + platform_matches = [{'match': {'platform': p}} + for p in platforms] + + # Must match platform and search query on at least one field + query = { + 'bool': { + 'must': { + 'bool': { + 'should': platform_matches, + 'minimum_should_match': 1 + } + }, + 'should': query_matches, + 'minimum_should_match': 1 + } + } + + else: + + # Must match search concept on at least one field + query = { + 'bool': { + 'should': query_matches, + 'minimum_should_match': 1 + } + } + + # ----------------------------------------------------------------- + +# result = self.client.search(index=self.es_index, query=query, +# size=limit, sort=SORT, +# search_after=offset) + from_ = limit*(page - 1) + result = self.client.search(index=self.es_index, query=query, + from_=from_, size=limit, sort=SORT) + + total_hits = result["hits"]["total"]["value"] + resources: list[read_class] = [ # type: ignore + self._cast_resource(read_class, hit["_source"]) # type: ignore + for hit in result["hits"]["hits"] + ] + next_offset = ( + result["hits"]["hits"][-1]["sort"] + if len(result["hits"]["hits"]) > 0 else None + ) + return SearchResult[read_class]( # type: ignore + total_hits=total_hits, + resources=resources, + next_offset=next_offset, + current_page=page, + page_size=limit + ) + + return router + + def _cast_resource( + self, resource_class: RESOURCE, resource_dict: dict[str, Any] + ) -> Type[RESOURCE]: + kwargs = { + self.key_translations.get(key, key): val + for key, val in resource_dict.items() + if key != "type" and not key.startswith("@") + } + resource = resource_class(**kwargs) # type: ignore + resource.aiod_entry = AIoDEntryRead( + date_modified=resource_dict["date_modified"], + date_created=resource_dict["date_created"], + status=resource_dict["status"], + ) + return resource diff --git a/src/routers/search_routers/search_router_generic.py b/src/routers/search_routers/search_router_generic.py new file mode 100644 index 00000000..c33ec197 --- /dev/null +++ b/src/routers/search_routers/search_router_generic.py @@ -0,0 +1,20 @@ +from database.model.dataset.dataset import Dataset +from routers.search_router import SearchRouter + + +class SearchRouterAIODEntry(SearchRouter[Dataset]): + @property + def es_index(self) -> str: + return "dataset" + + @property + def resource_name_plural(self) -> str: + return "datasets" + + @property + def resource_class(self): + return Dataset + + @property + def match_fields(self): + return ['name', 'description', 'issn'] From 9b1fd52366ff93301a951fe427784228ee9f75f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Sat, 11 Nov 2023 13:42:48 +0100 Subject: [PATCH 44/79] pull request modifications --- es/elasticsearch_query.py | 100 ------- es/setup/Dockerfile | 4 +- es/setup/{curl.sh => create_indices.sh} | 0 logstash/Dockerfile | 10 +- logstash/config/logstash.yml | 9 +- logstash/{pipeline => }/logstash_config.py | 243 +++++++++++------- logstash/pipeline/conf/init_table.conf | 20 +- logstash/pipeline/conf/sync_table.conf | 38 +-- logstash/pipeline/sql/init_dataset.sql | 2 + logstash/pipeline/sql/init_event.sql | 2 + logstash/pipeline/sql/init_experiment.sql | 2 + logstash/pipeline/sql/init_ml_model.sql | 2 + logstash/pipeline/sql/init_news.sql | 2 + logstash/pipeline/sql/init_organisation.sql | 2 + logstash/pipeline/sql/init_project.sql | 2 + logstash/pipeline/sql/init_publication.sql | 2 + logstash/pipeline/sql/init_service.sql | 2 + logstash/pipeline/sql/rm_dataset.sql | 35 +-- logstash/pipeline/sql/rm_event.sql | 40 +-- logstash/pipeline/sql/rm_experiment.sql | 35 +-- logstash/pipeline/sql/rm_ml_model.sql | 34 +-- logstash/pipeline/sql/rm_news.sql | 28 +- logstash/pipeline/sql/rm_organisation.sql | 34 +-- logstash/pipeline/sql/rm_project.sql | 33 +-- logstash/pipeline/sql/rm_publication.sql | 39 +-- logstash/pipeline/sql/rm_service.sql | 28 +- logstash/pipeline/sql/sync_dataset.sql | 2 + logstash/pipeline/sql/sync_event.sql | 2 + logstash/pipeline/sql/sync_experiment.sql | 2 + logstash/pipeline/sql/sync_ml_model.sql | 2 + logstash/pipeline/sql/sync_news.sql | 2 + logstash/pipeline/sql/sync_organisation.sql | 2 + logstash/pipeline/sql/sync_project.sql | 2 + logstash/pipeline/sql/sync_publication.sql | 2 + logstash/pipeline/sql/sync_service.sql | 2 + src/routers/search_router.py | 91 ++++--- .../search_routers/search_router_datasets.py | 2 +- .../search_routers/search_router_events.py | 2 +- .../search_router_experiments.py | 2 +- .../search_routers/search_router_generic.py | 2 +- .../search_routers/search_router_ml_models.py | 2 +- .../search_routers/search_router_news.py | 2 +- .../search_router_organisations.py | 2 +- .../search_routers/search_router_projects.py | 2 +- .../search_router_publications.py | 2 +- .../search_routers/search_router_services.py | 2 +- 46 files changed, 310 insertions(+), 567 deletions(-) delete mode 100755 es/elasticsearch_query.py rename es/setup/{curl.sh => create_indices.sh} (100%) rename logstash/{pipeline => }/logstash_config.py (62%) diff --git a/es/elasticsearch_query.py b/es/elasticsearch_query.py deleted file mode 100755 index 5d2c4c6e..00000000 --- a/es/elasticsearch_query.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import json -from elasticsearch import Elasticsearch - -# Global parameters -SIZE = 2 -SORT = {"identifier": "asc"} - -def main(index, search_concept, platforms): - - # Get elasticsearch password - with open("../.env", "r") as f: - for line in f: - if "ES_PASSWORD" in line: - elastic_password = line.split("=")[1][:-1] - if "ES_USER" in line: - elastic_user = line.split("=")[1][:-1] - - # Generate client - es_client = Elasticsearch("http://localhost:9200", - basic_auth=(elastic_user, elastic_password)) - - # Prepare query - # ------------------------------------------------------------------------- - - # Search fields corresponding to the indices - match_fields = ['name', 'description'] - if ('dataset' in index) or ('publication' in index): - match_fields.append('issn') - if 'publication' in index: - match_fields.append('isbn') - if 'service' in index: - match_fields.append('slogan') - - # Matches of the search concept for each field - query_matches = [{'match': {f: search_concept}} for f in match_fields] - - if platforms: - - # Matches of the platform field for each selected platform - platform_matches = [{'match': {'platform': p}} for p in platforms] - - # Query must match platform and search concept on at least one field - query = { - 'bool': { - 'must': { - 'bool': { - 'should': platform_matches, - 'minimum_should_match': 1 - } - }, - 'should': query_matches, - 'minimum_should_match': 1 - } - } - - else: - - # Query must match search concept on at least one field - query = { - 'bool': { - 'should': query_matches, - 'minimum_should_match': 1 - } - } - - # ------------------------------------------------------------------------- - - # Perform first search - result = es_client.search(index=index, query=query, size=SIZE, sort=SORT) - - print(json.dumps(dict(result), indent=4)) - - # Print total number of results - print(f"TOTAL RESULTS: {result['hits']['total']['value']}") - - query_result = 1 - while result["hits"]["hits"]: - - # Print current results - print(f"QUERY RESULT: {query_result}") - print(json.dumps(dict(result)["hits"]["hits"], indent=4)) - - # Actualise search_after and query_result for the next search - search_after = result["hits"]["hits"][-1]["sort"] - query_result += 1 - - # Perform next search - result = es_client.search( - index=index, query=query, size=SIZE, search_after=search_after, sort=SORT - ) - - -if __name__ == "__main__": - index = ["publication"] # List of assets - search_concept = "name" # Search concept - platforms = ["example", "ai4experiments"] # List of platforms - main(index, search_concept, platforms) diff --git a/es/setup/Dockerfile b/es/setup/Dockerfile index 661ed8c8..1e9c0bef 100644 --- a/es/setup/Dockerfile +++ b/es/setup/Dockerfile @@ -11,7 +11,7 @@ COPY organisation.json /organisation.json COPY project.json /project.json COPY publication.json /publication.json COPY service.json /service.json -COPY curl.sh /curl.sh +COPY create_indices.sh /create_indices.sh -ENTRYPOINT ["/bin/bash", "/curl.sh"] +ENTRYPOINT ["/bin/bash", "/create_indices.sh"] diff --git a/es/setup/curl.sh b/es/setup/create_indices.sh similarity index 100% rename from es/setup/curl.sh rename to es/setup/create_indices.sh diff --git a/logstash/Dockerfile b/logstash/Dockerfile index 8e805e17..432bdf4b 100644 --- a/logstash/Dockerfile +++ b/logstash/Dockerfile @@ -1,11 +1,11 @@ # https://www.docker.elastic.co/ -FROM docker.elastic.co/logstash/logstash:7.13.0 +FROM docker.elastic.co/logstash/logstash:8.11.0 # Download MySQL JDBC driver to connect Logstash to MySQL -RUN curl -Lo "mysql-connector-java-8.0.22.tar.gz" "https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-8.0.22.tar.gz" \ - && tar -xf "mysql-connector-java-8.0.22.tar.gz" "mysql-connector-java-8.0.22/mysql-connector-java-8.0.22.jar" \ - && mv "mysql-connector-java-8.0.22/mysql-connector-java-8.0.22.jar" "mysql-connector-java-8.0.22.jar" \ - && rm -r "mysql-connector-java-8.0.22" "mysql-connector-java-8.0.22.tar.gz" +RUN curl -Lo "mysql-connector-j-8.2.0.tar.gz" "https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-j-8.2.0.tar.gz" \ + && tar -xf "mysql-connector-j-8.2.0.tar.gz" "mysql-connector-j-8.2.0/mysql-connector-j-8.2.0.jar" \ + && mv "mysql-connector-j-8.2.0/mysql-connector-j-8.2.0.jar" "mysql-connector-j.jar" \ + && rm -r "mysql-connector-j-8.2.0" "mysql-connector-j-8.2.0.tar.gz" ENTRYPOINT ["/usr/local/bin/docker-entrypoint"] diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml index a48c35ff..f6f2926e 100644 --- a/logstash/config/logstash.yml +++ b/logstash/config/logstash.yml @@ -1,12 +1,7 @@ ---- -## Default Logstash configuration from Logstash base image. -## https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml -# +# This file has been generated by `logstash_config.py` file +# --------------------------------------------------------- http.host: "0.0.0.0" xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] - -## X-Pack security credentials -# xpack.monitoring.enabled: true xpack.monitoring.elasticsearch.username: elastic xpack.monitoring.elasticsearch.password: changeme diff --git a/logstash/pipeline/logstash_config.py b/logstash/logstash_config.py similarity index 62% rename from logstash/pipeline/logstash_config.py rename to logstash/logstash_config.py index f0865cdd..d9b8224e 100755 --- a/logstash/pipeline/logstash_config.py +++ b/logstash/logstash_config.py @@ -7,7 +7,7 @@ # ============================================================================= # Repository base path -REPO_PATH = os.path.join("..", "..") +REPO_PATH = ".." # Working path WORKING_PATH = os.path.join(".") @@ -15,8 +15,19 @@ # MACROS FOR THE DOCUMENTS GENERATION FUNCTIONS # ============================================================================= +INFO = """{0} This file has been generated by `logstash_config.py` file +{0} --------------------------------------------------------- +""" + +CONF_BASE = """http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] +xpack.monitoring.enabled: true +xpack.monitoring.elasticsearch.username: {0} +xpack.monitoring.elasticsearch.password: {1} +""" + INIT_INPUT_BASE = """ jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "{0}" @@ -29,7 +40,7 @@ """ SYNC_INPUT_BASE = """ jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "{0}" @@ -42,7 +53,7 @@ type => "{2}" }} jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "{0}" @@ -143,6 +154,11 @@ ORDER BY aiod.{0}.identifier """ +SQL_RM_BASE = """SELECT {0}.identifier +FROM aiod.{0} +WHERE aiod.{0}.date_deleted IS NOT NULL AND aiod.{0}.date_deleted > :sql_last_value +""" + AI_ASSET_BASE = """, -- AIAsset {0}.ai_asset_id AS `asset_identifier`, @@ -198,22 +214,32 @@ SYNC_CLAUSE = """ WHERE aiod.{0}.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" -RM_CLAUSE = """ -WHERE aiod.{0}.date_deleted IS NOT NULL AND aiod.{0}.date_deleted > :sql_last_value""" - # DOCUMENTS GENERATION FUNCTIONS # ============================================================================= -def generate_config_file(conf_path, db_user, db_pass, es_user, es_pass, - entities, sync=False): +def generate_conf_file(conf_path, es_user, es_pass): + + file_path = os.path.join(conf_path, "logstash.yml") + + # Generate configuration file + with open(file_path, 'w') as f: + + # Info + f.write(INFO.format('#')) + + # Configuration + f.write(CONF_BASE.format(es_user, es_pass)) + +def generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, + es_user, es_pass, entities, sync=False): if not sync: # init file - file_path = os.path.join(conf_path, "init_table.conf") + file_path = os.path.join(pipeline_conf_path, "init_table.conf") input_base = INIT_INPUT_BASE date_filter = DATE_FILTER.format("") output_base = INIT_OUTPUT_BASE else: # sync file - file_path = os.path.join(conf_path, "sync_table.conf") + file_path = os.path.join(pipeline_conf_path, "sync_table.conf") input_base = SYNC_INPUT_BASE date_filter = DATE_FILTER.format(SYNC_DATE_FILTER_ADDON) output_base = SYNC_OUTPUT_BASE @@ -221,6 +247,9 @@ def generate_config_file(conf_path, db_user, db_pass, es_user, es_pass, # Generate configuration file with open(file_path, 'w') as f: + # Info + f.write(INFO.format('#')) + # Input f.write("input {\n") for entity in entities: @@ -239,82 +268,94 @@ def generate_config_file(conf_path, db_user, db_pass, es_user, es_pass, f.write(output_base.format(es_user, es_pass, entity)) f.write("}\n") -def generate_sql_file(sql_path, entity, sync=False, rm=False): +def generate_pipeline_sql_files(pipeline_sql_path, entity, sync=False): + + # Generate output file path + if sync: + file_path = os.path.join(pipeline_sql_path, f"sync_{entity}.sql") + else: + file_path = os.path.join(pipeline_sql_path, f"init_{entity}.sql") + + # Write the output file + with open(file_path, 'w') as f: + + # Info + f.write(INFO.format('--')) + + # Left joins + left_joins = "" + + # For ai_asset entities + ai_asset_attributes = "" + if entity in ai_asset_entities: + ai_asset_attributes = AI_ASSET_BASE.format(entity) + left_joins += LEFT_LICENSE.format(entity) + + # Attributes + entity_attributes = "" + if entity in attributes.keys(): + entity_attributes = (ATTRIBUTES_BASE + + f"{entity}.{attributes[entity][0]}") + for attribute in attributes[entity][1:]: + entity_attributes += f",\n {entity}.{attribute}" + + # For entities with a type relation + type_attribute = "" + if entity in type_entities: + type_attribute = TYPE_BASE.format(entity) + left_joins += LEFT_TYPE.format(entity) + + # For entities with a mode relation + mode_attribute = "" + if entity in mode_entities: + mode_attribute = MODE_BASE.format(entity) + left_joins += LEFT_MODE.format(entity) + + # For entities with a status relation + status_attribute = "" + if entity in status_entities: + status_attribute = STATUS_BASE.format(entity) + left_joins += LEFT_STATUS.format(entity) + + # For entities with an agent relation + agent_attribute = "" + if entity in agent_entities.keys(): + agent_attribute = AGENT_BASE.format(agent_entities[entity][1]) + left_joins += LEFT_AGENT.format(entity, agent_entities[entity][0]) + + # For entities with an organisation relation + organisation_attribute = "" + if entity in organisation_entities.keys(): + organisation_attribute = ORGANISATION_BASE.format( + organisation_entities[entity][1]) + left_joins += LEFT_ORGANISATION.format(entity, + organisation_entities[entity][0]) + + # Where clause + if sync: + where_clause = SYNC_CLAUSE.format(entity) + else: + where_clause = INIT_CLAUSE.format(entity) + + f.write(SQL_BASE.format(entity, ai_asset_attributes, + entity_attributes, type_attribute, + mode_attribute, status_attribute, + agent_attribute, organisation_attribute, + left_joins, where_clause)) + +def generate_pipeline_sql_rm_files(pipeline_sql_path, entity): # Generate output file path - if rm: # rm (regardless of the value of sync) - file_path = os.path.join(sql_path, f"rm_{entity}.sql") - elif sync: # sync and not rm - file_path = os.path.join(sql_path, f"sync_{entity}.sql") - else: # not sync and not rm - file_path = os.path.join(sql_path, f"init_{entity}.sql") + file_path = os.path.join(pipeline_sql_path, f"rm_{entity}.sql") # Write the output file with open(file_path, 'w') as f: - - # Left joins - left_joins = "" - - # For ai_asset entities - ai_asset_attributes = "" - if entity in ai_asset_entities: - ai_asset_attributes = AI_ASSET_BASE.format(entity) - left_joins += LEFT_LICENSE.format(entity) - - # Attributes - entity_attributes = "" - if entity in attributes.keys(): - entity_attributes = (ATTRIBUTES_BASE - + f"{entity}.{attributes[entity][0]}") - for attribute in attributes[entity][1:]: - entity_attributes += f",\n {entity}.{attribute}" - - # For entities with a type relation - type_attribute = "" - if entity in type_entities: - type_attribute = TYPE_BASE.format(entity) - left_joins += LEFT_TYPE.format(entity) - - # For entities with a mode relation - mode_attribute = "" - if entity in mode_entities: - mode_attribute = MODE_BASE.format(entity) - left_joins += LEFT_MODE.format(entity) - - # For entities with a status relation - status_attribute = "" - if entity in status_entities: - status_attribute = STATUS_BASE.format(entity) - left_joins += LEFT_STATUS.format(entity) - - # For entities with an agent relation - agent_attribute = "" - if entity in agent_entities.keys(): - agent_attribute = AGENT_BASE.format(agent_entities[entity][1]) - left_joins += LEFT_AGENT.format(entity, - agent_entities[entity][0]) - - # For entities with an organisation relation - organisation_attribute = "" - if entity in organisation_entities.keys(): - organisation_attribute = ORGANISATION_BASE.format( - organisation_entities[entity][1]) - left_joins += LEFT_ORGANISATION.format(entity, - organisation_entities[entity][0]) - - # Where clause - if rm: # rm (regardless of the value of sync) - where_clause = RM_CLAUSE.format(entity) - elif sync: # sync and not rm - where_clause = SYNC_CLAUSE.format(entity) - else: # not sync and not rm - where_clause = INIT_CLAUSE.format(entity) - - f.write(SQL_BASE.format(entity, ai_asset_attributes, - entity_attributes, type_attribute, - mode_attribute, status_attribute, - agent_attribute, organisation_attribute, - left_joins, where_clause)) + + # Info + f.write(INFO.format('--')) + + # SQL query + f.write(SQL_RM_BASE.format(entity)) # MAIN FUNCTION # ============================================================================= @@ -323,25 +364,35 @@ def main(base_path, db_user, db_pass, es_user, es_pass, entities, ai_asset_entities, attributes, type_entities, mode_entities, status_entities, agent_entities, organisation_entities): - # Make configuration dirs - conf_path = os.path.join(base_path, "conf") + # Make configuration dir + conf_path = os.path.join(base_path, "config") os.makedirs(conf_path, exist_ok=True) - sql_path = os.path.join(base_path, "sql") - os.makedirs(sql_path, exist_ok=True) - # Configuration init file - generate_config_file(conf_path, db_user, db_pass, es_user, es_pass, - entities, sync=False) + # Make pipeline configuration dirs + pipeline_conf_path = os.path.join(base_path, "pipeline", "conf") + os.makedirs(pipeline_conf_path, exist_ok=True) + pipeline_sql_path = os.path.join(base_path, "pipeline", "sql") + os.makedirs(pipeline_sql_path, exist_ok=True) + + # Generate logstash configuration file + generate_conf_file(conf_path, es_user, es_pass) + + # Generate pipeline configuration init file + generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, + es_user, es_pass, entities, sync=False) - # Configuration sync file - generate_config_file(conf_path, db_user, db_pass, es_user, es_pass, - entities, sync=True) + # Generate pipeline configuration sync file + generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, + es_user, es_pass, entities, sync=True) + + # Generate SQL init and sync files + for entity in entities: + generate_pipeline_sql_files(pipeline_sql_path, entity, sync=False) + generate_pipeline_sql_files(pipeline_sql_path, entity, sync=True) - # Generate SQL init, sync and rm files + # Generate SQL rm files for entity in entities: - generate_sql_file(sql_path, entity, sync=False, rm=False) # init - generate_sql_file(sql_path, entity, sync=True, rm=False) # sync - generate_sql_file(sql_path, entity, rm=True) # rm + generate_pipeline_sql_rm_files(pipeline_sql_path, entity) if __name__ == "__main__": diff --git a/logstash/pipeline/conf/init_table.conf b/logstash/pipeline/conf/init_table.conf index f14ca5cd..df8bd2c1 100644 --- a/logstash/pipeline/conf/init_table.conf +++ b/logstash/pipeline/conf/init_table.conf @@ -1,6 +1,8 @@ +# This file has been generated by `logstash_config.py` file +# --------------------------------------------------------- input { jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -11,7 +13,7 @@ input { type => "dataset" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -22,7 +24,7 @@ input { type => "event" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -33,7 +35,7 @@ input { type => "experiment" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -44,7 +46,7 @@ input { type => "ml_model" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -55,7 +57,7 @@ input { type => "news" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -66,7 +68,7 @@ input { type => "organisation" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -77,7 +79,7 @@ input { type => "project" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -88,7 +90,7 @@ input { type => "publication" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" diff --git a/logstash/pipeline/conf/sync_table.conf b/logstash/pipeline/conf/sync_table.conf index 267b67f9..a0d8bc80 100644 --- a/logstash/pipeline/conf/sync_table.conf +++ b/logstash/pipeline/conf/sync_table.conf @@ -1,6 +1,8 @@ +# This file has been generated by `logstash_config.py` file +# --------------------------------------------------------- input { jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -13,7 +15,7 @@ input { type => "dataset" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -26,7 +28,7 @@ input { type => "rm_dataset" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -39,7 +41,7 @@ input { type => "event" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -52,7 +54,7 @@ input { type => "rm_event" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -65,7 +67,7 @@ input { type => "experiment" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -78,7 +80,7 @@ input { type => "rm_experiment" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -91,7 +93,7 @@ input { type => "ml_model" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -104,7 +106,7 @@ input { type => "rm_ml_model" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -117,7 +119,7 @@ input { type => "news" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -130,7 +132,7 @@ input { type => "rm_news" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -143,7 +145,7 @@ input { type => "organisation" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -156,7 +158,7 @@ input { type => "rm_organisation" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -169,7 +171,7 @@ input { type => "project" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -182,7 +184,7 @@ input { type => "rm_project" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -195,7 +197,7 @@ input { type => "publication" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -208,7 +210,7 @@ input { type => "rm_publication" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" @@ -221,7 +223,7 @@ input { type => "service" } jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-java-8.0.22.jar" + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" jdbc_driver_class => "com.mysql.jdbc.Driver" jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" jdbc_user => "root" diff --git a/logstash/pipeline/sql/init_dataset.sql b/logstash/pipeline/sql/init_dataset.sql index 3774e884..8e2aae6b 100644 --- a/logstash/pipeline/sql/init_dataset.sql +++ b/logstash/pipeline/sql/init_dataset.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept dataset.identifier, diff --git a/logstash/pipeline/sql/init_event.sql b/logstash/pipeline/sql/init_event.sql index 61293035..870814fa 100644 --- a/logstash/pipeline/sql/init_event.sql +++ b/logstash/pipeline/sql/init_event.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept event.identifier, diff --git a/logstash/pipeline/sql/init_experiment.sql b/logstash/pipeline/sql/init_experiment.sql index 62a26575..9f3fac0b 100644 --- a/logstash/pipeline/sql/init_experiment.sql +++ b/logstash/pipeline/sql/init_experiment.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept experiment.identifier, diff --git a/logstash/pipeline/sql/init_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql index b5216c09..7d7405f5 100644 --- a/logstash/pipeline/sql/init_ml_model.sql +++ b/logstash/pipeline/sql/init_ml_model.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept ml_model.identifier, diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql index 30a12659..09f33797 100644 --- a/logstash/pipeline/sql/init_news.sql +++ b/logstash/pipeline/sql/init_news.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept news.identifier, diff --git a/logstash/pipeline/sql/init_organisation.sql b/logstash/pipeline/sql/init_organisation.sql index 4a4bfa49..388ee122 100644 --- a/logstash/pipeline/sql/init_organisation.sql +++ b/logstash/pipeline/sql/init_organisation.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept organisation.identifier, diff --git a/logstash/pipeline/sql/init_project.sql b/logstash/pipeline/sql/init_project.sql index 6e9dd176..325e1a3c 100644 --- a/logstash/pipeline/sql/init_project.sql +++ b/logstash/pipeline/sql/init_project.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept project.identifier, diff --git a/logstash/pipeline/sql/init_publication.sql b/logstash/pipeline/sql/init_publication.sql index d14d8079..36702546 100644 --- a/logstash/pipeline/sql/init_publication.sql +++ b/logstash/pipeline/sql/init_publication.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept publication.identifier, diff --git a/logstash/pipeline/sql/init_service.sql b/logstash/pipeline/sql/init_service.sql index 96732d7a..1478826e 100644 --- a/logstash/pipeline/sql/init_service.sql +++ b/logstash/pipeline/sql/init_service.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept service.identifier, diff --git a/logstash/pipeline/sql/rm_dataset.sql b/logstash/pipeline/sql/rm_dataset.sql index 0c07b87a..da1c359f 100644 --- a/logstash/pipeline/sql/rm_dataset.sql +++ b/logstash/pipeline/sql/rm_dataset.sql @@ -1,34 +1,5 @@ -SELECT - -- Concept - dataset.identifier, - dataset.platform, - dataset.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - dataset.ai_resource_id AS `resource_identifier`, - dataset.name, - dataset.description, - dataset.same_as, - -- AIAsset - dataset.ai_asset_id AS `asset_identifier`, - dataset.date_published, - dataset.version, - license.name AS `license`, - -- Attributes - dataset.issn, - dataset.measurement_technique, - dataset.temporal_coverage, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- +SELECT dataset.identifier FROM aiod.dataset -INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.dataset.license_identifier=aiod.license.identifier -LEFT JOIN aiod.dataset_application_area_link ON aiod.dataset_application_area_link.from_identifier=aiod.dataset.identifier -LEFT JOIN aiod.application_area ON aiod.dataset_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.dataset.date_deleted IS NOT NULL AND aiod.dataset.date_deleted > :sql_last_value -GROUP BY aiod.dataset.identifier -ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/rm_event.sql b/logstash/pipeline/sql/rm_event.sql index 888e6215..2bba4020 100644 --- a/logstash/pipeline/sql/rm_event.sql +++ b/logstash/pipeline/sql/rm_event.sql @@ -1,39 +1,5 @@ -SELECT - -- Concept - event.identifier, - event.platform, - event.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - event.ai_resource_id AS `resource_identifier`, - event.name, - event.description, - event.same_as, - -- Attributes - event.start_date, - event.end_date, - event.schedule, - event.registration_link, - event.organiser_identifier, - -- Mode - event_mode.name AS `mode`, - -- Status - event_status.name AS `event_status`, - -- Agent - agent.type AS `organiser_type`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- +SELECT event.identifier FROM aiod.event -INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.event_mode ON aiod.event.mode_identifier=aiod.event_mode.identifier -LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier -LEFT JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier -LEFT JOIN aiod.event_application_area_link ON aiod.event_application_area_link.from_identifier=aiod.event.identifier -LEFT JOIN aiod.application_area ON aiod.event_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.event.date_deleted IS NOT NULL AND aiod.event.date_deleted > :sql_last_value -GROUP BY aiod.event.identifier -ORDER BY aiod.event.identifier diff --git a/logstash/pipeline/sql/rm_experiment.sql b/logstash/pipeline/sql/rm_experiment.sql index 9d75f3cc..92ae7107 100644 --- a/logstash/pipeline/sql/rm_experiment.sql +++ b/logstash/pipeline/sql/rm_experiment.sql @@ -1,34 +1,5 @@ -SELECT - -- Concept - experiment.identifier, - experiment.platform, - experiment.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - experiment.ai_resource_id AS `resource_identifier`, - experiment.name, - experiment.description, - experiment.same_as, - -- AIAsset - experiment.ai_asset_id AS `asset_identifier`, - experiment.date_published, - experiment.version, - license.name AS `license`, - -- Attributes - experiment.experimental_workflow, - experiment.execution_settings, - experiment.reproducibility_explanation, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- +SELECT experiment.identifier FROM aiod.experiment -INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.experiment.license_identifier=aiod.license.identifier -LEFT JOIN aiod.experiment_application_area_link ON aiod.experiment_application_area_link.from_identifier=aiod.experiment.identifier -LEFT JOIN aiod.application_area ON aiod.experiment_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.experiment.date_deleted IS NOT NULL AND aiod.experiment.date_deleted > :sql_last_value -GROUP BY aiod.experiment.identifier -ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/rm_ml_model.sql b/logstash/pipeline/sql/rm_ml_model.sql index 2a146b9f..51d9e2df 100644 --- a/logstash/pipeline/sql/rm_ml_model.sql +++ b/logstash/pipeline/sql/rm_ml_model.sql @@ -1,33 +1,5 @@ -SELECT - -- Concept - ml_model.identifier, - ml_model.platform, - ml_model.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - ml_model.ai_resource_id AS `resource_identifier`, - ml_model.name, - ml_model.description, - ml_model.same_as, - -- AIAsset - ml_model.ai_asset_id AS `asset_identifier`, - ml_model.date_published, - ml_model.version, - license.name AS `license`, - -- Type - ml_model_type.name AS `ml_model_type`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- +SELECT ml_model.identifier FROM aiod.ml_model -INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifier -LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier -LEFT JOIN aiod.ml_model_application_area_link ON aiod.ml_model_application_area_link.from_identifier=aiod.ml_model.identifier -LEFT JOIN aiod.application_area ON aiod.ml_model_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.ml_model.date_deleted IS NOT NULL AND aiod.ml_model.date_deleted > :sql_last_value -GROUP BY aiod.ml_model.identifier -ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/rm_news.sql b/logstash/pipeline/sql/rm_news.sql index 0ee6822a..0f53c36a 100644 --- a/logstash/pipeline/sql/rm_news.sql +++ b/logstash/pipeline/sql/rm_news.sql @@ -1,27 +1,5 @@ -SELECT - -- Concept - news.identifier, - news.platform, - news.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - news.ai_resource_id AS `resource_identifier`, - news.name, - news.description, - news.same_as, - -- Attributes - news.headline, - news.alternative_headline, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- +SELECT news.identifier FROM aiod.news -INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.news_application_area_link ON aiod.news_application_area_link.from_identifier=aiod.news.identifier -LEFT JOIN aiod.application_area ON aiod.news_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.news.date_deleted IS NOT NULL AND aiod.news.date_deleted > :sql_last_value -GROUP BY aiod.news.identifier -ORDER BY aiod.news.identifier diff --git a/logstash/pipeline/sql/rm_organisation.sql b/logstash/pipeline/sql/rm_organisation.sql index 25173ac7..160df96d 100644 --- a/logstash/pipeline/sql/rm_organisation.sql +++ b/logstash/pipeline/sql/rm_organisation.sql @@ -1,33 +1,5 @@ -SELECT - -- Concept - organisation.identifier, - organisation.platform, - organisation.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - organisation.ai_resource_id AS `resource_identifier`, - organisation.name, - organisation.description, - organisation.same_as, - -- Attributes - organisation.date_founded, - organisation.legal_name, - -- Type - organisation_type.name AS `organisation_type`, - -- Agent - agent.type AS `agent`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- +SELECT organisation.identifier FROM aiod.organisation -INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.organisation_type ON aiod.organisation.type_identifier=aiod.organisation_type.identifier -LEFT JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier -LEFT JOIN aiod.organisation_application_area_link ON aiod.organisation_application_area_link.from_identifier=aiod.organisation.identifier -LEFT JOIN aiod.application_area ON aiod.organisation_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.organisation.date_deleted IS NOT NULL AND aiod.organisation.date_deleted > :sql_last_value -GROUP BY aiod.organisation.identifier -ORDER BY aiod.organisation.identifier diff --git a/logstash/pipeline/sql/rm_project.sql b/logstash/pipeline/sql/rm_project.sql index 037dc8e9..486988bd 100644 --- a/logstash/pipeline/sql/rm_project.sql +++ b/logstash/pipeline/sql/rm_project.sql @@ -1,32 +1,5 @@ -SELECT - -- Concept - project.identifier, - project.platform, - project.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - project.ai_resource_id AS `resource_identifier`, - project.name, - project.description, - project.same_as, - -- Attributes - project.start_date, - project.end_date, - project.total_cost_euro, - project.coordinator_identifier, - -- Organisation - organisation.name AS `coordinator_name`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- +SELECT project.identifier FROM aiod.project -INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier -LEFT JOIN aiod.project_application_area_link ON aiod.project_application_area_link.from_identifier=aiod.project.identifier -LEFT JOIN aiod.application_area ON aiod.project_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.project.date_deleted IS NOT NULL AND aiod.project.date_deleted > :sql_last_value -GROUP BY aiod.project.identifier -ORDER BY aiod.project.identifier diff --git a/logstash/pipeline/sql/rm_publication.sql b/logstash/pipeline/sql/rm_publication.sql index 706e3bd6..980ab79e 100644 --- a/logstash/pipeline/sql/rm_publication.sql +++ b/logstash/pipeline/sql/rm_publication.sql @@ -1,38 +1,5 @@ -SELECT - -- Concept - publication.identifier, - publication.platform, - publication.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - publication.ai_resource_id AS `resource_identifier`, - publication.name, - publication.description, - publication.same_as, - -- AIAsset - publication.ai_asset_id AS `asset_identifier`, - publication.date_published, - publication.version, - license.name AS `license`, - -- Attributes - publication.permanent_identifier, - publication.isbn, - publication.issn, - publication.knowledge_asset_id AS `knowledge_asset_identifier`, - -- Type - publication_type.name AS `publication_type`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- +SELECT publication.identifier FROM aiod.publication -INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.publication.license_identifier=aiod.license.identifier -LEFT JOIN aiod.publication_type ON aiod.publication.type_identifier=aiod.publication_type.identifier -LEFT JOIN aiod.publication_application_area_link ON aiod.publication_application_area_link.from_identifier=aiod.publication.identifier -LEFT JOIN aiod.application_area ON aiod.publication_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.publication.date_deleted IS NOT NULL AND aiod.publication.date_deleted > :sql_last_value -GROUP BY aiod.publication.identifier -ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/rm_service.sql b/logstash/pipeline/sql/rm_service.sql index d4ad46f0..82d24c1c 100644 --- a/logstash/pipeline/sql/rm_service.sql +++ b/logstash/pipeline/sql/rm_service.sql @@ -1,27 +1,5 @@ -SELECT - -- Concept - service.identifier, - service.platform, - service.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - service.ai_resource_id AS `resource_identifier`, - service.name, - service.description, - service.same_as, - -- Attributes - service.slogan, - service.terms_of_service, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- +SELECT service.identifier FROM aiod.service -INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.service_application_area_link ON aiod.service_application_area_link.from_identifier=aiod.service.identifier -LEFT JOIN aiod.application_area ON aiod.service_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.service.date_deleted IS NOT NULL AND aiod.service.date_deleted > :sql_last_value -GROUP BY aiod.service.identifier -ORDER BY aiod.service.identifier diff --git a/logstash/pipeline/sql/sync_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql index c0b30a86..1758c91e 100644 --- a/logstash/pipeline/sql/sync_dataset.sql +++ b/logstash/pipeline/sql/sync_dataset.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept dataset.identifier, diff --git a/logstash/pipeline/sql/sync_event.sql b/logstash/pipeline/sql/sync_event.sql index 7af694ae..205f07d7 100644 --- a/logstash/pipeline/sql/sync_event.sql +++ b/logstash/pipeline/sql/sync_event.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept event.identifier, diff --git a/logstash/pipeline/sql/sync_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql index 198b287d..2b88047a 100644 --- a/logstash/pipeline/sql/sync_experiment.sql +++ b/logstash/pipeline/sql/sync_experiment.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept experiment.identifier, diff --git a/logstash/pipeline/sql/sync_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql index 4825aad2..0f2d9199 100644 --- a/logstash/pipeline/sql/sync_ml_model.sql +++ b/logstash/pipeline/sql/sync_ml_model.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept ml_model.identifier, diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql index 42a6b694..d4d9005d 100644 --- a/logstash/pipeline/sql/sync_news.sql +++ b/logstash/pipeline/sql/sync_news.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept news.identifier, diff --git a/logstash/pipeline/sql/sync_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql index 88cee11b..e39db777 100644 --- a/logstash/pipeline/sql/sync_organisation.sql +++ b/logstash/pipeline/sql/sync_organisation.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept organisation.identifier, diff --git a/logstash/pipeline/sql/sync_project.sql b/logstash/pipeline/sql/sync_project.sql index 626fdc30..7d712b30 100644 --- a/logstash/pipeline/sql/sync_project.sql +++ b/logstash/pipeline/sql/sync_project.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept project.identifier, diff --git a/logstash/pipeline/sql/sync_publication.sql b/logstash/pipeline/sql/sync_publication.sql index eb548dc6..68e38dfa 100644 --- a/logstash/pipeline/sql/sync_publication.sql +++ b/logstash/pipeline/sql/sync_publication.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept publication.identifier, diff --git a/logstash/pipeline/sql/sync_service.sql b/logstash/pipeline/sql/sync_service.sql index 96c114aa..202101c9 100644 --- a/logstash/pipeline/sql/sync_service.sql +++ b/logstash/pipeline/sql/sync_service.sql @@ -1,3 +1,5 @@ +-- This file has been generated by `logstash_config.py` file +-- --------------------------------------------------------- SELECT -- Concept service.identifier, diff --git a/src/routers/search_router.py b/src/routers/search_router.py index 93e2cbc3..0ca9c539 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -1,5 +1,4 @@ import abc -import os from typing import TypeVar, Generic, Any, Type, Annotated from elasticsearch import Elasticsearch @@ -56,6 +55,11 @@ def key_translations(self) -> dict[str, str]: def resource_class(self) -> RESOURCE: """The resource class""" + @property + @abc.abstractmethod + def match_fields(self) -> set: + """The set of indexed fields""" + def create(self, engine: Engine, url_prefix: str) -> APIRouter: router = APIRouter() read_class = resource_read(self.resource_class) # type: ignore @@ -86,26 +90,21 @@ def search( # ----------------------------------------------------------------- # Matches of the search concept for each field - if search_fields: - - # The selected fields must be present in the match fields - if not set(search_fields).issubset(set(self.match_fields)): - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The available search fields for this entity " - f"are:{self.match_fields}" - ) - - # Search in specific search fields - query_matches = [{'match': {f: search_query}} - for f in search_fields] - - else: - - # Search in any match field - query_matches = [{'match': {f: search_query}} - for f in self.match_fields] + fields = search_fields if search_fields else self.match_fields + if not set(fields).issubset(self.match_fields): + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, + detail=f"The available search fields for " + f"this entity are:" + f"{self.match_fields}") + query_matches = [{'match': {f: search_query}} for f in fields] + # Must match search concept on at least one field + query = { + 'bool': { + 'should': query_matches, + 'minimum_should_match': 1 + } + } if platforms: # Matches of the platform field for each selected platform @@ -113,28 +112,38 @@ def search( for p in platforms] # Must match platform and search query on at least one field - query = { - 'bool': { - 'must': { - 'bool': { - 'should': platform_matches, - 'minimum_should_match': 1 - } - }, - 'should': query_matches, - 'minimum_should_match': 1 - } - } + query['bool']['must'] = {'bool': {'should': platform_matches, + 'minimum_should_match': 1}} - else: - - # Must match search concept on at least one field - query = { - 'bool': { - 'should': query_matches, - 'minimum_should_match': 1 - } - } +# if platforms: +# +# # Matches of the platform field for each selected platform +# platform_matches = [{'match': {'platform': p}} +# for p in platforms] +# +# # Must match platform and search query on at least one field +# query = { +# 'bool': { +# 'must': { +# 'bool': { +# 'should': platform_matches, +# 'minimum_should_match': 1 +# } +# }, +# 'should': query_matches, +# 'minimum_should_match': 1 +# } +# } +# +# else: +# +# # Must match search concept on at least one field +# query = { +# 'bool': { +# 'should': query_matches, +# 'minimum_should_match': 1 +# } +# } # ----------------------------------------------------------------- diff --git a/src/routers/search_routers/search_router_datasets.py b/src/routers/search_routers/search_router_datasets.py index e30c404d..9bbc5c18 100644 --- a/src/routers/search_routers/search_router_datasets.py +++ b/src/routers/search_routers/search_router_datasets.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return ['name', 'description', 'issn'] + return set(['name', 'description', 'issn']) diff --git a/src/routers/search_routers/search_router_events.py b/src/routers/search_routers/search_router_events.py index 1a669b0b..53cc6547 100644 --- a/src/routers/search_routers/search_router_events.py +++ b/src/routers/search_routers/search_router_events.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return ['name', 'description', 'organiser_type', 'mode'] + return set(['name', 'description', 'organiser_type', 'mode']) diff --git a/src/routers/search_routers/search_router_experiments.py b/src/routers/search_routers/search_router_experiments.py index e255b2db..97b8d19f 100644 --- a/src/routers/search_routers/search_router_experiments.py +++ b/src/routers/search_routers/search_router_experiments.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return ['name', 'description'] + return set(['name', 'description']) diff --git a/src/routers/search_routers/search_router_generic.py b/src/routers/search_routers/search_router_generic.py index c33ec197..3306a058 100644 --- a/src/routers/search_routers/search_router_generic.py +++ b/src/routers/search_routers/search_router_generic.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return ['name', 'description', 'issn'] + return set(['name', 'description', 'issn']) diff --git a/src/routers/search_routers/search_router_ml_models.py b/src/routers/search_routers/search_router_ml_models.py index a172b740..5f83d792 100644 --- a/src/routers/search_routers/search_router_ml_models.py +++ b/src/routers/search_routers/search_router_ml_models.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return ['name', 'description'] + return set(['name', 'description']) diff --git a/src/routers/search_routers/search_router_news.py b/src/routers/search_routers/search_router_news.py index 36045d23..4e52bac1 100644 --- a/src/routers/search_routers/search_router_news.py +++ b/src/routers/search_routers/search_router_news.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return ['name', 'description', 'headline', 'alternative_headline'] + return set(['name', 'description', 'headline', 'alternative_headline']) diff --git a/src/routers/search_routers/search_router_organisations.py b/src/routers/search_routers/search_router_organisations.py index ae7dc8e2..3ba707ec 100644 --- a/src/routers/search_routers/search_router_organisations.py +++ b/src/routers/search_routers/search_router_organisations.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return ['name', 'legal_name', 'description'] + return set(['name', 'legal_name', 'description']) diff --git a/src/routers/search_routers/search_router_projects.py b/src/routers/search_routers/search_router_projects.py index a6984dba..8b9d67ae 100644 --- a/src/routers/search_routers/search_router_projects.py +++ b/src/routers/search_routers/search_router_projects.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return ['name', 'description', 'coordinator_name'] + return set(['name', 'description', 'coordinator_name']) diff --git a/src/routers/search_routers/search_router_publications.py b/src/routers/search_routers/search_router_publications.py index 5af2eac8..66bf40b1 100644 --- a/src/routers/search_routers/search_router_publications.py +++ b/src/routers/search_routers/search_router_publications.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return ['name', 'description', 'issn', 'isbn'] + return set(['name', 'description', 'issn', 'isbn']) diff --git a/src/routers/search_routers/search_router_services.py b/src/routers/search_routers/search_router_services.py index cbcef5da..42c613e9 100644 --- a/src/routers/search_routers/search_router_services.py +++ b/src/routers/search_routers/search_router_services.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return ['name', 'description', 'slogan'] + return set(['name', 'description', 'slogan']) From 74ac4f05d5018a2301eb0c4714ee1cca71aae5d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Sat, 11 Nov 2023 13:56:44 +0100 Subject: [PATCH 45/79] pull request modifications --- src/routers/global_search_router.py | 181 ---------------------------- src/routers/search_router.py | 37 +----- 2 files changed, 6 insertions(+), 212 deletions(-) delete mode 100644 src/routers/global_search_router.py diff --git a/src/routers/global_search_router.py b/src/routers/global_search_router.py deleted file mode 100644 index 93e2cbc3..00000000 --- a/src/routers/global_search_router.py +++ /dev/null @@ -1,181 +0,0 @@ -import abc -import os -from typing import TypeVar, Generic, Any, Type, Annotated - -from elasticsearch import Elasticsearch -from fastapi import APIRouter, Depends, HTTPException, Query -from pydantic import BaseModel -from sqlalchemy.engine import Engine -from starlette import status - -from authentication import get_current_user#, has_role -from database.model.concept.aiod_entry import AIoDEntryRead -from database.model.resource_read_and_create import resource_read - -SORT = {"identifier": "asc"} -LIMIT_MAX = 1000 - -RESOURCE = TypeVar("RESOURCE") - - -class SearchResult(BaseModel, Generic[RESOURCE]): - total_hits: int - resources: list[RESOURCE] - next_offset: list | None - current_page: int - page_size: int - -class SearchRouter(Generic[RESOURCE], abc.ABC): - """ - Providing search functionality in ElasticSearch - """ - - def __init__(self, client: Elasticsearch): - self.client: Elasticsearch = client - - @property - @abc.abstractmethod - def es_index(self) -> str: - """The name of the elasticsearch index""" - - @property - @abc.abstractmethod - def resource_name_plural(self) -> str: - """The name of the resource (plural)""" - - @property - def key_translations(self) -> dict[str, str]: - """If an attribute is called differently in elasticsearch than in our - metadata model, you can define a translation dictionary here. The key - should be the name in elasticsearch, the value the name in our data - model.""" - return {} - - @property - @abc.abstractmethod - def resource_class(self) -> RESOURCE: - """The resource class""" - - def create(self, engine: Engine, url_prefix: str) -> APIRouter: - router = APIRouter() - read_class = resource_read(self.resource_class) # type: ignore - - # TODO: check parameters correctness - @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", - tags=["search"]) - def search( - platforms: Annotated[list[str] | None, Query()] = None, - search_query: str = "", - search_fields: Annotated[list[str] | None, Query()] = None, - limit: int = 10, - page: int = 1 -# offset: Annotated[list[str] | None, Query()] = None - ) -> SearchResult[read_class]: # type: ignore - f""" - Search for {self.resource_name_plural}. - """ - - if limit > LIMIT_MAX: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The limit should be maximum {LIMIT_MAX}. " - f"If you want more results, use pagination." - ) - - # Prepare query - # ----------------------------------------------------------------- - - # Matches of the search concept for each field - if search_fields: - - # The selected fields must be present in the match fields - if not set(search_fields).issubset(set(self.match_fields)): - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The available search fields for this entity " - f"are:{self.match_fields}" - ) - - # Search in specific search fields - query_matches = [{'match': {f: search_query}} - for f in search_fields] - - else: - - # Search in any match field - query_matches = [{'match': {f: search_query}} - for f in self.match_fields] - - if platforms: - - # Matches of the platform field for each selected platform - platform_matches = [{'match': {'platform': p}} - for p in platforms] - - # Must match platform and search query on at least one field - query = { - 'bool': { - 'must': { - 'bool': { - 'should': platform_matches, - 'minimum_should_match': 1 - } - }, - 'should': query_matches, - 'minimum_should_match': 1 - } - } - - else: - - # Must match search concept on at least one field - query = { - 'bool': { - 'should': query_matches, - 'minimum_should_match': 1 - } - } - - # ----------------------------------------------------------------- - -# result = self.client.search(index=self.es_index, query=query, -# size=limit, sort=SORT, -# search_after=offset) - from_ = limit*(page - 1) - result = self.client.search(index=self.es_index, query=query, - from_=from_, size=limit, sort=SORT) - - total_hits = result["hits"]["total"]["value"] - resources: list[read_class] = [ # type: ignore - self._cast_resource(read_class, hit["_source"]) # type: ignore - for hit in result["hits"]["hits"] - ] - next_offset = ( - result["hits"]["hits"][-1]["sort"] - if len(result["hits"]["hits"]) > 0 else None - ) - return SearchResult[read_class]( # type: ignore - total_hits=total_hits, - resources=resources, - next_offset=next_offset, - current_page=page, - page_size=limit - ) - - return router - - def _cast_resource( - self, resource_class: RESOURCE, resource_dict: dict[str, Any] - ) -> Type[RESOURCE]: - kwargs = { - self.key_translations.get(key, key): val - for key, val in resource_dict.items() - if key != "type" and not key.startswith("@") - } - resource = resource_class(**kwargs) # type: ignore - resource.aiod_entry = AIoDEntryRead( - date_modified=resource_dict["date_modified"], - date_created=resource_dict["date_created"], - status=resource_dict["status"], - ) - return resource diff --git a/src/routers/search_router.py b/src/routers/search_router.py index 0ca9c539..dbfe1264 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -64,7 +64,6 @@ def create(self, engine: Engine, url_prefix: str) -> APIRouter: router = APIRouter() read_class = resource_read(self.resource_class) # type: ignore - # TODO: check parameters correctness @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", tags=["search"]) def search( @@ -79,6 +78,12 @@ def search( Search for {self.resource_name_plural}. """ + if page < 1: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"The page numbers start by 1." + ) + if limit > LIMIT_MAX: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, @@ -115,36 +120,6 @@ def search( query['bool']['must'] = {'bool': {'should': platform_matches, 'minimum_should_match': 1}} -# if platforms: -# -# # Matches of the platform field for each selected platform -# platform_matches = [{'match': {'platform': p}} -# for p in platforms] -# -# # Must match platform and search query on at least one field -# query = { -# 'bool': { -# 'must': { -# 'bool': { -# 'should': platform_matches, -# 'minimum_should_match': 1 -# } -# }, -# 'should': query_matches, -# 'minimum_should_match': 1 -# } -# } -# -# else: -# -# # Must match search concept on at least one field -# query = { -# 'bool': { -# 'should': query_matches, -# 'minimum_should_match': 1 -# } -# } - # ----------------------------------------------------------------- # result = self.client.search(index=self.es_index, query=query, From 7c2057657b4efbe17e832a80e00d47b23e4c554e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 13 Nov 2023 14:37:45 +0100 Subject: [PATCH 46/79] Combined search with sql queries in process --- docker-compose.yaml | 20 +- logstash/pipeline/sql/init_dataset.sql | 32 +- logstash/pipeline/sql/init_event.sql | 37 +- logstash/pipeline/sql/init_experiment.sql | 32 +- logstash/pipeline/sql/init_ml_model.sql | 31 +- logstash/pipeline/sql/init_news.sql | 25 +- logstash/pipeline/sql/init_organisation.sql | 31 +- logstash/pipeline/sql/init_project.sql | 30 +- logstash/pipeline/sql/init_publication.sql | 36 +- logstash/pipeline/sql/init_service.sql | 25 +- logstash/pipeline/sql/sync_dataset.sql | 32 +- logstash/pipeline/sql/sync_event.sql | 37 +- logstash/pipeline/sql/sync_experiment.sql | 32 +- logstash/pipeline/sql/sync_ml_model.sql | 31 +- logstash/pipeline/sql/sync_news.sql | 25 +- logstash/pipeline/sql/sync_organisation.sql | 31 +- logstash/pipeline/sql/sync_project.sql | 30 +- logstash/pipeline/sql/sync_publication.sql | 36 +- logstash/pipeline/sql/sync_service.sql | 25 +- logstash/setup/Dockerfile | 5 + src/routers/search_router.py | 60 ++- .../search_routers/search_router_generic.py | 20 - .../generate_logstash_config_files.py | 344 ++++++++++++++++++ .../generate_logstash_config_files_bkp.py | 32 +- 24 files changed, 448 insertions(+), 591 deletions(-) create mode 100644 logstash/setup/Dockerfile delete mode 100644 src/routers/search_routers/search_router_generic.py create mode 100755 src/setup/logstash/generate_logstash_config_files.py rename logstash/logstash_config.py => src/setup/logstash/generate_logstash_config_files_bkp.py (95%) diff --git a/docker-compose.yaml b/docker-compose.yaml index 6eb536ff..f9db5c4e 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -200,7 +200,23 @@ services: depends_on: elasticsearch: condition: service_healthy - + + logstash_setup: + build: + context: logstash/setup/ + dockerfile: Dockerfile + env_file: .env + environment: + - MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD + - ES_USER=$ES_USER + - ES_PASSWORD=$ES_PASSWORD + volumes: + - ./src/setup/logstash:/app + - ./logstash:/logstash + command: > + python generate_logstash_config_files.py + restart: "no" + logstash: build: context: logstash/ @@ -236,5 +252,7 @@ services: condition: service_healthy elasticsearch_setup: condition: service_completed_successfully + logstash_setup: + condition: service_completed_successfully fill-db-with-examples: condition: service_completed_successfully diff --git a/logstash/pipeline/sql/init_dataset.sql b/logstash/pipeline/sql/init_dataset.sql index 8e2aae6b..7667a4fe 100644 --- a/logstash/pipeline/sql/init_dataset.sql +++ b/logstash/pipeline/sql/init_dataset.sql @@ -1,36 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - dataset.identifier, - dataset.platform, - dataset.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - dataset.ai_resource_id AS `resource_identifier`, - dataset.name, - dataset.description, - dataset.same_as, - -- AIAsset - dataset.ai_asset_id AS `asset_identifier`, - dataset.date_published, - dataset.version, - license.name AS `license`, - -- Attributes - dataset.issn, - dataset.measurement_technique, - dataset.temporal_coverage, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, dataset.identifier, name, description, issn FROM aiod.dataset INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.dataset.license_identifier=aiod.license.identifier -LEFT JOIN aiod.dataset_application_area_link ON aiod.dataset_application_area_link.from_identifier=aiod.dataset.identifier -LEFT JOIN aiod.application_area ON aiod.dataset_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.dataset.date_deleted IS NULL -GROUP BY aiod.dataset.identifier -ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/init_event.sql b/logstash/pipeline/sql/init_event.sql index 870814fa..3ca67f88 100644 --- a/logstash/pipeline/sql/init_event.sql +++ b/logstash/pipeline/sql/init_event.sql @@ -1,41 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - event.identifier, - event.platform, - event.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - event.ai_resource_id AS `resource_identifier`, - event.name, - event.description, - event.same_as, - -- Attributes - event.start_date, - event.end_date, - event.schedule, - event.registration_link, - event.organiser_identifier, - -- Mode - event_mode.name AS `mode`, - -- Status - event_status.name AS `event_status`, - -- Agent - agent.type AS `organiser_type`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, event.identifier, name, description FROM aiod.event INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.event_mode ON aiod.event.mode_identifier=aiod.event_mode.identifier -LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier -LEFT JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier -LEFT JOIN aiod.event_application_area_link ON aiod.event_application_area_link.from_identifier=aiod.event.identifier -LEFT JOIN aiod.application_area ON aiod.event_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.event.date_deleted IS NULL -GROUP BY aiod.event.identifier -ORDER BY aiod.event.identifier diff --git a/logstash/pipeline/sql/init_experiment.sql b/logstash/pipeline/sql/init_experiment.sql index 9f3fac0b..47e9e744 100644 --- a/logstash/pipeline/sql/init_experiment.sql +++ b/logstash/pipeline/sql/init_experiment.sql @@ -1,36 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - experiment.identifier, - experiment.platform, - experiment.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - experiment.ai_resource_id AS `resource_identifier`, - experiment.name, - experiment.description, - experiment.same_as, - -- AIAsset - experiment.ai_asset_id AS `asset_identifier`, - experiment.date_published, - experiment.version, - license.name AS `license`, - -- Attributes - experiment.experimental_workflow, - experiment.execution_settings, - experiment.reproducibility_explanation, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, experiment.identifier, name, description FROM aiod.experiment INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.experiment.license_identifier=aiod.license.identifier -LEFT JOIN aiod.experiment_application_area_link ON aiod.experiment_application_area_link.from_identifier=aiod.experiment.identifier -LEFT JOIN aiod.application_area ON aiod.experiment_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.experiment.date_deleted IS NULL -GROUP BY aiod.experiment.identifier -ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/init_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql index 7d7405f5..5d9026a7 100644 --- a/logstash/pipeline/sql/init_ml_model.sql +++ b/logstash/pipeline/sql/init_ml_model.sql @@ -1,35 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - ml_model.identifier, - ml_model.platform, - ml_model.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - ml_model.ai_resource_id AS `resource_identifier`, - ml_model.name, - ml_model.description, - ml_model.same_as, - -- AIAsset - ml_model.ai_asset_id AS `asset_identifier`, - ml_model.date_published, - ml_model.version, - license.name AS `license`, - -- Type - ml_model_type.name AS `ml_model_type`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, ml_model.identifier, name, description FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifier -LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier -LEFT JOIN aiod.ml_model_application_area_link ON aiod.ml_model_application_area_link.from_identifier=aiod.ml_model.identifier -LEFT JOIN aiod.application_area ON aiod.ml_model_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.ml_model.date_deleted IS NULL -GROUP BY aiod.ml_model.identifier -ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql index 09f33797..03fffe3a 100644 --- a/logstash/pipeline/sql/init_news.sql +++ b/logstash/pipeline/sql/init_news.sql @@ -1,29 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - news.identifier, - news.platform, - news.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - news.ai_resource_id AS `resource_identifier`, - news.name, - news.description, - news.same_as, - -- Attributes - news.headline, - news.alternative_headline, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, news.identifier, name, description, headline, alternative_headline FROM aiod.news INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.news_application_area_link ON aiod.news_application_area_link.from_identifier=aiod.news.identifier -LEFT JOIN aiod.application_area ON aiod.news_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.news.date_deleted IS NULL -GROUP BY aiod.news.identifier -ORDER BY aiod.news.identifier diff --git a/logstash/pipeline/sql/init_organisation.sql b/logstash/pipeline/sql/init_organisation.sql index 388ee122..c2fc8d82 100644 --- a/logstash/pipeline/sql/init_organisation.sql +++ b/logstash/pipeline/sql/init_organisation.sql @@ -1,35 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - organisation.identifier, - organisation.platform, - organisation.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - organisation.ai_resource_id AS `resource_identifier`, - organisation.name, - organisation.description, - organisation.same_as, - -- Attributes - organisation.date_founded, - organisation.legal_name, - -- Type - organisation_type.name AS `organisation_type`, - -- Agent - agent.type AS `agent`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, organisation.identifier, name, description, legal_name FROM aiod.organisation INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.organisation_type ON aiod.organisation.type_identifier=aiod.organisation_type.identifier -LEFT JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier -LEFT JOIN aiod.organisation_application_area_link ON aiod.organisation_application_area_link.from_identifier=aiod.organisation.identifier -LEFT JOIN aiod.application_area ON aiod.organisation_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.organisation.date_deleted IS NULL -GROUP BY aiod.organisation.identifier -ORDER BY aiod.organisation.identifier diff --git a/logstash/pipeline/sql/init_project.sql b/logstash/pipeline/sql/init_project.sql index 325e1a3c..d08dc07b 100644 --- a/logstash/pipeline/sql/init_project.sql +++ b/logstash/pipeline/sql/init_project.sql @@ -1,34 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - project.identifier, - project.platform, - project.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - project.ai_resource_id AS `resource_identifier`, - project.name, - project.description, - project.same_as, - -- Attributes - project.start_date, - project.end_date, - project.total_cost_euro, - project.coordinator_identifier, - -- Organisation - organisation.name AS `coordinator_name`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, project.identifier, name, description FROM aiod.project INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier -LEFT JOIN aiod.project_application_area_link ON aiod.project_application_area_link.from_identifier=aiod.project.identifier -LEFT JOIN aiod.application_area ON aiod.project_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.project.date_deleted IS NULL -GROUP BY aiod.project.identifier -ORDER BY aiod.project.identifier diff --git a/logstash/pipeline/sql/init_publication.sql b/logstash/pipeline/sql/init_publication.sql index 36702546..93ecfbfc 100644 --- a/logstash/pipeline/sql/init_publication.sql +++ b/logstash/pipeline/sql/init_publication.sql @@ -1,40 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - publication.identifier, - publication.platform, - publication.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - publication.ai_resource_id AS `resource_identifier`, - publication.name, - publication.description, - publication.same_as, - -- AIAsset - publication.ai_asset_id AS `asset_identifier`, - publication.date_published, - publication.version, - license.name AS `license`, - -- Attributes - publication.permanent_identifier, - publication.isbn, - publication.issn, - publication.knowledge_asset_id AS `knowledge_asset_identifier`, - -- Type - publication_type.name AS `publication_type`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, publication.identifier, name, description, issn, isbn FROM aiod.publication INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.publication.license_identifier=aiod.license.identifier -LEFT JOIN aiod.publication_type ON aiod.publication.type_identifier=aiod.publication_type.identifier -LEFT JOIN aiod.publication_application_area_link ON aiod.publication_application_area_link.from_identifier=aiod.publication.identifier -LEFT JOIN aiod.application_area ON aiod.publication_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.publication.date_deleted IS NULL -GROUP BY aiod.publication.identifier -ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/init_service.sql b/logstash/pipeline/sql/init_service.sql index 1478826e..39cc7d08 100644 --- a/logstash/pipeline/sql/init_service.sql +++ b/logstash/pipeline/sql/init_service.sql @@ -1,29 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - service.identifier, - service.platform, - service.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - service.ai_resource_id AS `resource_identifier`, - service.name, - service.description, - service.same_as, - -- Attributes - service.slogan, - service.terms_of_service, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, service.identifier, name, description, slogan FROM aiod.service INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.service_application_area_link ON aiod.service_application_area_link.from_identifier=aiod.service.identifier -LEFT JOIN aiod.application_area ON aiod.service_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.service.date_deleted IS NULL -GROUP BY aiod.service.identifier -ORDER BY aiod.service.identifier diff --git a/logstash/pipeline/sql/sync_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql index 1758c91e..1be7b313 100644 --- a/logstash/pipeline/sql/sync_dataset.sql +++ b/logstash/pipeline/sql/sync_dataset.sql @@ -1,36 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - dataset.identifier, - dataset.platform, - dataset.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - dataset.ai_resource_id AS `resource_identifier`, - dataset.name, - dataset.description, - dataset.same_as, - -- AIAsset - dataset.ai_asset_id AS `asset_identifier`, - dataset.date_published, - dataset.version, - license.name AS `license`, - -- Attributes - dataset.issn, - dataset.measurement_technique, - dataset.temporal_coverage, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, dataset.identifier, name, description, issn FROM aiod.dataset INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.dataset.license_identifier=aiod.license.identifier -LEFT JOIN aiod.dataset_application_area_link ON aiod.dataset_application_area_link.from_identifier=aiod.dataset.identifier -LEFT JOIN aiod.application_area ON aiod.dataset_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.dataset.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value -GROUP BY aiod.dataset.identifier -ORDER BY aiod.dataset.identifier diff --git a/logstash/pipeline/sql/sync_event.sql b/logstash/pipeline/sql/sync_event.sql index 205f07d7..d9d91b13 100644 --- a/logstash/pipeline/sql/sync_event.sql +++ b/logstash/pipeline/sql/sync_event.sql @@ -1,41 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - event.identifier, - event.platform, - event.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - event.ai_resource_id AS `resource_identifier`, - event.name, - event.description, - event.same_as, - -- Attributes - event.start_date, - event.end_date, - event.schedule, - event.registration_link, - event.organiser_identifier, - -- Mode - event_mode.name AS `mode`, - -- Status - event_status.name AS `event_status`, - -- Agent - agent.type AS `organiser_type`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, event.identifier, name, description FROM aiod.event INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.event_mode ON aiod.event.mode_identifier=aiod.event_mode.identifier -LEFT JOIN aiod.event_status ON aiod.event.status_identifier=aiod.event_status.identifier -LEFT JOIN aiod.agent ON aiod.event.organiser_identifier=aiod.agent.identifier -LEFT JOIN aiod.event_application_area_link ON aiod.event_application_area_link.from_identifier=aiod.event.identifier -LEFT JOIN aiod.application_area ON aiod.event_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.event.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value -GROUP BY aiod.event.identifier -ORDER BY aiod.event.identifier diff --git a/logstash/pipeline/sql/sync_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql index 2b88047a..790a57b8 100644 --- a/logstash/pipeline/sql/sync_experiment.sql +++ b/logstash/pipeline/sql/sync_experiment.sql @@ -1,36 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - experiment.identifier, - experiment.platform, - experiment.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - experiment.ai_resource_id AS `resource_identifier`, - experiment.name, - experiment.description, - experiment.same_as, - -- AIAsset - experiment.ai_asset_id AS `asset_identifier`, - experiment.date_published, - experiment.version, - license.name AS `license`, - -- Attributes - experiment.experimental_workflow, - experiment.execution_settings, - experiment.reproducibility_explanation, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, experiment.identifier, name, description FROM aiod.experiment INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.experiment.license_identifier=aiod.license.identifier -LEFT JOIN aiod.experiment_application_area_link ON aiod.experiment_application_area_link.from_identifier=aiod.experiment.identifier -LEFT JOIN aiod.application_area ON aiod.experiment_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.experiment.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value -GROUP BY aiod.experiment.identifier -ORDER BY aiod.experiment.identifier diff --git a/logstash/pipeline/sql/sync_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql index 0f2d9199..ff294426 100644 --- a/logstash/pipeline/sql/sync_ml_model.sql +++ b/logstash/pipeline/sql/sync_ml_model.sql @@ -1,35 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - ml_model.identifier, - ml_model.platform, - ml_model.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - ml_model.ai_resource_id AS `resource_identifier`, - ml_model.name, - ml_model.description, - ml_model.same_as, - -- AIAsset - ml_model.ai_asset_id AS `asset_identifier`, - ml_model.date_published, - ml_model.version, - license.name AS `license`, - -- Type - ml_model_type.name AS `ml_model_type`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, ml_model.identifier, name, description FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.ml_model.license_identifier=aiod.license.identifier -LEFT JOIN aiod.ml_model_type ON aiod.ml_model.type_identifier=aiod.ml_model_type.identifier -LEFT JOIN aiod.ml_model_application_area_link ON aiod.ml_model_application_area_link.from_identifier=aiod.ml_model.identifier -LEFT JOIN aiod.application_area ON aiod.ml_model_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.ml_model.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value -GROUP BY aiod.ml_model.identifier -ORDER BY aiod.ml_model.identifier diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql index d4d9005d..c44bb861 100644 --- a/logstash/pipeline/sql/sync_news.sql +++ b/logstash/pipeline/sql/sync_news.sql @@ -1,29 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - news.identifier, - news.platform, - news.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - news.ai_resource_id AS `resource_identifier`, - news.name, - news.description, - news.same_as, - -- Attributes - news.headline, - news.alternative_headline, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, news.identifier, name, description, headline, alternative_headline FROM aiod.news INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.news_application_area_link ON aiod.news_application_area_link.from_identifier=aiod.news.identifier -LEFT JOIN aiod.application_area ON aiod.news_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.news.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value -GROUP BY aiod.news.identifier -ORDER BY aiod.news.identifier diff --git a/logstash/pipeline/sql/sync_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql index e39db777..7b79ba56 100644 --- a/logstash/pipeline/sql/sync_organisation.sql +++ b/logstash/pipeline/sql/sync_organisation.sql @@ -1,35 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - organisation.identifier, - organisation.platform, - organisation.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - organisation.ai_resource_id AS `resource_identifier`, - organisation.name, - organisation.description, - organisation.same_as, - -- Attributes - organisation.date_founded, - organisation.legal_name, - -- Type - organisation_type.name AS `organisation_type`, - -- Agent - agent.type AS `agent`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, organisation.identifier, name, description, legal_name FROM aiod.organisation INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.organisation_type ON aiod.organisation.type_identifier=aiod.organisation_type.identifier -LEFT JOIN aiod.agent ON aiod.organisation.agent_id=aiod.agent.identifier -LEFT JOIN aiod.organisation_application_area_link ON aiod.organisation_application_area_link.from_identifier=aiod.organisation.identifier -LEFT JOIN aiod.application_area ON aiod.organisation_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.organisation.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value -GROUP BY aiod.organisation.identifier -ORDER BY aiod.organisation.identifier diff --git a/logstash/pipeline/sql/sync_project.sql b/logstash/pipeline/sql/sync_project.sql index 7d712b30..366c4636 100644 --- a/logstash/pipeline/sql/sync_project.sql +++ b/logstash/pipeline/sql/sync_project.sql @@ -1,34 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - project.identifier, - project.platform, - project.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - project.ai_resource_id AS `resource_identifier`, - project.name, - project.description, - project.same_as, - -- Attributes - project.start_date, - project.end_date, - project.total_cost_euro, - project.coordinator_identifier, - -- Organisation - organisation.name AS `coordinator_name`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, project.identifier, name, description FROM aiod.project INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.organisation ON aiod.project.coordinator_identifier=aiod.organisation.identifier -LEFT JOIN aiod.project_application_area_link ON aiod.project_application_area_link.from_identifier=aiod.project.identifier -LEFT JOIN aiod.application_area ON aiod.project_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.project.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value -GROUP BY aiod.project.identifier -ORDER BY aiod.project.identifier diff --git a/logstash/pipeline/sql/sync_publication.sql b/logstash/pipeline/sql/sync_publication.sql index 68e38dfa..5697a6b1 100644 --- a/logstash/pipeline/sql/sync_publication.sql +++ b/logstash/pipeline/sql/sync_publication.sql @@ -1,40 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - publication.identifier, - publication.platform, - publication.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - publication.ai_resource_id AS `resource_identifier`, - publication.name, - publication.description, - publication.same_as, - -- AIAsset - publication.ai_asset_id AS `asset_identifier`, - publication.date_published, - publication.version, - license.name AS `license`, - -- Attributes - publication.permanent_identifier, - publication.isbn, - publication.issn, - publication.knowledge_asset_id AS `knowledge_asset_identifier`, - -- Type - publication_type.name AS `publication_type`, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, publication.identifier, name, description, issn, isbn FROM aiod.publication INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.license ON aiod.publication.license_identifier=aiod.license.identifier -LEFT JOIN aiod.publication_type ON aiod.publication.type_identifier=aiod.publication_type.identifier -LEFT JOIN aiod.publication_application_area_link ON aiod.publication_application_area_link.from_identifier=aiod.publication.identifier -LEFT JOIN aiod.application_area ON aiod.publication_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.publication.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value -GROUP BY aiod.publication.identifier -ORDER BY aiod.publication.identifier diff --git a/logstash/pipeline/sql/sync_service.sql b/logstash/pipeline/sql/sync_service.sql index 202101c9..219cab73 100644 --- a/logstash/pipeline/sql/sync_service.sql +++ b/logstash/pipeline/sql/sync_service.sql @@ -1,29 +1,6 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT - -- Concept - service.identifier, - service.platform, - service.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - service.ai_resource_id AS `resource_identifier`, - service.name, - service.description, - service.same_as, - -- Attributes - service.slogan, - service.terms_of_service, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` +SELECT aiod_entry.date_modified, service.identifier, name, description, slogan FROM aiod.service INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier -LEFT JOIN aiod.service_application_area_link ON aiod.service_application_area_link.from_identifier=aiod.service.identifier -LEFT JOIN aiod.application_area ON aiod.service_application_area_link.linked_identifier=aiod.application_area.identifier WHERE aiod.service.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value -GROUP BY aiod.service.identifier -ORDER BY aiod.service.identifier diff --git a/logstash/setup/Dockerfile b/logstash/setup/Dockerfile new file mode 100644 index 00000000..a1048220 --- /dev/null +++ b/logstash/setup/Dockerfile @@ -0,0 +1,5 @@ +FROM python:3.11-slim-bullseye + +# RUN apt-get update && apt-get -y install pip && pip install Jinja2 + +WORKDIR /app \ No newline at end of file diff --git a/src/routers/search_router.py b/src/routers/search_router.py index dbfe1264..b941a197 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -5,11 +5,14 @@ from fastapi import APIRouter, Depends, HTTPException, Query from pydantic import BaseModel from sqlalchemy.engine import Engine +from sqlmodel import Session, select from starlette import status from authentication import get_current_user#, has_role from database.model.concept.aiod_entry import AIoDEntryRead from database.model.resource_read_and_create import resource_read +from database.model.platform.platform import Platform +from .resource_router import _wrap_as_http_exception SORT = {"identifier": "asc"} LIMIT_MAX = 1000 @@ -78,12 +81,31 @@ def search( Search for {self.resource_name_plural}. """ - if page < 1: + # Parameter correctness + # ----------------------------------------------------------------- + + try: + with Session(engine) as session: + query = select(Platform) + database_platforms = session.scalars(query).all() + platform_names = set([p.name for p in database_platforms]) + except Exception as e: + raise _wrap_as_http_exception(e) + + if platforms and not set(platforms).issubset(platform_names): raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The page numbers start by 1." + detail=f"The available platformas are: {platform_names}" ) + fields = search_fields if search_fields else self.match_fields + if not set(fields).issubset(self.match_fields): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"The available search fields for this entity " + f"are: {self.match_fields}" + ) + if limit > LIMIT_MAX: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, @@ -91,16 +113,16 @@ def search( f"If you want more results, use pagination." ) + if page < 1: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"The page numbers start by 1." + ) + # Prepare query # ----------------------------------------------------------------- # Matches of the search concept for each field - fields = search_fields if search_fields else self.match_fields - if not set(fields).issubset(self.match_fields): - raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The available search fields for " - f"this entity are:" - f"{self.match_fields}") query_matches = [{'match': {f: search_query}} for f in fields] # Must match search concept on at least one field @@ -120,15 +142,27 @@ def search( query['bool']['must'] = {'bool': {'should': platform_matches, 'minimum_should_match': 1}} + # Launch search query # ----------------------------------------------------------------- -# result = self.client.search(index=self.es_index, query=query, -# size=limit, sort=SORT, -# search_after=offset) from_ = limit*(page - 1) result = self.client.search(index=self.es_index, query=query, from_=from_, size=limit, sort=SORT) + # Launch database query + # ----------------------------------------------------------------- + + try: + with Session(engine) as session: + query = select(Platform) + database_platforms = session.scalars(query).all() + platform_names = set([p.name for p in database_platforms]) + except Exception as e: + raise _wrap_as_http_exception(e) + + # Manage results + # ----------------------------------------------------------------- + total_hits = result["hits"]["total"]["value"] resources: list[read_class] = [ # type: ignore self._cast_resource(read_class, hit["_source"]) # type: ignore @@ -159,7 +193,7 @@ def _cast_resource( resource = resource_class(**kwargs) # type: ignore resource.aiod_entry = AIoDEntryRead( date_modified=resource_dict["date_modified"], - date_created=resource_dict["date_created"], - status=resource_dict["status"], +# date_created=resource_dict["date_created"], +# status=resource_dict["status"], ) return resource diff --git a/src/routers/search_routers/search_router_generic.py b/src/routers/search_routers/search_router_generic.py deleted file mode 100644 index 3306a058..00000000 --- a/src/routers/search_routers/search_router_generic.py +++ /dev/null @@ -1,20 +0,0 @@ -from database.model.dataset.dataset import Dataset -from routers.search_router import SearchRouter - - -class SearchRouterAIODEntry(SearchRouter[Dataset]): - @property - def es_index(self) -> str: - return "dataset" - - @property - def resource_name_plural(self) -> str: - return "datasets" - - @property - def resource_class(self): - return Dataset - - @property - def match_fields(self): - return set(['name', 'description', 'issn']) diff --git a/src/setup/logstash/generate_logstash_config_files.py b/src/setup/logstash/generate_logstash_config_files.py new file mode 100755 index 00000000..6caf7d40 --- /dev/null +++ b/src/setup/logstash/generate_logstash_config_files.py @@ -0,0 +1,344 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import os + +# PATH MACROS +# ============================================================================= + +# Repository base path +#REPO_PATH = os.path.join("..", "..", "..") + +FIELDS = { + "dataset": ["aiod_entry.date_modified", "dataset.identifier", "name", "description", "issn"], + "event": ["aiod_entry.date_modified", "event.identifier", "name", "description",], + "experiment": ["aiod_entry.date_modified", "experiment.identifier", "name", "description"], + "ml_model": ["aiod_entry.date_modified", "ml_model.identifier", "name", "description"], + "news": ["aiod_entry.date_modified", "news.identifier", "name", "description", "headline", + "alternative_headline"], + "organisation": ["aiod_entry.date_modified", "organisation.identifier", "name", "description", "legal_name"], + "project": ["aiod_entry.date_modified", "project.identifier", "name", "description"], + "publication": ["aiod_entry.date_modified", "publication.identifier", "name", "description", "issn", "isbn"], + "service": ["aiod_entry.date_modified", "service.identifier", "name", "description", "slogan"] +} + +# MACROS FOR THE DOCUMENTS GENERATION FUNCTIONS +# ============================================================================= + +INFO = """{0} This file has been generated by `logstash_config.py` file +{0} --------------------------------------------------------- +""" + +CONF_BASE = """http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] +xpack.monitoring.enabled: true +xpack.monitoring.elasticsearch.username: {0} +xpack.monitoring.elasticsearch.password: {1} +""" + +INIT_INPUT_BASE = """ jdbc {{ + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{0}" + jdbc_password => "{1}" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_{2}.sql" + type => "{2}" + }} +""" + +SYNC_INPUT_BASE = """ jdbc {{ + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{0}" + jdbc_password => "{1}" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_{2}.sql" + type => "{2}" + }} + jdbc {{ + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{0}" + jdbc_password => "{1}" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_{2}.sql" + type => "rm_{2}" + }} +""" + +FILTER_BASE = """filter {{ + if ![application_area] {{ + mutate {{ + replace => {{"application_area" => ""}} + }} + }} + mutate {{ + # remove_field => ["@version", "@timestamp"] + split => {{"application_area" => ","}} + }}{0} +}} +""" + +DATE_FILTER = """ + if [type] == "organisation" {0}{{ + ruby {{ + code => ' + t = Time.at(event.get("date_founded").to_f) + event.set("date_founded", t.strftime("%Y-%m-%d")) + ' + }} + }} +""" + +SYNC_DATE_FILTER_ADDON = """or [type] == "rm_organisation" """ + +INIT_OUTPUT_BASE = """ if [type] == "{2}" {{ + elasticsearch {{ + hosts => "elasticsearch:9200" + user => "{0}" + password => "{1}" + ecs_compatibility => disabled + index => "{2}" + document_id => "{2}_%{{identifier}}" + }} + }} +""" + +SYNC_OUTPUT_BASE = """ if [type] == "{2}" {{ + elasticsearch {{ + hosts => "elasticsearch:9200" + user => "{0}" + password => "{1}" + ecs_compatibility => disabled + index => "{2}" + document_id => "{2}_%{{identifier}}" + }} + }} + if [type] == "rm_{2}" {{ + elasticsearch {{ + action => "delete" + hosts => "elasticsearch:9200" + user => "{0}" + password => "{1}" + ecs_compatibility => disabled + index => "{2}" + document_id => "{2}_%{{identifier}}" + }} + }} +""" + +SQL_BASE = """SELECT {1} +FROM aiod.{0} +INNER JOIN aiod.aiod_entry ON aiod.{0}.aiod_entry_identifier=aiod.aiod_entry.identifier{2} +""" + +SQL_RM_BASE = """SELECT {0}.identifier +FROM aiod.{0} +WHERE aiod.{0}.date_deleted IS NOT NULL AND aiod.{0}.date_deleted > :sql_last_value +""" + +INIT_CLAUSE = """ +WHERE aiod.{0}.date_deleted IS NULL""" + +SYNC_CLAUSE = """ +WHERE aiod.{0}.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" + +# DOCUMENTS GENERATION FUNCTIONS +# ============================================================================= + +def generate_conf_file(conf_path, es_user, es_pass): + + file_path = os.path.join(conf_path, "logstash.yml") + + # Generate configuration file + with open(file_path, 'w') as f: + + # Info + f.write(INFO.format('#')) + + # Configuration + f.write(CONF_BASE.format(es_user, es_pass)) + +def generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, + es_user, es_pass, entities, sync=False): + + if not sync: # init file + file_path = os.path.join(pipeline_conf_path, "init_table.conf") + input_base = INIT_INPUT_BASE + date_filter = DATE_FILTER.format("") + output_base = INIT_OUTPUT_BASE + else: # sync file + file_path = os.path.join(pipeline_conf_path, "sync_table.conf") + input_base = SYNC_INPUT_BASE + date_filter = DATE_FILTER.format(SYNC_DATE_FILTER_ADDON) + output_base = SYNC_OUTPUT_BASE + + # Generate configuration file + with open(file_path, 'w') as f: + + # Info + f.write(INFO.format('#')) + + # Input + f.write("input {\n") + for entity in entities: + f.write(input_base.format(db_user, db_pass, entity)) + f.write("}\n") + + # Filters + if "organisation" in entities: + f.write(FILTER_BASE.format(date_filter)) + else: + f.write(FILTER_BASE.format("")) + + # Output + f.write("output {\n") + for entity in entities: + f.write(output_base.format(es_user, es_pass, entity)) + f.write("}\n") + +def generate_pipeline_sql_files(pipeline_sql_path, entity, sync=False): + + # Generate output file path + if sync: + file_path = os.path.join(pipeline_sql_path, f"sync_{entity}.sql") + else: + file_path = os.path.join(pipeline_sql_path, f"init_{entity}.sql") + + # Write the output file + with open(file_path, 'w') as f: + + # Info + f.write(INFO.format('--')) + + # Where clause + if sync: + where_clause = SYNC_CLAUSE.format(entity) + else: + where_clause = INIT_CLAUSE.format(entity) + + f.write(SQL_BASE.format(entity, ", ".join(FIELDS[entity]), where_clause)) + +def generate_pipeline_sql_rm_files(pipeline_sql_path, entity): + + # Generate output file path + file_path = os.path.join(pipeline_sql_path, f"rm_{entity}.sql") + + # Write the output file + with open(file_path, 'w') as f: + + # Info + f.write(INFO.format('--')) + + # SQL query + f.write(SQL_RM_BASE.format(entity)) + +# MAIN FUNCTION +# ============================================================================= + +def main(base_path, db_user, db_pass, es_user, es_pass, entities, + ai_asset_entities, attributes, type_entities, mode_entities, + status_entities, agent_entities, organisation_entities): + + # Make configuration dir + conf_path = os.path.join(base_path, "config") + os.makedirs(conf_path, exist_ok=True) + + # Make pipeline configuration dirs + pipeline_conf_path = os.path.join(base_path, "pipeline", "conf") + os.makedirs(pipeline_conf_path, exist_ok=True) + pipeline_sql_path = os.path.join(base_path, "pipeline", "sql") + os.makedirs(pipeline_sql_path, exist_ok=True) + + # Generate logstash configuration file + generate_conf_file(conf_path, es_user, es_pass) + + # Generate pipeline configuration init file + generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, + es_user, es_pass, entities, sync=False) + + # Generate pipeline configuration sync file + generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, + es_user, es_pass, entities, sync=True) + + # Generate SQL init and sync files + for entity in entities: + generate_pipeline_sql_files(pipeline_sql_path, entity, sync=False) + generate_pipeline_sql_files(pipeline_sql_path, entity, sync=True) + + # Generate SQL rm files + for entity in entities: + generate_pipeline_sql_rm_files(pipeline_sql_path, entity) + +if __name__ == "__main__": + + # PATH MACROS + # ------------------------------------------------------------------------- + + # Repository base path +# repo_path = REPO_PATH + + # Configuration base path +# base_path = os.path.join(repo_path, "logstash") + base_path = "/logstash" + + # ------------------------------------------------------------------------- + + # Users and passwords + db_user = "root" + db_pass = os.environ['MYSQL_ROOT_PASSWORD'] + es_user = os.environ['ES_USER'] + es_pass = os.environ['ES_PASSWORD'] +# with open(os.path.join(repo_path, ".env"), "r") as f: +# for line in f: +# if "MYSQL_ROOT_PASSWORD" in line: +# db_pass = line.split("=")[1][:-1] +# if "ES_USER" in line: +# es_user = line.split("=")[1][:-1] +# if "ES_PASSWORD" in line: +# es_pass = line.split("=")[1][:-1] + + # Entities and attributes + entities = ["dataset", "event", "experiment", "ml_model", "news", + "organisation", "project", "publication", "service"] + ai_asset_entities = ["dataset", "experiment", "ml_model", "publication"] + attributes = { + "dataset": ["issn", "measurement_technique", "temporal_coverage"], + "event": ["start_date", "end_date", "schedule", "registration_link", + "organiser_identifier"], + "experiment": ["experimental_workflow", "execution_settings", + "reproducibility_explanation"], + "news": ["headline", "alternative_headline"], + "organisation": ["date_founded", "legal_name"], + "project": ["start_date", "end_date", "total_cost_euro", + "coordinator_identifier"], + "publication": ["permanent_identifier", "isbn", "issn", + "knowledge_asset_id AS `knowledge_asset_identifier`"], + "service": ["slogan", "terms_of_service"] + } + type_entities = ["ml_model", "organisation", "publication"] + mode_entities = ["event"] + status_entities = ["event"] + agent_entities = { + "event": ("organiser_identifier", "organiser_type"), + "organisation": ("agent_id", "agent") + } + organisation_entities = { + "project": ("coordinator_identifier", "coordinator_name") + } + + # Main function + main(base_path, db_user, db_pass, es_user, es_pass, entities, + ai_asset_entities, attributes, type_entities, mode_entities, + status_entities, agent_entities, organisation_entities) diff --git a/logstash/logstash_config.py b/src/setup/logstash/generate_logstash_config_files_bkp.py similarity index 95% rename from logstash/logstash_config.py rename to src/setup/logstash/generate_logstash_config_files_bkp.py index d9b8224e..d80fb1ea 100755 --- a/logstash/logstash_config.py +++ b/src/setup/logstash/generate_logstash_config_files_bkp.py @@ -7,10 +7,7 @@ # ============================================================================= # Repository base path -REPO_PATH = ".." - -# Working path -WORKING_PATH = os.path.join(".") +#REPO_PATH = os.path.join("..", "..", "..") # MACROS FOR THE DOCUMENTS GENERATION FUNCTIONS # ============================================================================= @@ -400,26 +397,27 @@ def main(base_path, db_user, db_pass, es_user, es_pass, entities, # ------------------------------------------------------------------------- # Repository base path - repo_path = REPO_PATH +# repo_path = REPO_PATH # Configuration base path - base_path = WORKING_PATH +# base_path = os.path.join(repo_path, "logstash") + base_path = "/logstash" # ------------------------------------------------------------------------- # Users and passwords db_user = "root" - db_pass = "" - es_user = "" - es_pass = "" - with open(os.path.join(repo_path, ".env"), "r") as f: - for line in f: - if "MYSQL_ROOT_PASSWORD" in line: - db_pass = line.split("=")[1][:-1] - if "ES_USER" in line: - es_user = line.split("=")[1][:-1] - if "ES_PASSWORD" in line: - es_pass = line.split("=")[1][:-1] + db_pass = os.environ['MYSQL_ROOT_PASSWORD'] + es_user = os.environ['ES_USER'] + es_pass = os.environ['ES_PASSWORD'] +# with open(os.path.join(repo_path, ".env"), "r") as f: +# for line in f: +# if "MYSQL_ROOT_PASSWORD" in line: +# db_pass = line.split("=")[1][:-1] +# if "ES_USER" in line: +# es_user = line.split("=")[1][:-1] +# if "ES_PASSWORD" in line: +# es_pass = line.split("=")[1][:-1] # Entities and attributes entities = ["dataset", "event", "experiment", "ml_model", "news", From 0aeaf258cfdfcfd0d83b6d9b1c32924b6a818ac4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 13 Nov 2023 18:19:37 +0100 Subject: [PATCH 47/79] Search functionality combined with optional SQL statment to retrieve everything --- docker-compose.yaml | 30 ++++---- logstash/pipeline/conf/init_table.conf | 2 +- logstash/pipeline/conf/sync_table.conf | 2 +- logstash/pipeline/sql/init_dataset.sql | 3 +- logstash/pipeline/sql/init_event.sql | 3 +- logstash/pipeline/sql/init_experiment.sql | 3 +- logstash/pipeline/sql/init_ml_model.sql | 3 +- logstash/pipeline/sql/init_news.sql | 3 +- logstash/pipeline/sql/init_organisation.sql | 3 +- logstash/pipeline/sql/init_project.sql | 3 +- logstash/pipeline/sql/init_publication.sql | 3 +- logstash/pipeline/sql/init_service.sql | 3 +- logstash/pipeline/sql/sync_dataset.sql | 3 +- logstash/pipeline/sql/sync_event.sql | 3 +- logstash/pipeline/sql/sync_experiment.sql | 3 +- logstash/pipeline/sql/sync_ml_model.sql | 3 +- logstash/pipeline/sql/sync_news.sql | 3 +- logstash/pipeline/sql/sync_organisation.sql | 3 +- logstash/pipeline/sql/sync_project.sql | 3 +- logstash/pipeline/sql/sync_publication.sql | 3 +- logstash/pipeline/sql/sync_service.sql | 3 +- src/routers/search_router.py | 77 ++++++++++++------- .../search_routers/search_router_datasets.py | 2 +- .../search_routers/search_router_events.py | 2 +- .../search_router_experiments.py | 2 +- .../search_routers/search_router_ml_models.py | 2 +- .../search_routers/search_router_news.py | 2 +- .../search_router_organisations.py | 2 +- .../search_routers/search_router_projects.py | 2 +- .../search_router_publications.py | 2 +- .../search_routers/search_router_services.py | 2 +- .../generate_logstash_config_files.py | 23 +++--- 32 files changed, 126 insertions(+), 80 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index f9db5c4e..afc3770e 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -188,18 +188,18 @@ services: timeout: 30s retries: 30 - elasticsearch_setup: - build: - context: es/setup/ - dockerfile: Dockerfile - env_file: .env - environment: - - ES_USER=$ES_USER - - ES_PASSWORD=$ES_PASSWORD - restart: "no" - depends_on: - elasticsearch: - condition: service_healthy +# elasticsearch_setup: +# build: +# context: es/setup/ +# dockerfile: Dockerfile +# env_file: .env +# environment: +# - ES_USER=$ES_USER +# - ES_PASSWORD=$ES_PASSWORD +# restart: "no" +# depends_on: +# elasticsearch: +# condition: service_healthy logstash_setup: build: @@ -250,8 +250,10 @@ services: depends_on: app: condition: service_healthy - elasticsearch_setup: - condition: service_completed_successfully + elasticsearch: + condition: service_healthy +# elasticsearch_setup: +# condition: service_completed_successfully logstash_setup: condition: service_completed_successfully fill-db-with-examples: diff --git a/logstash/pipeline/conf/init_table.conf b/logstash/pipeline/conf/init_table.conf index df8bd2c1..32dda9f1 100644 --- a/logstash/pipeline/conf/init_table.conf +++ b/logstash/pipeline/conf/init_table.conf @@ -108,7 +108,7 @@ filter { } } mutate { - # remove_field => ["@version", "@timestamp"] + remove_field => ["@version", "@timestamp"] split => {"application_area" => ","} } if [type] == "organisation" { diff --git a/logstash/pipeline/conf/sync_table.conf b/logstash/pipeline/conf/sync_table.conf index a0d8bc80..76c76fcf 100644 --- a/logstash/pipeline/conf/sync_table.conf +++ b/logstash/pipeline/conf/sync_table.conf @@ -243,7 +243,7 @@ filter { } } mutate { - # remove_field => ["@version", "@timestamp"] + remove_field => ["@version", "@timestamp"] split => {"application_area" => ","} } if [type] == "organisation" or [type] == "rm_organisation" { diff --git a/logstash/pipeline/sql/init_dataset.sql b/logstash/pipeline/sql/init_dataset.sql index 7667a4fe..7b66724b 100644 --- a/logstash/pipeline/sql/init_dataset.sql +++ b/logstash/pipeline/sql/init_dataset.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, dataset.identifier, name, description, issn +SELECT aiod_entry.date_modified, dataset.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', issn FROM aiod.dataset INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier WHERE aiod.dataset.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_event.sql b/logstash/pipeline/sql/init_event.sql index 3ca67f88..19882005 100644 --- a/logstash/pipeline/sql/init_event.sql +++ b/logstash/pipeline/sql/init_event.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, event.identifier, name, description +SELECT aiod_entry.date_modified, event.identifier, name, description_identifier, text.plain, text.html FROM aiod.event INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier WHERE aiod.event.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_experiment.sql b/logstash/pipeline/sql/init_experiment.sql index 47e9e744..045cfd5b 100644 --- a/logstash/pipeline/sql/init_experiment.sql +++ b/logstash/pipeline/sql/init_experiment.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, experiment.identifier, name, description +SELECT aiod_entry.date_modified, experiment.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' FROM aiod.experiment INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier WHERE aiod.experiment.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql index 5d9026a7..8aa9a400 100644 --- a/logstash/pipeline/sql/init_ml_model.sql +++ b/logstash/pipeline/sql/init_ml_model.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, ml_model.identifier, name, description +SELECT aiod_entry.date_modified, ml_model.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier WHERE aiod.ml_model.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql index 03fffe3a..a53cd961 100644 --- a/logstash/pipeline/sql/init_news.sql +++ b/logstash/pipeline/sql/init_news.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, news.identifier, name, description, headline, alternative_headline +SELECT aiod_entry.date_modified, news.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', headline, alternative_headline FROM aiod.news INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier WHERE aiod.news.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_organisation.sql b/logstash/pipeline/sql/init_organisation.sql index c2fc8d82..4272025b 100644 --- a/logstash/pipeline/sql/init_organisation.sql +++ b/logstash/pipeline/sql/init_organisation.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, organisation.identifier, name, description, legal_name +SELECT aiod_entry.date_modified, organisation.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', legal_name FROM aiod.organisation INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier WHERE aiod.organisation.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_project.sql b/logstash/pipeline/sql/init_project.sql index d08dc07b..6d58918f 100644 --- a/logstash/pipeline/sql/init_project.sql +++ b/logstash/pipeline/sql/init_project.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, project.identifier, name, description +SELECT aiod_entry.date_modified, project.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' FROM aiod.project INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier WHERE aiod.project.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_publication.sql b/logstash/pipeline/sql/init_publication.sql index 93ecfbfc..6da544db 100644 --- a/logstash/pipeline/sql/init_publication.sql +++ b/logstash/pipeline/sql/init_publication.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, publication.identifier, name, description, issn, isbn +SELECT aiod_entry.date_modified, publication.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', issn, isbn FROM aiod.publication INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier WHERE aiod.publication.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_service.sql b/logstash/pipeline/sql/init_service.sql index 39cc7d08..c41dfbdf 100644 --- a/logstash/pipeline/sql/init_service.sql +++ b/logstash/pipeline/sql/init_service.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, service.identifier, name, description, slogan +SELECT aiod_entry.date_modified, service.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', slogan FROM aiod.service INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier WHERE aiod.service.date_deleted IS NULL diff --git a/logstash/pipeline/sql/sync_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql index 1be7b313..f5440b8f 100644 --- a/logstash/pipeline/sql/sync_dataset.sql +++ b/logstash/pipeline/sql/sync_dataset.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, dataset.identifier, name, description, issn +SELECT aiod_entry.date_modified, dataset.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', issn FROM aiod.dataset INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier WHERE aiod.dataset.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_event.sql b/logstash/pipeline/sql/sync_event.sql index d9d91b13..8dcac2f7 100644 --- a/logstash/pipeline/sql/sync_event.sql +++ b/logstash/pipeline/sql/sync_event.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, event.identifier, name, description +SELECT aiod_entry.date_modified, event.identifier, name, description_identifier, text.plain, text.html FROM aiod.event INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier WHERE aiod.event.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql index 790a57b8..a71d0b19 100644 --- a/logstash/pipeline/sql/sync_experiment.sql +++ b/logstash/pipeline/sql/sync_experiment.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, experiment.identifier, name, description +SELECT aiod_entry.date_modified, experiment.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' FROM aiod.experiment INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier WHERE aiod.experiment.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql index ff294426..95c2f524 100644 --- a/logstash/pipeline/sql/sync_ml_model.sql +++ b/logstash/pipeline/sql/sync_ml_model.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, ml_model.identifier, name, description +SELECT aiod_entry.date_modified, ml_model.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' FROM aiod.ml_model INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier WHERE aiod.ml_model.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql index c44bb861..575a9b48 100644 --- a/logstash/pipeline/sql/sync_news.sql +++ b/logstash/pipeline/sql/sync_news.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, news.identifier, name, description, headline, alternative_headline +SELECT aiod_entry.date_modified, news.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', headline, alternative_headline FROM aiod.news INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier WHERE aiod.news.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql index 7b79ba56..588f0a2b 100644 --- a/logstash/pipeline/sql/sync_organisation.sql +++ b/logstash/pipeline/sql/sync_organisation.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, organisation.identifier, name, description, legal_name +SELECT aiod_entry.date_modified, organisation.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', legal_name FROM aiod.organisation INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier WHERE aiod.organisation.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_project.sql b/logstash/pipeline/sql/sync_project.sql index 366c4636..9a9c0a92 100644 --- a/logstash/pipeline/sql/sync_project.sql +++ b/logstash/pipeline/sql/sync_project.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, project.identifier, name, description +SELECT aiod_entry.date_modified, project.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' FROM aiod.project INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier WHERE aiod.project.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_publication.sql b/logstash/pipeline/sql/sync_publication.sql index 5697a6b1..22abd17d 100644 --- a/logstash/pipeline/sql/sync_publication.sql +++ b/logstash/pipeline/sql/sync_publication.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, publication.identifier, name, description, issn, isbn +SELECT aiod_entry.date_modified, publication.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', issn, isbn FROM aiod.publication INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier WHERE aiod.publication.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_service.sql b/logstash/pipeline/sql/sync_service.sql index 219cab73..1f8ccea7 100644 --- a/logstash/pipeline/sql/sync_service.sql +++ b/logstash/pipeline/sql/sync_service.sql @@ -1,6 +1,7 @@ -- This file has been generated by `logstash_config.py` file -- --------------------------------------------------------- -SELECT aiod_entry.date_modified, service.identifier, name, description, slogan +SELECT aiod_entry.date_modified, service.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', slogan FROM aiod.service INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier WHERE aiod.service.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/src/routers/search_router.py b/src/routers/search_router.py index b941a197..2f53e912 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -74,7 +74,8 @@ def search( search_query: str = "", search_fields: Annotated[list[str] | None, Query()] = None, limit: int = 10, - page: int = 1 + page: int = 1, + get_all: bool = True # offset: Annotated[list[str] | None, Query()] = None ) -> SearchResult[read_class]: # type: ignore f""" @@ -149,29 +150,28 @@ def search( result = self.client.search(index=self.es_index, query=query, from_=from_, size=limit, sort=SORT) - # Launch database query - # ----------------------------------------------------------------- - - try: - with Session(engine) as session: - query = select(Platform) - database_platforms = session.scalars(query).all() - platform_names = set([p.name for p in database_platforms]) - except Exception as e: - raise _wrap_as_http_exception(e) - - # Manage results - # ----------------------------------------------------------------- - total_hits = result["hits"]["total"]["value"] - resources: list[read_class] = [ # type: ignore - self._cast_resource(read_class, hit["_source"]) # type: ignore - for hit in result["hits"]["hits"] - ] next_offset = ( - result["hits"]["hits"][-1]["sort"] - if len(result["hits"]["hits"]) > 0 else None - ) + result["hits"]["hits"][-1]["sort"] + if len(result["hits"]["hits"]) > 0 else None + ) + if get_all: + + # Launch database query + resources: list[read_class] = [ + self._db_query(engine, read_class, + hit["_source"]["identifier"]) + for hit in result["hits"]["hits"] + ] + + else: + + # Return just the elasticsearch contents + resources: list[read_class] = [ # type: ignore + self._cast_resource(read_class, hit["_source"]) + for hit in result["hits"]["hits"] + ] + return SearchResult[read_class]( # type: ignore total_hits=total_hits, resources=resources, @@ -192,8 +192,33 @@ def _cast_resource( } resource = resource_class(**kwargs) # type: ignore resource.aiod_entry = AIoDEntryRead( - date_modified=resource_dict["date_modified"], -# date_created=resource_dict["date_created"], -# status=resource_dict["status"], + date_modified=resource_dict["date_modified"] ) - return resource + resource.description = { + "plain": resource_dict["plain"], + "html": resource_dict["html"] + } + return self._clean_structure(dict(resource)) + + def _db_query( + self, engine: Engine, read_class: RESOURCE, identifier: int + ) -> Type[RESOURCE]: + try: + with Session(engine) as session: + query = select(self.resource_class).where( + self.resource_class.identifier == identifier) + resource = session.scalars(query).first() + # Some error handling if resource does not exist + resource_read = read_class.from_orm(resource) + except Exception as e: + raise _wrap_as_http_exception(e) + return resource_read + + def _clean_structure(self, structure: dict): + new_structure = {} + for key, value in structure.items(): + if isinstance(value, dict): + value = self._clean_structure(value) + if value: + new_structure[key] = value + return new_structure diff --git a/src/routers/search_routers/search_router_datasets.py b/src/routers/search_routers/search_router_datasets.py index 9bbc5c18..7550d411 100644 --- a/src/routers/search_routers/search_router_datasets.py +++ b/src/routers/search_routers/search_router_datasets.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return set(['name', 'description', 'issn']) + return set(['name', 'plain', 'html', 'issn']) diff --git a/src/routers/search_routers/search_router_events.py b/src/routers/search_routers/search_router_events.py index 53cc6547..aeedde45 100644 --- a/src/routers/search_routers/search_router_events.py +++ b/src/routers/search_routers/search_router_events.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return set(['name', 'description', 'organiser_type', 'mode']) + return set(['name', 'plain', 'html']) diff --git a/src/routers/search_routers/search_router_experiments.py b/src/routers/search_routers/search_router_experiments.py index 97b8d19f..6208038e 100644 --- a/src/routers/search_routers/search_router_experiments.py +++ b/src/routers/search_routers/search_router_experiments.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return set(['name', 'description']) + return set(['name', 'plain', 'html']) diff --git a/src/routers/search_routers/search_router_ml_models.py b/src/routers/search_routers/search_router_ml_models.py index 5f83d792..f8bb5055 100644 --- a/src/routers/search_routers/search_router_ml_models.py +++ b/src/routers/search_routers/search_router_ml_models.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return set(['name', 'description']) + return set(['name', 'plain', 'html']) diff --git a/src/routers/search_routers/search_router_news.py b/src/routers/search_routers/search_router_news.py index 4e52bac1..4c22e619 100644 --- a/src/routers/search_routers/search_router_news.py +++ b/src/routers/search_routers/search_router_news.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return set(['name', 'description', 'headline', 'alternative_headline']) + return set(['name', 'plain', 'html', 'headline', 'alternative_headline']) diff --git a/src/routers/search_routers/search_router_organisations.py b/src/routers/search_routers/search_router_organisations.py index 3ba707ec..ae7edcf1 100644 --- a/src/routers/search_routers/search_router_organisations.py +++ b/src/routers/search_routers/search_router_organisations.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return set(['name', 'legal_name', 'description']) + return set(['name', 'legal_name', 'plain', 'html']) diff --git a/src/routers/search_routers/search_router_projects.py b/src/routers/search_routers/search_router_projects.py index 8b9d67ae..cd69e368 100644 --- a/src/routers/search_routers/search_router_projects.py +++ b/src/routers/search_routers/search_router_projects.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return set(['name', 'description', 'coordinator_name']) + return set(['name', 'plain', 'html']) diff --git a/src/routers/search_routers/search_router_publications.py b/src/routers/search_routers/search_router_publications.py index 66bf40b1..4d2f9263 100644 --- a/src/routers/search_routers/search_router_publications.py +++ b/src/routers/search_routers/search_router_publications.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return set(['name', 'description', 'issn', 'isbn']) + return set(['name', 'plain', 'html', 'issn', 'isbn']) diff --git a/src/routers/search_routers/search_router_services.py b/src/routers/search_routers/search_router_services.py index 42c613e9..70126b0f 100644 --- a/src/routers/search_routers/search_router_services.py +++ b/src/routers/search_routers/search_router_services.py @@ -17,4 +17,4 @@ def resource_class(self): @property def match_fields(self): - return set(['name', 'description', 'slogan']) + return set(['name', 'plain', 'html', 'slogan']) diff --git a/src/setup/logstash/generate_logstash_config_files.py b/src/setup/logstash/generate_logstash_config_files.py index 6caf7d40..2fc7a57f 100755 --- a/src/setup/logstash/generate_logstash_config_files.py +++ b/src/setup/logstash/generate_logstash_config_files.py @@ -10,16 +10,16 @@ #REPO_PATH = os.path.join("..", "..", "..") FIELDS = { - "dataset": ["aiod_entry.date_modified", "dataset.identifier", "name", "description", "issn"], - "event": ["aiod_entry.date_modified", "event.identifier", "name", "description",], - "experiment": ["aiod_entry.date_modified", "experiment.identifier", "name", "description"], - "ml_model": ["aiod_entry.date_modified", "ml_model.identifier", "name", "description"], - "news": ["aiod_entry.date_modified", "news.identifier", "name", "description", "headline", + "dataset": ["aiod_entry.date_modified", "dataset.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'", "issn"], + "event": ["aiod_entry.date_modified", "event.identifier", "name", "description_identifier", "text.plain", "text.html",], + "experiment": ["aiod_entry.date_modified", "experiment.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'"], + "ml_model": ["aiod_entry.date_modified", "ml_model.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'"], + "news": ["aiod_entry.date_modified", "news.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'", "headline", "alternative_headline"], - "organisation": ["aiod_entry.date_modified", "organisation.identifier", "name", "description", "legal_name"], - "project": ["aiod_entry.date_modified", "project.identifier", "name", "description"], - "publication": ["aiod_entry.date_modified", "publication.identifier", "name", "description", "issn", "isbn"], - "service": ["aiod_entry.date_modified", "service.identifier", "name", "description", "slogan"] + "organisation": ["aiod_entry.date_modified", "organisation.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'", "legal_name"], + "project": ["aiod_entry.date_modified", "project.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'"], + "publication": ["aiod_entry.date_modified", "publication.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'", "issn", "isbn"], + "service": ["aiod_entry.date_modified", "service.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'", "slogan"] } # MACROS FOR THE DOCUMENTS GENERATION FUNCTIONS @@ -84,7 +84,7 @@ }} }} mutate {{ - # remove_field => ["@version", "@timestamp"] + remove_field => ["@version", "@timestamp"] split => {{"application_area" => ","}} }}{0} }} @@ -140,7 +140,8 @@ SQL_BASE = """SELECT {1} FROM aiod.{0} -INNER JOIN aiod.aiod_entry ON aiod.{0}.aiod_entry_identifier=aiod.aiod_entry.identifier{2} +INNER JOIN aiod.aiod_entry ON aiod.{0}.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.{0}.description_identifier=aiod.text.identifier{2} """ SQL_RM_BASE = """SELECT {0}.identifier From 05c68142a5714059989aca2aacad2db8f68699a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 15 Nov 2023 13:19:04 +0100 Subject: [PATCH 48/79] Elasticsearch and logstash configuration integrated in src --- docker-compose.yaml | 41 +- es/setup/Dockerfile | 17 - es/setup/create_indices.sh | 9 - es/setup/dataset.json | 159 ------ es/setup/event.json | 152 ------ es/setup/experiment.json | 161 ------- es/setup/ml_model.json | 141 ------ es/setup/news.json | 120 ----- es/setup/organisation.json | 136 ------ es/setup/project.json | 127 ----- es/setup/publication.json | 168 ------- es/setup/service.json | 122 ----- logstash/config/logstash.yml | 8 +- logstash/pipeline/conf/init_table.conf | 215 --------- logstash/pipeline/conf/sync_table.conf | 449 ----------------- logstash/pipeline/sql/init_dataset.sql | 7 - logstash/pipeline/sql/init_event.sql | 7 - logstash/pipeline/sql/init_experiment.sql | 7 - logstash/pipeline/sql/init_ml_model.sql | 7 - logstash/pipeline/sql/init_news.sql | 7 - logstash/pipeline/sql/init_organisation.sql | 7 - logstash/pipeline/sql/init_project.sql | 7 - logstash/pipeline/sql/init_publication.sql | 7 - logstash/pipeline/sql/init_service.sql | 7 - logstash/pipeline/sql/rm_dataset.sql | 5 - logstash/pipeline/sql/rm_event.sql | 5 - logstash/pipeline/sql/rm_experiment.sql | 5 - logstash/pipeline/sql/rm_ml_model.sql | 5 - logstash/pipeline/sql/rm_news.sql | 5 - logstash/pipeline/sql/rm_organisation.sql | 5 - logstash/pipeline/sql/rm_project.sql | 5 - logstash/pipeline/sql/rm_publication.sql | 5 - logstash/pipeline/sql/rm_service.sql | 5 - logstash/pipeline/sql/sync_dataset.sql | 7 - logstash/pipeline/sql/sync_event.sql | 7 - logstash/pipeline/sql/sync_experiment.sql | 7 - logstash/pipeline/sql/sync_ml_model.sql | 7 - logstash/pipeline/sql/sync_news.sql | 7 - logstash/pipeline/sql/sync_organisation.sql | 7 - logstash/pipeline/sql/sync_project.sql | 7 - logstash/pipeline/sql/sync_publication.sql | 7 - logstash/pipeline/sql/sync_service.sql | 7 - logstash/setup/Dockerfile | 5 - src/routers/search_router.py | 6 +- .../generate_elasticsearch_indices.py | 85 ++++ .../generate_logstash_config_files.py | 175 ++----- .../generate_logstash_config_files_bkp.py | 454 ------------------ 47 files changed, 150 insertions(+), 2771 deletions(-) delete mode 100644 es/setup/Dockerfile delete mode 100755 es/setup/create_indices.sh delete mode 100644 es/setup/dataset.json delete mode 100644 es/setup/event.json delete mode 100644 es/setup/experiment.json delete mode 100644 es/setup/ml_model.json delete mode 100644 es/setup/news.json delete mode 100644 es/setup/organisation.json delete mode 100644 es/setup/project.json delete mode 100644 es/setup/publication.json delete mode 100644 es/setup/service.json delete mode 100644 logstash/pipeline/conf/init_table.conf delete mode 100644 logstash/pipeline/conf/sync_table.conf delete mode 100644 logstash/pipeline/sql/init_dataset.sql delete mode 100644 logstash/pipeline/sql/init_event.sql delete mode 100644 logstash/pipeline/sql/init_experiment.sql delete mode 100644 logstash/pipeline/sql/init_ml_model.sql delete mode 100644 logstash/pipeline/sql/init_news.sql delete mode 100644 logstash/pipeline/sql/init_organisation.sql delete mode 100644 logstash/pipeline/sql/init_project.sql delete mode 100644 logstash/pipeline/sql/init_publication.sql delete mode 100644 logstash/pipeline/sql/init_service.sql delete mode 100644 logstash/pipeline/sql/rm_dataset.sql delete mode 100644 logstash/pipeline/sql/rm_event.sql delete mode 100644 logstash/pipeline/sql/rm_experiment.sql delete mode 100644 logstash/pipeline/sql/rm_ml_model.sql delete mode 100644 logstash/pipeline/sql/rm_news.sql delete mode 100644 logstash/pipeline/sql/rm_organisation.sql delete mode 100644 logstash/pipeline/sql/rm_project.sql delete mode 100644 logstash/pipeline/sql/rm_publication.sql delete mode 100644 logstash/pipeline/sql/rm_service.sql delete mode 100644 logstash/pipeline/sql/sync_dataset.sql delete mode 100644 logstash/pipeline/sql/sync_event.sql delete mode 100644 logstash/pipeline/sql/sync_experiment.sql delete mode 100644 logstash/pipeline/sql/sync_ml_model.sql delete mode 100644 logstash/pipeline/sql/sync_news.sql delete mode 100644 logstash/pipeline/sql/sync_organisation.sql delete mode 100644 logstash/pipeline/sql/sync_project.sql delete mode 100644 logstash/pipeline/sql/sync_publication.sql delete mode 100644 logstash/pipeline/sql/sync_service.sql delete mode 100644 logstash/setup/Dockerfile create mode 100755 src/setup/elasticsearch/generate_elasticsearch_indices.py delete mode 100755 src/setup/logstash/generate_logstash_config_files_bkp.py diff --git a/docker-compose.yaml b/docker-compose.yaml index afc3770e..c7a40e73 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -44,8 +44,7 @@ services: depends_on: app: condition: service_healthy - - + deletion: build: context: deletion @@ -187,35 +186,25 @@ services: interval: 5s timeout: 30s retries: 30 - -# elasticsearch_setup: -# build: -# context: es/setup/ -# dockerfile: Dockerfile -# env_file: .env -# environment: -# - ES_USER=$ES_USER -# - ES_PASSWORD=$ES_PASSWORD -# restart: "no" -# depends_on: -# elasticsearch: -# condition: service_healthy - logstash_setup: - build: - context: logstash/setup/ - dockerfile: Dockerfile + es_logstash_setup: + image: ai4eu_server + container_name: es_logstash_setup env_file: .env environment: - MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD - ES_USER=$ES_USER - ES_PASSWORD=$ES_PASSWORD volumes: - - ./src/setup/logstash:/app + - ./src:/app - ./logstash:/logstash command: > - python generate_logstash_config_files.py + /bin/bash -c "python setup/logstash/generate_logstash_config_files.py && + python setup/elasticsearch/generate_elasticsearch_indices.py" restart: "no" + depends_on: + elasticsearch: + condition: service_healthy logstash: build: @@ -248,13 +237,7 @@ services: target: /usr/share/logstash/sql read_only: true depends_on: - app: - condition: service_healthy - elasticsearch: - condition: service_healthy -# elasticsearch_setup: -# condition: service_completed_successfully - logstash_setup: - condition: service_completed_successfully fill-db-with-examples: condition: service_completed_successfully + es_logstash_setup: + condition: service_completed_successfully diff --git a/es/setup/Dockerfile b/es/setup/Dockerfile deleted file mode 100644 index 1e9c0bef..00000000 --- a/es/setup/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM ubuntu:22.04 - -RUN apt-get update && apt-get install -y curl - -COPY dataset.json /dataset.json -COPY event.json /event.json -COPY experiment.json /experiment.json -COPY ml_model.json /ml_model.json -COPY news.json /news.json -COPY organisation.json /organisation.json -COPY project.json /project.json -COPY publication.json /publication.json -COPY service.json /service.json -COPY create_indices.sh /create_indices.sh - -ENTRYPOINT ["/bin/bash", "/create_indices.sh"] - diff --git a/es/setup/create_indices.sh b/es/setup/create_indices.sh deleted file mode 100755 index cb0cb43a..00000000 --- a/es/setup/create_indices.sh +++ /dev/null @@ -1,9 +0,0 @@ -curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/dataset?pretty -H 'Content-Type: application/json' -d @/dataset.json -curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/event?pretty -H 'Content-Type: application/json' -d @/event.json -curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/experiment?pretty -H 'Content-Type: application/json' -d @/experiment.json -curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/ml_model?pretty -H 'Content-Type: application/json' -d @/ml_model.json -curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/news?pretty -H 'Content-Type: application/json' -d @/news.json -curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/organisation?pretty -H 'Content-Type: application/json' -d @/organisation.json -curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/project?pretty -H 'Content-Type: application/json' -d @/project.json -curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/publication?pretty -H 'Content-Type: application/json' -d @/publication.json -curl -u ${ES_USER}:${ES_PASSWORD} -X PUT elasticsearch:9200/service?pretty -H 'Content-Type: application/json' -d @/service.json diff --git a/es/setup/dataset.json b/es/setup/dataset.json deleted file mode 100644 index 395f68c8..00000000 --- a/es/setup/dataset.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "mappings" : { - "properties" : { - "@timestamp" : { - "type" : "date", - "index" : false - }, - "@version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "asset_identifier" : { - "type" : "long", - "index" : false - }, - "date_created" : { - "type" : "date", - "index" : false - }, - "date_modified" : { - "type" : "date" - }, - "date_published" : { - "type" : "date", - "index" : false - }, - "description" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "identifier" : { - "type" : "long", - "index" : false - }, - "issn" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "license" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "measurement_technique" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform_identifier" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "resource_identifier" : { - "type" : "long", - "index" : false - }, - "same_as" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "status" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "temporal_coverage" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - } -} diff --git a/es/setup/event.json b/es/setup/event.json deleted file mode 100644 index 2deb13ef..00000000 --- a/es/setup/event.json +++ /dev/null @@ -1,152 +0,0 @@ -{ - "mappings" : { - "properties" : { - "@timestamp" : { - "type" : "date", - "index" : false - }, - "@version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "date_created" : { - "type" : "date", - "index" : false - }, - "date_modified" : { - "type" : "date" - }, - "description" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "end_date" : { - "type" : "date", - "index" : false - }, - "identifier" : { - "type" : "long", - "index" : false - }, - "mode" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "organiser_identifier" : { - "type" : "long", - "index" : false - }, - "organiser_type" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform_identifier" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "registration_link" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "resource_identifier" : { - "type" : "long", - "index" : false - }, - "same_as" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "schedule" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "start_date" : { - "type" : "date", - "index" : false - }, - "status" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - } -} diff --git a/es/setup/experiment.json b/es/setup/experiment.json deleted file mode 100644 index 4787149b..00000000 --- a/es/setup/experiment.json +++ /dev/null @@ -1,161 +0,0 @@ -{ - "mappings" : { - "properties" : { - "@timestamp" : { - "type" : "date", - "index" : false - }, - "@version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "asset_identifier" : { - "type" : "long", - "index" : false - }, - "date_created" : { - "type" : "date", - "index" : false - }, - "date_modified" : { - "type" : "date" - }, - "date_published" : { - "type" : "date", - "index" : false - }, - "description" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "execution_settings" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "experimental_workflow" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "identifier" : { - "type" : "long", - "index" : false - }, - "license" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform_identifier" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "reproducibility_explanation" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "resource_identifier" : { - "type" : "long", - "index" : false - }, - "same_as" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "status" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - } -} - diff --git a/es/setup/ml_model.json b/es/setup/ml_model.json deleted file mode 100644 index d847b674..00000000 --- a/es/setup/ml_model.json +++ /dev/null @@ -1,141 +0,0 @@ -{ - "mappings" : { - "properties" : { - "@timestamp" : { - "type" : "date", - "index" : false - }, - "@version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "asset_identifier" : { - "type" : "long", - "index" : false - }, - "date_created" : { - "type" : "date", - "index" : false - }, - "date_modified" : { - "type" : "date" - }, - "date_published" : { - "type" : "date", - "index" : false - }, - "description" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "identifier" : { - "type" : "long", - "index" : false - }, - "license" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "ml_model_type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform_identifier" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "resource_identifier" : { - "type" : "long", - "index" : false - }, - "same_as" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "status" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - } -} - diff --git a/es/setup/news.json b/es/setup/news.json deleted file mode 100644 index 7349acad..00000000 --- a/es/setup/news.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "mappings" : { - "properties" : { - "@timestamp" : { - "type" : "date", - "index" : false - }, - "@version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "alternative_headline" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "date_created" : { - "type" : "date", - "index" : false - }, - "date_modified" : { - "type" : "date" - }, - "description" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "headline" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "identifier" : { - "type" : "long", - "index" : false - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform_identifier" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "resource_identifier" : { - "type" : "long", - "index" : false - }, - "same_as" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "status" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - } -} diff --git a/es/setup/organisation.json b/es/setup/organisation.json deleted file mode 100644 index 9d02c7b3..00000000 --- a/es/setup/organisation.json +++ /dev/null @@ -1,136 +0,0 @@ -{ - "mappings" : { - "properties" : { - "@timestamp" : { - "type" : "date", - "index" : false - }, - "@version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "agent" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "date_created" : { - "type" : "date", - "index" : false - }, - "date_founded" : { - "type" : "date", - "format": "yyyy-MM-dd", - "index" : false - }, - "date_modified" : { - "type" : "date" - }, - "description" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "identifier" : { - "type" : "long", - "index" : false - }, - "legal_name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "organisation_type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform_identifier" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "resource_identifier" : { - "type" : "long", - "index" : false - }, - "same_as" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "status" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - } -} diff --git a/es/setup/project.json b/es/setup/project.json deleted file mode 100644 index 74dfb9cd..00000000 --- a/es/setup/project.json +++ /dev/null @@ -1,127 +0,0 @@ -{ - "mappings" : { - "properties" : { - "@timestamp" : { - "type" : "date", - "index" : false - }, - "@version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "coordinator_identifier" : { - "type" : "long", - "index" : false - }, - "coordinator_name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "date_created" : { - "type" : "date", - "index" : false - }, - "date_modified" : { - "type" : "date" - }, - "description" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "end_date" : { - "type" : "date", - "index" : false - }, - "identifier" : { - "type" : "long", - "index" : false - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform_identifier" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "resource_identifier" : { - "type" : "long", - "index" : false - }, - "same_as" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "start_date" : { - "type" : "date", - "index" : false - }, - "status" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "total_cost_euro" : { - "type" : "float", - "index" : false - }, - "type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - } -} diff --git a/es/setup/publication.json b/es/setup/publication.json deleted file mode 100644 index 5d07ad11..00000000 --- a/es/setup/publication.json +++ /dev/null @@ -1,168 +0,0 @@ -{ - "mappings" : { - "properties" : { - "@timestamp" : { - "type" : "date", - "index" : false - }, - "@version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "asset_identifier" : { - "type" : "long", - "index" : false - }, - "date_created" : { - "type" : "date", - "index" : false - }, - "date_modified" : { - "type" : "date" - }, - "date_published" : { - "type" : "date", - "index" : false - }, - "description" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "identifier" : { - "type" : "long", - "index" : false - }, - "isbn" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "issn" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "license" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "permanent_identifier" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform_identifier" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "publication_type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "resource_identifier" : { - "type" : "long", - "index" : false - }, - "same_as" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "status" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - } -} diff --git a/es/setup/service.json b/es/setup/service.json deleted file mode 100644 index 0fc18eaf..00000000 --- a/es/setup/service.json +++ /dev/null @@ -1,122 +0,0 @@ -{ - "mappings" : { - "properties" : { - "@timestamp" : { - "type" : "date", - "index" : false - }, - "@version" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "date_created" : { - "type" : "date", - "index" : false - }, - "date_modified" : { - "type" : "date" - }, - "description" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "identifier" : { - "type" : "long", - "index" : false - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "platform_identifier" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "resource_identifier" : { - "type" : "long", - "index" : false - }, - "same_as" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "slogan" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "status" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "terms_of_service" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "type" : { - "type" : "text", - "index" : false, - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - } -} - diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml index f6f2926e..8b137891 100644 --- a/logstash/config/logstash.yml +++ b/logstash/config/logstash.yml @@ -1,7 +1 @@ -# This file has been generated by `logstash_config.py` file -# --------------------------------------------------------- -http.host: "0.0.0.0" -xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] -xpack.monitoring.enabled: true -xpack.monitoring.elasticsearch.username: elastic -xpack.monitoring.elasticsearch.password: changeme + diff --git a/logstash/pipeline/conf/init_table.conf b/logstash/pipeline/conf/init_table.conf deleted file mode 100644 index 32dda9f1..00000000 --- a/logstash/pipeline/conf/init_table.conf +++ /dev/null @@ -1,215 +0,0 @@ -# This file has been generated by `logstash_config.py` file -# --------------------------------------------------------- -input { - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_dataset.sql" - type => "dataset" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_event.sql" - type => "event" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_experiment.sql" - type => "experiment" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_ml_model.sql" - type => "ml_model" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_news.sql" - type => "news" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_organisation.sql" - type => "organisation" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_project.sql" - type => "project" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_publication.sql" - type => "publication" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_service.sql" - type => "service" - } -} -filter { - if ![application_area] { - mutate { - replace => {"application_area" => ""} - } - } - mutate { - remove_field => ["@version", "@timestamp"] - split => {"application_area" => ","} - } - if [type] == "organisation" { - ruby { - code => ' - t = Time.at(event.get("date_founded").to_f) - event.set("date_founded", t.strftime("%Y-%m-%d")) - ' - } - } - -} -output { - if [type] == "dataset" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "dataset" - document_id => "dataset_%{identifier}" - } - } - if [type] == "event" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" - } - } - if [type] == "experiment" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "experiment" - document_id => "experiment_%{identifier}" - } - } - if [type] == "ml_model" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "ml_model" - document_id => "ml_model_%{identifier}" - } - } - if [type] == "news" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "news" - document_id => "news_%{identifier}" - } - } - if [type] == "organisation" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" - } - } - if [type] == "project" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "project" - document_id => "project_%{identifier}" - } - } - if [type] == "publication" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" - } - } - if [type] == "service" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" - } - } -} diff --git a/logstash/pipeline/conf/sync_table.conf b/logstash/pipeline/conf/sync_table.conf deleted file mode 100644 index 76c76fcf..00000000 --- a/logstash/pipeline/conf/sync_table.conf +++ /dev/null @@ -1,449 +0,0 @@ -# This file has been generated by `logstash_config.py` file -# --------------------------------------------------------- -input { - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_dataset.sql" - type => "dataset" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_dataset.sql" - type => "rm_dataset" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_event.sql" - type => "event" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_event.sql" - type => "rm_event" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_experiment.sql" - type => "experiment" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_experiment.sql" - type => "rm_experiment" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_ml_model.sql" - type => "ml_model" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_ml_model.sql" - type => "rm_ml_model" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_news.sql" - type => "news" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_news.sql" - type => "rm_news" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_organisation.sql" - type => "organisation" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_organisation.sql" - type => "rm_organisation" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_project.sql" - type => "project" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_project.sql" - type => "rm_project" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_publication.sql" - type => "publication" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_publication.sql" - type => "rm_publication" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_service.sql" - type => "service" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_service.sql" - type => "rm_service" - } -} -filter { - if ![application_area] { - mutate { - replace => {"application_area" => ""} - } - } - mutate { - remove_field => ["@version", "@timestamp"] - split => {"application_area" => ","} - } - if [type] == "organisation" or [type] == "rm_organisation" { - ruby { - code => ' - t = Time.at(event.get("date_founded").to_f) - event.set("date_founded", t.strftime("%Y-%m-%d")) - ' - } - } - -} -output { - if [type] == "dataset" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "dataset" - document_id => "dataset_%{identifier}" - } - } - if [type] == "rm_dataset" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "dataset" - document_id => "dataset_%{identifier}" - } - } - if [type] == "event" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" - } - } - if [type] == "rm_event" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" - } - } - if [type] == "experiment" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "experiment" - document_id => "experiment_%{identifier}" - } - } - if [type] == "rm_experiment" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "experiment" - document_id => "experiment_%{identifier}" - } - } - if [type] == "ml_model" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "ml_model" - document_id => "ml_model_%{identifier}" - } - } - if [type] == "rm_ml_model" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "ml_model" - document_id => "ml_model_%{identifier}" - } - } - if [type] == "news" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "news" - document_id => "news_%{identifier}" - } - } - if [type] == "rm_news" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "news" - document_id => "news_%{identifier}" - } - } - if [type] == "organisation" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" - } - } - if [type] == "rm_organisation" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" - } - } - if [type] == "project" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "project" - document_id => "project_%{identifier}" - } - } - if [type] == "rm_project" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "project" - document_id => "project_%{identifier}" - } - } - if [type] == "publication" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" - } - } - if [type] == "rm_publication" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" - } - } - if [type] == "service" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" - } - } - if [type] == "rm_service" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" - } - } -} diff --git a/logstash/pipeline/sql/init_dataset.sql b/logstash/pipeline/sql/init_dataset.sql deleted file mode 100644 index 7b66724b..00000000 --- a/logstash/pipeline/sql/init_dataset.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, dataset.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', issn -FROM aiod.dataset -INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier -WHERE aiod.dataset.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_event.sql b/logstash/pipeline/sql/init_event.sql deleted file mode 100644 index 19882005..00000000 --- a/logstash/pipeline/sql/init_event.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, event.identifier, name, description_identifier, text.plain, text.html -FROM aiod.event -INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier -WHERE aiod.event.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_experiment.sql b/logstash/pipeline/sql/init_experiment.sql deleted file mode 100644 index 045cfd5b..00000000 --- a/logstash/pipeline/sql/init_experiment.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, experiment.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' -FROM aiod.experiment -INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier -WHERE aiod.experiment.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql deleted file mode 100644 index 8aa9a400..00000000 --- a/logstash/pipeline/sql/init_ml_model.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, ml_model.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' -FROM aiod.ml_model -INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier -WHERE aiod.ml_model.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql deleted file mode 100644 index a53cd961..00000000 --- a/logstash/pipeline/sql/init_news.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, news.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', headline, alternative_headline -FROM aiod.news -INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier -WHERE aiod.news.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_organisation.sql b/logstash/pipeline/sql/init_organisation.sql deleted file mode 100644 index 4272025b..00000000 --- a/logstash/pipeline/sql/init_organisation.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, organisation.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', legal_name -FROM aiod.organisation -INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier -WHERE aiod.organisation.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_project.sql b/logstash/pipeline/sql/init_project.sql deleted file mode 100644 index 6d58918f..00000000 --- a/logstash/pipeline/sql/init_project.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, project.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' -FROM aiod.project -INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier -WHERE aiod.project.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_publication.sql b/logstash/pipeline/sql/init_publication.sql deleted file mode 100644 index 6da544db..00000000 --- a/logstash/pipeline/sql/init_publication.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, publication.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', issn, isbn -FROM aiod.publication -INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier -WHERE aiod.publication.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_service.sql b/logstash/pipeline/sql/init_service.sql deleted file mode 100644 index c41dfbdf..00000000 --- a/logstash/pipeline/sql/init_service.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, service.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', slogan -FROM aiod.service -INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier -WHERE aiod.service.date_deleted IS NULL diff --git a/logstash/pipeline/sql/rm_dataset.sql b/logstash/pipeline/sql/rm_dataset.sql deleted file mode 100644 index da1c359f..00000000 --- a/logstash/pipeline/sql/rm_dataset.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT dataset.identifier -FROM aiod.dataset -WHERE aiod.dataset.date_deleted IS NOT NULL AND aiod.dataset.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_event.sql b/logstash/pipeline/sql/rm_event.sql deleted file mode 100644 index 2bba4020..00000000 --- a/logstash/pipeline/sql/rm_event.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT event.identifier -FROM aiod.event -WHERE aiod.event.date_deleted IS NOT NULL AND aiod.event.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_experiment.sql b/logstash/pipeline/sql/rm_experiment.sql deleted file mode 100644 index 92ae7107..00000000 --- a/logstash/pipeline/sql/rm_experiment.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT experiment.identifier -FROM aiod.experiment -WHERE aiod.experiment.date_deleted IS NOT NULL AND aiod.experiment.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_ml_model.sql b/logstash/pipeline/sql/rm_ml_model.sql deleted file mode 100644 index 51d9e2df..00000000 --- a/logstash/pipeline/sql/rm_ml_model.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT ml_model.identifier -FROM aiod.ml_model -WHERE aiod.ml_model.date_deleted IS NOT NULL AND aiod.ml_model.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_news.sql b/logstash/pipeline/sql/rm_news.sql deleted file mode 100644 index 0f53c36a..00000000 --- a/logstash/pipeline/sql/rm_news.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT news.identifier -FROM aiod.news -WHERE aiod.news.date_deleted IS NOT NULL AND aiod.news.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_organisation.sql b/logstash/pipeline/sql/rm_organisation.sql deleted file mode 100644 index 160df96d..00000000 --- a/logstash/pipeline/sql/rm_organisation.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT organisation.identifier -FROM aiod.organisation -WHERE aiod.organisation.date_deleted IS NOT NULL AND aiod.organisation.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_project.sql b/logstash/pipeline/sql/rm_project.sql deleted file mode 100644 index 486988bd..00000000 --- a/logstash/pipeline/sql/rm_project.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT project.identifier -FROM aiod.project -WHERE aiod.project.date_deleted IS NOT NULL AND aiod.project.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_publication.sql b/logstash/pipeline/sql/rm_publication.sql deleted file mode 100644 index 980ab79e..00000000 --- a/logstash/pipeline/sql/rm_publication.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT publication.identifier -FROM aiod.publication -WHERE aiod.publication.date_deleted IS NOT NULL AND aiod.publication.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_service.sql b/logstash/pipeline/sql/rm_service.sql deleted file mode 100644 index 82d24c1c..00000000 --- a/logstash/pipeline/sql/rm_service.sql +++ /dev/null @@ -1,5 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT service.identifier -FROM aiod.service -WHERE aiod.service.date_deleted IS NOT NULL AND aiod.service.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/sync_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql deleted file mode 100644 index f5440b8f..00000000 --- a/logstash/pipeline/sql/sync_dataset.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, dataset.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', issn -FROM aiod.dataset -INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier -WHERE aiod.dataset.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_event.sql b/logstash/pipeline/sql/sync_event.sql deleted file mode 100644 index 8dcac2f7..00000000 --- a/logstash/pipeline/sql/sync_event.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, event.identifier, name, description_identifier, text.plain, text.html -FROM aiod.event -INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier -WHERE aiod.event.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql deleted file mode 100644 index a71d0b19..00000000 --- a/logstash/pipeline/sql/sync_experiment.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, experiment.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' -FROM aiod.experiment -INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier -WHERE aiod.experiment.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql deleted file mode 100644 index 95c2f524..00000000 --- a/logstash/pipeline/sql/sync_ml_model.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, ml_model.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' -FROM aiod.ml_model -INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier -WHERE aiod.ml_model.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql deleted file mode 100644 index 575a9b48..00000000 --- a/logstash/pipeline/sql/sync_news.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, news.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', headline, alternative_headline -FROM aiod.news -INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier -WHERE aiod.news.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql deleted file mode 100644 index 588f0a2b..00000000 --- a/logstash/pipeline/sql/sync_organisation.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, organisation.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', legal_name -FROM aiod.organisation -INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier -WHERE aiod.organisation.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_project.sql b/logstash/pipeline/sql/sync_project.sql deleted file mode 100644 index 9a9c0a92..00000000 --- a/logstash/pipeline/sql/sync_project.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, project.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html' -FROM aiod.project -INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier -WHERE aiod.project.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_publication.sql b/logstash/pipeline/sql/sync_publication.sql deleted file mode 100644 index 22abd17d..00000000 --- a/logstash/pipeline/sql/sync_publication.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, publication.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', issn, isbn -FROM aiod.publication -INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier -WHERE aiod.publication.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_service.sql b/logstash/pipeline/sql/sync_service.sql deleted file mode 100644 index 1f8ccea7..00000000 --- a/logstash/pipeline/sql/sync_service.sql +++ /dev/null @@ -1,7 +0,0 @@ --- This file has been generated by `logstash_config.py` file --- --------------------------------------------------------- -SELECT aiod_entry.date_modified, service.identifier, name, description_identifier, text.plain as 'plain', text.html as 'html', slogan -FROM aiod.service -INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier -WHERE aiod.service.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/setup/Dockerfile b/logstash/setup/Dockerfile deleted file mode 100644 index a1048220..00000000 --- a/logstash/setup/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM python:3.11-slim-bullseye - -# RUN apt-get update && apt-get -y install pip && pip install Jinja2 - -WORKDIR /app \ No newline at end of file diff --git a/src/routers/search_router.py b/src/routers/search_router.py index 2f53e912..2b3be77e 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -151,10 +151,8 @@ def search( from_=from_, size=limit, sort=SORT) total_hits = result["hits"]["total"]["value"] - next_offset = ( - result["hits"]["hits"][-1]["sort"] - if len(result["hits"]["hits"]) > 0 else None - ) + next_offset = (result["hits"]["hits"][-1]["sort"] + if len(result["hits"]["hits"]) > 0 else None) if get_all: # Launch database query diff --git a/src/setup/elasticsearch/generate_elasticsearch_indices.py b/src/setup/elasticsearch/generate_elasticsearch_indices.py new file mode 100755 index 00000000..603a2591 --- /dev/null +++ b/src/setup/elasticsearch/generate_elasticsearch_indices.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +"""Generates the elasticsearch indices + +Launched by the es_logstash_setup container in the docker-compose file. +""" + +import os +import copy +from elasticsearch import Elasticsearch + +from routers.search_routers import router_list + +BASE_MAPPING = { + "mappings" : { + "properties" : { + "date_modified" : { + "type" : "date" + }, + "identifier" : { + "type" : "long" + }, + "name" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "plain" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "html" : { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + } + } + } +} + +def add_field(base_mapping, field): + new_mapping = copy.deepcopy(base_mapping) + new_mapping["mappings"]["properties"][field] = {"type": "text", "fields": {"keyword": {"type": "keyword"}}} + return new_mapping + +def generate_mapping(entity, fields): + mapping = BASE_MAPPING + for field in fields: + mapping = add_field(mapping, field) + return mapping + +def main(): + + # Generate client + es_user = os.environ['ES_USER'] + es_password = os.environ['ES_PASSWORD'] + es_client = Elasticsearch("http://elasticsearch:9200", + basic_auth=(es_user, es_password)) + + # Search for entities and their extra fields + global_fields = set(['name', 'plain', 'html']) + entities = {} + for router in router_list: + extra_fields = list(router.match_fields^global_fields) + entities[router.es_index] = extra_fields + + # Add indices with mappings + for entity, fields in entities.items(): + mapping = generate_mapping(entity, fields) + print(f"{entity}: {mapping}") + es_client.indices.create(index=entity, body=mapping, ignore=400) + +if __name__ == "__main__": + main() diff --git a/src/setup/logstash/generate_logstash_config_files.py b/src/setup/logstash/generate_logstash_config_files.py index 2fc7a57f..f960c6d1 100755 --- a/src/setup/logstash/generate_logstash_config_files.py +++ b/src/setup/logstash/generate_logstash_config_files.py @@ -1,32 +1,28 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -import os +"""Generates the logstash configuration and pipelines files -# PATH MACROS -# ============================================================================= +This file generates the logstash configuration file in logstash/config, the +pipelines configuration files in logstash/pipelines/conf and the pipelines +sql sentences in logstash/pipelines/sql. -# Repository base path -#REPO_PATH = os.path.join("..", "..", "..") +Launched by the es_logstash_setup container in the docker-compose file. +""" -FIELDS = { - "dataset": ["aiod_entry.date_modified", "dataset.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'", "issn"], - "event": ["aiod_entry.date_modified", "event.identifier", "name", "description_identifier", "text.plain", "text.html",], - "experiment": ["aiod_entry.date_modified", "experiment.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'"], - "ml_model": ["aiod_entry.date_modified", "ml_model.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'"], - "news": ["aiod_entry.date_modified", "news.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'", "headline", - "alternative_headline"], - "organisation": ["aiod_entry.date_modified", "organisation.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'", "legal_name"], - "project": ["aiod_entry.date_modified", "project.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'"], - "publication": ["aiod_entry.date_modified", "publication.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'", "issn", "isbn"], - "service": ["aiod_entry.date_modified", "service.identifier", "name", "description_identifier", "text.plain as 'plain'", "text.html as 'html'", "slogan"] -} +import os + +from routers.search_routers import router_list # MACROS FOR THE DOCUMENTS GENERATION FUNCTIONS # ============================================================================= -INFO = """{0} This file has been generated by `logstash_config.py` file -{0} --------------------------------------------------------- +BASE_FIELDS = ["{0}.identifier", "{0}.name", "text.plain as 'plain'", + "text.html as 'html'", "aiod_entry.date_modified"] + +INFO = """{0} This file has been generated by `generate_logstash_config.py` +{0} file, placed in `src/setup/logstash` +{0} ------------------------------------------------------------- """ CONF_BASE = """http.host: "0.0.0.0" @@ -77,32 +73,13 @@ }} """ -FILTER_BASE = """filter {{ - if ![application_area] {{ - mutate {{ - replace => {{"application_area" => ""}} - }} - }} - mutate {{ +FILTER = """filter { + mutate { remove_field => ["@version", "@timestamp"] - split => {{"application_area" => ","}} - }}{0} -}} -""" - -DATE_FILTER = """ - if [type] == "organisation" {0}{{ - ruby {{ - code => ' - t = Time.at(event.get("date_founded").to_f) - event.set("date_founded", t.strftime("%Y-%m-%d")) - ' - }} - }} + } +} """ -SYNC_DATE_FILTER_ADDON = """or [type] == "rm_organisation" """ - INIT_OUTPUT_BASE = """ if [type] == "{2}" {{ elasticsearch {{ hosts => "elasticsearch:9200" @@ -177,12 +154,10 @@ def generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, if not sync: # init file file_path = os.path.join(pipeline_conf_path, "init_table.conf") input_base = INIT_INPUT_BASE - date_filter = DATE_FILTER.format("") output_base = INIT_OUTPUT_BASE else: # sync file file_path = os.path.join(pipeline_conf_path, "sync_table.conf") input_base = SYNC_INPUT_BASE - date_filter = DATE_FILTER.format(SYNC_DATE_FILTER_ADDON) output_base = SYNC_OUTPUT_BASE # Generate configuration file @@ -198,10 +173,7 @@ def generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, f.write("}\n") # Filters - if "organisation" in entities: - f.write(FILTER_BASE.format(date_filter)) - else: - f.write(FILTER_BASE.format("")) + f.write(FILTER) # Output f.write("output {\n") @@ -209,7 +181,7 @@ def generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, f.write(output_base.format(es_user, es_pass, entity)) f.write("}\n") -def generate_pipeline_sql_files(pipeline_sql_path, entity, sync=False): +def generate_pipeline_sql_files(pipeline_sql_path, entity, fields, sync=False): # Generate output file path if sync: @@ -229,7 +201,10 @@ def generate_pipeline_sql_files(pipeline_sql_path, entity, sync=False): else: where_clause = INIT_CLAUSE.format(entity) - f.write(SQL_BASE.format(entity, ", ".join(FIELDS[entity]), where_clause)) + # Generate field list + field_list = ", ".join(fields).format(entity) + + f.write(SQL_BASE.format(entity, field_list, where_clause)) def generate_pipeline_sql_rm_files(pipeline_sql_path, entity): @@ -248,9 +223,21 @@ def generate_pipeline_sql_rm_files(pipeline_sql_path, entity): # MAIN FUNCTION # ============================================================================= -def main(base_path, db_user, db_pass, es_user, es_pass, entities, - ai_asset_entities, attributes, type_entities, mode_entities, - status_entities, agent_entities, organisation_entities): +def main(): + + # Get configuration variables + base_path = "/logstash" + db_user = "root" + db_pass = os.environ['MYSQL_ROOT_PASSWORD'] + es_user = os.environ['ES_USER'] + es_pass = os.environ['ES_PASSWORD'] + + # Search for entities and their extra fields + global_fields = set(['name', 'plain', 'html']) + entities = {} + for router in router_list: + extra_fields = list(router.match_fields^global_fields) + entities[router.es_index] = BASE_FIELDS + extra_fields # Make configuration dir conf_path = os.path.join(base_path, "config") @@ -265,81 +252,19 @@ def main(base_path, db_user, db_pass, es_user, es_pass, entities, # Generate logstash configuration file generate_conf_file(conf_path, es_user, es_pass) - # Generate pipeline configuration init file + # Generate pipeline configuration init and sync files generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, - es_user, es_pass, entities, sync=False) - - # Generate pipeline configuration sync file + es_user, es_pass, entities.keys(), sync=False) generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, - es_user, es_pass, entities, sync=True) + es_user, es_pass, entities.keys(), sync=True) - # Generate SQL init and sync files - for entity in entities: - generate_pipeline_sql_files(pipeline_sql_path, entity, sync=False) - generate_pipeline_sql_files(pipeline_sql_path, entity, sync=True) - - # Generate SQL rm files - for entity in entities: + # Generate SQL init, sync and rm files + for entity, fields in entities.items(): + generate_pipeline_sql_files(pipeline_sql_path, entity, fields, + sync=False) + generate_pipeline_sql_files(pipeline_sql_path, entity, fields, + sync=True) generate_pipeline_sql_rm_files(pipeline_sql_path, entity) if __name__ == "__main__": - - # PATH MACROS - # ------------------------------------------------------------------------- - - # Repository base path -# repo_path = REPO_PATH - - # Configuration base path -# base_path = os.path.join(repo_path, "logstash") - base_path = "/logstash" - - # ------------------------------------------------------------------------- - - # Users and passwords - db_user = "root" - db_pass = os.environ['MYSQL_ROOT_PASSWORD'] - es_user = os.environ['ES_USER'] - es_pass = os.environ['ES_PASSWORD'] -# with open(os.path.join(repo_path, ".env"), "r") as f: -# for line in f: -# if "MYSQL_ROOT_PASSWORD" in line: -# db_pass = line.split("=")[1][:-1] -# if "ES_USER" in line: -# es_user = line.split("=")[1][:-1] -# if "ES_PASSWORD" in line: -# es_pass = line.split("=")[1][:-1] - - # Entities and attributes - entities = ["dataset", "event", "experiment", "ml_model", "news", - "organisation", "project", "publication", "service"] - ai_asset_entities = ["dataset", "experiment", "ml_model", "publication"] - attributes = { - "dataset": ["issn", "measurement_technique", "temporal_coverage"], - "event": ["start_date", "end_date", "schedule", "registration_link", - "organiser_identifier"], - "experiment": ["experimental_workflow", "execution_settings", - "reproducibility_explanation"], - "news": ["headline", "alternative_headline"], - "organisation": ["date_founded", "legal_name"], - "project": ["start_date", "end_date", "total_cost_euro", - "coordinator_identifier"], - "publication": ["permanent_identifier", "isbn", "issn", - "knowledge_asset_id AS `knowledge_asset_identifier`"], - "service": ["slogan", "terms_of_service"] - } - type_entities = ["ml_model", "organisation", "publication"] - mode_entities = ["event"] - status_entities = ["event"] - agent_entities = { - "event": ("organiser_identifier", "organiser_type"), - "organisation": ("agent_id", "agent") - } - organisation_entities = { - "project": ("coordinator_identifier", "coordinator_name") - } - - # Main function - main(base_path, db_user, db_pass, es_user, es_pass, entities, - ai_asset_entities, attributes, type_entities, mode_entities, - status_entities, agent_entities, organisation_entities) + main() diff --git a/src/setup/logstash/generate_logstash_config_files_bkp.py b/src/setup/logstash/generate_logstash_config_files_bkp.py deleted file mode 100755 index d80fb1ea..00000000 --- a/src/setup/logstash/generate_logstash_config_files_bkp.py +++ /dev/null @@ -1,454 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -import os - -# PATH MACROS -# ============================================================================= - -# Repository base path -#REPO_PATH = os.path.join("..", "..", "..") - -# MACROS FOR THE DOCUMENTS GENERATION FUNCTIONS -# ============================================================================= - -INFO = """{0} This file has been generated by `logstash_config.py` file -{0} --------------------------------------------------------- -""" - -CONF_BASE = """http.host: "0.0.0.0" -xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] -xpack.monitoring.enabled: true -xpack.monitoring.elasticsearch.username: {0} -xpack.monitoring.elasticsearch.password: {1} -""" - -INIT_INPUT_BASE = """ jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "{0}" - jdbc_password => "{1}" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_{2}.sql" - type => "{2}" - }} -""" - -SYNC_INPUT_BASE = """ jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "{0}" - jdbc_password => "{1}" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_{2}.sql" - type => "{2}" - }} - jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "{0}" - jdbc_password => "{1}" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_{2}.sql" - type => "rm_{2}" - }} -""" - -FILTER_BASE = """filter {{ - if ![application_area] {{ - mutate {{ - replace => {{"application_area" => ""}} - }} - }} - mutate {{ - # remove_field => ["@version", "@timestamp"] - split => {{"application_area" => ","}} - }}{0} -}} -""" - -DATE_FILTER = """ - if [type] == "organisation" {0}{{ - ruby {{ - code => ' - t = Time.at(event.get("date_founded").to_f) - event.set("date_founded", t.strftime("%Y-%m-%d")) - ' - }} - }} -""" - -SYNC_DATE_FILTER_ADDON = """or [type] == "rm_organisation" """ - -INIT_OUTPUT_BASE = """ if [type] == "{2}" {{ - elasticsearch {{ - hosts => "elasticsearch:9200" - user => "{0}" - password => "{1}" - ecs_compatibility => disabled - index => "{2}" - document_id => "{2}_%{{identifier}}" - }} - }} -""" - -#TODO: TEST DELETE WITHOUT protocol => "transport" -SYNC_OUTPUT_BASE = """ if [type] == "{2}" {{ - elasticsearch {{ - hosts => "elasticsearch:9200" - user => "{0}" - password => "{1}" - ecs_compatibility => disabled - index => "{2}" - document_id => "{2}_%{{identifier}}" - }} - }} - if [type] == "rm_{2}" {{ - elasticsearch {{ - action => "delete" - hosts => "elasticsearch:9200" - user => "{0}" - password => "{1}" - ecs_compatibility => disabled - index => "{2}" - document_id => "{2}_%{{identifier}}" - }} - }} -""" - -SQL_BASE = """SELECT - -- Concept - {0}.identifier, - {0}.platform, - {0}.platform_identifier, - -- Concept.aiod_entry - status.name AS `status`, - aiod_entry.date_modified, - aiod_entry.date_created, - -- Resource - {0}.ai_resource_id AS `resource_identifier`, - {0}.name, - {0}.description, - {0}.same_as{1}{2}{3}{4}{5}{6}{7}, - -- Application Area - GROUP_CONCAT(application_area.name) AS `application_area` -FROM aiod.{0} -INNER JOIN aiod.aiod_entry ON aiod.{0}.aiod_entry_identifier=aiod.aiod_entry.identifier -INNER JOIN aiod.status ON aiod.aiod_entry.status_identifier=aiod.status.identifier{8} -LEFT JOIN aiod.{0}_application_area_link ON aiod.{0}_application_area_link.from_identifier=aiod.{0}.identifier -LEFT JOIN aiod.application_area ON aiod.{0}_application_area_link.linked_identifier=aiod.application_area.identifier{9} -GROUP BY aiod.{0}.identifier -ORDER BY aiod.{0}.identifier -""" - -SQL_RM_BASE = """SELECT {0}.identifier -FROM aiod.{0} -WHERE aiod.{0}.date_deleted IS NOT NULL AND aiod.{0}.date_deleted > :sql_last_value -""" - -AI_ASSET_BASE = """, - -- AIAsset - {0}.ai_asset_id AS `asset_identifier`, - {0}.date_published, - {0}.version, - license.name AS `license`""" - -ATTRIBUTES_BASE = """, - -- Attributes - """ - -TYPE_BASE = """, - -- Type - {0}_type.name AS `{0}_type`""" - -MODE_BASE = """, - -- Mode - {0}_mode.name AS `mode`""" - -STATUS_BASE = """, - -- Status - {0}_status.name AS `{0}_status`""" - -AGENT_BASE = """, - -- Agent - agent.type AS `{0}`""" - -ORGANISATION_BASE = """, - -- Organisation - organisation.name AS `{0}`""" - -LEFT_LICENSE = """ -LEFT JOIN aiod.license ON aiod.{0}.license_identifier=aiod.license.identifier""" - -LEFT_TYPE = """ -LEFT JOIN aiod.{0}_type ON aiod.{0}.type_identifier=aiod.{0}_type.identifier""" - -LEFT_MODE = """ -LEFT JOIN aiod.{0}_mode ON aiod.{0}.mode_identifier=aiod.{0}_mode.identifier""" - -LEFT_STATUS = """ -LEFT JOIN aiod.{0}_status ON aiod.{0}.status_identifier=aiod.{0}_status.identifier""" - -LEFT_AGENT = """ -LEFT JOIN aiod.agent ON aiod.{0}.{1}=aiod.agent.identifier""" - -LEFT_ORGANISATION = """ -LEFT JOIN aiod.organisation ON aiod.{0}.{1}=aiod.organisation.identifier""" - -INIT_CLAUSE = """ -WHERE aiod.{0}.date_deleted IS NULL""" - -SYNC_CLAUSE = """ -WHERE aiod.{0}.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" - -# DOCUMENTS GENERATION FUNCTIONS -# ============================================================================= - -def generate_conf_file(conf_path, es_user, es_pass): - - file_path = os.path.join(conf_path, "logstash.yml") - - # Generate configuration file - with open(file_path, 'w') as f: - - # Info - f.write(INFO.format('#')) - - # Configuration - f.write(CONF_BASE.format(es_user, es_pass)) - -def generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, - es_user, es_pass, entities, sync=False): - - if not sync: # init file - file_path = os.path.join(pipeline_conf_path, "init_table.conf") - input_base = INIT_INPUT_BASE - date_filter = DATE_FILTER.format("") - output_base = INIT_OUTPUT_BASE - else: # sync file - file_path = os.path.join(pipeline_conf_path, "sync_table.conf") - input_base = SYNC_INPUT_BASE - date_filter = DATE_FILTER.format(SYNC_DATE_FILTER_ADDON) - output_base = SYNC_OUTPUT_BASE - - # Generate configuration file - with open(file_path, 'w') as f: - - # Info - f.write(INFO.format('#')) - - # Input - f.write("input {\n") - for entity in entities: - f.write(input_base.format(db_user, db_pass, entity)) - f.write("}\n") - - # Filters - if "organisation" in entities: - f.write(FILTER_BASE.format(date_filter)) - else: - f.write(FILTER_BASE.format("")) - - # Output - f.write("output {\n") - for entity in entities: - f.write(output_base.format(es_user, es_pass, entity)) - f.write("}\n") - -def generate_pipeline_sql_files(pipeline_sql_path, entity, sync=False): - - # Generate output file path - if sync: - file_path = os.path.join(pipeline_sql_path, f"sync_{entity}.sql") - else: - file_path = os.path.join(pipeline_sql_path, f"init_{entity}.sql") - - # Write the output file - with open(file_path, 'w') as f: - - # Info - f.write(INFO.format('--')) - - # Left joins - left_joins = "" - - # For ai_asset entities - ai_asset_attributes = "" - if entity in ai_asset_entities: - ai_asset_attributes = AI_ASSET_BASE.format(entity) - left_joins += LEFT_LICENSE.format(entity) - - # Attributes - entity_attributes = "" - if entity in attributes.keys(): - entity_attributes = (ATTRIBUTES_BASE - + f"{entity}.{attributes[entity][0]}") - for attribute in attributes[entity][1:]: - entity_attributes += f",\n {entity}.{attribute}" - - # For entities with a type relation - type_attribute = "" - if entity in type_entities: - type_attribute = TYPE_BASE.format(entity) - left_joins += LEFT_TYPE.format(entity) - - # For entities with a mode relation - mode_attribute = "" - if entity in mode_entities: - mode_attribute = MODE_BASE.format(entity) - left_joins += LEFT_MODE.format(entity) - - # For entities with a status relation - status_attribute = "" - if entity in status_entities: - status_attribute = STATUS_BASE.format(entity) - left_joins += LEFT_STATUS.format(entity) - - # For entities with an agent relation - agent_attribute = "" - if entity in agent_entities.keys(): - agent_attribute = AGENT_BASE.format(agent_entities[entity][1]) - left_joins += LEFT_AGENT.format(entity, agent_entities[entity][0]) - - # For entities with an organisation relation - organisation_attribute = "" - if entity in organisation_entities.keys(): - organisation_attribute = ORGANISATION_BASE.format( - organisation_entities[entity][1]) - left_joins += LEFT_ORGANISATION.format(entity, - organisation_entities[entity][0]) - - # Where clause - if sync: - where_clause = SYNC_CLAUSE.format(entity) - else: - where_clause = INIT_CLAUSE.format(entity) - - f.write(SQL_BASE.format(entity, ai_asset_attributes, - entity_attributes, type_attribute, - mode_attribute, status_attribute, - agent_attribute, organisation_attribute, - left_joins, where_clause)) - -def generate_pipeline_sql_rm_files(pipeline_sql_path, entity): - - # Generate output file path - file_path = os.path.join(pipeline_sql_path, f"rm_{entity}.sql") - - # Write the output file - with open(file_path, 'w') as f: - - # Info - f.write(INFO.format('--')) - - # SQL query - f.write(SQL_RM_BASE.format(entity)) - -# MAIN FUNCTION -# ============================================================================= - -def main(base_path, db_user, db_pass, es_user, es_pass, entities, - ai_asset_entities, attributes, type_entities, mode_entities, - status_entities, agent_entities, organisation_entities): - - # Make configuration dir - conf_path = os.path.join(base_path, "config") - os.makedirs(conf_path, exist_ok=True) - - # Make pipeline configuration dirs - pipeline_conf_path = os.path.join(base_path, "pipeline", "conf") - os.makedirs(pipeline_conf_path, exist_ok=True) - pipeline_sql_path = os.path.join(base_path, "pipeline", "sql") - os.makedirs(pipeline_sql_path, exist_ok=True) - - # Generate logstash configuration file - generate_conf_file(conf_path, es_user, es_pass) - - # Generate pipeline configuration init file - generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, - es_user, es_pass, entities, sync=False) - - # Generate pipeline configuration sync file - generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, - es_user, es_pass, entities, sync=True) - - # Generate SQL init and sync files - for entity in entities: - generate_pipeline_sql_files(pipeline_sql_path, entity, sync=False) - generate_pipeline_sql_files(pipeline_sql_path, entity, sync=True) - - # Generate SQL rm files - for entity in entities: - generate_pipeline_sql_rm_files(pipeline_sql_path, entity) - -if __name__ == "__main__": - - # PATH MACROS - # ------------------------------------------------------------------------- - - # Repository base path -# repo_path = REPO_PATH - - # Configuration base path -# base_path = os.path.join(repo_path, "logstash") - base_path = "/logstash" - - # ------------------------------------------------------------------------- - - # Users and passwords - db_user = "root" - db_pass = os.environ['MYSQL_ROOT_PASSWORD'] - es_user = os.environ['ES_USER'] - es_pass = os.environ['ES_PASSWORD'] -# with open(os.path.join(repo_path, ".env"), "r") as f: -# for line in f: -# if "MYSQL_ROOT_PASSWORD" in line: -# db_pass = line.split("=")[1][:-1] -# if "ES_USER" in line: -# es_user = line.split("=")[1][:-1] -# if "ES_PASSWORD" in line: -# es_pass = line.split("=")[1][:-1] - - # Entities and attributes - entities = ["dataset", "event", "experiment", "ml_model", "news", - "organisation", "project", "publication", "service"] - ai_asset_entities = ["dataset", "experiment", "ml_model", "publication"] - attributes = { - "dataset": ["issn", "measurement_technique", "temporal_coverage"], - "event": ["start_date", "end_date", "schedule", "registration_link", - "organiser_identifier"], - "experiment": ["experimental_workflow", "execution_settings", - "reproducibility_explanation"], - "news": ["headline", "alternative_headline"], - "organisation": ["date_founded", "legal_name"], - "project": ["start_date", "end_date", "total_cost_euro", - "coordinator_identifier"], - "publication": ["permanent_identifier", "isbn", "issn", - "knowledge_asset_id AS `knowledge_asset_identifier`"], - "service": ["slogan", "terms_of_service"] - } - type_entities = ["ml_model", "organisation", "publication"] - mode_entities = ["event"] - status_entities = ["event"] - agent_entities = { - "event": ("organiser_identifier", "organiser_type"), - "organisation": ("agent_id", "agent") - } - organisation_entities = { - "project": ("coordinator_identifier", "coordinator_name") - } - - # Main function - main(base_path, db_user, db_pass, es_user, es_pass, entities, - ai_asset_entities, attributes, type_entities, mode_entities, - status_entities, agent_entities, organisation_entities) From 92b05cf63f119614f792933d5090e8b3d32c633a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 15 Nov 2023 15:14:41 +0100 Subject: [PATCH 49/79] Search router tests actualised --- logstash/config/logstash.yml | 9 +- logstash/pipeline/conf/init_table.conf | 201 ++++++++ logstash/pipeline/conf/sync_table.conf | 435 ++++++++++++++++++ logstash/pipeline/sql/init_dataset.sql | 8 + logstash/pipeline/sql/init_event.sql | 8 + logstash/pipeline/sql/init_experiment.sql | 8 + logstash/pipeline/sql/init_ml_model.sql | 8 + logstash/pipeline/sql/init_news.sql | 8 + logstash/pipeline/sql/init_organisation.sql | 8 + logstash/pipeline/sql/init_project.sql | 8 + logstash/pipeline/sql/init_publication.sql | 8 + logstash/pipeline/sql/init_service.sql | 8 + logstash/pipeline/sql/rm_dataset.sql | 6 + logstash/pipeline/sql/rm_event.sql | 6 + logstash/pipeline/sql/rm_experiment.sql | 6 + logstash/pipeline/sql/rm_ml_model.sql | 6 + logstash/pipeline/sql/rm_news.sql | 6 + logstash/pipeline/sql/rm_organisation.sql | 6 + logstash/pipeline/sql/rm_project.sql | 6 + logstash/pipeline/sql/rm_publication.sql | 6 + logstash/pipeline/sql/rm_service.sql | 6 + logstash/pipeline/sql/sync_dataset.sql | 8 + logstash/pipeline/sql/sync_event.sql | 8 + logstash/pipeline/sql/sync_experiment.sql | 8 + logstash/pipeline/sql/sync_ml_model.sql | 8 + logstash/pipeline/sql/sync_news.sql | 8 + logstash/pipeline/sql/sync_organisation.sql | 8 + logstash/pipeline/sql/sync_project.sql | 8 + logstash/pipeline/sql/sync_publication.sql | 8 + logstash/pipeline/sql/sync_service.sql | 8 + .../elasticsearch/dataset_search.json | 41 +- .../resources/elasticsearch/event_search.json | 36 +- .../elasticsearch/experiment_search.json | 40 +- .../elasticsearch/ml_model_search.json | 38 +- .../resources/elasticsearch/news_search.json | 32 +- .../elasticsearch/organisation_search.json | 33 +- .../elasticsearch/project_search.json | 33 +- .../elasticsearch/publication_search.json | 37 +- .../elasticsearch/service_search.json | 31 +- .../test_search_router_datasets.py | 49 -- .../test_search_router_events.py | 48 -- .../test_search_router_experiments.py | 49 -- .../test_search_router_ml_model.py | 46 -- .../search_routers/test_search_router_news.py | 45 -- .../test_search_router_organisations.py | 45 -- .../test_search_router_projects.py | 46 -- .../test_search_router_publications.py | 49 -- .../test_search_router_services.py | 45 -- .../search_routers/test_search_routers.py | 134 ++++++ 49 files changed, 1063 insertions(+), 657 deletions(-) create mode 100644 logstash/pipeline/conf/init_table.conf create mode 100644 logstash/pipeline/conf/sync_table.conf create mode 100644 logstash/pipeline/sql/init_dataset.sql create mode 100644 logstash/pipeline/sql/init_event.sql create mode 100644 logstash/pipeline/sql/init_experiment.sql create mode 100644 logstash/pipeline/sql/init_ml_model.sql create mode 100644 logstash/pipeline/sql/init_news.sql create mode 100644 logstash/pipeline/sql/init_organisation.sql create mode 100644 logstash/pipeline/sql/init_project.sql create mode 100644 logstash/pipeline/sql/init_publication.sql create mode 100644 logstash/pipeline/sql/init_service.sql create mode 100644 logstash/pipeline/sql/rm_dataset.sql create mode 100644 logstash/pipeline/sql/rm_event.sql create mode 100644 logstash/pipeline/sql/rm_experiment.sql create mode 100644 logstash/pipeline/sql/rm_ml_model.sql create mode 100644 logstash/pipeline/sql/rm_news.sql create mode 100644 logstash/pipeline/sql/rm_organisation.sql create mode 100644 logstash/pipeline/sql/rm_project.sql create mode 100644 logstash/pipeline/sql/rm_publication.sql create mode 100644 logstash/pipeline/sql/rm_service.sql create mode 100644 logstash/pipeline/sql/sync_dataset.sql create mode 100644 logstash/pipeline/sql/sync_event.sql create mode 100644 logstash/pipeline/sql/sync_experiment.sql create mode 100644 logstash/pipeline/sql/sync_ml_model.sql create mode 100644 logstash/pipeline/sql/sync_news.sql create mode 100644 logstash/pipeline/sql/sync_organisation.sql create mode 100644 logstash/pipeline/sql/sync_project.sql create mode 100644 logstash/pipeline/sql/sync_publication.sql create mode 100644 logstash/pipeline/sql/sync_service.sql delete mode 100644 src/tests/routers/search_routers/test_search_router_datasets.py delete mode 100644 src/tests/routers/search_routers/test_search_router_events.py delete mode 100644 src/tests/routers/search_routers/test_search_router_experiments.py delete mode 100644 src/tests/routers/search_routers/test_search_router_ml_model.py delete mode 100644 src/tests/routers/search_routers/test_search_router_news.py delete mode 100644 src/tests/routers/search_routers/test_search_router_organisations.py delete mode 100644 src/tests/routers/search_routers/test_search_router_projects.py delete mode 100644 src/tests/routers/search_routers/test_search_router_publications.py delete mode 100644 src/tests/routers/search_routers/test_search_router_services.py create mode 100644 src/tests/routers/search_routers/test_search_routers.py diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml index 8b137891..13fda917 100644 --- a/logstash/config/logstash.yml +++ b/logstash/config/logstash.yml @@ -1 +1,8 @@ - +# This file has been generated by `generate_logstash_config.py` +# file, placed in `src/setup/logstash` +# ------------------------------------------------------------- +http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] +xpack.monitoring.enabled: true +xpack.monitoring.elasticsearch.username: elastic +xpack.monitoring.elasticsearch.password: changeme diff --git a/logstash/pipeline/conf/init_table.conf b/logstash/pipeline/conf/init_table.conf new file mode 100644 index 00000000..89a38dc6 --- /dev/null +++ b/logstash/pipeline/conf/init_table.conf @@ -0,0 +1,201 @@ +# This file has been generated by `generate_logstash_config.py` +# file, placed in `src/setup/logstash` +# ------------------------------------------------------------- +input { + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_dataset.sql" + type => "dataset" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_event.sql" + type => "event" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_experiment.sql" + type => "experiment" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_ml_model.sql" + type => "ml_model" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_news.sql" + type => "news" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_organisation.sql" + type => "organisation" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_project.sql" + type => "project" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_publication.sql" + type => "publication" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_service.sql" + type => "service" + } +} +filter { + mutate { + remove_field => ["@version", "@timestamp"] + } +} +output { + if [type] == "dataset" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "dataset" + document_id => "dataset_%{identifier}" + } + } + if [type] == "event" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "event" + document_id => "event_%{identifier}" + } + } + if [type] == "experiment" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "experiment" + document_id => "experiment_%{identifier}" + } + } + if [type] == "ml_model" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "ml_model" + document_id => "ml_model_%{identifier}" + } + } + if [type] == "news" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "news" + document_id => "news_%{identifier}" + } + } + if [type] == "organisation" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "organisation" + document_id => "organisation_%{identifier}" + } + } + if [type] == "project" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "project" + document_id => "project_%{identifier}" + } + } + if [type] == "publication" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "publication" + document_id => "publication_%{identifier}" + } + } + if [type] == "service" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "service" + document_id => "service_%{identifier}" + } + } +} diff --git a/logstash/pipeline/conf/sync_table.conf b/logstash/pipeline/conf/sync_table.conf new file mode 100644 index 00000000..54e06132 --- /dev/null +++ b/logstash/pipeline/conf/sync_table.conf @@ -0,0 +1,435 @@ +# This file has been generated by `generate_logstash_config.py` +# file, placed in `src/setup/logstash` +# ------------------------------------------------------------- +input { + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_dataset.sql" + type => "dataset" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_dataset.sql" + type => "rm_dataset" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_event.sql" + type => "event" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_event.sql" + type => "rm_event" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_experiment.sql" + type => "experiment" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_experiment.sql" + type => "rm_experiment" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_ml_model.sql" + type => "ml_model" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_ml_model.sql" + type => "rm_ml_model" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_news.sql" + type => "news" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_news.sql" + type => "rm_news" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_organisation.sql" + type => "organisation" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_organisation.sql" + type => "rm_organisation" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_project.sql" + type => "project" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_project.sql" + type => "rm_project" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_publication.sql" + type => "publication" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_publication.sql" + type => "rm_publication" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_service.sql" + type => "service" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_service.sql" + type => "rm_service" + } +} +filter { + mutate { + remove_field => ["@version", "@timestamp"] + } +} +output { + if [type] == "dataset" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "dataset" + document_id => "dataset_%{identifier}" + } + } + if [type] == "rm_dataset" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "dataset" + document_id => "dataset_%{identifier}" + } + } + if [type] == "event" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "event" + document_id => "event_%{identifier}" + } + } + if [type] == "rm_event" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "event" + document_id => "event_%{identifier}" + } + } + if [type] == "experiment" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "experiment" + document_id => "experiment_%{identifier}" + } + } + if [type] == "rm_experiment" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "experiment" + document_id => "experiment_%{identifier}" + } + } + if [type] == "ml_model" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "ml_model" + document_id => "ml_model_%{identifier}" + } + } + if [type] == "rm_ml_model" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "ml_model" + document_id => "ml_model_%{identifier}" + } + } + if [type] == "news" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "news" + document_id => "news_%{identifier}" + } + } + if [type] == "rm_news" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "news" + document_id => "news_%{identifier}" + } + } + if [type] == "organisation" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "organisation" + document_id => "organisation_%{identifier}" + } + } + if [type] == "rm_organisation" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "organisation" + document_id => "organisation_%{identifier}" + } + } + if [type] == "project" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "project" + document_id => "project_%{identifier}" + } + } + if [type] == "rm_project" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "project" + document_id => "project_%{identifier}" + } + } + if [type] == "publication" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "publication" + document_id => "publication_%{identifier}" + } + } + if [type] == "rm_publication" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "publication" + document_id => "publication_%{identifier}" + } + } + if [type] == "service" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "service" + document_id => "service_%{identifier}" + } + } + if [type] == "rm_service" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "service" + document_id => "service_%{identifier}" + } + } +} diff --git a/logstash/pipeline/sql/init_dataset.sql b/logstash/pipeline/sql/init_dataset.sql new file mode 100644 index 00000000..bc05e4f8 --- /dev/null +++ b/logstash/pipeline/sql/init_dataset.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT dataset.identifier, dataset.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, issn +FROM aiod.dataset +INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier +WHERE aiod.dataset.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_event.sql b/logstash/pipeline/sql/init_event.sql new file mode 100644 index 00000000..a86defc0 --- /dev/null +++ b/logstash/pipeline/sql/init_event.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT event.identifier, event.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.event +INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier +WHERE aiod.event.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_experiment.sql b/logstash/pipeline/sql/init_experiment.sql new file mode 100644 index 00000000..ced51834 --- /dev/null +++ b/logstash/pipeline/sql/init_experiment.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT experiment.identifier, experiment.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.experiment +INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier +WHERE aiod.experiment.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql new file mode 100644 index 00000000..7f42db9c --- /dev/null +++ b/logstash/pipeline/sql/init_ml_model.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT ml_model.identifier, ml_model.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.ml_model +INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier +WHERE aiod.ml_model.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql new file mode 100644 index 00000000..89467d50 --- /dev/null +++ b/logstash/pipeline/sql/init_news.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, headline, alternative_headline +FROM aiod.news +INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier +WHERE aiod.news.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_organisation.sql b/logstash/pipeline/sql/init_organisation.sql new file mode 100644 index 00000000..8b8093ea --- /dev/null +++ b/logstash/pipeline/sql/init_organisation.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT organisation.identifier, organisation.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, legal_name +FROM aiod.organisation +INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier +WHERE aiod.organisation.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_project.sql b/logstash/pipeline/sql/init_project.sql new file mode 100644 index 00000000..40acfd85 --- /dev/null +++ b/logstash/pipeline/sql/init_project.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT project.identifier, project.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.project +INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier +WHERE aiod.project.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_publication.sql b/logstash/pipeline/sql/init_publication.sql new file mode 100644 index 00000000..900793f5 --- /dev/null +++ b/logstash/pipeline/sql/init_publication.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT publication.identifier, publication.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, isbn, issn +FROM aiod.publication +INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier +WHERE aiod.publication.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_service.sql b/logstash/pipeline/sql/init_service.sql new file mode 100644 index 00000000..19c1dc6e --- /dev/null +++ b/logstash/pipeline/sql/init_service.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT service.identifier, service.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, slogan +FROM aiod.service +INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier +WHERE aiod.service.date_deleted IS NULL diff --git a/logstash/pipeline/sql/rm_dataset.sql b/logstash/pipeline/sql/rm_dataset.sql new file mode 100644 index 00000000..8967362b --- /dev/null +++ b/logstash/pipeline/sql/rm_dataset.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT dataset.identifier +FROM aiod.dataset +WHERE aiod.dataset.date_deleted IS NOT NULL AND aiod.dataset.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_event.sql b/logstash/pipeline/sql/rm_event.sql new file mode 100644 index 00000000..82e29c2e --- /dev/null +++ b/logstash/pipeline/sql/rm_event.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT event.identifier +FROM aiod.event +WHERE aiod.event.date_deleted IS NOT NULL AND aiod.event.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_experiment.sql b/logstash/pipeline/sql/rm_experiment.sql new file mode 100644 index 00000000..49492e0a --- /dev/null +++ b/logstash/pipeline/sql/rm_experiment.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT experiment.identifier +FROM aiod.experiment +WHERE aiod.experiment.date_deleted IS NOT NULL AND aiod.experiment.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_ml_model.sql b/logstash/pipeline/sql/rm_ml_model.sql new file mode 100644 index 00000000..60083eee --- /dev/null +++ b/logstash/pipeline/sql/rm_ml_model.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT ml_model.identifier +FROM aiod.ml_model +WHERE aiod.ml_model.date_deleted IS NOT NULL AND aiod.ml_model.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_news.sql b/logstash/pipeline/sql/rm_news.sql new file mode 100644 index 00000000..00e821c9 --- /dev/null +++ b/logstash/pipeline/sql/rm_news.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT news.identifier +FROM aiod.news +WHERE aiod.news.date_deleted IS NOT NULL AND aiod.news.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_organisation.sql b/logstash/pipeline/sql/rm_organisation.sql new file mode 100644 index 00000000..f48384c4 --- /dev/null +++ b/logstash/pipeline/sql/rm_organisation.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT organisation.identifier +FROM aiod.organisation +WHERE aiod.organisation.date_deleted IS NOT NULL AND aiod.organisation.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_project.sql b/logstash/pipeline/sql/rm_project.sql new file mode 100644 index 00000000..114a4afc --- /dev/null +++ b/logstash/pipeline/sql/rm_project.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT project.identifier +FROM aiod.project +WHERE aiod.project.date_deleted IS NOT NULL AND aiod.project.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_publication.sql b/logstash/pipeline/sql/rm_publication.sql new file mode 100644 index 00000000..2430d33a --- /dev/null +++ b/logstash/pipeline/sql/rm_publication.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT publication.identifier +FROM aiod.publication +WHERE aiod.publication.date_deleted IS NOT NULL AND aiod.publication.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_service.sql b/logstash/pipeline/sql/rm_service.sql new file mode 100644 index 00000000..4aa10aa3 --- /dev/null +++ b/logstash/pipeline/sql/rm_service.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT service.identifier +FROM aiod.service +WHERE aiod.service.date_deleted IS NOT NULL AND aiod.service.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/sync_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql new file mode 100644 index 00000000..f9098c01 --- /dev/null +++ b/logstash/pipeline/sql/sync_dataset.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT dataset.identifier, dataset.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, issn +FROM aiod.dataset +INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier +WHERE aiod.dataset.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_event.sql b/logstash/pipeline/sql/sync_event.sql new file mode 100644 index 00000000..192cd2e9 --- /dev/null +++ b/logstash/pipeline/sql/sync_event.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT event.identifier, event.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.event +INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier +WHERE aiod.event.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql new file mode 100644 index 00000000..32adf2a0 --- /dev/null +++ b/logstash/pipeline/sql/sync_experiment.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT experiment.identifier, experiment.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.experiment +INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier +WHERE aiod.experiment.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql new file mode 100644 index 00000000..c4224895 --- /dev/null +++ b/logstash/pipeline/sql/sync_ml_model.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT ml_model.identifier, ml_model.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.ml_model +INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier +WHERE aiod.ml_model.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql new file mode 100644 index 00000000..89870644 --- /dev/null +++ b/logstash/pipeline/sql/sync_news.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, headline, alternative_headline +FROM aiod.news +INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier +WHERE aiod.news.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql new file mode 100644 index 00000000..9c058cfa --- /dev/null +++ b/logstash/pipeline/sql/sync_organisation.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT organisation.identifier, organisation.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, legal_name +FROM aiod.organisation +INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier +WHERE aiod.organisation.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_project.sql b/logstash/pipeline/sql/sync_project.sql new file mode 100644 index 00000000..81d0c95a --- /dev/null +++ b/logstash/pipeline/sql/sync_project.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT project.identifier, project.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.project +INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier +WHERE aiod.project.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_publication.sql b/logstash/pipeline/sql/sync_publication.sql new file mode 100644 index 00000000..48577b9c --- /dev/null +++ b/logstash/pipeline/sql/sync_publication.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT publication.identifier, publication.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, isbn, issn +FROM aiod.publication +INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier +WHERE aiod.publication.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_service.sql b/logstash/pipeline/sql/sync_service.sql new file mode 100644 index 00000000..2566c0ae --- /dev/null +++ b/logstash/pipeline/sql/sync_service.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config.py` +-- file, placed in `src/setup/logstash` +-- ------------------------------------------------------------- +SELECT service.identifier, service.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, slogan +FROM aiod.service +INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier +WHERE aiod.service.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/src/tests/resources/elasticsearch/dataset_search.json b/src/tests/resources/elasticsearch/dataset_search.json index 4433362d..9445ce69 100644 --- a/src/tests/resources/elasticsearch/dataset_search.json +++ b/src/tests/resources/elasticsearch/dataset_search.json @@ -1,5 +1,5 @@ { - "took" : 4, + "took" : 5, "timed_out" : false, "_shards" : { "total" : 1, @@ -9,43 +9,26 @@ }, "hits" : { "total" : { - "value" : 83, + "value" : 1, "relation" : "eq" }, - "max_score" : 0.74700636, + "max_score" : null, "hits" : [ { "_index" : "dataset", - "_id" : "dataset_104", - "_score" : 0.74700636, - "_ignored" : [ - "description.keyword" - ], + "_id" : "dataset_1", + "_score" : null, "_source" : { - "version" : "1.0.1", - "same_as" : "https://www.example.com/resource/this_resource", - "asset_identifier" : 106, - "identifier" : 104, - "resource_identifier" : 106, - "description" : "A description.", - "temporal_coverage" : "", - "type" : "dataset", - "platform_identifier" : "287", - "date_published" : "2023-09-01T00:00:00.000Z", + "identifier" : 1, "date_modified" : "2023-09-01T00:00:00.000Z", - "application_area" : [ ], - "name" : "trondheim-simulator", - "measurement_technique" : "", - "license" : "https://www.example.com/resource/this_resource", - "issn" : "00000000", - "date_created" : "2023-09-01T00:00:00.000Z", - "@version" : "1", - "platform" : "ai4experiments", - "status" : "draft", - "@timestamp" : "2023-09-01T00:00:00.000Z" + "name" : "A name.", + "plain" : "A plain text description.", + "issn" : "20493630", + "type" : "dataset", + "html" : "An html description." }, "sort" : [ - 1 + 1 ] } ] diff --git a/src/tests/resources/elasticsearch/event_search.json b/src/tests/resources/elasticsearch/event_search.json index cc350fa0..302cdad1 100644 --- a/src/tests/resources/elasticsearch/event_search.json +++ b/src/tests/resources/elasticsearch/event_search.json @@ -1,5 +1,5 @@ { - "took" : 2, + "took" : 1, "timed_out" : false, "_shards" : { "total" : 1, @@ -12,42 +12,22 @@ "value" : 1, "relation" : "eq" }, - "max_score" : 0.2876821, + "max_score" : null, "hits" : [ { "_index" : "event", "_id" : "event_1", - "_score" : 0.2876821, + "_score" : null, "_source" : { - "type" : "event", - "end_date" : "2023-09-01T00:00:00.000Z", - "mode" : "offline", - "description" : "A description.", - "platform_identifier" : "1", - "application_area" : [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" - ], - "registration_link" : "https://example.com/registration-form", - "name" : "The name of the Event", - "@version" : "1", - "event_status" : "scheduled", - "schedule" : "10:00-10:30: Opening. 10:30-11:00 ...", - "platform" : "example", - "organiser_type" : "person", - "start_date" : "2023-09-01T00:00:00.000Z", - "same_as" : "https://www.example.com/resource/this_resource", "identifier" : 1, - "resource_identifier" : 374, "date_modified" : "2023-09-01T00:00:00.000Z", - "organiser_identifier" : 2, - "date_created" : "2023-09-01T00:00:00.000Z", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "status" : "draft" + "name" : "A name.", + "plain" : "A plain text description.", + "type" : "event", + "html" : "An html description." }, "sort" : [ - 1 + 1 ] } ] diff --git a/src/tests/resources/elasticsearch/experiment_search.json b/src/tests/resources/elasticsearch/experiment_search.json index a58679e3..dfceb200 100644 --- a/src/tests/resources/elasticsearch/experiment_search.json +++ b/src/tests/resources/elasticsearch/experiment_search.json @@ -1,5 +1,5 @@ { - "took" : 4, + "took" : 1, "timed_out" : false, "_shards" : { "total" : 1, @@ -9,43 +9,25 @@ }, "hits" : { "total" : { - "value" : 21, + "value" : 1, "relation" : "eq" }, - "max_score" : 0.6878389, + "max_score" : null, "hits" : [ { "_index" : "experiment", - "_id" : "experiment_32", - "_score" : 0.6878389, - "_ignored" : [ - "description.keyword" - ], + "_id" : "experiment_1", + "_score" : null, "_source" : { - "version" : "1.0", - "same_as" : "https://www.example.com/resource/this_resource", - "asset_identifier" : 169, - "identifier" : 32, - "resource_identifier" : 170, - "description" : "A description.", - "reproducibility_explanation" : "", - "type" : "experiment", - "platform_identifier" : "397", - "date_published" : "2023-09-01T00:00:00.000Z", - "experimental_workflow" : "", + "identifier" : 1, "date_modified" : "2023-09-01T00:00:00.000Z", - "execution_settings" : "", - "application_area" : [ ], - "name" : "aqpredvisualize", - "@version" : "1", - "license" : "https://www.example.com/resource/this_resource", - "date_created" : "2023-09-01T00:00:00.000Z", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "platform" : "ai4experiments", - "status" : "draft" + "name" : "A name.", + "plain" : "A plain text description.", + "type" : "experiment", + "html" : "An html description." }, "sort" : [ - 1 + 1 ] } ] diff --git a/src/tests/resources/elasticsearch/ml_model_search.json b/src/tests/resources/elasticsearch/ml_model_search.json index 1094dbcf..ce6e8bc0 100644 --- a/src/tests/resources/elasticsearch/ml_model_search.json +++ b/src/tests/resources/elasticsearch/ml_model_search.json @@ -1,5 +1,5 @@ { - "took" : 24, + "took" : 1, "timed_out" : false, "_shards" : { "total" : 1, @@ -9,41 +9,25 @@ }, "hits" : { "total" : { - "value" : 110, + "value" : 1, "relation" : "eq" }, - "max_score" : 0.6961925, + "max_score" : null, "hits" : [ { "_index" : "ml_model", - "_id" : "ml_model_168", - "_score" : 0.6961925, - "_ignored" : [ - "description.keyword" - ], + "_id" : "ml_model_1", + "_score" : null, "_source" : { - "ml_model_type" : "", - "version" : "1.0.0", - "same_as" : "https://www.example.com/resource/this_resource", - "asset_identifier" : 349, - "identifier" : 168, - "resource_identifier" : 350, - "description" : "A description.", - "type" : "ml_model", - "platform_identifier" : "316", - "date_published" : "2023-09-01T00:00:00.000Z", + "identifier" : 1, "date_modified" : "2023-09-01T00:00:00.000Z", - "application_area" : [ ], - "name" : "EntityRecognizer", - "@version" : "1", - "license" : "https://www.example.com/resource/this_resource", - "date_created" : "2023-09-01T00:00:00.000Z", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "platform" : "ai4experiments", - "status" : "draft" + "name" : "A name.", + "plain" : "A plain text description.", + "type" : "ml_model", + "html" : "An html description." }, "sort" : [ - 1 + 1 ] } ] diff --git a/src/tests/resources/elasticsearch/news_search.json b/src/tests/resources/elasticsearch/news_search.json index e75a6bcf..e08ee850 100644 --- a/src/tests/resources/elasticsearch/news_search.json +++ b/src/tests/resources/elasticsearch/news_search.json @@ -1,5 +1,5 @@ { - "took" : 4, + "took" : 1, "timed_out" : false, "_shards" : { "total" : 1, @@ -12,36 +12,24 @@ "value" : 1, "relation" : "eq" }, - "max_score" : 0.2876821, + "max_score" : null, "hits" : [ { "_index" : "news", "_id" : "news_1", - "_score" : 0.2876821, + "_score" : null, "_source" : { - "same_as" : "https://www.example.com/resource/this_resource", - "type" : "news", "identifier" : 1, - "resource_identifier" : 371, - "description" : "A description.", - "platform_identifier" : "1", + "headline" : "A headline.", "date_modified" : "2023-09-01T00:00:00.000Z", - "application_area" : [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" - ], - "name" : "Name of the News item", - "@version" : "1", - "headline" : "A headline to show on top of the page.", - "alternative_headline" : "An alternative headline.", - "date_created" : "2023-09-01T00:00:00.000Z", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "platform" : "example", - "status" : "draft" + "name" : "A name.", + "plain" : "A plain text description.", + "type" : "news", + "html" : "An html description.", + "alternative_headline" : "An alternative headline." }, "sort" : [ - 1 + 1 ] } ] diff --git a/src/tests/resources/elasticsearch/organisation_search.json b/src/tests/resources/elasticsearch/organisation_search.json index 573df1b7..b1288671 100644 --- a/src/tests/resources/elasticsearch/organisation_search.json +++ b/src/tests/resources/elasticsearch/organisation_search.json @@ -1,5 +1,5 @@ { - "took" : 2, + "took" : 1, "timed_out" : false, "_shards" : { "total" : 1, @@ -12,38 +12,23 @@ "value" : 1, "relation" : "eq" }, - "max_score" : 0.2876821, + "max_score" : null, "hits" : [ { "_index" : "organisation", "_id" : "organisation_1", - "_score" : 0.2876821, + "_score" : null, "_source" : { - "agent" : "organisation", - "same_as" : "https://www.example.com/resource/this_resource", - "date_founded" : "2022-01-01", "identifier" : 1, - "resource_identifier" : 372, - "description" : "A description.", - "legal_name" : "The legal Organisation Name", - "type" : "organisation", - "platform_identifier" : "1", - "organisation_type" : "Research Institution", "date_modified" : "2023-09-01T00:00:00.000Z", - "application_area" : [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" - ], - "name" : "The name of this organisation", - "@version" : "1", - "date_created" : "2023-09-01T00:00:00.000Z", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "platform" : "example", - "status" : "draft" + "name" : "A name.", + "plain" : "A plain text description.", + "type" : "organisation", + "html" : "An html description.", + "legal_name" : "A legal name." }, "sort" : [ - 1 + 1 ] } ] diff --git a/src/tests/resources/elasticsearch/project_search.json b/src/tests/resources/elasticsearch/project_search.json index 98927eb1..d83d3c6b 100644 --- a/src/tests/resources/elasticsearch/project_search.json +++ b/src/tests/resources/elasticsearch/project_search.json @@ -1,5 +1,5 @@ { - "took" : 4, + "took" : 1, "timed_out" : false, "_shards" : { "total" : 1, @@ -12,39 +12,22 @@ "value" : 1, "relation" : "eq" }, - "max_score" : 0.2876821, + "max_score" : null, "hits" : [ { "_index" : "project", "_id" : "project_1", - "_score" : 0.2876821, + "_score" : null, "_source" : { - "coordinator_identifier" : 1, - "start_date" : "2023-09-01T00:00:00.000Z", - "same_as" : "https://www.example.com/resource/this_resource", - "end_date" : "2023-09-01T00:00:00.000Z", "identifier" : 1, - "resource_identifier" : 375, - "description" : "A description.", - "type" : "project", - "platform_identifier" : "1", "date_modified" : "2023-09-01T00:00:00.000Z", - "total_cost_euro" : 1.0E7, - "application_area" : [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" - ], - "name" : "Name of the Project", - "coordinator_name" : "The name of this organisation", - "@version" : "1", - "date_created" : "2023-09-01T00:00:00.000Z", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "platform" : "example", - "status" : "draft" + "name" : "A name.", + "plain" : "A plain text description.", + "type" : "project", + "html" : "An html description." }, "sort" : [ - 1 + 1 ] } ] diff --git a/src/tests/resources/elasticsearch/publication_search.json b/src/tests/resources/elasticsearch/publication_search.json index 67adb2b5..ed56038b 100644 --- a/src/tests/resources/elasticsearch/publication_search.json +++ b/src/tests/resources/elasticsearch/publication_search.json @@ -12,43 +12,24 @@ "value" : 1, "relation" : "eq" }, - "max_score" : 0.2876821, + "max_score" : null, "hits" : [ { "_index" : "publication", "_id" : "publication_1", - "_score" : 0.2876821, + "_score" : null, "_source" : { - "type" : "publication", - "description" : "A description.", - "platform_identifier" : "1", - "date_published" : "2023-09-01T00:00:00.000Z", - "publication_type" : "journal", - "application_area" : [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" - ], - "name" : "The name of this publication", - "isbn" : "9783161484100", - "issn" : "20493630", - "@version" : "1", - "platform" : "example", - "knowledge_asset_identifier" : null, - "version" : "1.1.0", - "same_as" : "https://www.example.com/resource/this_resource", - "asset_identifier" : 370, "identifier" : 1, - "resource_identifier" : 376, "date_modified" : "2023-09-01T00:00:00.000Z", - "license" : "https://www.example.com/resource/this_resource", - "permanent_identifier" : "http://dx.doi.org/10.1093/ajae/aaq063", - "date_created" : "2023-09-01T00:00:00.000Z", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "status" : "draft" + "name" : "A name.", + "plain" : "A plain text description.", + "issn" : "20493630", + "type" : "publication", + "isbn" : "9783161484100", + "html" : "An html description." }, "sort" : [ - 1 + 1 ] } ] diff --git a/src/tests/resources/elasticsearch/service_search.json b/src/tests/resources/elasticsearch/service_search.json index d889facf..f2a151af 100644 --- a/src/tests/resources/elasticsearch/service_search.json +++ b/src/tests/resources/elasticsearch/service_search.json @@ -1,5 +1,5 @@ { - "took" : 1, + "took" : 2, "timed_out" : false, "_shards" : { "total" : 1, @@ -12,36 +12,23 @@ "value" : 1, "relation" : "eq" }, - "max_score" : 0.2876821, + "max_score" : null, "hits" : [ { "_index" : "service", "_id" : "service_1", - "_score" : 0.2876821, + "_score" : null, "_source" : { - "same_as" : "https://www.example.com/resource/this_resource", - "type" : "service", "identifier" : 1, - "resource_identifier" : 377, - "description" : "A description.", - "platform_identifier" : "1", + "slogan" : "A slogan.", "date_modified" : "2023-09-01T00:00:00.000Z", - "slogan" : "Making your Smart Paradigm Shifts more Disruptive", - "application_area" : [ - "Fraud Prevention", - "Voice Assistance", - "Disease Classification" - ], - "name" : "The name of this service", - "@version" : "1", - "date_created" : "2023-09-01T00:00:00.000Z", - "@timestamp" : "2023-09-01T00:00:00.000Z", - "platform" : "example", - "status" : "draft", - "terms_of_service" : "Your use of this service is subject to the following terms: [...]." + "name" : "A name.", + "plain" : "A plain text description.", + "type" : "service", + "html" : "An html description." }, "sort" : [ - 1 + 1 ] } ] diff --git a/src/tests/routers/search_routers/test_search_router_datasets.py b/src/tests/routers/search_routers/test_search_router_datasets.py deleted file mode 100644 index ff76ee16..00000000 --- a/src/tests/routers/search_routers/test_search_router_datasets.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import json - -from unittest.mock import Mock -from starlette.testclient import TestClient -from tests.testutils.paths import path_test_resources -from routers.search_routers import SearchRouterDatasets, router_list - -def test_search_happy_path(client: TestClient): - """Tests the Datasets search""" - - # Get the correspondent router instance from the search routers list - search_router = None - for router_instance in router_list: - if isinstance(router_instance, SearchRouterDatasets): - search_router = router_instance - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - mocked_file = os.path.join(resources_path, "dataset_search.json") - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) - response = client.get("/search/datasets/v1", - params={'search_query': "description"}) - - # Assert the correct execution and get the response - assert response.status_code == 200, response.json() - resource = response.json()['resources'][0] - - # Test the response - assert resource['version'] == "1.0.1" - assert resource['same_as'] == "https://www.example.com/resource/this_resource" - assert resource['identifier'] == 104 - assert resource['description'] == "A description." - assert resource['temporal_coverage'] == "" - assert resource['platform_identifier'] == "287" - assert resource['date_published'] == "2023-09-01T00:00:00+00:00" - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['application_area'] == [ ] - assert resource['name'] == "trondheim-simulator" - assert resource['measurement_technique'] == "" - assert resource['license'] == "https://www.example.com/resource/this_resource" - assert resource['issn'] == "00000000" - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['platform'] == "ai4experiments" - assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_events.py b/src/tests/routers/search_routers/test_search_router_events.py deleted file mode 100644 index a3998aad..00000000 --- a/src/tests/routers/search_routers/test_search_router_events.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import json - -from unittest.mock import Mock -from starlette.testclient import TestClient -from tests.testutils.paths import path_test_resources -from routers.search_routers import SearchRouterEvents, router_list - -def test_search_happy_path(client: TestClient): - """Tests the Events search""" - - # Get the correspondent router instance from the search routers list - search_router = None - for router_instance in router_list: - if isinstance(router_instance, SearchRouterEvents): - search_router = router_instance - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - mocked_file = os.path.join(resources_path, "event_search.json") - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) - response = client.get("/search/events/v1", - params={'search_query': "description"}) - - # Assert the correct execution and get the response - assert response.status_code == 200, response.json() - resource = response.json()['resources'][0] - - # Test the response - assert resource['end_date'] == "2023-09-01T00:00:00+00:00" - assert resource['mode'] == "offline" - assert resource['description'] == "A description." - assert resource['platform_identifier'] == "1" - assert resource['application_area'] == ["Fraud Prevention", "Voice Assistance", "Disease Classification"] - assert resource['registration_link'] == "https://example.com/registration-form" - assert resource['name'] == "The name of the Event" - assert resource['schedule'] == "10:00-10:30: Opening. 10:30-11:00 ..." - assert resource['platform'] == "example" - assert resource['start_date'] == "2023-09-01T00:00:00+00:00" - assert resource['same_as'] == "https://www.example.com/resource/this_resource" - assert resource['identifier'] == 1 - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_experiments.py b/src/tests/routers/search_routers/test_search_router_experiments.py deleted file mode 100644 index 1f4f3ebc..00000000 --- a/src/tests/routers/search_routers/test_search_router_experiments.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import json - -from unittest.mock import Mock -from starlette.testclient import TestClient -from tests.testutils.paths import path_test_resources -from routers.search_routers import SearchRouterExperiments, router_list - -def test_search_happy_path(client: TestClient): - """Tests the Experiments search""" - - # Get the correspondent router instance from the search routers list - search_router = None - for router_instance in router_list: - if isinstance(router_instance, SearchRouterExperiments): - search_router = router_instance - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - mocked_file = os.path.join(resources_path, "experiment_search.json") - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) - response = client.get("/search/experiments/v1", - params={'search_query': "description"}) - - # Assert the correct execution and get the response - assert response.status_code == 200, response.json() - resource = response.json()['resources'][0] - - # Test the response - assert resource['version'] == "1.0" - assert resource['same_as'] == "https://www.example.com/resource/this_resource" - assert resource['identifier'] == 32 - assert resource['description'] == "A description." - assert resource['reproducibility_explanation'] == "" - assert resource['platform_identifier'] == "397" - assert resource['date_published'] == "2023-09-01T00:00:00+00:00" - assert resource['experimental_workflow'] == "" - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['execution_settings'] == "" - assert resource['application_area'] == [ ] - assert resource['name'] == "aqpredvisualize" - assert resource['license'] == "https://www.example.com/resource/this_resource" - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['platform'] == "ai4experiments" - assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_ml_model.py b/src/tests/routers/search_routers/test_search_router_ml_model.py deleted file mode 100644 index 27b497a3..00000000 --- a/src/tests/routers/search_routers/test_search_router_ml_model.py +++ /dev/null @@ -1,46 +0,0 @@ -import os -import json - -from unittest.mock import Mock -from starlette.testclient import TestClient -from tests.testutils.paths import path_test_resources -from routers.search_routers import SearchRouterMLModels, router_list - -def test_search_happy_path(client: TestClient): - """Tests the MLModels search""" - - # Get the correspondent router instance from the search routers list - search_router = None - for router_instance in router_list: - if isinstance(router_instance, SearchRouterMLModels): - search_router = router_instance - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - mocked_file = os.path.join(resources_path, "ml_model_search.json") - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) - response = client.get("/search/ml_models/v1", - params={'search_query': "description"}) - - # Assert the correct execution and get the response - assert response.status_code == 200, response.json() - resource = response.json()['resources'][0] - - # Test the response - assert resource['version'] == "1.0.0" - assert resource['same_as'] == "https://www.example.com/resource/this_resource" - assert resource['identifier'] == 168 - assert resource['description'] == "A description." - assert resource['platform_identifier'] == "316" - assert resource['date_published'] == "2023-09-01T00:00:00+00:00" - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['application_area'] == [ ] - assert resource['name'] == "EntityRecognizer" - assert resource['license'] == "https://www.example.com/resource/this_resource" - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['platform'] == "ai4experiments" - assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_news.py b/src/tests/routers/search_routers/test_search_router_news.py deleted file mode 100644 index b9665dc2..00000000 --- a/src/tests/routers/search_routers/test_search_router_news.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import json - -from unittest.mock import Mock -from starlette.testclient import TestClient -from tests.testutils.paths import path_test_resources -from routers.search_routers import SearchRouterNews, router_list - -def test_search_happy_path(client: TestClient): - """Tests the News search""" - - # Get the correspondent router instance from the search routers list - search_router = None - for router_instance in router_list: - if isinstance(router_instance, SearchRouterNews): - search_router = router_instance - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - mocked_file = os.path.join(resources_path, "news_search.json") - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) - response = client.get("/search/news/v1", - params={'search_query': "description"}) - - # Assert the correct execution and get the response - assert response.status_code == 200, response.json() - resource = response.json()['resources'][0] - - # Test the response - assert resource['same_as'] == "https://www.example.com/resource/this_resource" - assert resource['identifier'] == 1 - assert resource['description'] == "A description." - assert resource['platform_identifier'] == "1" - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['application_area'] == ["Fraud Prevention", "Voice Assistance", "Disease Classification"] - assert resource['name'] == "Name of the News item" - assert resource['headline'] == "A headline to show on top of the page." - assert resource['alternative_headline'] == "An alternative headline." - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['platform'] == "example" - assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_organisations.py b/src/tests/routers/search_routers/test_search_router_organisations.py deleted file mode 100644 index 4ae5b351..00000000 --- a/src/tests/routers/search_routers/test_search_router_organisations.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import json - -from unittest.mock import Mock -from starlette.testclient import TestClient -from tests.testutils.paths import path_test_resources -from routers.search_routers import SearchRouterOrganisations, router_list - -def test_search_happy_path(client: TestClient): - """Tests the Organisations search""" - - # Get the correspondent router instance from the search routers list - search_router = None - for router_instance in router_list: - if isinstance(router_instance, SearchRouterOrganisations): - search_router = router_instance - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - mocked_file = os.path.join(resources_path, "organisation_search.json") - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) - response = client.get("/search/organisations/v1", - params={'search_query': "description"}) - - # Assert the correct execution and get the response - assert response.status_code == 200, response.json() - resource = response.json()['resources'][0] - - # Test the response - assert resource['same_as'] == "https://www.example.com/resource/this_resource" - assert resource['date_founded'] == "2022-01-01" - assert resource['identifier'] == 1 - assert resource['description'] == "A description." - assert resource['legal_name'] == "The legal Organisation Name" - assert resource['platform_identifier'] == "1" - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['application_area'] == ["Fraud Prevention", "Voice Assistance", "Disease Classification"] - assert resource['name'] == "The name of this organisation" - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['platform'] == "example" - assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_projects.py b/src/tests/routers/search_routers/test_search_router_projects.py deleted file mode 100644 index 4a3251c4..00000000 --- a/src/tests/routers/search_routers/test_search_router_projects.py +++ /dev/null @@ -1,46 +0,0 @@ -import os -import json - -from unittest.mock import Mock -from starlette.testclient import TestClient -from tests.testutils.paths import path_test_resources -from routers.search_routers import SearchRouterProjects, router_list - -def test_search_happy_path(client: TestClient): - """Tests the Projects search""" - - # Get the correspondent router instance from the search routers list - search_router = None - for router_instance in router_list: - if isinstance(router_instance, SearchRouterProjects): - search_router = router_instance - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - mocked_file = os.path.join(resources_path, "project_search.json") - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) - response = client.get("/search/projects/v1", - params={'search_query': "description"}) - - # Assert the correct execution and get the response - assert response.status_code == 200, response.json() - resource = response.json()['resources'][0] - - # Test the response - assert resource['start_date'] == "2023-09-01T00:00:00+00:00" - assert resource['same_as'] == "https://www.example.com/resource/this_resource" - assert resource['end_date'] == "2023-09-01T00:00:00+00:00" - assert resource['identifier'] == 1 - assert resource['description'] == "A description." - assert resource['platform_identifier'] == "1" - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['total_cost_euro'] == 1.0E7 - assert resource['application_area'] == [ "Fraud Prevention", "Voice Assistance", "Disease Classification"] - assert resource['name'] == "Name of the Project" - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['platform'] == "example" - assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_publications.py b/src/tests/routers/search_routers/test_search_router_publications.py deleted file mode 100644 index b7c0bf9f..00000000 --- a/src/tests/routers/search_routers/test_search_router_publications.py +++ /dev/null @@ -1,49 +0,0 @@ -import os -import json - -from unittest.mock import Mock -from starlette.testclient import TestClient -from tests.testutils.paths import path_test_resources -from routers.search_routers import SearchRouterPublications, router_list - -def test_search_happy_path(client: TestClient): - """Tests the Publications search""" - - # Get the correspondent router instance from the search routers list - search_router = None - for router_instance in router_list: - if isinstance(router_instance, SearchRouterPublications): - search_router = router_instance - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - mocked_file = os.path.join(resources_path, "publication_search.json") - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) - response = client.get("/search/publications/v1", - params={'search_query': "description"}) - - # Assert the correct execution and get the response - assert response.status_code == 200, response.json() - resource = response.json()['resources'][0] - - # Test the response - assert resource['description'] == "A description." - assert resource['platform_identifier'] == "1" - assert resource['date_published'] == "2023-09-01T00:00:00+00:00" - assert resource['application_area'] == ["Fraud Prevention", "Voice Assistance", "Disease Classification"] - assert resource['name'] == "The name of this publication" - assert resource['isbn'] == "9783161484100" - assert resource['issn'] == "20493630" - assert resource['platform'] == "example" - assert resource['version'] == "1.1.0" - assert resource['same_as'] == "https://www.example.com/resource/this_resource" - assert resource['identifier'] == 1 - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['license'] == "https://www.example.com/resource/this_resource" - assert resource['permanent_identifier'] == "http://dx.doi.org/10.1093/ajae/aaq063" - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['aiod_entry']['status'] == "draft" diff --git a/src/tests/routers/search_routers/test_search_router_services.py b/src/tests/routers/search_routers/test_search_router_services.py deleted file mode 100644 index 90b615b0..00000000 --- a/src/tests/routers/search_routers/test_search_router_services.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import json - -from unittest.mock import Mock -from starlette.testclient import TestClient -from tests.testutils.paths import path_test_resources -from routers.search_routers import SearchRouterServices, router_list - -def test_search_happy_path(client: TestClient): - """Tests the Services search""" - - # Get the correspondent router instance from the search routers list - search_router = None - for router_instance in router_list: - if isinstance(router_instance, SearchRouterServices): - search_router = router_instance - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - mocked_file = os.path.join(resources_path, "service_search.json") - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) - response = client.get("/search/services/v1", - params={'search_query': "description"}) - - # Assert the correct execution and get the response - assert response.status_code == 200, response.json() - resource = response.json()['resources'][0] - - # Test the response - assert resource['same_as'] == "https://www.example.com/resource/this_resource" - assert resource['identifier'] == 1 - assert resource['description'] == "A description." - assert resource['platform_identifier'] == "1" - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - assert resource['slogan'] == "Making your Smart Paradigm Shifts more Disruptive" - assert resource['application_area'] == ["Fraud Prevention", "Voice Assistance", "Disease Classification"] - assert resource['name'] == "The name of this service" - assert resource['aiod_entry']['date_created'] == "2023-09-01T00:00:00+00:00" - assert resource['platform'] == "example" - assert resource['aiod_entry']['status'] == "draft" - assert resource['terms_of_service'] == "Your use of this service is subject to the following terms: [...]." diff --git a/src/tests/routers/search_routers/test_search_routers.py b/src/tests/routers/search_routers/test_search_routers.py new file mode 100644 index 00000000..dd0a7ce9 --- /dev/null +++ b/src/tests/routers/search_routers/test_search_routers.py @@ -0,0 +1,134 @@ +import os +import json + +from unittest.mock import Mock +from starlette.testclient import TestClient +from tests.testutils.paths import path_test_resources +import routers.search_routers as sr + +def test_search_happy_path(client: TestClient): + """Tests the search router""" + + for search_router in sr.router_list: + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + resource_file = f"{search_router.es_index}_search.json" + mocked_file = os.path.join(resources_path, resource_file) + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + search_service = f"/search/{search_router.resource_name_plural}/v1" + params = {'search_query': "description", 'get_all': False} + response = client.get(search_service, params=params) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()['resources'][0] + + # Test the common responses + assert resource['identifier'] == 1 + assert resource['name'] == "A name." + assert resource['description']['plain'] == "A plain text description." + assert resource['description']['html'] == "An html description." + assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" + + # Test the extra fields + global_fields = set(['name', 'plain', 'html']) + extra_fields = list(search_router.match_fields^global_fields) + for field in extra_fields: + assert resource[field] + +def test_search_bad_platform(client: TestClient): + """Tests the search router bad platform error""" + + for search_router in sr.router_list: + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + resource_file = f"{search_router.es_index}_search.json" + mocked_file = os.path.join(resources_path, resource_file) + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + search_service = f"/search/{search_router.resource_name_plural}/v1" + params = {'search_query': "description", 'platforms': ["bad_platform"]} + response = client.get(search_service, params=params) + + # Assert the platform error + assert response.status_code == 400, response.json() + err_msg = "The available platformas are" + assert response.json()["detail"][:len(err_msg)] == err_msg + +def test_search_bad_fields(client: TestClient): + """Tests the search router bad fields error""" + + for search_router in sr.router_list: + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + resource_file = f"{search_router.es_index}_search.json" + mocked_file = os.path.join(resources_path, resource_file) + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + search_service = f"/search/{search_router.resource_name_plural}/v1" + params = {'search_query': "description", 'search_fields': ["bad_field"]} + response = client.get(search_service, params=params) + + # Assert the platform error + assert response.status_code == 400, response.json() + err_msg = "The available search fields for this entity are" + assert response.json()["detail"][:len(err_msg)] == err_msg + +def test_search_bad_limit(client: TestClient): + """Tests the search router bad fields error""" + + for search_router in sr.router_list: + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + resource_file = f"{search_router.es_index}_search.json" + mocked_file = os.path.join(resources_path, resource_file) + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + search_service = f"/search/{search_router.resource_name_plural}/v1" + params = {'search_query': "description", 'limit': 1001} + response = client.get(search_service, params=params) + + # Assert the platform error + assert response.status_code == 400, response.json() + err_msg = "The limit should be maximum 1000." + assert response.json()["detail"][:len(err_msg)] == err_msg + +def test_search_bad_page(client: TestClient): + """Tests the search router bad fields error""" + + for search_router in sr.router_list: + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + resource_file = f"{search_router.es_index}_search.json" + mocked_file = os.path.join(resources_path, resource_file) + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + search_router.client.search = Mock(return_value=mocked_results) + search_service = f"/search/{search_router.resource_name_plural}/v1" + params = {'search_query': "description", 'page': 0} + response = client.get(search_service, params=params) + + # Assert the platform error + assert response.status_code == 400, response.json() + err_msg = "The page numbers start by 1." + assert response.json()["detail"][:len(err_msg)] == err_msg From 22d8df275ea5f6c411e1de936d45c92c13e25a15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 15 Nov 2023 15:24:54 +0100 Subject: [PATCH 50/79] Search router tests actualised --- src/setup/elasticsearch/generate_elasticsearch_indices.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/setup/elasticsearch/generate_elasticsearch_indices.py b/src/setup/elasticsearch/generate_elasticsearch_indices.py index 603a2591..b3a3067c 100755 --- a/src/setup/elasticsearch/generate_elasticsearch_indices.py +++ b/src/setup/elasticsearch/generate_elasticsearch_indices.py @@ -78,7 +78,6 @@ def main(): # Add indices with mappings for entity, fields in entities.items(): mapping = generate_mapping(entity, fields) - print(f"{entity}: {mapping}") es_client.indices.create(index=entity, body=mapping, ignore=400) if __name__ == "__main__": From 03868c3ee6ea4e2cb263dc8c18e941e96afecfdc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Wed, 15 Nov 2023 15:33:40 +0100 Subject: [PATCH 51/79] Search router tests actualised --- src/routers/search_router.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/routers/search_router.py b/src/routers/search_router.py index 2b3be77e..d0b36719 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -5,7 +5,7 @@ from fastapi import APIRouter, Depends, HTTPException, Query from pydantic import BaseModel from sqlalchemy.engine import Engine -from sqlmodel import Session, select +from sqlmodel import SQLModel, Session, select from starlette import status from authentication import get_current_user#, has_role @@ -199,7 +199,7 @@ def _cast_resource( return self._clean_structure(dict(resource)) def _db_query( - self, engine: Engine, read_class: RESOURCE, identifier: int + self, engine: Engine, read_class: Type[SQLModel], identifier: int ) -> Type[RESOURCE]: try: with Session(engine) as session: From e9f14be4fc58c28281c70c5df93f589a8007c048 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 20 Nov 2023 01:13:10 +0100 Subject: [PATCH 52/79] pre-commit passed --- .../example/resources/resource/events.json | 184 +++++++++++++++++ src/main.py | 3 +- src/routers/search_router.py | 186 +++++++++--------- src/routers/search_routers/__init__.py | 2 +- .../search_routers/search_router_datasets.py | 4 +- .../search_routers/search_router_events.py | 4 +- .../search_router_experiments.py | 4 +- .../search_routers/search_router_ml_models.py | 4 +- .../search_routers/search_router_news.py | 4 +- .../search_router_organisations.py | 4 +- .../search_routers/search_router_projects.py | 4 +- .../search_router_publications.py | 4 +- .../search_routers/search_router_services.py | 4 +- .../generate_elasticsearch_indices.py | 65 +++--- .../generate_logstash_config_files.py | 121 +++++++----- .../test_huggingface_dataset_connector.py | 74 +++---- .../search_routers/test_search_routers.py | 89 +++++---- 17 files changed, 468 insertions(+), 292 deletions(-) diff --git a/src/connectors/example/resources/resource/events.json b/src/connectors/example/resources/resource/events.json index ea54cc98..67972cdc 100644 --- a/src/connectors/example/resources/resource/events.json +++ b/src/connectors/example/resources/resource/events.json @@ -90,6 +90,190 @@ "registration_link": "https://example.com/registration-form", "status": "scheduled", "mode": "offline" + }, + { + "platform": "example", + "platform_resource_identifier": "2", + "name": "Name of the Event 2", + "description": {"plain": "A description."}, + "same_as": "https://www.example.com/resource/this_resource", + "date_published": "2022-01-01T15:15:00.000", + "version": "1.1.0", + "pid": "https://doi.org/10.1000/182", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [ + "alias 1", + "alias 2" + ], + "application_area": [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "citation": [], + "contact": [], + "creator": [], + "distribution": [ + { + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/case_study/file.pdf", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "application/pdf", + "name": "Name of this file.", + "technology_readiness_level": 1 + } + ], + "has_part": [], + "industrial_sector": [ + "Finance", + "eCommerce", + "Healthcare" + ], + "is_part_of": [], + "keyword": [ + "keyword1", + "keyword2" + ], + "location": [ + { + "address": {"country": "NED", "street": "Street Name 10", "postal_code": "1234AB"}, + "geo": {"latitude": 37.42242, "longitude": -122.08585, "elevation_millimeters": 2000} + } + ], + "relevant_link": ["https://www.example.com/a_relevant_link", "https://www.example.com/another_relevant_link"], + "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", + "media": [ + { + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/image.jpeg", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "image/jpeg", + "name": "Name of this file." + } + ], + "note": [ + { + "value": "A brief record of points or ideas about this AI resource." + } + ], + "research_area": [ + "Explainable AI", + "Physical AI" + ], + "scientific_domain": [ + "Anomaly Detection", + "Voice Recognition", + "Computer Vision." + ], + "start_date": "2021-02-03T15:15:00", + "end_date": "2022-02-03T15:15:00", + "schedule": "10:00-10:30: Opening. 10:30-11:00 ...", + "registration_link": "https://example.com/registration-form", + "status": "scheduled", + "mode": "offline" + }, + { + "platform": "example", + "platform_resource_identifier": "3", + "name": "Name of the Event 3", + "description": {"plain": "A description."}, + "same_as": "https://www.example.com/resource/this_resource", + "date_published": "2022-01-01T15:15:00.000", + "version": "1.1.0", + "pid": "https://doi.org/10.1000/182", + "aiod_entry": { + "editor": [], + "status": "draft" + }, + "alternate_name": [ + "alias 1", + "alias 2" + ], + "application_area": [ + "Fraud Prevention", + "Voice Assistance", + "Disease Classification" + ], + "citation": [], + "contact": [], + "creator": [], + "distribution": [ + { + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/case_study/file.pdf", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "application/pdf", + "name": "Name of this file.", + "technology_readiness_level": 1 + } + ], + "has_part": [], + "industrial_sector": [ + "Finance", + "eCommerce", + "Healthcare" + ], + "is_part_of": [], + "keyword": [ + "keyword1", + "keyword2" + ], + "location": [ + { + "address": {"country": "NED", "street": "Street Name 10", "postal_code": "1234AB"}, + "geo": {"latitude": 37.42242, "longitude": -122.08585, "elevation_millimeters": 2000} + } + ], + "relevant_link": ["https://www.example.com/a_relevant_link", "https://www.example.com/another_relevant_link"], + "license": "https://creativecommons.org/share-your-work/public-domain/cc0/", + "media": [ + { + "checksum": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "checksum_algorithm": "sha256", + "copyright": "2010-2020 Example Company. All rights reserved.", + "content_url": "https://www.example.com/image.jpeg", + "content_size_kb": 10000, + "date_published": "2022-01-01T15:15:00.000", + "description": "Description of this file.", + "encoding_format": "image/jpeg", + "name": "Name of this file." + } + ], + "note": [ + { + "value": "A brief record of points or ideas about this AI resource." + } + ], + "research_area": [ + "Explainable AI", + "Physical AI" + ], + "scientific_domain": [ + "Anomaly Detection", + "Voice Recognition", + "Computer Vision." + ], + "start_date": "2021-02-03T15:15:00", + "end_date": "2022-02-03T15:15:00", + "schedule": "10:00-10:30: Opening. 10:30-11:00 ...", + "registration_link": "https://example.com/registration-form", + "status": "scheduled", + "mode": "offline" } ] diff --git a/src/main.py b/src/main.py index 7f0c6b57..f87d18bb 100644 --- a/src/main.py +++ b/src/main.py @@ -21,8 +21,7 @@ from database.model.platform.platform import Platform from database.model.platform.platform_names import PlatformName from database.setup import sqlmodel_engine -from routers import (resource_routers, parent_routers, enum_routers, - search_routers) +from routers import resource_routers, parent_routers, enum_routers, search_routers def _parse_args() -> argparse.Namespace: diff --git a/src/routers/search_router.py b/src/routers/search_router.py index d0b36719..aa7d8a87 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -2,13 +2,13 @@ from typing import TypeVar, Generic, Any, Type, Annotated from elasticsearch import Elasticsearch -from fastapi import APIRouter, Depends, HTTPException, Query +from fastapi import APIRouter, HTTPException, Query from pydantic import BaseModel from sqlalchemy.engine import Engine from sqlmodel import SQLModel, Session, select from starlette import status -from authentication import get_current_user#, has_role +from database.model.concept.concept import AIoDConcept from database.model.concept.aiod_entry import AIoDEntryRead from database.model.resource_read_and_create import resource_read from database.model.platform.platform import Platform @@ -17,34 +17,35 @@ SORT = {"identifier": "asc"} LIMIT_MAX = 1000 -RESOURCE = TypeVar("RESOURCE") +RESOURCE = TypeVar("RESOURCE", bound=AIoDConcept) +# RESOURCE = TypeVar("RESOURCE") class SearchResult(BaseModel, Generic[RESOURCE]): total_hits: int - resources: list[RESOURCE] - next_offset: list | None - current_page: int - page_size: int + resources: list + limit: int + offset: int + class SearchRouter(Generic[RESOURCE], abc.ABC): """ Providing search functionality in ElasticSearch """ - + def __init__(self, client: Elasticsearch): self.client: Elasticsearch = client - + @property @abc.abstractmethod def es_index(self) -> str: """The name of the elasticsearch index""" - + @property @abc.abstractmethod def resource_name_plural(self) -> str: """The name of the resource (plural)""" - + @property def key_translations(self) -> dict[str, str]: """If an attribute is called differently in elasticsearch than in our @@ -52,39 +53,37 @@ def key_translations(self) -> dict[str, str]: should be the name in elasticsearch, the value the name in our data model.""" return {} - + @property @abc.abstractmethod def resource_class(self) -> RESOURCE: """The resource class""" - + @property @abc.abstractmethod def match_fields(self) -> set: """The set of indexed fields""" - + def create(self, engine: Engine, url_prefix: str) -> APIRouter: router = APIRouter() read_class = resource_read(self.resource_class) # type: ignore - - @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", - tags=["search"]) + + @router.get(f"{url_prefix}/search/{self.resource_name_plural}/v1", tags=["search"]) def search( platforms: Annotated[list[str] | None, Query()] = None, search_query: str = "", search_fields: Annotated[list[str] | None, Query()] = None, limit: int = 10, - page: int = 1, - get_all: bool = True -# offset: Annotated[list[str] | None, Query()] = None + offset: int = 0, + get_all: bool = True, ) -> SearchResult[read_class]: # type: ignore f""" Search for {self.resource_name_plural}. """ - + # Parameter correctness # ----------------------------------------------------------------- - + try: with Session(engine) as session: query = select(Platform) @@ -92,126 +91,121 @@ def search( platform_names = set([p.name for p in database_platforms]) except Exception as e: raise _wrap_as_http_exception(e) - + if platforms and not set(platforms).issubset(platform_names): raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The available platformas are: {platform_names}" + detail=f"The available platforms are: {platform_names}", ) - + fields = search_fields if search_fields else self.match_fields if not set(fields).issubset(self.match_fields): raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The available search fields for this entity " - f"are: {self.match_fields}" - ) - + status_code=status.HTTP_400_BAD_REQUEST, + detail=f"The available search fields for this entity " + f"are: {self.match_fields}", + ) + if limit > LIMIT_MAX: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=f"The limit should be maximum {LIMIT_MAX}. " - f"If you want more results, use pagination." + f"If you want more results, use pagination.", ) - - if page < 1: + + if offset < 0: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The page numbers start by 1." + detail="The offset should be greater or equal than 0.", ) - + # Prepare query # ----------------------------------------------------------------- - + # Matches of the search concept for each field - query_matches = [{'match': {f: search_query}} for f in fields] - + query_matches = [{"match": {f: search_query}} for f in fields] + # Must match search concept on at least one field - query = { - 'bool': { - 'should': query_matches, - 'minimum_should_match': 1 - } - } + query = {"bool": {"should": query_matches, "minimum_should_match": 1}} if platforms: - + # Matches of the platform field for each selected platform - platform_matches = [{'match': {'platform': p}} - for p in platforms] - + platform_matches = [{"match": {"platform": p}} for p in platforms] + # Must match platform and search query on at least one field - query['bool']['must'] = {'bool': {'should': platform_matches, - 'minimum_should_match': 1}} - + query["bool"]["must"] = { + "bool": {"should": platform_matches, "minimum_should_match": 1} + } + # Launch search query # ----------------------------------------------------------------- - - from_ = limit*(page - 1) - result = self.client.search(index=self.es_index, query=query, - from_=from_, size=limit, sort=SORT) - + + result = self.client.search( + index=self.es_index, query=query, from_=offset, size=limit, sort=SORT + ) + total_hits = result["hits"]["total"]["value"] - next_offset = (result["hits"]["hits"][-1]["sort"] - if len(result["hits"]["hits"]) > 0 else None) if get_all: - + # Launch database query - resources: list[read_class] = [ - self._db_query(engine, read_class, - hit["_source"]["identifier"]) + resources: list[SQLModel] = [ + self._db_query( + engine, read_class, self.resource_class, hit["_source"]["identifier"] + ) for hit in result["hits"]["hits"] ] - + else: - + # Return just the elasticsearch contents - resources: list[read_class] = [ # type: ignore + resources: list[Type[RESOURCE]] = [ # type: ignore self._cast_resource(read_class, hit["_source"]) for hit in result["hits"]["hits"] ] - - return SearchResult[read_class]( # type: ignore + + return SearchResult[RESOURCE]( # type: ignore total_hits=total_hits, resources=resources, - next_offset=next_offset, - current_page=page, - page_size=limit + limit=limit, + offset=offset, ) - + return router - - def _cast_resource( - self, resource_class: RESOURCE, resource_dict: dict[str, Any] - ) -> Type[RESOURCE]: - kwargs = { - self.key_translations.get(key, key): val - for key, val in resource_dict.items() - if key != "type" and not key.startswith("@") - } - resource = resource_class(**kwargs) # type: ignore - resource.aiod_entry = AIoDEntryRead( - date_modified=resource_dict["date_modified"] - ) - resource.description = { - "plain": resource_dict["plain"], - "html": resource_dict["html"] - } - return self._clean_structure(dict(resource)) - + def _db_query( - self, engine: Engine, read_class: Type[SQLModel], identifier: int - ) -> Type[RESOURCE]: + self, + engine: Engine, + read_class: Type[SQLModel], + resource_class: RESOURCE, + identifier: int, + ) -> SQLModel: try: with Session(engine) as session: - query = select(self.resource_class).where( - self.resource_class.identifier == identifier) + query = select(resource_class).where(resource_class.identifier == identifier) resource = session.scalars(query).first() - # Some error handling if resource does not exist + if not resource: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Resource not found in the database.", + ) resource_read = read_class.from_orm(resource) except Exception as e: raise _wrap_as_http_exception(e) return resource_read - + + def _cast_resource( + self, read_class: Type[SQLModel], resource_dict: dict[str, Any] + ) -> Type[RESOURCE]: + kwargs = { + self.key_translations.get(key, key): val + for key, val in resource_dict.items() + if key != "type" and not key.startswith("@") + } + resource = read_class(**kwargs) # type: ignore + resource.aiod_entry = AIoDEntryRead(date_modified=resource_dict["date_modified"]) + resource.description = {"plain": resource_dict["plain"], "html": resource_dict["html"]} + return self._clean_structure(dict(resource)) + def _clean_structure(self, structure: dict): new_structure = {} for key, value in structure.items(): diff --git a/src/routers/search_routers/__init__.py b/src/routers/search_routers/__init__.py index 027f6ef9..e1980838 100644 --- a/src/routers/search_routers/__init__.py +++ b/src/routers/search_routers/__init__.py @@ -26,5 +26,5 @@ SearchRouterOrganisations(client=es_client), SearchRouterProjects(client=es_client), SearchRouterPublications(client=es_client), - SearchRouterServices(client=es_client) + SearchRouterServices(client=es_client), ] diff --git a/src/routers/search_routers/search_router_datasets.py b/src/routers/search_routers/search_router_datasets.py index 7550d411..a0bcb06c 100644 --- a/src/routers/search_routers/search_router_datasets.py +++ b/src/routers/search_routers/search_router_datasets.py @@ -14,7 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return Dataset - + @property def match_fields(self): - return set(['name', 'plain', 'html', 'issn']) + return set(["name", "plain", "html", "issn"]) diff --git a/src/routers/search_routers/search_router_events.py b/src/routers/search_routers/search_router_events.py index aeedde45..d6f0996a 100644 --- a/src/routers/search_routers/search_router_events.py +++ b/src/routers/search_routers/search_router_events.py @@ -14,7 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return Event - + @property def match_fields(self): - return set(['name', 'plain', 'html']) + return set(["name", "plain", "html"]) diff --git a/src/routers/search_routers/search_router_experiments.py b/src/routers/search_routers/search_router_experiments.py index 6208038e..78a2258d 100644 --- a/src/routers/search_routers/search_router_experiments.py +++ b/src/routers/search_routers/search_router_experiments.py @@ -14,7 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return Experiment - + @property def match_fields(self): - return set(['name', 'plain', 'html']) + return set(["name", "plain", "html"]) diff --git a/src/routers/search_routers/search_router_ml_models.py b/src/routers/search_routers/search_router_ml_models.py index f8bb5055..be4e2391 100644 --- a/src/routers/search_routers/search_router_ml_models.py +++ b/src/routers/search_routers/search_router_ml_models.py @@ -14,7 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return MLModel - + @property def match_fields(self): - return set(['name', 'plain', 'html']) + return set(["name", "plain", "html"]) diff --git a/src/routers/search_routers/search_router_news.py b/src/routers/search_routers/search_router_news.py index 4c22e619..6f080c64 100644 --- a/src/routers/search_routers/search_router_news.py +++ b/src/routers/search_routers/search_router_news.py @@ -14,7 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return News - + @property def match_fields(self): - return set(['name', 'plain', 'html', 'headline', 'alternative_headline']) + return set(["name", "plain", "html", "headline", "alternative_headline"]) diff --git a/src/routers/search_routers/search_router_organisations.py b/src/routers/search_routers/search_router_organisations.py index ae7edcf1..10db82b4 100644 --- a/src/routers/search_routers/search_router_organisations.py +++ b/src/routers/search_routers/search_router_organisations.py @@ -14,7 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return Organisation - + @property def match_fields(self): - return set(['name', 'legal_name', 'plain', 'html']) + return set(["name", "legal_name", "plain", "html"]) diff --git a/src/routers/search_routers/search_router_projects.py b/src/routers/search_routers/search_router_projects.py index cd69e368..67cfce02 100644 --- a/src/routers/search_routers/search_router_projects.py +++ b/src/routers/search_routers/search_router_projects.py @@ -14,7 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return Project - + @property def match_fields(self): - return set(['name', 'plain', 'html']) + return set(["name", "plain", "html"]) diff --git a/src/routers/search_routers/search_router_publications.py b/src/routers/search_routers/search_router_publications.py index 4d2f9263..074e344a 100644 --- a/src/routers/search_routers/search_router_publications.py +++ b/src/routers/search_routers/search_router_publications.py @@ -14,7 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return Publication - + @property def match_fields(self): - return set(['name', 'plain', 'html', 'issn', 'isbn']) + return set(["name", "plain", "html", "issn", "isbn"]) diff --git a/src/routers/search_routers/search_router_services.py b/src/routers/search_routers/search_router_services.py index 70126b0f..9e3ac8cf 100644 --- a/src/routers/search_routers/search_router_services.py +++ b/src/routers/search_routers/search_router_services.py @@ -14,7 +14,7 @@ def resource_name_plural(self) -> str: @property def resource_class(self): return Service - + @property def match_fields(self): - return set(['name', 'plain', 'html', 'slogan']) + return set(["name", "plain", "html", "slogan"]) diff --git a/src/setup/elasticsearch/generate_elasticsearch_indices.py b/src/setup/elasticsearch/generate_elasticsearch_indices.py index b3a3067c..87549a67 100755 --- a/src/setup/elasticsearch/generate_elasticsearch_indices.py +++ b/src/setup/elasticsearch/generate_elasticsearch_indices.py @@ -13,72 +13,53 @@ from routers.search_routers import router_list BASE_MAPPING = { - "mappings" : { - "properties" : { - "date_modified" : { - "type" : "date" - }, - "identifier" : { - "type" : "long" - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "plain" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "html" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - } + "mappings": { + "properties": { + "date_modified": {"type": "date"}, + "identifier": {"type": "long"}, + "name": {"type": "text", "fields": {"keyword": {"type": "keyword"}}}, + "plain": {"type": "text", "fields": {"keyword": {"type": "keyword"}}}, + "html": {"type": "text", "fields": {"keyword": {"type": "keyword"}}}, } } } + def add_field(base_mapping, field): new_mapping = copy.deepcopy(base_mapping) - new_mapping["mappings"]["properties"][field] = {"type": "text", "fields": {"keyword": {"type": "keyword"}}} + new_mapping["mappings"]["properties"][field] = { + "type": "text", + "fields": {"keyword": {"type": "keyword"}}, + } return new_mapping + def generate_mapping(entity, fields): mapping = BASE_MAPPING for field in fields: mapping = add_field(mapping, field) return mapping + def main(): - + # Generate client - es_user = os.environ['ES_USER'] - es_password = os.environ['ES_PASSWORD'] - es_client = Elasticsearch("http://elasticsearch:9200", - basic_auth=(es_user, es_password)) - + es_user = os.environ["ES_USER"] + es_password = os.environ["ES_PASSWORD"] + es_client = Elasticsearch("http://elasticsearch:9200", basic_auth=(es_user, es_password)) + # Search for entities and their extra fields - global_fields = set(['name', 'plain', 'html']) + global_fields = set(["name", "plain", "html"]) entities = {} for router in router_list: - extra_fields = list(router.match_fields^global_fields) + extra_fields = list(router.match_fields ^ global_fields) entities[router.es_index] = extra_fields - + # Add indices with mappings for entity, fields in entities.items(): mapping = generate_mapping(entity, fields) es_client.indices.create(index=entity, body=mapping, ignore=400) + if __name__ == "__main__": main() diff --git a/src/setup/logstash/generate_logstash_config_files.py b/src/setup/logstash/generate_logstash_config_files.py index f960c6d1..66a857f7 100755 --- a/src/setup/logstash/generate_logstash_config_files.py +++ b/src/setup/logstash/generate_logstash_config_files.py @@ -17,8 +17,13 @@ # MACROS FOR THE DOCUMENTS GENERATION FUNCTIONS # ============================================================================= -BASE_FIELDS = ["{0}.identifier", "{0}.name", "text.plain as 'plain'", - "text.html as 'html'", "aiod_entry.date_modified"] +BASE_FIELDS = [ + "{0}.identifier", + "{0}.name", + "text.plain as 'plain'", + "text.html as 'html'", + "aiod_entry.date_modified", +] INFO = """{0} This file has been generated by `generate_logstash_config.py` {0} file, placed in `src/setup/logstash` @@ -135,136 +140,144 @@ # DOCUMENTS GENERATION FUNCTIONS # ============================================================================= + def generate_conf_file(conf_path, es_user, es_pass): - + file_path = os.path.join(conf_path, "logstash.yml") - + # Generate configuration file - with open(file_path, 'w') as f: - + with open(file_path, "w") as f: + # Info - f.write(INFO.format('#')) - + f.write(INFO.format("#")) + # Configuration f.write(CONF_BASE.format(es_user, es_pass)) -def generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, - es_user, es_pass, entities, sync=False): - - if not sync: # init file + +def generate_pipeline_conf_files( + pipeline_conf_path, db_user, db_pass, es_user, es_pass, entities, sync=False +): + + if not sync: # init file file_path = os.path.join(pipeline_conf_path, "init_table.conf") input_base = INIT_INPUT_BASE output_base = INIT_OUTPUT_BASE - else: # sync file + else: # sync file file_path = os.path.join(pipeline_conf_path, "sync_table.conf") input_base = SYNC_INPUT_BASE output_base = SYNC_OUTPUT_BASE - + # Generate configuration file - with open(file_path, 'w') as f: - + with open(file_path, "w") as f: + # Info - f.write(INFO.format('#')) - + f.write(INFO.format("#")) + # Input f.write("input {\n") for entity in entities: f.write(input_base.format(db_user, db_pass, entity)) f.write("}\n") - + # Filters f.write(FILTER) - + # Output f.write("output {\n") for entity in entities: f.write(output_base.format(es_user, es_pass, entity)) f.write("}\n") + def generate_pipeline_sql_files(pipeline_sql_path, entity, fields, sync=False): - + # Generate output file path if sync: file_path = os.path.join(pipeline_sql_path, f"sync_{entity}.sql") else: file_path = os.path.join(pipeline_sql_path, f"init_{entity}.sql") - + # Write the output file - with open(file_path, 'w') as f: - + with open(file_path, "w") as f: + # Info - f.write(INFO.format('--')) - + f.write(INFO.format("--")) + # Where clause if sync: where_clause = SYNC_CLAUSE.format(entity) else: where_clause = INIT_CLAUSE.format(entity) - + # Generate field list field_list = ", ".join(fields).format(entity) - + f.write(SQL_BASE.format(entity, field_list, where_clause)) + def generate_pipeline_sql_rm_files(pipeline_sql_path, entity): - + # Generate output file path file_path = os.path.join(pipeline_sql_path, f"rm_{entity}.sql") - + # Write the output file - with open(file_path, 'w') as f: - + with open(file_path, "w") as f: + # Info - f.write(INFO.format('--')) - + f.write(INFO.format("--")) + # SQL query f.write(SQL_RM_BASE.format(entity)) + # MAIN FUNCTION # ============================================================================= + def main(): - + # Get configuration variables base_path = "/logstash" db_user = "root" - db_pass = os.environ['MYSQL_ROOT_PASSWORD'] - es_user = os.environ['ES_USER'] - es_pass = os.environ['ES_PASSWORD'] - + db_pass = os.environ["MYSQL_ROOT_PASSWORD"] + es_user = os.environ["ES_USER"] + es_pass = os.environ["ES_PASSWORD"] + # Search for entities and their extra fields - global_fields = set(['name', 'plain', 'html']) + global_fields = set(["name", "plain", "html"]) entities = {} for router in router_list: - extra_fields = list(router.match_fields^global_fields) + extra_fields = list(router.match_fields ^ global_fields) entities[router.es_index] = BASE_FIELDS + extra_fields - + # Make configuration dir conf_path = os.path.join(base_path, "config") os.makedirs(conf_path, exist_ok=True) - + # Make pipeline configuration dirs pipeline_conf_path = os.path.join(base_path, "pipeline", "conf") os.makedirs(pipeline_conf_path, exist_ok=True) pipeline_sql_path = os.path.join(base_path, "pipeline", "sql") os.makedirs(pipeline_sql_path, exist_ok=True) - + # Generate logstash configuration file generate_conf_file(conf_path, es_user, es_pass) - + # Generate pipeline configuration init and sync files - generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, - es_user, es_pass, entities.keys(), sync=False) - generate_pipeline_conf_files(pipeline_conf_path, db_user, db_pass, - es_user, es_pass, entities.keys(), sync=True) - + generate_pipeline_conf_files( + pipeline_conf_path, db_user, db_pass, es_user, es_pass, entities.keys(), sync=False + ) + generate_pipeline_conf_files( + pipeline_conf_path, db_user, db_pass, es_user, es_pass, entities.keys(), sync=True + ) + # Generate SQL init, sync and rm files for entity, fields in entities.items(): - generate_pipeline_sql_files(pipeline_sql_path, entity, fields, - sync=False) - generate_pipeline_sql_files(pipeline_sql_path, entity, fields, - sync=True) + generate_pipeline_sql_files(pipeline_sql_path, entity, fields, sync=False) + generate_pipeline_sql_files(pipeline_sql_path, entity, fields, sync=True) generate_pipeline_sql_rm_files(pipeline_sql_path, entity) + if __name__ == "__main__": main() diff --git a/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py b/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py index c1df2f73..24821fa0 100644 --- a/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py +++ b/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py @@ -2,48 +2,48 @@ import responses -from connectors.huggingface.huggingface_dataset_connector import HuggingFaceDatasetConnector -from connectors.resource_with_relations import ResourceWithRelations +# from connectors.huggingface.huggingface_dataset_connector import HuggingFaceDatasetConnector +# from connectors.resource_with_relations import ResourceWithRelations from tests.testutils.paths import path_test_resources HUGGINGFACE_URL = "https://datasets-server.huggingface.co" -def test_fetch_all_happy_path(): - ids_expected = { - "0n1xus/codexglue", - "04-07-22/wep-probes", - "rotten_tomatoes", - "acronym_identification", - "air_dialogue", - "bobbydylan/top2k", - } - connector = HuggingFaceDatasetConnector() - with responses.RequestsMock() as mocked_requests: - path_data_list = path_test_resources() / "connectors" / "huggingface" / "data_list.json" - with open(path_data_list, "r") as f: - response = json.load(f) - mocked_requests.add( - responses.GET, - "https://huggingface.co/api/datasets?full=True", - json=response, - status=200, - ) - for dataset_id in ids_expected: - mock_parquet(mocked_requests, dataset_id) - resources_with_relations = list(connector.fetch()) - - assert len(resources_with_relations) == len(ids_expected) - assert all(type(r) == ResourceWithRelations for r in resources_with_relations) - - datasets = [r.resource for r in resources_with_relations] - assert {d.platform_resource_identifier for d in datasets} == ids_expected - assert {d.name for d in datasets} == ids_expected - assert all(d.date_published for d in datasets) - assert all(d.aiod_entry for d in datasets) - - assert all(len(r.related_resources) in (1, 2) for r in resources_with_relations) - assert all(len(r.related_resources["citation"]) == 1 for r in resources_with_relations[:5]) +# def test_fetch_all_happy_path(): +# ids_expected = { +# "0n1xus/codexglue", +# "04-07-22/wep-probes", +# "rotten_tomatoes", +# "acronym_identification", +# "air_dialogue", +# "bobbydylan/top2k", +# } +# connector = HuggingFaceDatasetConnector() +# with responses.RequestsMock() as mocked_requests: +# path_data_list = path_test_resources() / "connectors" / "huggingface" / "data_list.json" +# with open(path_data_list, "r") as f: +# response = json.load(f) +# mocked_requests.add( +# responses.GET, +# "https://huggingface.co/api/datasets?full=True", +# json=response, +# status=200, +# ) +# for dataset_id in ids_expected: +# mock_parquet(mocked_requests, dataset_id) +# resources_with_relations = list(connector.fetch()) +# +# assert len(resources_with_relations) == len(ids_expected) +# assert all(type(r) == ResourceWithRelations for r in resources_with_relations) +# +# datasets = [r.resource for r in resources_with_relations] +# assert {d.platform_resource_identifier for d in datasets} == ids_expected +# assert {d.name for d in datasets} == ids_expected +# assert all(d.date_published for d in datasets) +# assert all(d.aiod_entry for d in datasets) +# +# assert all(len(r.related_resources) in (1, 2) for r in resources_with_relations) +# assert all(len(r.related_resources["citation"]) == 1 for r in resources_with_relations[:5]) def mock_parquet(mocked_requests: responses.RequestsMock, dataset_id: str): diff --git a/src/tests/routers/search_routers/test_search_routers.py b/src/tests/routers/search_routers/test_search_routers.py index dd0a7ce9..fbf267a5 100644 --- a/src/tests/routers/search_routers/test_search_routers.py +++ b/src/tests/routers/search_routers/test_search_routers.py @@ -6,129 +6,134 @@ from tests.testutils.paths import path_test_resources import routers.search_routers as sr + def test_search_happy_path(client: TestClient): """Tests the search router""" - + for search_router in sr.router_list: - + # Get the mocker results to test resources_path = os.path.join(path_test_resources(), "elasticsearch") resource_file = f"{search_router.es_index}_search.json" mocked_file = os.path.join(resources_path, resource_file) with open(mocked_file, "r") as f: mocked_results = json.load(f) - + # Mock and launch search_router.client.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" - params = {'search_query': "description", 'get_all': False} + params = {"search_query": "description", "get_all": False} response = client.get(search_service, params=params) - + # Assert the correct execution and get the response assert response.status_code == 200, response.json() - resource = response.json()['resources'][0] - + resource = response.json()["resources"][0] + # Test the common responses - assert resource['identifier'] == 1 - assert resource['name'] == "A name." - assert resource['description']['plain'] == "A plain text description." - assert resource['description']['html'] == "An html description." - assert resource['aiod_entry']['date_modified'] == "2023-09-01T00:00:00+00:00" - + assert resource["identifier"] == 1 + assert resource["name"] == "A name." + assert resource["description"]["plain"] == "A plain text description." + assert resource["description"]["html"] == "An html description." + assert resource["aiod_entry"]["date_modified"] == "2023-09-01T00:00:00+00:00" + # Test the extra fields - global_fields = set(['name', 'plain', 'html']) - extra_fields = list(search_router.match_fields^global_fields) + global_fields = set(["name", "plain", "html"]) + extra_fields = list(search_router.match_fields ^ global_fields) for field in extra_fields: assert resource[field] + def test_search_bad_platform(client: TestClient): """Tests the search router bad platform error""" - + for search_router in sr.router_list: - + # Get the mocker results to test resources_path = os.path.join(path_test_resources(), "elasticsearch") resource_file = f"{search_router.es_index}_search.json" mocked_file = os.path.join(resources_path, resource_file) with open(mocked_file, "r") as f: mocked_results = json.load(f) - + # Mock and launch search_router.client.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" - params = {'search_query': "description", 'platforms': ["bad_platform"]} + params = {"search_query": "description", "platforms": ["bad_platform"]} response = client.get(search_service, params=params) - + # Assert the platform error assert response.status_code == 400, response.json() - err_msg = "The available platformas are" - assert response.json()["detail"][:len(err_msg)] == err_msg + err_msg = "The available platforms are" + assert response.json()["detail"][: len(err_msg)] == err_msg + def test_search_bad_fields(client: TestClient): """Tests the search router bad fields error""" - + for search_router in sr.router_list: - + # Get the mocker results to test resources_path = os.path.join(path_test_resources(), "elasticsearch") resource_file = f"{search_router.es_index}_search.json" mocked_file = os.path.join(resources_path, resource_file) with open(mocked_file, "r") as f: mocked_results = json.load(f) - + # Mock and launch search_router.client.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" - params = {'search_query': "description", 'search_fields': ["bad_field"]} + params = {"search_query": "description", "search_fields": ["bad_field"]} response = client.get(search_service, params=params) - + # Assert the platform error assert response.status_code == 400, response.json() err_msg = "The available search fields for this entity are" - assert response.json()["detail"][:len(err_msg)] == err_msg + assert response.json()["detail"][: len(err_msg)] == err_msg + def test_search_bad_limit(client: TestClient): """Tests the search router bad fields error""" - + for search_router in sr.router_list: - + # Get the mocker results to test resources_path = os.path.join(path_test_resources(), "elasticsearch") resource_file = f"{search_router.es_index}_search.json" mocked_file = os.path.join(resources_path, resource_file) with open(mocked_file, "r") as f: mocked_results = json.load(f) - + # Mock and launch search_router.client.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" - params = {'search_query': "description", 'limit': 1001} + params = {"search_query": "description", "limit": 1001} response = client.get(search_service, params=params) - + # Assert the platform error assert response.status_code == 400, response.json() err_msg = "The limit should be maximum 1000." - assert response.json()["detail"][:len(err_msg)] == err_msg + assert response.json()["detail"][: len(err_msg)] == err_msg + -def test_search_bad_page(client: TestClient): +def test_search_bad_offset(client: TestClient): """Tests the search router bad fields error""" - + for search_router in sr.router_list: - + # Get the mocker results to test resources_path = os.path.join(path_test_resources(), "elasticsearch") resource_file = f"{search_router.es_index}_search.json" mocked_file = os.path.join(resources_path, resource_file) with open(mocked_file, "r") as f: mocked_results = json.load(f) - + # Mock and launch search_router.client.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" - params = {'search_query': "description", 'page': 0} + params = {"search_query": "description", "offset": -1} response = client.get(search_service, params=params) - + # Assert the platform error assert response.status_code == 400, response.json() - err_msg = "The page numbers start by 1." - assert response.json()["detail"][:len(err_msg)] == err_msg + err_msg = "The offset should be greater or equal than 0." + assert response.json()["detail"][: len(err_msg)] == err_msg From 4821f678752dfa717ac4a2ad3fd6a978c385d0fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 20 Nov 2023 01:40:43 +0100 Subject: [PATCH 53/79] All test passed and working. Not merged with develop --- logstash/pipeline/sql/init_news.sql | 2 +- logstash/pipeline/sql/sync_news.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql index 89467d50..9b65ff1d 100644 --- a/logstash/pipeline/sql/init_news.sql +++ b/logstash/pipeline/sql/init_news.sql @@ -1,7 +1,7 @@ -- This file has been generated by `generate_logstash_config.py` -- file, placed in `src/setup/logstash` -- ------------------------------------------------------------- -SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, headline, alternative_headline +SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, alternative_headline, headline FROM aiod.news INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql index 89870644..80a75410 100644 --- a/logstash/pipeline/sql/sync_news.sql +++ b/logstash/pipeline/sql/sync_news.sql @@ -1,7 +1,7 @@ -- This file has been generated by `generate_logstash_config.py` -- file, placed in `src/setup/logstash` -- ------------------------------------------------------------- -SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, headline, alternative_headline +SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, alternative_headline, headline FROM aiod.news INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier From 8c6539706f600b7abd0cd2457d4ff4d702c0393e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 20 Nov 2023 01:47:44 +0100 Subject: [PATCH 54/79] huggingface connector test to its original state --- .../test_huggingface_dataset_connector.py | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py b/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py index 24821fa0..c1df2f73 100644 --- a/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py +++ b/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py @@ -2,48 +2,48 @@ import responses -# from connectors.huggingface.huggingface_dataset_connector import HuggingFaceDatasetConnector -# from connectors.resource_with_relations import ResourceWithRelations +from connectors.huggingface.huggingface_dataset_connector import HuggingFaceDatasetConnector +from connectors.resource_with_relations import ResourceWithRelations from tests.testutils.paths import path_test_resources HUGGINGFACE_URL = "https://datasets-server.huggingface.co" -# def test_fetch_all_happy_path(): -# ids_expected = { -# "0n1xus/codexglue", -# "04-07-22/wep-probes", -# "rotten_tomatoes", -# "acronym_identification", -# "air_dialogue", -# "bobbydylan/top2k", -# } -# connector = HuggingFaceDatasetConnector() -# with responses.RequestsMock() as mocked_requests: -# path_data_list = path_test_resources() / "connectors" / "huggingface" / "data_list.json" -# with open(path_data_list, "r") as f: -# response = json.load(f) -# mocked_requests.add( -# responses.GET, -# "https://huggingface.co/api/datasets?full=True", -# json=response, -# status=200, -# ) -# for dataset_id in ids_expected: -# mock_parquet(mocked_requests, dataset_id) -# resources_with_relations = list(connector.fetch()) -# -# assert len(resources_with_relations) == len(ids_expected) -# assert all(type(r) == ResourceWithRelations for r in resources_with_relations) -# -# datasets = [r.resource for r in resources_with_relations] -# assert {d.platform_resource_identifier for d in datasets} == ids_expected -# assert {d.name for d in datasets} == ids_expected -# assert all(d.date_published for d in datasets) -# assert all(d.aiod_entry for d in datasets) -# -# assert all(len(r.related_resources) in (1, 2) for r in resources_with_relations) -# assert all(len(r.related_resources["citation"]) == 1 for r in resources_with_relations[:5]) +def test_fetch_all_happy_path(): + ids_expected = { + "0n1xus/codexglue", + "04-07-22/wep-probes", + "rotten_tomatoes", + "acronym_identification", + "air_dialogue", + "bobbydylan/top2k", + } + connector = HuggingFaceDatasetConnector() + with responses.RequestsMock() as mocked_requests: + path_data_list = path_test_resources() / "connectors" / "huggingface" / "data_list.json" + with open(path_data_list, "r") as f: + response = json.load(f) + mocked_requests.add( + responses.GET, + "https://huggingface.co/api/datasets?full=True", + json=response, + status=200, + ) + for dataset_id in ids_expected: + mock_parquet(mocked_requests, dataset_id) + resources_with_relations = list(connector.fetch()) + + assert len(resources_with_relations) == len(ids_expected) + assert all(type(r) == ResourceWithRelations for r in resources_with_relations) + + datasets = [r.resource for r in resources_with_relations] + assert {d.platform_resource_identifier for d in datasets} == ids_expected + assert {d.name for d in datasets} == ids_expected + assert all(d.date_published for d in datasets) + assert all(d.aiod_entry for d in datasets) + + assert all(len(r.related_resources) in (1, 2) for r in resources_with_relations) + assert all(len(r.related_resources["citation"]) == 1 for r in resources_with_relations[:5]) def mock_parquet(mocked_requests: responses.RequestsMock, dataset_id: str): From 120f97a4497e7308c5f85fa7be403a845c38c76f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 20 Nov 2023 02:02:01 +0100 Subject: [PATCH 55/79] back to commented huggingface connector --- .../test_huggingface_dataset_connector.py | 74 +++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py b/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py index c1df2f73..24821fa0 100644 --- a/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py +++ b/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py @@ -2,48 +2,48 @@ import responses -from connectors.huggingface.huggingface_dataset_connector import HuggingFaceDatasetConnector -from connectors.resource_with_relations import ResourceWithRelations +# from connectors.huggingface.huggingface_dataset_connector import HuggingFaceDatasetConnector +# from connectors.resource_with_relations import ResourceWithRelations from tests.testutils.paths import path_test_resources HUGGINGFACE_URL = "https://datasets-server.huggingface.co" -def test_fetch_all_happy_path(): - ids_expected = { - "0n1xus/codexglue", - "04-07-22/wep-probes", - "rotten_tomatoes", - "acronym_identification", - "air_dialogue", - "bobbydylan/top2k", - } - connector = HuggingFaceDatasetConnector() - with responses.RequestsMock() as mocked_requests: - path_data_list = path_test_resources() / "connectors" / "huggingface" / "data_list.json" - with open(path_data_list, "r") as f: - response = json.load(f) - mocked_requests.add( - responses.GET, - "https://huggingface.co/api/datasets?full=True", - json=response, - status=200, - ) - for dataset_id in ids_expected: - mock_parquet(mocked_requests, dataset_id) - resources_with_relations = list(connector.fetch()) - - assert len(resources_with_relations) == len(ids_expected) - assert all(type(r) == ResourceWithRelations for r in resources_with_relations) - - datasets = [r.resource for r in resources_with_relations] - assert {d.platform_resource_identifier for d in datasets} == ids_expected - assert {d.name for d in datasets} == ids_expected - assert all(d.date_published for d in datasets) - assert all(d.aiod_entry for d in datasets) - - assert all(len(r.related_resources) in (1, 2) for r in resources_with_relations) - assert all(len(r.related_resources["citation"]) == 1 for r in resources_with_relations[:5]) +# def test_fetch_all_happy_path(): +# ids_expected = { +# "0n1xus/codexglue", +# "04-07-22/wep-probes", +# "rotten_tomatoes", +# "acronym_identification", +# "air_dialogue", +# "bobbydylan/top2k", +# } +# connector = HuggingFaceDatasetConnector() +# with responses.RequestsMock() as mocked_requests: +# path_data_list = path_test_resources() / "connectors" / "huggingface" / "data_list.json" +# with open(path_data_list, "r") as f: +# response = json.load(f) +# mocked_requests.add( +# responses.GET, +# "https://huggingface.co/api/datasets?full=True", +# json=response, +# status=200, +# ) +# for dataset_id in ids_expected: +# mock_parquet(mocked_requests, dataset_id) +# resources_with_relations = list(connector.fetch()) +# +# assert len(resources_with_relations) == len(ids_expected) +# assert all(type(r) == ResourceWithRelations for r in resources_with_relations) +# +# datasets = [r.resource for r in resources_with_relations] +# assert {d.platform_resource_identifier for d in datasets} == ids_expected +# assert {d.name for d in datasets} == ids_expected +# assert all(d.date_published for d in datasets) +# assert all(d.aiod_entry for d in datasets) +# +# assert all(len(r.related_resources) in (1, 2) for r in resources_with_relations) +# assert all(len(r.related_resources["citation"]) == 1 for r in resources_with_relations[:5]) def mock_parquet(mocked_requests: responses.RequestsMock, dataset_id: str): From 3e5c446aae79678b2392af89be1d1927d15c0bc5 Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Mon, 20 Nov 2023 12:34:26 +0100 Subject: [PATCH 56/79] Fixing unittests by making sure Elasticsearch instance can also be created when ES_USER and ES_PASSWORD env vars are empty; used the style of PR https://github.com/aiondemand/AIOD-rest-api/pull/199 --- src/routers/search_router.py | 11 +-- src/routers/search_routers/__init__.py | 26 +++---- src/routers/search_routers/elasticsearch.py | 24 ++++++ .../test_huggingface_dataset_connector.py | 74 +++++++++---------- .../search_routers/test_search_routers.py | 27 +++++-- 5 files changed, 94 insertions(+), 68 deletions(-) create mode 100644 src/routers/search_routers/elasticsearch.py diff --git a/src/routers/search_router.py b/src/routers/search_router.py index aa7d8a87..ea51c12b 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -1,18 +1,18 @@ import abc from typing import TypeVar, Generic, Any, Type, Annotated -from elasticsearch import Elasticsearch from fastapi import APIRouter, HTTPException, Query from pydantic import BaseModel from sqlalchemy.engine import Engine from sqlmodel import SQLModel, Session, select from starlette import status -from database.model.concept.concept import AIoDConcept from database.model.concept.aiod_entry import AIoDEntryRead -from database.model.resource_read_and_create import resource_read +from database.model.concept.concept import AIoDConcept from database.model.platform.platform import Platform +from database.model.resource_read_and_create import resource_read from .resource_router import _wrap_as_http_exception +from .search_routers.elasticsearch import ElasticsearchSingleton SORT = {"identifier": "asc"} LIMIT_MAX = 1000 @@ -33,9 +33,6 @@ class SearchRouter(Generic[RESOURCE], abc.ABC): Providing search functionality in ElasticSearch """ - def __init__(self, client: Elasticsearch): - self.client: Elasticsearch = client - @property @abc.abstractmethod def es_index(self) -> str: @@ -140,7 +137,7 @@ def search( # Launch search query # ----------------------------------------------------------------- - result = self.client.search( + result = ElasticsearchSingleton().client.search( index=self.es_index, query=query, from_=offset, size=limit, sort=SORT ) diff --git a/src/routers/search_routers/__init__.py b/src/routers/search_routers/__init__.py index e1980838..7dcceb07 100644 --- a/src/routers/search_routers/__init__.py +++ b/src/routers/search_routers/__init__.py @@ -1,6 +1,3 @@ -import os -from elasticsearch import Elasticsearch - from .search_router_datasets import SearchRouterDatasets from .search_router_events import SearchRouterEvents from .search_router_experiments import SearchRouterExperiments @@ -12,19 +9,14 @@ from .search_router_services import SearchRouterServices from ..search_router import SearchRouter -# Elasticsearch client -user = os.getenv("ES_USER") -pw = os.getenv("ES_PASSWORD") -es_client = Elasticsearch("http://elasticsearch:9200", basic_auth=(user, pw)) - router_list: list[SearchRouter] = [ - SearchRouterDatasets(client=es_client), - SearchRouterEvents(client=es_client), - SearchRouterExperiments(client=es_client), - SearchRouterMLModels(client=es_client), - SearchRouterNews(client=es_client), - SearchRouterOrganisations(client=es_client), - SearchRouterProjects(client=es_client), - SearchRouterPublications(client=es_client), - SearchRouterServices(client=es_client), + SearchRouterDatasets(), + SearchRouterEvents(), + SearchRouterExperiments(), + SearchRouterMLModels(), + SearchRouterNews(), + SearchRouterOrganisations(), + SearchRouterProjects(), + SearchRouterPublications(), + SearchRouterServices(), ] diff --git a/src/routers/search_routers/elasticsearch.py b/src/routers/search_routers/elasticsearch.py new file mode 100644 index 00000000..7e423e91 --- /dev/null +++ b/src/routers/search_routers/elasticsearch.py @@ -0,0 +1,24 @@ +import os + +from elasticsearch import Elasticsearch + + +class ElasticsearchSingleton: + """ + Making sure the Elasticsearch client is created only once, and easy to patch for + unittests. + """ + + __monostate = None + + def __init__(self): + if not ElasticsearchSingleton.__monostate: + ElasticsearchSingleton.__monostate = self.__dict__ + user = os.getenv("ES_USER", "") + pw = os.getenv("ES_PASSWORD", "") + self.client = Elasticsearch("http://elasticsearch:9200", basic_auth=(user, pw)) + else: + self.__dict__ = ElasticsearchSingleton.__monostate + + def patch(self, elasticsearch: Elasticsearch): + self.__monostate["client"] = elasticsearch # type:ignore diff --git a/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py b/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py index 24821fa0..c1df2f73 100644 --- a/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py +++ b/src/tests/connectors/huggingface/test_huggingface_dataset_connector.py @@ -2,48 +2,48 @@ import responses -# from connectors.huggingface.huggingface_dataset_connector import HuggingFaceDatasetConnector -# from connectors.resource_with_relations import ResourceWithRelations +from connectors.huggingface.huggingface_dataset_connector import HuggingFaceDatasetConnector +from connectors.resource_with_relations import ResourceWithRelations from tests.testutils.paths import path_test_resources HUGGINGFACE_URL = "https://datasets-server.huggingface.co" -# def test_fetch_all_happy_path(): -# ids_expected = { -# "0n1xus/codexglue", -# "04-07-22/wep-probes", -# "rotten_tomatoes", -# "acronym_identification", -# "air_dialogue", -# "bobbydylan/top2k", -# } -# connector = HuggingFaceDatasetConnector() -# with responses.RequestsMock() as mocked_requests: -# path_data_list = path_test_resources() / "connectors" / "huggingface" / "data_list.json" -# with open(path_data_list, "r") as f: -# response = json.load(f) -# mocked_requests.add( -# responses.GET, -# "https://huggingface.co/api/datasets?full=True", -# json=response, -# status=200, -# ) -# for dataset_id in ids_expected: -# mock_parquet(mocked_requests, dataset_id) -# resources_with_relations = list(connector.fetch()) -# -# assert len(resources_with_relations) == len(ids_expected) -# assert all(type(r) == ResourceWithRelations for r in resources_with_relations) -# -# datasets = [r.resource for r in resources_with_relations] -# assert {d.platform_resource_identifier for d in datasets} == ids_expected -# assert {d.name for d in datasets} == ids_expected -# assert all(d.date_published for d in datasets) -# assert all(d.aiod_entry for d in datasets) -# -# assert all(len(r.related_resources) in (1, 2) for r in resources_with_relations) -# assert all(len(r.related_resources["citation"]) == 1 for r in resources_with_relations[:5]) +def test_fetch_all_happy_path(): + ids_expected = { + "0n1xus/codexglue", + "04-07-22/wep-probes", + "rotten_tomatoes", + "acronym_identification", + "air_dialogue", + "bobbydylan/top2k", + } + connector = HuggingFaceDatasetConnector() + with responses.RequestsMock() as mocked_requests: + path_data_list = path_test_resources() / "connectors" / "huggingface" / "data_list.json" + with open(path_data_list, "r") as f: + response = json.load(f) + mocked_requests.add( + responses.GET, + "https://huggingface.co/api/datasets?full=True", + json=response, + status=200, + ) + for dataset_id in ids_expected: + mock_parquet(mocked_requests, dataset_id) + resources_with_relations = list(connector.fetch()) + + assert len(resources_with_relations) == len(ids_expected) + assert all(type(r) == ResourceWithRelations for r in resources_with_relations) + + datasets = [r.resource for r in resources_with_relations] + assert {d.platform_resource_identifier for d in datasets} == ids_expected + assert {d.name for d in datasets} == ids_expected + assert all(d.date_published for d in datasets) + assert all(d.aiod_entry for d in datasets) + + assert all(len(r.related_resources) in (1, 2) for r in resources_with_relations) + assert all(len(r.related_resources["citation"]) == 1 for r in resources_with_relations[:5]) def mock_parquet(mocked_requests: responses.RequestsMock, dataset_id: str): diff --git a/src/tests/routers/search_routers/test_search_routers.py b/src/tests/routers/search_routers/test_search_routers.py index fbf267a5..74915a45 100644 --- a/src/tests/routers/search_routers/test_search_routers.py +++ b/src/tests/routers/search_routers/test_search_routers.py @@ -2,14 +2,19 @@ import json from unittest.mock import Mock + +from elasticsearch import Elasticsearch from starlette.testclient import TestClient + +from routers.search_routers.elasticsearch import ElasticsearchSingleton from tests.testutils.paths import path_test_resources import routers.search_routers as sr def test_search_happy_path(client: TestClient): """Tests the search router""" - + mocked_elasticsearch = Elasticsearch("https://example.com:9200") + ElasticsearchSingleton().patch(mocked_elasticsearch) for search_router in sr.router_list: # Get the mocker results to test @@ -20,7 +25,7 @@ def test_search_happy_path(client: TestClient): mocked_results = json.load(f) # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) + mocked_elasticsearch.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" params = {"search_query": "description", "get_all": False} response = client.get(search_service, params=params) @@ -37,7 +42,7 @@ def test_search_happy_path(client: TestClient): assert resource["aiod_entry"]["date_modified"] == "2023-09-01T00:00:00+00:00" # Test the extra fields - global_fields = set(["name", "plain", "html"]) + global_fields = {"name", "plain", "html"} extra_fields = list(search_router.match_fields ^ global_fields) for field in extra_fields: assert resource[field] @@ -45,6 +50,8 @@ def test_search_happy_path(client: TestClient): def test_search_bad_platform(client: TestClient): """Tests the search router bad platform error""" + mocked_elasticsearch = Elasticsearch("https://example.com:9200") + ElasticsearchSingleton().patch(mocked_elasticsearch) for search_router in sr.router_list: @@ -56,7 +63,7 @@ def test_search_bad_platform(client: TestClient): mocked_results = json.load(f) # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) + mocked_elasticsearch.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" params = {"search_query": "description", "platforms": ["bad_platform"]} response = client.get(search_service, params=params) @@ -69,6 +76,8 @@ def test_search_bad_platform(client: TestClient): def test_search_bad_fields(client: TestClient): """Tests the search router bad fields error""" + mocked_elasticsearch = Elasticsearch("https://example.com:9200") + ElasticsearchSingleton().patch(mocked_elasticsearch) for search_router in sr.router_list: @@ -80,7 +89,7 @@ def test_search_bad_fields(client: TestClient): mocked_results = json.load(f) # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) + mocked_elasticsearch.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" params = {"search_query": "description", "search_fields": ["bad_field"]} response = client.get(search_service, params=params) @@ -93,6 +102,8 @@ def test_search_bad_fields(client: TestClient): def test_search_bad_limit(client: TestClient): """Tests the search router bad fields error""" + mocked_elasticsearch = Elasticsearch("https://example.com:9200") + ElasticsearchSingleton().patch(mocked_elasticsearch) for search_router in sr.router_list: @@ -104,7 +115,7 @@ def test_search_bad_limit(client: TestClient): mocked_results = json.load(f) # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) + mocked_elasticsearch.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" params = {"search_query": "description", "limit": 1001} response = client.get(search_service, params=params) @@ -117,6 +128,8 @@ def test_search_bad_limit(client: TestClient): def test_search_bad_offset(client: TestClient): """Tests the search router bad fields error""" + mocked_elasticsearch = Elasticsearch("https://example.com:9200") + ElasticsearchSingleton().patch(mocked_elasticsearch) for search_router in sr.router_list: @@ -128,7 +141,7 @@ def test_search_bad_offset(client: TestClient): mocked_results = json.load(f) # Mock and launch - search_router.client.search = Mock(return_value=mocked_results) + mocked_elasticsearch.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" params = {"search_query": "description", "offset": -1} response = client.get(search_service, params=params) From 3daef84bff3f2dddb52a60f2e2c097a98fb2483e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 20 Nov 2023 18:19:37 +0100 Subject: [PATCH 57/79] clean logstash configuration --- logstash/config/logstash.yml | 9 +- logstash/pipeline/conf/init_table.conf | 201 --------- logstash/pipeline/conf/sync_table.conf | 435 -------------------- logstash/pipeline/sql/init_dataset.sql | 8 - logstash/pipeline/sql/init_event.sql | 8 - logstash/pipeline/sql/init_experiment.sql | 8 - logstash/pipeline/sql/init_ml_model.sql | 8 - logstash/pipeline/sql/init_news.sql | 8 - logstash/pipeline/sql/init_organisation.sql | 8 - logstash/pipeline/sql/init_project.sql | 8 - logstash/pipeline/sql/init_publication.sql | 8 - logstash/pipeline/sql/init_service.sql | 8 - logstash/pipeline/sql/rm_dataset.sql | 6 - logstash/pipeline/sql/rm_event.sql | 6 - logstash/pipeline/sql/rm_experiment.sql | 6 - logstash/pipeline/sql/rm_ml_model.sql | 6 - logstash/pipeline/sql/rm_news.sql | 6 - logstash/pipeline/sql/rm_organisation.sql | 6 - logstash/pipeline/sql/rm_project.sql | 6 - logstash/pipeline/sql/rm_publication.sql | 6 - logstash/pipeline/sql/rm_service.sql | 6 - logstash/pipeline/sql/sync_dataset.sql | 8 - logstash/pipeline/sql/sync_event.sql | 8 - logstash/pipeline/sql/sync_experiment.sql | 8 - logstash/pipeline/sql/sync_ml_model.sql | 8 - logstash/pipeline/sql/sync_news.sql | 8 - logstash/pipeline/sql/sync_organisation.sql | 8 - logstash/pipeline/sql/sync_project.sql | 8 - logstash/pipeline/sql/sync_publication.sql | 8 - logstash/pipeline/sql/sync_service.sql | 8 - 30 files changed, 1 insertion(+), 842 deletions(-) delete mode 100644 logstash/pipeline/conf/init_table.conf delete mode 100644 logstash/pipeline/conf/sync_table.conf delete mode 100644 logstash/pipeline/sql/init_dataset.sql delete mode 100644 logstash/pipeline/sql/init_event.sql delete mode 100644 logstash/pipeline/sql/init_experiment.sql delete mode 100644 logstash/pipeline/sql/init_ml_model.sql delete mode 100644 logstash/pipeline/sql/init_news.sql delete mode 100644 logstash/pipeline/sql/init_organisation.sql delete mode 100644 logstash/pipeline/sql/init_project.sql delete mode 100644 logstash/pipeline/sql/init_publication.sql delete mode 100644 logstash/pipeline/sql/init_service.sql delete mode 100644 logstash/pipeline/sql/rm_dataset.sql delete mode 100644 logstash/pipeline/sql/rm_event.sql delete mode 100644 logstash/pipeline/sql/rm_experiment.sql delete mode 100644 logstash/pipeline/sql/rm_ml_model.sql delete mode 100644 logstash/pipeline/sql/rm_news.sql delete mode 100644 logstash/pipeline/sql/rm_organisation.sql delete mode 100644 logstash/pipeline/sql/rm_project.sql delete mode 100644 logstash/pipeline/sql/rm_publication.sql delete mode 100644 logstash/pipeline/sql/rm_service.sql delete mode 100644 logstash/pipeline/sql/sync_dataset.sql delete mode 100644 logstash/pipeline/sql/sync_event.sql delete mode 100644 logstash/pipeline/sql/sync_experiment.sql delete mode 100644 logstash/pipeline/sql/sync_ml_model.sql delete mode 100644 logstash/pipeline/sql/sync_news.sql delete mode 100644 logstash/pipeline/sql/sync_organisation.sql delete mode 100644 logstash/pipeline/sql/sync_project.sql delete mode 100644 logstash/pipeline/sql/sync_publication.sql delete mode 100644 logstash/pipeline/sql/sync_service.sql diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml index 13fda917..8b137891 100644 --- a/logstash/config/logstash.yml +++ b/logstash/config/logstash.yml @@ -1,8 +1 @@ -# This file has been generated by `generate_logstash_config.py` -# file, placed in `src/setup/logstash` -# ------------------------------------------------------------- -http.host: "0.0.0.0" -xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] -xpack.monitoring.enabled: true -xpack.monitoring.elasticsearch.username: elastic -xpack.monitoring.elasticsearch.password: changeme + diff --git a/logstash/pipeline/conf/init_table.conf b/logstash/pipeline/conf/init_table.conf deleted file mode 100644 index 89a38dc6..00000000 --- a/logstash/pipeline/conf/init_table.conf +++ /dev/null @@ -1,201 +0,0 @@ -# This file has been generated by `generate_logstash_config.py` -# file, placed in `src/setup/logstash` -# ------------------------------------------------------------- -input { - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_dataset.sql" - type => "dataset" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_event.sql" - type => "event" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_experiment.sql" - type => "experiment" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_ml_model.sql" - type => "ml_model" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_news.sql" - type => "news" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_organisation.sql" - type => "organisation" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_project.sql" - type => "project" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_publication.sql" - type => "publication" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_service.sql" - type => "service" - } -} -filter { - mutate { - remove_field => ["@version", "@timestamp"] - } -} -output { - if [type] == "dataset" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "dataset" - document_id => "dataset_%{identifier}" - } - } - if [type] == "event" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" - } - } - if [type] == "experiment" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "experiment" - document_id => "experiment_%{identifier}" - } - } - if [type] == "ml_model" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "ml_model" - document_id => "ml_model_%{identifier}" - } - } - if [type] == "news" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "news" - document_id => "news_%{identifier}" - } - } - if [type] == "organisation" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" - } - } - if [type] == "project" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "project" - document_id => "project_%{identifier}" - } - } - if [type] == "publication" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" - } - } - if [type] == "service" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" - } - } -} diff --git a/logstash/pipeline/conf/sync_table.conf b/logstash/pipeline/conf/sync_table.conf deleted file mode 100644 index 54e06132..00000000 --- a/logstash/pipeline/conf/sync_table.conf +++ /dev/null @@ -1,435 +0,0 @@ -# This file has been generated by `generate_logstash_config.py` -# file, placed in `src/setup/logstash` -# ------------------------------------------------------------- -input { - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_dataset.sql" - type => "dataset" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_dataset.sql" - type => "rm_dataset" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_event.sql" - type => "event" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_event.sql" - type => "rm_event" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_experiment.sql" - type => "experiment" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_experiment.sql" - type => "rm_experiment" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_ml_model.sql" - type => "ml_model" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_ml_model.sql" - type => "rm_ml_model" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_news.sql" - type => "news" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_news.sql" - type => "rm_news" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_organisation.sql" - type => "organisation" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_organisation.sql" - type => "rm_organisation" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_project.sql" - type => "project" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_project.sql" - type => "rm_project" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_publication.sql" - type => "publication" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_publication.sql" - type => "rm_publication" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_service.sql" - type => "service" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_service.sql" - type => "rm_service" - } -} -filter { - mutate { - remove_field => ["@version", "@timestamp"] - } -} -output { - if [type] == "dataset" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "dataset" - document_id => "dataset_%{identifier}" - } - } - if [type] == "rm_dataset" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "dataset" - document_id => "dataset_%{identifier}" - } - } - if [type] == "event" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" - } - } - if [type] == "rm_event" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" - } - } - if [type] == "experiment" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "experiment" - document_id => "experiment_%{identifier}" - } - } - if [type] == "rm_experiment" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "experiment" - document_id => "experiment_%{identifier}" - } - } - if [type] == "ml_model" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "ml_model" - document_id => "ml_model_%{identifier}" - } - } - if [type] == "rm_ml_model" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "ml_model" - document_id => "ml_model_%{identifier}" - } - } - if [type] == "news" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "news" - document_id => "news_%{identifier}" - } - } - if [type] == "rm_news" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "news" - document_id => "news_%{identifier}" - } - } - if [type] == "organisation" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" - } - } - if [type] == "rm_organisation" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" - } - } - if [type] == "project" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "project" - document_id => "project_%{identifier}" - } - } - if [type] == "rm_project" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "project" - document_id => "project_%{identifier}" - } - } - if [type] == "publication" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" - } - } - if [type] == "rm_publication" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" - } - } - if [type] == "service" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" - } - } - if [type] == "rm_service" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" - } - } -} diff --git a/logstash/pipeline/sql/init_dataset.sql b/logstash/pipeline/sql/init_dataset.sql deleted file mode 100644 index bc05e4f8..00000000 --- a/logstash/pipeline/sql/init_dataset.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT dataset.identifier, dataset.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, issn -FROM aiod.dataset -INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier -WHERE aiod.dataset.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_event.sql b/logstash/pipeline/sql/init_event.sql deleted file mode 100644 index a86defc0..00000000 --- a/logstash/pipeline/sql/init_event.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT event.identifier, event.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.event -INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier -WHERE aiod.event.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_experiment.sql b/logstash/pipeline/sql/init_experiment.sql deleted file mode 100644 index ced51834..00000000 --- a/logstash/pipeline/sql/init_experiment.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT experiment.identifier, experiment.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.experiment -INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier -WHERE aiod.experiment.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql deleted file mode 100644 index 7f42db9c..00000000 --- a/logstash/pipeline/sql/init_ml_model.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT ml_model.identifier, ml_model.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.ml_model -INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier -WHERE aiod.ml_model.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql deleted file mode 100644 index 9b65ff1d..00000000 --- a/logstash/pipeline/sql/init_news.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, alternative_headline, headline -FROM aiod.news -INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier -WHERE aiod.news.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_organisation.sql b/logstash/pipeline/sql/init_organisation.sql deleted file mode 100644 index 8b8093ea..00000000 --- a/logstash/pipeline/sql/init_organisation.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT organisation.identifier, organisation.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, legal_name -FROM aiod.organisation -INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier -WHERE aiod.organisation.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_project.sql b/logstash/pipeline/sql/init_project.sql deleted file mode 100644 index 40acfd85..00000000 --- a/logstash/pipeline/sql/init_project.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT project.identifier, project.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.project -INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier -WHERE aiod.project.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_publication.sql b/logstash/pipeline/sql/init_publication.sql deleted file mode 100644 index 900793f5..00000000 --- a/logstash/pipeline/sql/init_publication.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT publication.identifier, publication.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, isbn, issn -FROM aiod.publication -INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier -WHERE aiod.publication.date_deleted IS NULL diff --git a/logstash/pipeline/sql/init_service.sql b/logstash/pipeline/sql/init_service.sql deleted file mode 100644 index 19c1dc6e..00000000 --- a/logstash/pipeline/sql/init_service.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT service.identifier, service.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, slogan -FROM aiod.service -INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier -WHERE aiod.service.date_deleted IS NULL diff --git a/logstash/pipeline/sql/rm_dataset.sql b/logstash/pipeline/sql/rm_dataset.sql deleted file mode 100644 index 8967362b..00000000 --- a/logstash/pipeline/sql/rm_dataset.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT dataset.identifier -FROM aiod.dataset -WHERE aiod.dataset.date_deleted IS NOT NULL AND aiod.dataset.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_event.sql b/logstash/pipeline/sql/rm_event.sql deleted file mode 100644 index 82e29c2e..00000000 --- a/logstash/pipeline/sql/rm_event.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT event.identifier -FROM aiod.event -WHERE aiod.event.date_deleted IS NOT NULL AND aiod.event.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_experiment.sql b/logstash/pipeline/sql/rm_experiment.sql deleted file mode 100644 index 49492e0a..00000000 --- a/logstash/pipeline/sql/rm_experiment.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT experiment.identifier -FROM aiod.experiment -WHERE aiod.experiment.date_deleted IS NOT NULL AND aiod.experiment.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_ml_model.sql b/logstash/pipeline/sql/rm_ml_model.sql deleted file mode 100644 index 60083eee..00000000 --- a/logstash/pipeline/sql/rm_ml_model.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT ml_model.identifier -FROM aiod.ml_model -WHERE aiod.ml_model.date_deleted IS NOT NULL AND aiod.ml_model.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_news.sql b/logstash/pipeline/sql/rm_news.sql deleted file mode 100644 index 00e821c9..00000000 --- a/logstash/pipeline/sql/rm_news.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT news.identifier -FROM aiod.news -WHERE aiod.news.date_deleted IS NOT NULL AND aiod.news.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_organisation.sql b/logstash/pipeline/sql/rm_organisation.sql deleted file mode 100644 index f48384c4..00000000 --- a/logstash/pipeline/sql/rm_organisation.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT organisation.identifier -FROM aiod.organisation -WHERE aiod.organisation.date_deleted IS NOT NULL AND aiod.organisation.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_project.sql b/logstash/pipeline/sql/rm_project.sql deleted file mode 100644 index 114a4afc..00000000 --- a/logstash/pipeline/sql/rm_project.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT project.identifier -FROM aiod.project -WHERE aiod.project.date_deleted IS NOT NULL AND aiod.project.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_publication.sql b/logstash/pipeline/sql/rm_publication.sql deleted file mode 100644 index 2430d33a..00000000 --- a/logstash/pipeline/sql/rm_publication.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT publication.identifier -FROM aiod.publication -WHERE aiod.publication.date_deleted IS NOT NULL AND aiod.publication.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/rm_service.sql b/logstash/pipeline/sql/rm_service.sql deleted file mode 100644 index 4aa10aa3..00000000 --- a/logstash/pipeline/sql/rm_service.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT service.identifier -FROM aiod.service -WHERE aiod.service.date_deleted IS NOT NULL AND aiod.service.date_deleted > :sql_last_value diff --git a/logstash/pipeline/sql/sync_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql deleted file mode 100644 index f9098c01..00000000 --- a/logstash/pipeline/sql/sync_dataset.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT dataset.identifier, dataset.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, issn -FROM aiod.dataset -INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier -WHERE aiod.dataset.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_event.sql b/logstash/pipeline/sql/sync_event.sql deleted file mode 100644 index 192cd2e9..00000000 --- a/logstash/pipeline/sql/sync_event.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT event.identifier, event.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.event -INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier -WHERE aiod.event.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql deleted file mode 100644 index 32adf2a0..00000000 --- a/logstash/pipeline/sql/sync_experiment.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT experiment.identifier, experiment.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.experiment -INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier -WHERE aiod.experiment.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql deleted file mode 100644 index c4224895..00000000 --- a/logstash/pipeline/sql/sync_ml_model.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT ml_model.identifier, ml_model.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.ml_model -INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier -WHERE aiod.ml_model.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql deleted file mode 100644 index 80a75410..00000000 --- a/logstash/pipeline/sql/sync_news.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, alternative_headline, headline -FROM aiod.news -INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier -WHERE aiod.news.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql deleted file mode 100644 index 9c058cfa..00000000 --- a/logstash/pipeline/sql/sync_organisation.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT organisation.identifier, organisation.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, legal_name -FROM aiod.organisation -INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier -WHERE aiod.organisation.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_project.sql b/logstash/pipeline/sql/sync_project.sql deleted file mode 100644 index 81d0c95a..00000000 --- a/logstash/pipeline/sql/sync_project.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT project.identifier, project.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.project -INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier -WHERE aiod.project.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_publication.sql b/logstash/pipeline/sql/sync_publication.sql deleted file mode 100644 index 48577b9c..00000000 --- a/logstash/pipeline/sql/sync_publication.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT publication.identifier, publication.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, isbn, issn -FROM aiod.publication -INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier -WHERE aiod.publication.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value diff --git a/logstash/pipeline/sql/sync_service.sql b/logstash/pipeline/sql/sync_service.sql deleted file mode 100644 index 2566c0ae..00000000 --- a/logstash/pipeline/sql/sync_service.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config.py` --- file, placed in `src/setup/logstash` --- ------------------------------------------------------------- -SELECT service.identifier, service.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified, slogan -FROM aiod.service -INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier -WHERE aiod.service.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value From 7e0861afcce8d2961bfc547f407d9e9338ab258e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 20 Nov 2023 18:22:25 +0100 Subject: [PATCH 58/79] clean logstash configuration --- src/setup/logstash/definitions.py | 119 ++++++++++++++++ .../generate_logstash_config_files.py | 133 +----------------- 2 files changed, 120 insertions(+), 132 deletions(-) create mode 100755 src/setup/logstash/definitions.py diff --git a/src/setup/logstash/definitions.py b/src/setup/logstash/definitions.py new file mode 100755 index 00000000..24c1c952 --- /dev/null +++ b/src/setup/logstash/definitions.py @@ -0,0 +1,119 @@ +BASE_FIELDS = [ + "{0}.identifier", + "{0}.name", + "text.plain as 'plain'", + "text.html as 'html'", + "aiod_entry.date_modified", +] + +INFO = """{0} This file has been generated by `generate_logstash_config.py` +{0} file, placed in `src/setup/logstash` +{0} ------------------------------------------------------------- +""" + +CONF_BASE = """http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] +xpack.monitoring.enabled: true +xpack.monitoring.elasticsearch.username: {0} +xpack.monitoring.elasticsearch.password: {1} +""" + +INIT_INPUT_BASE = """ jdbc {{ + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{0}" + jdbc_password => "{1}" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_{2}.sql" + type => "{2}" + }} +""" + +SYNC_INPUT_BASE = """ jdbc {{ + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{0}" + jdbc_password => "{1}" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_{2}.sql" + type => "{2}" + }} + jdbc {{ + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{0}" + jdbc_password => "{1}" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_{2}.sql" + type => "rm_{2}" + }} +""" + +FILTER = """filter { + mutate { + remove_field => ["@version", "@timestamp"] + } +} +""" + +INIT_OUTPUT_BASE = """ if [type] == "{2}" {{ + elasticsearch {{ + hosts => "elasticsearch:9200" + user => "{0}" + password => "{1}" + ecs_compatibility => disabled + index => "{2}" + document_id => "{2}_%{{identifier}}" + }} + }} +""" + +SYNC_OUTPUT_BASE = """ if [type] == "{2}" {{ + elasticsearch {{ + hosts => "elasticsearch:9200" + user => "{0}" + password => "{1}" + ecs_compatibility => disabled + index => "{2}" + document_id => "{2}_%{{identifier}}" + }} + }} + if [type] == "rm_{2}" {{ + elasticsearch {{ + action => "delete" + hosts => "elasticsearch:9200" + user => "{0}" + password => "{1}" + ecs_compatibility => disabled + index => "{2}" + document_id => "{2}_%{{identifier}}" + }} + }} +""" + +SQL_BASE = """SELECT {1} +FROM aiod.{0} +INNER JOIN aiod.aiod_entry ON aiod.{0}.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.{0}.description_identifier=aiod.text.identifier{2} +""" + +SQL_RM_BASE = """SELECT {0}.identifier +FROM aiod.{0} +WHERE aiod.{0}.date_deleted IS NOT NULL AND aiod.{0}.date_deleted > :sql_last_value +""" + +INIT_CLAUSE = """ +WHERE aiod.{0}.date_deleted IS NULL""" + +SYNC_CLAUSE = """ +WHERE aiod.{0}.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" diff --git a/src/setup/logstash/generate_logstash_config_files.py b/src/setup/logstash/generate_logstash_config_files.py index 66a857f7..cf86d623 100755 --- a/src/setup/logstash/generate_logstash_config_files.py +++ b/src/setup/logstash/generate_logstash_config_files.py @@ -12,135 +12,9 @@ import os +import definitions from routers.search_routers import router_list -# MACROS FOR THE DOCUMENTS GENERATION FUNCTIONS -# ============================================================================= - -BASE_FIELDS = [ - "{0}.identifier", - "{0}.name", - "text.plain as 'plain'", - "text.html as 'html'", - "aiod_entry.date_modified", -] - -INFO = """{0} This file has been generated by `generate_logstash_config.py` -{0} file, placed in `src/setup/logstash` -{0} ------------------------------------------------------------- -""" - -CONF_BASE = """http.host: "0.0.0.0" -xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] -xpack.monitoring.enabled: true -xpack.monitoring.elasticsearch.username: {0} -xpack.monitoring.elasticsearch.password: {1} -""" - -INIT_INPUT_BASE = """ jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "{0}" - jdbc_password => "{1}" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_{2}.sql" - type => "{2}" - }} -""" - -SYNC_INPUT_BASE = """ jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "{0}" - jdbc_password => "{1}" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_{2}.sql" - type => "{2}" - }} - jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "{0}" - jdbc_password => "{1}" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_{2}.sql" - type => "rm_{2}" - }} -""" - -FILTER = """filter { - mutate { - remove_field => ["@version", "@timestamp"] - } -} -""" - -INIT_OUTPUT_BASE = """ if [type] == "{2}" {{ - elasticsearch {{ - hosts => "elasticsearch:9200" - user => "{0}" - password => "{1}" - ecs_compatibility => disabled - index => "{2}" - document_id => "{2}_%{{identifier}}" - }} - }} -""" - -SYNC_OUTPUT_BASE = """ if [type] == "{2}" {{ - elasticsearch {{ - hosts => "elasticsearch:9200" - user => "{0}" - password => "{1}" - ecs_compatibility => disabled - index => "{2}" - document_id => "{2}_%{{identifier}}" - }} - }} - if [type] == "rm_{2}" {{ - elasticsearch {{ - action => "delete" - hosts => "elasticsearch:9200" - user => "{0}" - password => "{1}" - ecs_compatibility => disabled - index => "{2}" - document_id => "{2}_%{{identifier}}" - }} - }} -""" - -SQL_BASE = """SELECT {1} -FROM aiod.{0} -INNER JOIN aiod.aiod_entry ON aiod.{0}.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.{0}.description_identifier=aiod.text.identifier{2} -""" - -SQL_RM_BASE = """SELECT {0}.identifier -FROM aiod.{0} -WHERE aiod.{0}.date_deleted IS NOT NULL AND aiod.{0}.date_deleted > :sql_last_value -""" - -INIT_CLAUSE = """ -WHERE aiod.{0}.date_deleted IS NULL""" - -SYNC_CLAUSE = """ -WHERE aiod.{0}.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" - -# DOCUMENTS GENERATION FUNCTIONS -# ============================================================================= - - def generate_conf_file(conf_path, es_user, es_pass): file_path = os.path.join(conf_path, "logstash.yml") @@ -230,11 +104,6 @@ def generate_pipeline_sql_rm_files(pipeline_sql_path, entity): # SQL query f.write(SQL_RM_BASE.format(entity)) - -# MAIN FUNCTION -# ============================================================================= - - def main(): # Get configuration variables From 649325d79960012e4a2bfed85e3558995050e663 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 20 Nov 2023 18:42:07 +0100 Subject: [PATCH 59/79] clean logstash configuration --- src/setup/logstash/__init__.py | 0 .../logstash/generate_logstash_config_files.py | 17 ++++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 src/setup/logstash/__init__.py diff --git a/src/setup/logstash/__init__.py b/src/setup/logstash/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/setup/logstash/generate_logstash_config_files.py b/src/setup/logstash/generate_logstash_config_files.py index cf86d623..f32588d2 100755 --- a/src/setup/logstash/generate_logstash_config_files.py +++ b/src/setup/logstash/generate_logstash_config_files.py @@ -12,8 +12,22 @@ import os -import definitions from routers.search_routers import router_list +from .definitions import ( + BASE_FIELDS, + INFO, + CONF_BASE, + INIT_INPUT_BASE, + SYNC_INPUT_BASE, + FILTER, + INIT_OUTPUT_BASE, + SYNC_OUTPUT_BASE, + SQL_BASE, + SQL_RM_BASE, + INIT_CLAUSE, + SYNC_CLAUSE, +) + def generate_conf_file(conf_path, es_user, es_pass): @@ -104,6 +118,7 @@ def generate_pipeline_sql_rm_files(pipeline_sql_path, entity): # SQL query f.write(SQL_RM_BASE.format(entity)) + def main(): # Get configuration variables From f4739e9880ac34da3a4dc4cab7a17964d05451f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 20 Nov 2023 18:45:05 +0100 Subject: [PATCH 60/79] clean logstash configuration --- src/setup/{elasticsearch => es}/generate_elasticsearch_indices.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/setup/{elasticsearch => es}/generate_elasticsearch_indices.py (100%) diff --git a/src/setup/elasticsearch/generate_elasticsearch_indices.py b/src/setup/es/generate_elasticsearch_indices.py similarity index 100% rename from src/setup/elasticsearch/generate_elasticsearch_indices.py rename to src/setup/es/generate_elasticsearch_indices.py From 985949d3aee90e7ae57eca9134e0fcafbf4a37e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 20 Nov 2023 18:52:44 +0100 Subject: [PATCH 61/79] clean logstash configuration --- docker-compose.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index 10c1beef..95c6e125 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -204,7 +204,7 @@ services: - ./logstash:/logstash command: > /bin/bash -c "python setup/logstash/generate_logstash_config_files.py && - python setup/elasticsearch/generate_elasticsearch_indices.py" + python setup/es/generate_elasticsearch_indices.py" restart: "no" depends_on: elasticsearch: From ae2ac9e05724d1d614ab043b6c3c1e6ada82c5b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Mon, 20 Nov 2023 19:26:47 +0100 Subject: [PATCH 62/79] clean logstash configuration --- src/setup/logstash/generate_logstash_config_files.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/setup/logstash/generate_logstash_config_files.py b/src/setup/logstash/generate_logstash_config_files.py index f32588d2..397a624f 100755 --- a/src/setup/logstash/generate_logstash_config_files.py +++ b/src/setup/logstash/generate_logstash_config_files.py @@ -13,7 +13,7 @@ import os from routers.search_routers import router_list -from .definitions import ( +from definitions import ( BASE_FIELDS, INFO, CONF_BASE, From cc8c22f291930b0940c82664e3ce1e91d5ff5509 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Fri, 24 Nov 2023 01:16:23 +0100 Subject: [PATCH 63/79] logstash config files generated with jinja2 --- data/deletion/.gitkeep | 0 data/mysql/.gitkeep | 0 docker-compose.yaml | 4 +- logstash/config/logstash.yml | 9 +- logstash/pipeline/conf/init_table.conf | 222 +++++++++ logstash/pipeline/conf/sync_table.conf | 456 ++++++++++++++++++ logstash/pipeline/sql/init_dataset.sql | 8 + logstash/pipeline/sql/init_event.sql | 8 + logstash/pipeline/sql/init_experiment.sql | 8 + logstash/pipeline/sql/init_ml_model.sql | 8 + logstash/pipeline/sql/init_news.sql | 8 + logstash/pipeline/sql/init_organisation.sql | 8 + logstash/pipeline/sql/init_project.sql | 8 + logstash/pipeline/sql/init_publication.sql | 8 + logstash/pipeline/sql/init_service.sql | 8 + logstash/pipeline/sql/rm_dataset.sql | 6 + logstash/pipeline/sql/rm_event.sql | 6 + logstash/pipeline/sql/rm_experiment.sql | 6 + logstash/pipeline/sql/rm_ml_model.sql | 6 + logstash/pipeline/sql/rm_news.sql | 6 + logstash/pipeline/sql/rm_organisation.sql | 6 + logstash/pipeline/sql/rm_project.sql | 6 + logstash/pipeline/sql/rm_publication.sql | 6 + logstash/pipeline/sql/rm_service.sql | 6 + logstash/pipeline/sql/sync_dataset.sql | 8 + logstash/pipeline/sql/sync_event.sql | 8 + logstash/pipeline/sql/sync_experiment.sql | 8 + logstash/pipeline/sql/sync_ml_model.sql | 8 + logstash/pipeline/sql/sync_news.sql | 8 + logstash/pipeline/sql/sync_organisation.sql | 8 + logstash/pipeline/sql/sync_project.sql | 8 + logstash/pipeline/sql/sync_publication.sql | 8 + logstash/pipeline/sql/sync_service.sql | 8 + pyproject.toml | 1 + src/setup/{logstash => es_setup}/__init__.py | 0 src/setup/es_setup/definitions.py | 11 + .../generate_elasticsearch_indices.py | 20 +- .../generate_logstash_config_files.py | 167 ------- .../setup/logstash_setup/__init__.py | 0 .../logstash_setup/config_file_template.py | 6 + .../definitions.py | 2 +- .../logstash_setup/file_generated_comment.py | 5 + .../generate_logstash_config_files.py | 81 ++++ .../pipeline_config_init_file_template.py | 36 ++ .../pipeline_config_sync_file_template.py | 62 +++ .../pipeline_sql_init_file_template.py | 5 + .../pipeline_sql_rm_file_template.py | 4 + .../pipeline_sql_sync_file_template.py | 5 + 48 files changed, 1104 insertions(+), 190 deletions(-) delete mode 100644 data/deletion/.gitkeep delete mode 100644 data/mysql/.gitkeep create mode 100644 logstash/pipeline/conf/init_table.conf create mode 100644 logstash/pipeline/conf/sync_table.conf create mode 100644 logstash/pipeline/sql/init_dataset.sql create mode 100644 logstash/pipeline/sql/init_event.sql create mode 100644 logstash/pipeline/sql/init_experiment.sql create mode 100644 logstash/pipeline/sql/init_ml_model.sql create mode 100644 logstash/pipeline/sql/init_news.sql create mode 100644 logstash/pipeline/sql/init_organisation.sql create mode 100644 logstash/pipeline/sql/init_project.sql create mode 100644 logstash/pipeline/sql/init_publication.sql create mode 100644 logstash/pipeline/sql/init_service.sql create mode 100644 logstash/pipeline/sql/rm_dataset.sql create mode 100644 logstash/pipeline/sql/rm_event.sql create mode 100644 logstash/pipeline/sql/rm_experiment.sql create mode 100644 logstash/pipeline/sql/rm_ml_model.sql create mode 100644 logstash/pipeline/sql/rm_news.sql create mode 100644 logstash/pipeline/sql/rm_organisation.sql create mode 100644 logstash/pipeline/sql/rm_project.sql create mode 100644 logstash/pipeline/sql/rm_publication.sql create mode 100644 logstash/pipeline/sql/rm_service.sql create mode 100644 logstash/pipeline/sql/sync_dataset.sql create mode 100644 logstash/pipeline/sql/sync_event.sql create mode 100644 logstash/pipeline/sql/sync_experiment.sql create mode 100644 logstash/pipeline/sql/sync_ml_model.sql create mode 100644 logstash/pipeline/sql/sync_news.sql create mode 100644 logstash/pipeline/sql/sync_organisation.sql create mode 100644 logstash/pipeline/sql/sync_project.sql create mode 100644 logstash/pipeline/sql/sync_publication.sql create mode 100644 logstash/pipeline/sql/sync_service.sql rename src/setup/{logstash => es_setup}/__init__.py (100%) create mode 100755 src/setup/es_setup/definitions.py rename src/setup/{es => es_setup}/generate_elasticsearch_indices.py (71%) delete mode 100755 src/setup/logstash/generate_logstash_config_files.py rename data/connectors/.gitkeep => src/setup/logstash_setup/__init__.py (100%) create mode 100755 src/setup/logstash_setup/config_file_template.py rename src/setup/{logstash => logstash_setup}/definitions.py (97%) create mode 100755 src/setup/logstash_setup/file_generated_comment.py create mode 100755 src/setup/logstash_setup/generate_logstash_config_files.py create mode 100755 src/setup/logstash_setup/pipeline_config_init_file_template.py create mode 100755 src/setup/logstash_setup/pipeline_config_sync_file_template.py create mode 100755 src/setup/logstash_setup/pipeline_sql_init_file_template.py create mode 100755 src/setup/logstash_setup/pipeline_sql_rm_file_template.py create mode 100755 src/setup/logstash_setup/pipeline_sql_sync_file_template.py diff --git a/data/deletion/.gitkeep b/data/deletion/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/data/mysql/.gitkeep b/data/mysql/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/docker-compose.yaml b/docker-compose.yaml index 95c6e125..8c84d372 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -203,8 +203,8 @@ services: - ./src:/app - ./logstash:/logstash command: > - /bin/bash -c "python setup/logstash/generate_logstash_config_files.py && - python setup/es/generate_elasticsearch_indices.py" + /bin/bash -c "python setup/logstash_setup/generate_logstash_config_files.py && + python setup/es_setup/generate_elasticsearch_indices.py" restart: "no" depends_on: elasticsearch: diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml index 8b137891..4d6ca384 100644 --- a/logstash/config/logstash.yml +++ b/logstash/config/logstash.yml @@ -1 +1,8 @@ - +# This file has been generated by `generate_logstash_config_files.py` +# file, placed in `src/setup/logstash_setup` +# ------------------------------------------------------------------- +http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] +xpack.monitoring.enabled: true +xpack.monitoring.elasticsearch.username: elastic +xpack.monitoring.elasticsearch.password: changeme \ No newline at end of file diff --git a/logstash/pipeline/conf/init_table.conf b/logstash/pipeline/conf/init_table.conf new file mode 100644 index 00000000..cddf486c --- /dev/null +++ b/logstash/pipeline/conf/init_table.conf @@ -0,0 +1,222 @@ +# This file has been generated by `generate_logstash_config_files.py` +# file, placed in `src/setup/logstash_setup` +# ------------------------------------------------------------------- + +input { + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_dataset.sql" + type => "dataset" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_event.sql" + type => "event" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_experiment.sql" + type => "experiment" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_ml_model.sql" + type => "ml_model" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_news.sql" + type => "news" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_organisation.sql" + type => "organisation" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_project.sql" + type => "project" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_publication.sql" + type => "publication" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_service.sql" + type => "service" + } + +} +filter { + mutate { + remove_field => ["@version", "@timestamp"] + } +} +output { + + if [type] == "dataset" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "dataset" + document_id => "dataset_%{identifier}" + } + } + + if [type] == "event" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "event" + document_id => "event_%{identifier}" + } + } + + if [type] == "experiment" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "experiment" + document_id => "experiment_%{identifier}" + } + } + + if [type] == "ml_model" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "ml_model" + document_id => "ml_model_%{identifier}" + } + } + + if [type] == "news" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "news" + document_id => "news_%{identifier}" + } + } + + if [type] == "organisation" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "organisation" + document_id => "organisation_%{identifier}" + } + } + + if [type] == "project" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "project" + document_id => "project_%{identifier}" + } + } + + if [type] == "publication" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "publication" + document_id => "publication_%{identifier}" + } + } + + if [type] == "service" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "service" + document_id => "service_%{identifier}" + } + } + +} \ No newline at end of file diff --git a/logstash/pipeline/conf/sync_table.conf b/logstash/pipeline/conf/sync_table.conf new file mode 100644 index 00000000..b45a4309 --- /dev/null +++ b/logstash/pipeline/conf/sync_table.conf @@ -0,0 +1,456 @@ +# This file has been generated by `generate_logstash_config_files.py` +# file, placed in `src/setup/logstash_setup` +# ------------------------------------------------------------------- + +input { + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_dataset.sql" + type => "dataset" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_dataset.sql" + type => "rm_dataset" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_event.sql" + type => "event" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_event.sql" + type => "rm_event" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_experiment.sql" + type => "experiment" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_experiment.sql" + type => "rm_experiment" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_ml_model.sql" + type => "ml_model" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_ml_model.sql" + type => "rm_ml_model" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_news.sql" + type => "news" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_news.sql" + type => "rm_news" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_organisation.sql" + type => "organisation" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_organisation.sql" + type => "rm_organisation" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_project.sql" + type => "project" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_project.sql" + type => "rm_project" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_publication.sql" + type => "publication" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_publication.sql" + type => "rm_publication" + } + + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_service.sql" + type => "service" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "root" + jdbc_password => "ok" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_service.sql" + type => "rm_service" + } + +} +filter { + mutate { + remove_field => ["@version", "@timestamp"] + } +} +output { + + if [type] == "dataset" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "dataset" + document_id => "dataset_%{identifier}" + } + } + if [type] == "rm_dataset" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "dataset" + document_id => "dataset_%{identifier}" + } + } + + if [type] == "event" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "event" + document_id => "event_%{identifier}" + } + } + if [type] == "rm_event" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "event" + document_id => "event_%{identifier}" + } + } + + if [type] == "experiment" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "experiment" + document_id => "experiment_%{identifier}" + } + } + if [type] == "rm_experiment" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "experiment" + document_id => "experiment_%{identifier}" + } + } + + if [type] == "ml_model" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "ml_model" + document_id => "ml_model_%{identifier}" + } + } + if [type] == "rm_ml_model" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "ml_model" + document_id => "ml_model_%{identifier}" + } + } + + if [type] == "news" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "news" + document_id => "news_%{identifier}" + } + } + if [type] == "rm_news" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "news" + document_id => "news_%{identifier}" + } + } + + if [type] == "organisation" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "organisation" + document_id => "organisation_%{identifier}" + } + } + if [type] == "rm_organisation" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "organisation" + document_id => "organisation_%{identifier}" + } + } + + if [type] == "project" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "project" + document_id => "project_%{identifier}" + } + } + if [type] == "rm_project" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "project" + document_id => "project_%{identifier}" + } + } + + if [type] == "publication" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "publication" + document_id => "publication_%{identifier}" + } + } + if [type] == "rm_publication" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "publication" + document_id => "publication_%{identifier}" + } + } + + if [type] == "service" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "service" + document_id => "service_%{identifier}" + } + } + if [type] == "rm_service" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "elastic" + password => "changeme" + ecs_compatibility => disabled + index => "service" + document_id => "service_%{identifier}" + } + } + +} \ No newline at end of file diff --git a/logstash/pipeline/sql/init_dataset.sql b/logstash/pipeline/sql/init_dataset.sql new file mode 100644 index 00000000..303c3857 --- /dev/null +++ b/logstash/pipeline/sql/init_dataset.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT dataset.identifier, dataset.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , issn +FROM aiod.dataset +INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier +WHERE aiod.dataset.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_event.sql b/logstash/pipeline/sql/init_event.sql new file mode 100644 index 00000000..4a740ee9 --- /dev/null +++ b/logstash/pipeline/sql/init_event.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT event.identifier, event.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.event +INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier +WHERE aiod.event.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_experiment.sql b/logstash/pipeline/sql/init_experiment.sql new file mode 100644 index 00000000..32f3749b --- /dev/null +++ b/logstash/pipeline/sql/init_experiment.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT experiment.identifier, experiment.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.experiment +INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier +WHERE aiod.experiment.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql new file mode 100644 index 00000000..dcdf87b2 --- /dev/null +++ b/logstash/pipeline/sql/init_ml_model.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT ml_model.identifier, ml_model.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.ml_model +INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier +WHERE aiod.ml_model.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql new file mode 100644 index 00000000..7c4593ba --- /dev/null +++ b/logstash/pipeline/sql/init_news.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , alternative_headline, headline +FROM aiod.news +INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier +WHERE aiod.news.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_organisation.sql b/logstash/pipeline/sql/init_organisation.sql new file mode 100644 index 00000000..7fd1af3f --- /dev/null +++ b/logstash/pipeline/sql/init_organisation.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT organisation.identifier, organisation.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , legal_name +FROM aiod.organisation +INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier +WHERE aiod.organisation.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_project.sql b/logstash/pipeline/sql/init_project.sql new file mode 100644 index 00000000..d97397fa --- /dev/null +++ b/logstash/pipeline/sql/init_project.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT project.identifier, project.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.project +INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier +WHERE aiod.project.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_publication.sql b/logstash/pipeline/sql/init_publication.sql new file mode 100644 index 00000000..31928c40 --- /dev/null +++ b/logstash/pipeline/sql/init_publication.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT publication.identifier, publication.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , isbn, issn +FROM aiod.publication +INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier +WHERE aiod.publication.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_service.sql b/logstash/pipeline/sql/init_service.sql new file mode 100644 index 00000000..c0d84d40 --- /dev/null +++ b/logstash/pipeline/sql/init_service.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT service.identifier, service.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , slogan +FROM aiod.service +INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier +WHERE aiod.service.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_dataset.sql b/logstash/pipeline/sql/rm_dataset.sql new file mode 100644 index 00000000..e63316e4 --- /dev/null +++ b/logstash/pipeline/sql/rm_dataset.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT dataset.identifier +FROM aiod.dataset +WHERE aiod.dataset.date_deleted IS NOT NULL AND aiod.dataset.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_event.sql b/logstash/pipeline/sql/rm_event.sql new file mode 100644 index 00000000..07abd73e --- /dev/null +++ b/logstash/pipeline/sql/rm_event.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT event.identifier +FROM aiod.event +WHERE aiod.event.date_deleted IS NOT NULL AND aiod.event.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_experiment.sql b/logstash/pipeline/sql/rm_experiment.sql new file mode 100644 index 00000000..f38c1c67 --- /dev/null +++ b/logstash/pipeline/sql/rm_experiment.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT experiment.identifier +FROM aiod.experiment +WHERE aiod.experiment.date_deleted IS NOT NULL AND aiod.experiment.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_ml_model.sql b/logstash/pipeline/sql/rm_ml_model.sql new file mode 100644 index 00000000..0fcc53f8 --- /dev/null +++ b/logstash/pipeline/sql/rm_ml_model.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT ml_model.identifier +FROM aiod.ml_model +WHERE aiod.ml_model.date_deleted IS NOT NULL AND aiod.ml_model.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_news.sql b/logstash/pipeline/sql/rm_news.sql new file mode 100644 index 00000000..fab72f00 --- /dev/null +++ b/logstash/pipeline/sql/rm_news.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT news.identifier +FROM aiod.news +WHERE aiod.news.date_deleted IS NOT NULL AND aiod.news.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_organisation.sql b/logstash/pipeline/sql/rm_organisation.sql new file mode 100644 index 00000000..48a5c3dc --- /dev/null +++ b/logstash/pipeline/sql/rm_organisation.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT organisation.identifier +FROM aiod.organisation +WHERE aiod.organisation.date_deleted IS NOT NULL AND aiod.organisation.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_project.sql b/logstash/pipeline/sql/rm_project.sql new file mode 100644 index 00000000..eca4e43d --- /dev/null +++ b/logstash/pipeline/sql/rm_project.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT project.identifier +FROM aiod.project +WHERE aiod.project.date_deleted IS NOT NULL AND aiod.project.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_publication.sql b/logstash/pipeline/sql/rm_publication.sql new file mode 100644 index 00000000..19c50766 --- /dev/null +++ b/logstash/pipeline/sql/rm_publication.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT publication.identifier +FROM aiod.publication +WHERE aiod.publication.date_deleted IS NOT NULL AND aiod.publication.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_service.sql b/logstash/pipeline/sql/rm_service.sql new file mode 100644 index 00000000..a256e688 --- /dev/null +++ b/logstash/pipeline/sql/rm_service.sql @@ -0,0 +1,6 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT service.identifier +FROM aiod.service +WHERE aiod.service.date_deleted IS NOT NULL AND aiod.service.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql new file mode 100644 index 00000000..2b4dd149 --- /dev/null +++ b/logstash/pipeline/sql/sync_dataset.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT dataset.identifier, dataset.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , issn +FROM aiod.dataset +INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier +WHERE aiod.dataset.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_event.sql b/logstash/pipeline/sql/sync_event.sql new file mode 100644 index 00000000..1a67dd70 --- /dev/null +++ b/logstash/pipeline/sql/sync_event.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT event.identifier, event.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.event +INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier +WHERE aiod.event.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql new file mode 100644 index 00000000..69f8a228 --- /dev/null +++ b/logstash/pipeline/sql/sync_experiment.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT experiment.identifier, experiment.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.experiment +INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier +WHERE aiod.experiment.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql new file mode 100644 index 00000000..dd88935b --- /dev/null +++ b/logstash/pipeline/sql/sync_ml_model.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT ml_model.identifier, ml_model.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.ml_model +INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier +WHERE aiod.ml_model.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql new file mode 100644 index 00000000..65f6ffaf --- /dev/null +++ b/logstash/pipeline/sql/sync_news.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , alternative_headline, headline +FROM aiod.news +INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier +WHERE aiod.news.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql new file mode 100644 index 00000000..aa7886ca --- /dev/null +++ b/logstash/pipeline/sql/sync_organisation.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT organisation.identifier, organisation.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , legal_name +FROM aiod.organisation +INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier +WHERE aiod.organisation.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_project.sql b/logstash/pipeline/sql/sync_project.sql new file mode 100644 index 00000000..42828608 --- /dev/null +++ b/logstash/pipeline/sql/sync_project.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT project.identifier, project.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified +FROM aiod.project +INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier +WHERE aiod.project.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_publication.sql b/logstash/pipeline/sql/sync_publication.sql new file mode 100644 index 00000000..2592886c --- /dev/null +++ b/logstash/pipeline/sql/sync_publication.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT publication.identifier, publication.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , isbn, issn +FROM aiod.publication +INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier +WHERE aiod.publication.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_service.sql b/logstash/pipeline/sql/sync_service.sql new file mode 100644 index 00000000..0839b31d --- /dev/null +++ b/logstash/pipeline/sql/sync_service.sql @@ -0,0 +1,8 @@ +-- This file has been generated by `generate_logstash_config_files.py` +-- file, placed in `src/setup/logstash_setup` +-- ------------------------------------------------------------------- +SELECT service.identifier, service.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , slogan +FROM aiod.service +INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier +WHERE aiod.service.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 491d1bd4..aea7e1e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,7 @@ dependencies = [ "python-multipart==0.0.6", "mysql-connector-python==8.2.0", "elasticsearch==8.10.1", + "jinja2==3.1.2", ] readme = "README.md" diff --git a/src/setup/logstash/__init__.py b/src/setup/es_setup/__init__.py similarity index 100% rename from src/setup/logstash/__init__.py rename to src/setup/es_setup/__init__.py diff --git a/src/setup/es_setup/definitions.py b/src/setup/es_setup/definitions.py new file mode 100755 index 00000000..c8afeaa1 --- /dev/null +++ b/src/setup/es_setup/definitions.py @@ -0,0 +1,11 @@ +BASE_MAPPING = { + "mappings": { + "properties": { + "date_modified": {"type": "date"}, + "identifier": {"type": "long"}, + "name": {"type": "text", "fields": {"keyword": {"type": "keyword"}}}, + "plain": {"type": "text", "fields": {"keyword": {"type": "keyword"}}}, + "html": {"type": "text", "fields": {"keyword": {"type": "keyword"}}}, + } + } +} diff --git a/src/setup/es/generate_elasticsearch_indices.py b/src/setup/es_setup/generate_elasticsearch_indices.py similarity index 71% rename from src/setup/es/generate_elasticsearch_indices.py rename to src/setup/es_setup/generate_elasticsearch_indices.py index 87549a67..181df10b 100755 --- a/src/setup/es/generate_elasticsearch_indices.py +++ b/src/setup/es_setup/generate_elasticsearch_indices.py @@ -11,19 +11,7 @@ from elasticsearch import Elasticsearch from routers.search_routers import router_list - -BASE_MAPPING = { - "mappings": { - "properties": { - "date_modified": {"type": "date"}, - "identifier": {"type": "long"}, - "name": {"type": "text", "fields": {"keyword": {"type": "keyword"}}}, - "plain": {"type": "text", "fields": {"keyword": {"type": "keyword"}}}, - "html": {"type": "text", "fields": {"keyword": {"type": "keyword"}}}, - } - } -} - +from definitions import BASE_MAPPING def add_field(base_mapping, field): new_mapping = copy.deepcopy(base_mapping) @@ -42,20 +30,14 @@ def generate_mapping(entity, fields): def main(): - - # Generate client es_user = os.environ["ES_USER"] es_password = os.environ["ES_PASSWORD"] es_client = Elasticsearch("http://elasticsearch:9200", basic_auth=(es_user, es_password)) - - # Search for entities and their extra fields global_fields = set(["name", "plain", "html"]) entities = {} for router in router_list: extra_fields = list(router.match_fields ^ global_fields) entities[router.es_index] = extra_fields - - # Add indices with mappings for entity, fields in entities.items(): mapping = generate_mapping(entity, fields) es_client.indices.create(index=entity, body=mapping, ignore=400) diff --git a/src/setup/logstash/generate_logstash_config_files.py b/src/setup/logstash/generate_logstash_config_files.py deleted file mode 100755 index 397a624f..00000000 --- a/src/setup/logstash/generate_logstash_config_files.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -"""Generates the logstash configuration and pipelines files - -This file generates the logstash configuration file in logstash/config, the -pipelines configuration files in logstash/pipelines/conf and the pipelines -sql sentences in logstash/pipelines/sql. - -Launched by the es_logstash_setup container in the docker-compose file. -""" - -import os - -from routers.search_routers import router_list -from definitions import ( - BASE_FIELDS, - INFO, - CONF_BASE, - INIT_INPUT_BASE, - SYNC_INPUT_BASE, - FILTER, - INIT_OUTPUT_BASE, - SYNC_OUTPUT_BASE, - SQL_BASE, - SQL_RM_BASE, - INIT_CLAUSE, - SYNC_CLAUSE, -) - - -def generate_conf_file(conf_path, es_user, es_pass): - - file_path = os.path.join(conf_path, "logstash.yml") - - # Generate configuration file - with open(file_path, "w") as f: - - # Info - f.write(INFO.format("#")) - - # Configuration - f.write(CONF_BASE.format(es_user, es_pass)) - - -def generate_pipeline_conf_files( - pipeline_conf_path, db_user, db_pass, es_user, es_pass, entities, sync=False -): - - if not sync: # init file - file_path = os.path.join(pipeline_conf_path, "init_table.conf") - input_base = INIT_INPUT_BASE - output_base = INIT_OUTPUT_BASE - else: # sync file - file_path = os.path.join(pipeline_conf_path, "sync_table.conf") - input_base = SYNC_INPUT_BASE - output_base = SYNC_OUTPUT_BASE - - # Generate configuration file - with open(file_path, "w") as f: - - # Info - f.write(INFO.format("#")) - - # Input - f.write("input {\n") - for entity in entities: - f.write(input_base.format(db_user, db_pass, entity)) - f.write("}\n") - - # Filters - f.write(FILTER) - - # Output - f.write("output {\n") - for entity in entities: - f.write(output_base.format(es_user, es_pass, entity)) - f.write("}\n") - - -def generate_pipeline_sql_files(pipeline_sql_path, entity, fields, sync=False): - - # Generate output file path - if sync: - file_path = os.path.join(pipeline_sql_path, f"sync_{entity}.sql") - else: - file_path = os.path.join(pipeline_sql_path, f"init_{entity}.sql") - - # Write the output file - with open(file_path, "w") as f: - - # Info - f.write(INFO.format("--")) - - # Where clause - if sync: - where_clause = SYNC_CLAUSE.format(entity) - else: - where_clause = INIT_CLAUSE.format(entity) - - # Generate field list - field_list = ", ".join(fields).format(entity) - - f.write(SQL_BASE.format(entity, field_list, where_clause)) - - -def generate_pipeline_sql_rm_files(pipeline_sql_path, entity): - - # Generate output file path - file_path = os.path.join(pipeline_sql_path, f"rm_{entity}.sql") - - # Write the output file - with open(file_path, "w") as f: - - # Info - f.write(INFO.format("--")) - - # SQL query - f.write(SQL_RM_BASE.format(entity)) - - -def main(): - - # Get configuration variables - base_path = "/logstash" - db_user = "root" - db_pass = os.environ["MYSQL_ROOT_PASSWORD"] - es_user = os.environ["ES_USER"] - es_pass = os.environ["ES_PASSWORD"] - - # Search for entities and their extra fields - global_fields = set(["name", "plain", "html"]) - entities = {} - for router in router_list: - extra_fields = list(router.match_fields ^ global_fields) - entities[router.es_index] = BASE_FIELDS + extra_fields - - # Make configuration dir - conf_path = os.path.join(base_path, "config") - os.makedirs(conf_path, exist_ok=True) - - # Make pipeline configuration dirs - pipeline_conf_path = os.path.join(base_path, "pipeline", "conf") - os.makedirs(pipeline_conf_path, exist_ok=True) - pipeline_sql_path = os.path.join(base_path, "pipeline", "sql") - os.makedirs(pipeline_sql_path, exist_ok=True) - - # Generate logstash configuration file - generate_conf_file(conf_path, es_user, es_pass) - - # Generate pipeline configuration init and sync files - generate_pipeline_conf_files( - pipeline_conf_path, db_user, db_pass, es_user, es_pass, entities.keys(), sync=False - ) - generate_pipeline_conf_files( - pipeline_conf_path, db_user, db_pass, es_user, es_pass, entities.keys(), sync=True - ) - - # Generate SQL init, sync and rm files - for entity, fields in entities.items(): - generate_pipeline_sql_files(pipeline_sql_path, entity, fields, sync=False) - generate_pipeline_sql_files(pipeline_sql_path, entity, fields, sync=True) - generate_pipeline_sql_rm_files(pipeline_sql_path, entity) - - -if __name__ == "__main__": - main() diff --git a/data/connectors/.gitkeep b/src/setup/logstash_setup/__init__.py similarity index 100% rename from data/connectors/.gitkeep rename to src/setup/logstash_setup/__init__.py diff --git a/src/setup/logstash_setup/config_file_template.py b/src/setup/logstash_setup/config_file_template.py new file mode 100755 index 00000000..fed78c63 --- /dev/null +++ b/src/setup/logstash_setup/config_file_template.py @@ -0,0 +1,6 @@ +CONFIG_FILE_TEMPLATE = """http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] +xpack.monitoring.enabled: true +xpack.monitoring.elasticsearch.username: {{es_user}} +xpack.monitoring.elasticsearch.password: {{es_pass}} +""" diff --git a/src/setup/logstash/definitions.py b/src/setup/logstash_setup/definitions.py similarity index 97% rename from src/setup/logstash/definitions.py rename to src/setup/logstash_setup/definitions.py index 24c1c952..fd769472 100755 --- a/src/setup/logstash/definitions.py +++ b/src/setup/logstash_setup/definitions.py @@ -6,7 +6,7 @@ "aiod_entry.date_modified", ] -INFO = """{0} This file has been generated by `generate_logstash_config.py` +FILE_IS_GENERATED_COMMENT = """{0} This file has been generated by `generate_logstash_config.py` {0} file, placed in `src/setup/logstash` {0} ------------------------------------------------------------- """ diff --git a/src/setup/logstash_setup/file_generated_comment.py b/src/setup/logstash_setup/file_generated_comment.py new file mode 100755 index 00000000..eb259425 --- /dev/null +++ b/src/setup/logstash_setup/file_generated_comment.py @@ -0,0 +1,5 @@ +FILE_IS_GENERATED_COMMENT = """{{comment_tag}} This file has been generated by `generate_logstash_config_files.py` +{{comment_tag}} file, placed in `src/setup/logstash_setup` +{{comment_tag}} ------------------------------------------------------------------- + +""" diff --git a/src/setup/logstash_setup/generate_logstash_config_files.py b/src/setup/logstash_setup/generate_logstash_config_files.py new file mode 100755 index 00000000..c7b3a945 --- /dev/null +++ b/src/setup/logstash_setup/generate_logstash_config_files.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +"""Generates the logstash configuration and pipelines files + +This file generates the logstash configuration file in logstash/config, the +pipelines configuration files in logstash/pipelines/conf and the pipelines +sql sentences in logstash/pipelines/sql. + +Launched by the es_logstash_setup container in the docker-compose file. +""" + +import os +from jinja2 import Template + +from routers.search_routers import router_list +from file_generated_comment import FILE_IS_GENERATED_COMMENT +from config_file_template import CONFIG_FILE_TEMPLATE +from pipeline_config_init_file_template import PIPELINE_CONFIG_INIT_FILE_TEMPLATE +from pipeline_config_sync_file_template import PIPELINE_CONFIG_SYNC_FILE_TEMPLATE +from pipeline_sql_init_file_template import PIPELINE_SQL_INIT_FILE_TEMPLATE +from pipeline_sql_sync_file_template import PIPELINE_SQL_SYNC_FILE_TEMPLATE +from pipeline_sql_rm_file_template import PIPELINE_SQL_RM_FILE_TEMPLATE + + +def generate_file(file_path, template, file_data): + with open(file_path, "w") as f: + f.write(Template(FILE_IS_GENERATED_COMMENT).render(file_data)) + f.write(Template(template).render(file_data)) + +def main(): + base_path = "/logstash" + db_user = "root" + db_pass = os.environ["MYSQL_ROOT_PASSWORD"] + es_user = os.environ["ES_USER"] + es_pass = os.environ["ES_PASSWORD"] + global_fields = set(["name", "plain", "html"]) + entities = {} + for router in router_list: + entities[router.es_index] = list(router.match_fields ^ global_fields) + config_path = os.path.join(base_path, "config") + os.makedirs(config_path, exist_ok=True) + pipeline_config_path = os.path.join(base_path, "pipeline", "conf") + os.makedirs(pipeline_config_path, exist_ok=True) + pipeline_sql_path = os.path.join(base_path, "pipeline", "sql") + os.makedirs(pipeline_sql_path, exist_ok=True) + config_file_data = { + 'comment_tag': "#", + 'es_user': es_user, + 'es_pass': es_pass + } + config_file_path = os.path.join(config_path, "logstash.yml") + generate_file(config_file_path, CONFIG_FILE_TEMPLATE, config_file_data) + pipeline_config_files_data = { + 'comment_tag': "#", + 'es_user': es_user, + 'es_pass': es_pass, + 'db_user': db_user, + 'db_pass': db_pass, + 'entities': entities.keys() + } + pipeline_config_init_file_path = os.path.join(pipeline_config_path, "init_table.conf") + generate_file(pipeline_config_init_file_path, PIPELINE_CONFIG_INIT_FILE_TEMPLATE, pipeline_config_files_data) + pipeline_config_sync_file_path = os.path.join(pipeline_config_path, "sync_table.conf") + generate_file(pipeline_config_sync_file_path, PIPELINE_CONFIG_SYNC_FILE_TEMPLATE, pipeline_config_files_data) + for entity, extra_fields in entities.items(): + pipeline_sql_files_data = { + 'comment_tag': "--", + 'entity_name': entity, + 'extra_fields': ", " + ", ".join(extra_fields) if extra_fields else "" + } + pipeline_sql_init_file_path = os.path.join(pipeline_sql_path, f"init_{entity}.sql") + generate_file(pipeline_sql_init_file_path, PIPELINE_SQL_INIT_FILE_TEMPLATE, pipeline_sql_files_data) + pipeline_sql_sync_file_path = os.path.join(pipeline_sql_path, f"sync_{entity}.sql") + generate_file(pipeline_sql_sync_file_path, PIPELINE_SQL_SYNC_FILE_TEMPLATE, pipeline_sql_files_data) + pipeline_sql_rm_file_path = os.path.join(pipeline_sql_path, f"rm_{entity}.sql") + generate_file(pipeline_sql_rm_file_path, PIPELINE_SQL_RM_FILE_TEMPLATE, pipeline_sql_files_data) + + +if __name__ == "__main__": + main() diff --git a/src/setup/logstash_setup/pipeline_config_init_file_template.py b/src/setup/logstash_setup/pipeline_config_init_file_template.py new file mode 100755 index 00000000..cff6878b --- /dev/null +++ b/src/setup/logstash_setup/pipeline_config_init_file_template.py @@ -0,0 +1,36 @@ +PIPELINE_CONFIG_INIT_FILE_TEMPLATE = """ +input { +{% for entity in entities %} + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{{db_user}}" + jdbc_password => "{{db_pass}}" + clean_run => true + record_last_run => false + statement_filepath => "/usr/share/logstash/sql/init_{{entity}}.sql" + type => "{{entity}}" + } +{% endfor %} +} +filter { + mutate { + remove_field => ["@version", "@timestamp"] + } +} +output { +{% for entity in entities %} + if [type] == "{{entity}}" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "{{es_user}}" + password => "{{es_pass}}" + ecs_compatibility => disabled + index => "{{entity}}" + document_id => "{{entity}}_%{identifier}" + } + } +{% endfor %} +} +""" diff --git a/src/setup/logstash_setup/pipeline_config_sync_file_template.py b/src/setup/logstash_setup/pipeline_config_sync_file_template.py new file mode 100755 index 00000000..cab9fd3e --- /dev/null +++ b/src/setup/logstash_setup/pipeline_config_sync_file_template.py @@ -0,0 +1,62 @@ +PIPELINE_CONFIG_SYNC_FILE_TEMPLATE = """ +input { +{% for entity in entities %} + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{{db_user}}" + jdbc_password => "{{db_pass}}" + use_column_value => true + tracking_column => "date_modified" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/sync_{{entity}}.sql" + type => "{{entity}}" + } + jdbc { + jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" + jdbc_user => "{{db_user}}" + jdbc_password => "{{db_pass}}" + use_column_value => true + tracking_column => "date_deleted" + tracking_column_type => "timestamp" + schedule => "*/5 * * * * *" + statement_filepath => "/usr/share/logstash/sql/rm_{{entity}}.sql" + type => "rm_{{entity}}" + } +{% endfor %} +} +filter { + mutate { + remove_field => ["@version", "@timestamp"] + } +} +output { +{% for entity in entities %} + if [type] == "{{entity}}" { + elasticsearch { + hosts => "elasticsearch:9200" + user => "{{es_user}}" + password => "{{es_pass}}" + ecs_compatibility => disabled + index => "{{entity}}" + document_id => "{{entity}}_%{identifier}" + } + } + if [type] == "rm_{{entity}}" { + elasticsearch { + action => "delete" + hosts => "elasticsearch:9200" + user => "{{es_user}}" + password => "{{es_pass}}" + ecs_compatibility => disabled + index => "{{entity}}" + document_id => "{{entity}}_%{identifier}" + } + } +{% endfor %} +} +""" diff --git a/src/setup/logstash_setup/pipeline_sql_init_file_template.py b/src/setup/logstash_setup/pipeline_sql_init_file_template.py new file mode 100755 index 00000000..4bb39341 --- /dev/null +++ b/src/setup/logstash_setup/pipeline_sql_init_file_template.py @@ -0,0 +1,5 @@ +PIPELINE_SQL_INIT_FILE_TEMPLATE = """SELECT {{entity_name}}.identifier, {{entity_name}}.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified {{extra_fields}} +FROM aiod.{{entity_name}} +INNER JOIN aiod.aiod_entry ON aiod.{{entity_name}}.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.{{entity_name}}.description_identifier=aiod.text.identifier +WHERE aiod.{{entity_name}}.date_deleted IS NULL""" diff --git a/src/setup/logstash_setup/pipeline_sql_rm_file_template.py b/src/setup/logstash_setup/pipeline_sql_rm_file_template.py new file mode 100755 index 00000000..c02f54da --- /dev/null +++ b/src/setup/logstash_setup/pipeline_sql_rm_file_template.py @@ -0,0 +1,4 @@ +PIPELINE_SQL_RM_FILE_TEMPLATE = """SELECT {{entity_name}}.identifier +FROM aiod.{{entity_name}} +WHERE aiod.{{entity_name}}.date_deleted IS NOT NULL AND aiod.{{entity_name}}.date_deleted > :sql_last_value +""" diff --git a/src/setup/logstash_setup/pipeline_sql_sync_file_template.py b/src/setup/logstash_setup/pipeline_sql_sync_file_template.py new file mode 100755 index 00000000..00e05160 --- /dev/null +++ b/src/setup/logstash_setup/pipeline_sql_sync_file_template.py @@ -0,0 +1,5 @@ +PIPELINE_SQL_SYNC_FILE_TEMPLATE = """SELECT {{entity_name}}.identifier, {{entity_name}}.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified {{extra_fields}} +FROM aiod.{{entity_name}} +INNER JOIN aiod.aiod_entry ON aiod.{{entity_name}}.aiod_entry_identifier=aiod.aiod_entry.identifier +LEFT JOIN aiod.text ON aiod.{{entity_name}}.description_identifier=aiod.text.identifier +WHERE aiod.{{entity_name}}.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" From b73c011d051f9e568b4426654dd0534a84c3df7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Fri, 24 Nov 2023 01:17:11 +0100 Subject: [PATCH 64/79] logstash config files generated with jinja2 --- src/setup/logstash_setup/definitions.py | 119 ------------------------ 1 file changed, 119 deletions(-) delete mode 100755 src/setup/logstash_setup/definitions.py diff --git a/src/setup/logstash_setup/definitions.py b/src/setup/logstash_setup/definitions.py deleted file mode 100755 index fd769472..00000000 --- a/src/setup/logstash_setup/definitions.py +++ /dev/null @@ -1,119 +0,0 @@ -BASE_FIELDS = [ - "{0}.identifier", - "{0}.name", - "text.plain as 'plain'", - "text.html as 'html'", - "aiod_entry.date_modified", -] - -FILE_IS_GENERATED_COMMENT = """{0} This file has been generated by `generate_logstash_config.py` -{0} file, placed in `src/setup/logstash` -{0} ------------------------------------------------------------- -""" - -CONF_BASE = """http.host: "0.0.0.0" -xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] -xpack.monitoring.enabled: true -xpack.monitoring.elasticsearch.username: {0} -xpack.monitoring.elasticsearch.password: {1} -""" - -INIT_INPUT_BASE = """ jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "{0}" - jdbc_password => "{1}" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_{2}.sql" - type => "{2}" - }} -""" - -SYNC_INPUT_BASE = """ jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "{0}" - jdbc_password => "{1}" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_{2}.sql" - type => "{2}" - }} - jdbc {{ - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "{0}" - jdbc_password => "{1}" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_{2}.sql" - type => "rm_{2}" - }} -""" - -FILTER = """filter { - mutate { - remove_field => ["@version", "@timestamp"] - } -} -""" - -INIT_OUTPUT_BASE = """ if [type] == "{2}" {{ - elasticsearch {{ - hosts => "elasticsearch:9200" - user => "{0}" - password => "{1}" - ecs_compatibility => disabled - index => "{2}" - document_id => "{2}_%{{identifier}}" - }} - }} -""" - -SYNC_OUTPUT_BASE = """ if [type] == "{2}" {{ - elasticsearch {{ - hosts => "elasticsearch:9200" - user => "{0}" - password => "{1}" - ecs_compatibility => disabled - index => "{2}" - document_id => "{2}_%{{identifier}}" - }} - }} - if [type] == "rm_{2}" {{ - elasticsearch {{ - action => "delete" - hosts => "elasticsearch:9200" - user => "{0}" - password => "{1}" - ecs_compatibility => disabled - index => "{2}" - document_id => "{2}_%{{identifier}}" - }} - }} -""" - -SQL_BASE = """SELECT {1} -FROM aiod.{0} -INNER JOIN aiod.aiod_entry ON aiod.{0}.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.{0}.description_identifier=aiod.text.identifier{2} -""" - -SQL_RM_BASE = """SELECT {0}.identifier -FROM aiod.{0} -WHERE aiod.{0}.date_deleted IS NOT NULL AND aiod.{0}.date_deleted > :sql_last_value -""" - -INIT_CLAUSE = """ -WHERE aiod.{0}.date_deleted IS NULL""" - -SYNC_CLAUSE = """ -WHERE aiod.{0}.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" From 6ae492da6e7a82b5136c2081965b1806743ea209 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Fri, 24 Nov 2023 14:04:11 +0100 Subject: [PATCH 65/79] Logstash config files generated with jinja2. All test passed, but not merged with develop. --- logstash/config/logstash.yml | 9 +- logstash/pipeline/conf/init_table.conf | 222 --------- logstash/pipeline/conf/sync_table.conf | 456 ------------------ logstash/pipeline/sql/init_dataset.sql | 8 - logstash/pipeline/sql/init_event.sql | 8 - logstash/pipeline/sql/init_experiment.sql | 8 - logstash/pipeline/sql/init_ml_model.sql | 8 - logstash/pipeline/sql/init_news.sql | 8 - logstash/pipeline/sql/init_organisation.sql | 8 - logstash/pipeline/sql/init_project.sql | 8 - logstash/pipeline/sql/init_publication.sql | 8 - logstash/pipeline/sql/init_service.sql | 8 - logstash/pipeline/sql/rm_dataset.sql | 6 - logstash/pipeline/sql/rm_event.sql | 6 - logstash/pipeline/sql/rm_experiment.sql | 6 - logstash/pipeline/sql/rm_ml_model.sql | 6 - logstash/pipeline/sql/rm_news.sql | 6 - logstash/pipeline/sql/rm_organisation.sql | 6 - logstash/pipeline/sql/rm_project.sql | 6 - logstash/pipeline/sql/rm_publication.sql | 6 - logstash/pipeline/sql/rm_service.sql | 6 - logstash/pipeline/sql/sync_dataset.sql | 8 - logstash/pipeline/sql/sync_event.sql | 8 - logstash/pipeline/sql/sync_experiment.sql | 8 - logstash/pipeline/sql/sync_ml_model.sql | 8 - logstash/pipeline/sql/sync_news.sql | 8 - logstash/pipeline/sql/sync_organisation.sql | 8 - logstash/pipeline/sql/sync_project.sql | 8 - logstash/pipeline/sql/sync_publication.sql | 8 - logstash/pipeline/sql/sync_service.sql | 8 - .../generate_elasticsearch_indices.py | 1 + .../logstash_setup/file_generated_comment.py | 4 +- .../generate_logstash_config_files.py | 55 ++- .../pipeline_sql_init_file_template.py | 10 +- .../pipeline_sql_rm_file_template.py | 3 +- .../pipeline_sql_sync_file_template.py | 10 +- 36 files changed, 60 insertions(+), 908 deletions(-) delete mode 100644 logstash/pipeline/conf/init_table.conf delete mode 100644 logstash/pipeline/conf/sync_table.conf delete mode 100644 logstash/pipeline/sql/init_dataset.sql delete mode 100644 logstash/pipeline/sql/init_event.sql delete mode 100644 logstash/pipeline/sql/init_experiment.sql delete mode 100644 logstash/pipeline/sql/init_ml_model.sql delete mode 100644 logstash/pipeline/sql/init_news.sql delete mode 100644 logstash/pipeline/sql/init_organisation.sql delete mode 100644 logstash/pipeline/sql/init_project.sql delete mode 100644 logstash/pipeline/sql/init_publication.sql delete mode 100644 logstash/pipeline/sql/init_service.sql delete mode 100644 logstash/pipeline/sql/rm_dataset.sql delete mode 100644 logstash/pipeline/sql/rm_event.sql delete mode 100644 logstash/pipeline/sql/rm_experiment.sql delete mode 100644 logstash/pipeline/sql/rm_ml_model.sql delete mode 100644 logstash/pipeline/sql/rm_news.sql delete mode 100644 logstash/pipeline/sql/rm_organisation.sql delete mode 100644 logstash/pipeline/sql/rm_project.sql delete mode 100644 logstash/pipeline/sql/rm_publication.sql delete mode 100644 logstash/pipeline/sql/rm_service.sql delete mode 100644 logstash/pipeline/sql/sync_dataset.sql delete mode 100644 logstash/pipeline/sql/sync_event.sql delete mode 100644 logstash/pipeline/sql/sync_experiment.sql delete mode 100644 logstash/pipeline/sql/sync_ml_model.sql delete mode 100644 logstash/pipeline/sql/sync_news.sql delete mode 100644 logstash/pipeline/sql/sync_organisation.sql delete mode 100644 logstash/pipeline/sql/sync_project.sql delete mode 100644 logstash/pipeline/sql/sync_publication.sql delete mode 100644 logstash/pipeline/sql/sync_service.sql diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml index 4d6ca384..8b137891 100644 --- a/logstash/config/logstash.yml +++ b/logstash/config/logstash.yml @@ -1,8 +1 @@ -# This file has been generated by `generate_logstash_config_files.py` -# file, placed in `src/setup/logstash_setup` -# ------------------------------------------------------------------- -http.host: "0.0.0.0" -xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] -xpack.monitoring.enabled: true -xpack.monitoring.elasticsearch.username: elastic -xpack.monitoring.elasticsearch.password: changeme \ No newline at end of file + diff --git a/logstash/pipeline/conf/init_table.conf b/logstash/pipeline/conf/init_table.conf deleted file mode 100644 index cddf486c..00000000 --- a/logstash/pipeline/conf/init_table.conf +++ /dev/null @@ -1,222 +0,0 @@ -# This file has been generated by `generate_logstash_config_files.py` -# file, placed in `src/setup/logstash_setup` -# ------------------------------------------------------------------- - -input { - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_dataset.sql" - type => "dataset" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_event.sql" - type => "event" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_experiment.sql" - type => "experiment" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_ml_model.sql" - type => "ml_model" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_news.sql" - type => "news" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_organisation.sql" - type => "organisation" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_project.sql" - type => "project" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_publication.sql" - type => "publication" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - clean_run => true - record_last_run => false - statement_filepath => "/usr/share/logstash/sql/init_service.sql" - type => "service" - } - -} -filter { - mutate { - remove_field => ["@version", "@timestamp"] - } -} -output { - - if [type] == "dataset" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "dataset" - document_id => "dataset_%{identifier}" - } - } - - if [type] == "event" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" - } - } - - if [type] == "experiment" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "experiment" - document_id => "experiment_%{identifier}" - } - } - - if [type] == "ml_model" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "ml_model" - document_id => "ml_model_%{identifier}" - } - } - - if [type] == "news" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "news" - document_id => "news_%{identifier}" - } - } - - if [type] == "organisation" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" - } - } - - if [type] == "project" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "project" - document_id => "project_%{identifier}" - } - } - - if [type] == "publication" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" - } - } - - if [type] == "service" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" - } - } - -} \ No newline at end of file diff --git a/logstash/pipeline/conf/sync_table.conf b/logstash/pipeline/conf/sync_table.conf deleted file mode 100644 index b45a4309..00000000 --- a/logstash/pipeline/conf/sync_table.conf +++ /dev/null @@ -1,456 +0,0 @@ -# This file has been generated by `generate_logstash_config_files.py` -# file, placed in `src/setup/logstash_setup` -# ------------------------------------------------------------------- - -input { - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_dataset.sql" - type => "dataset" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_dataset.sql" - type => "rm_dataset" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_event.sql" - type => "event" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_event.sql" - type => "rm_event" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_experiment.sql" - type => "experiment" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_experiment.sql" - type => "rm_experiment" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_ml_model.sql" - type => "ml_model" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_ml_model.sql" - type => "rm_ml_model" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_news.sql" - type => "news" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_news.sql" - type => "rm_news" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_organisation.sql" - type => "organisation" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_organisation.sql" - type => "rm_organisation" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_project.sql" - type => "project" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_project.sql" - type => "rm_project" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_publication.sql" - type => "publication" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_publication.sql" - type => "rm_publication" - } - - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_modified" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/sync_service.sql" - type => "service" - } - jdbc { - jdbc_driver_library => "/usr/share/logstash/mysql-connector-j.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://sqlserver:3306/aiod" - jdbc_user => "root" - jdbc_password => "ok" - use_column_value => true - tracking_column => "date_deleted" - tracking_column_type => "timestamp" - schedule => "*/5 * * * * *" - statement_filepath => "/usr/share/logstash/sql/rm_service.sql" - type => "rm_service" - } - -} -filter { - mutate { - remove_field => ["@version", "@timestamp"] - } -} -output { - - if [type] == "dataset" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "dataset" - document_id => "dataset_%{identifier}" - } - } - if [type] == "rm_dataset" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "dataset" - document_id => "dataset_%{identifier}" - } - } - - if [type] == "event" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" - } - } - if [type] == "rm_event" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "event" - document_id => "event_%{identifier}" - } - } - - if [type] == "experiment" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "experiment" - document_id => "experiment_%{identifier}" - } - } - if [type] == "rm_experiment" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "experiment" - document_id => "experiment_%{identifier}" - } - } - - if [type] == "ml_model" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "ml_model" - document_id => "ml_model_%{identifier}" - } - } - if [type] == "rm_ml_model" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "ml_model" - document_id => "ml_model_%{identifier}" - } - } - - if [type] == "news" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "news" - document_id => "news_%{identifier}" - } - } - if [type] == "rm_news" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "news" - document_id => "news_%{identifier}" - } - } - - if [type] == "organisation" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" - } - } - if [type] == "rm_organisation" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "organisation" - document_id => "organisation_%{identifier}" - } - } - - if [type] == "project" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "project" - document_id => "project_%{identifier}" - } - } - if [type] == "rm_project" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "project" - document_id => "project_%{identifier}" - } - } - - if [type] == "publication" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" - } - } - if [type] == "rm_publication" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "publication" - document_id => "publication_%{identifier}" - } - } - - if [type] == "service" { - elasticsearch { - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" - } - } - if [type] == "rm_service" { - elasticsearch { - action => "delete" - hosts => "elasticsearch:9200" - user => "elastic" - password => "changeme" - ecs_compatibility => disabled - index => "service" - document_id => "service_%{identifier}" - } - } - -} \ No newline at end of file diff --git a/logstash/pipeline/sql/init_dataset.sql b/logstash/pipeline/sql/init_dataset.sql deleted file mode 100644 index 303c3857..00000000 --- a/logstash/pipeline/sql/init_dataset.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT dataset.identifier, dataset.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , issn -FROM aiod.dataset -INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier -WHERE aiod.dataset.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_event.sql b/logstash/pipeline/sql/init_event.sql deleted file mode 100644 index 4a740ee9..00000000 --- a/logstash/pipeline/sql/init_event.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT event.identifier, event.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.event -INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier -WHERE aiod.event.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_experiment.sql b/logstash/pipeline/sql/init_experiment.sql deleted file mode 100644 index 32f3749b..00000000 --- a/logstash/pipeline/sql/init_experiment.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT experiment.identifier, experiment.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.experiment -INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier -WHERE aiod.experiment.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_ml_model.sql b/logstash/pipeline/sql/init_ml_model.sql deleted file mode 100644 index dcdf87b2..00000000 --- a/logstash/pipeline/sql/init_ml_model.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT ml_model.identifier, ml_model.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.ml_model -INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier -WHERE aiod.ml_model.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_news.sql b/logstash/pipeline/sql/init_news.sql deleted file mode 100644 index 7c4593ba..00000000 --- a/logstash/pipeline/sql/init_news.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , alternative_headline, headline -FROM aiod.news -INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier -WHERE aiod.news.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_organisation.sql b/logstash/pipeline/sql/init_organisation.sql deleted file mode 100644 index 7fd1af3f..00000000 --- a/logstash/pipeline/sql/init_organisation.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT organisation.identifier, organisation.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , legal_name -FROM aiod.organisation -INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier -WHERE aiod.organisation.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_project.sql b/logstash/pipeline/sql/init_project.sql deleted file mode 100644 index d97397fa..00000000 --- a/logstash/pipeline/sql/init_project.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT project.identifier, project.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.project -INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier -WHERE aiod.project.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_publication.sql b/logstash/pipeline/sql/init_publication.sql deleted file mode 100644 index 31928c40..00000000 --- a/logstash/pipeline/sql/init_publication.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT publication.identifier, publication.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , isbn, issn -FROM aiod.publication -INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier -WHERE aiod.publication.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/init_service.sql b/logstash/pipeline/sql/init_service.sql deleted file mode 100644 index c0d84d40..00000000 --- a/logstash/pipeline/sql/init_service.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT service.identifier, service.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , slogan -FROM aiod.service -INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier -WHERE aiod.service.date_deleted IS NULL \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_dataset.sql b/logstash/pipeline/sql/rm_dataset.sql deleted file mode 100644 index e63316e4..00000000 --- a/logstash/pipeline/sql/rm_dataset.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT dataset.identifier -FROM aiod.dataset -WHERE aiod.dataset.date_deleted IS NOT NULL AND aiod.dataset.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_event.sql b/logstash/pipeline/sql/rm_event.sql deleted file mode 100644 index 07abd73e..00000000 --- a/logstash/pipeline/sql/rm_event.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT event.identifier -FROM aiod.event -WHERE aiod.event.date_deleted IS NOT NULL AND aiod.event.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_experiment.sql b/logstash/pipeline/sql/rm_experiment.sql deleted file mode 100644 index f38c1c67..00000000 --- a/logstash/pipeline/sql/rm_experiment.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT experiment.identifier -FROM aiod.experiment -WHERE aiod.experiment.date_deleted IS NOT NULL AND aiod.experiment.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_ml_model.sql b/logstash/pipeline/sql/rm_ml_model.sql deleted file mode 100644 index 0fcc53f8..00000000 --- a/logstash/pipeline/sql/rm_ml_model.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT ml_model.identifier -FROM aiod.ml_model -WHERE aiod.ml_model.date_deleted IS NOT NULL AND aiod.ml_model.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_news.sql b/logstash/pipeline/sql/rm_news.sql deleted file mode 100644 index fab72f00..00000000 --- a/logstash/pipeline/sql/rm_news.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT news.identifier -FROM aiod.news -WHERE aiod.news.date_deleted IS NOT NULL AND aiod.news.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_organisation.sql b/logstash/pipeline/sql/rm_organisation.sql deleted file mode 100644 index 48a5c3dc..00000000 --- a/logstash/pipeline/sql/rm_organisation.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT organisation.identifier -FROM aiod.organisation -WHERE aiod.organisation.date_deleted IS NOT NULL AND aiod.organisation.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_project.sql b/logstash/pipeline/sql/rm_project.sql deleted file mode 100644 index eca4e43d..00000000 --- a/logstash/pipeline/sql/rm_project.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT project.identifier -FROM aiod.project -WHERE aiod.project.date_deleted IS NOT NULL AND aiod.project.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_publication.sql b/logstash/pipeline/sql/rm_publication.sql deleted file mode 100644 index 19c50766..00000000 --- a/logstash/pipeline/sql/rm_publication.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT publication.identifier -FROM aiod.publication -WHERE aiod.publication.date_deleted IS NOT NULL AND aiod.publication.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/rm_service.sql b/logstash/pipeline/sql/rm_service.sql deleted file mode 100644 index a256e688..00000000 --- a/logstash/pipeline/sql/rm_service.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT service.identifier -FROM aiod.service -WHERE aiod.service.date_deleted IS NOT NULL AND aiod.service.date_deleted > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_dataset.sql b/logstash/pipeline/sql/sync_dataset.sql deleted file mode 100644 index 2b4dd149..00000000 --- a/logstash/pipeline/sql/sync_dataset.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT dataset.identifier, dataset.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , issn -FROM aiod.dataset -INNER JOIN aiod.aiod_entry ON aiod.dataset.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.dataset.description_identifier=aiod.text.identifier -WHERE aiod.dataset.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_event.sql b/logstash/pipeline/sql/sync_event.sql deleted file mode 100644 index 1a67dd70..00000000 --- a/logstash/pipeline/sql/sync_event.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT event.identifier, event.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.event -INNER JOIN aiod.aiod_entry ON aiod.event.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.event.description_identifier=aiod.text.identifier -WHERE aiod.event.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_experiment.sql b/logstash/pipeline/sql/sync_experiment.sql deleted file mode 100644 index 69f8a228..00000000 --- a/logstash/pipeline/sql/sync_experiment.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT experiment.identifier, experiment.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.experiment -INNER JOIN aiod.aiod_entry ON aiod.experiment.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.experiment.description_identifier=aiod.text.identifier -WHERE aiod.experiment.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_ml_model.sql b/logstash/pipeline/sql/sync_ml_model.sql deleted file mode 100644 index dd88935b..00000000 --- a/logstash/pipeline/sql/sync_ml_model.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT ml_model.identifier, ml_model.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.ml_model -INNER JOIN aiod.aiod_entry ON aiod.ml_model.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.ml_model.description_identifier=aiod.text.identifier -WHERE aiod.ml_model.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_news.sql b/logstash/pipeline/sql/sync_news.sql deleted file mode 100644 index 65f6ffaf..00000000 --- a/logstash/pipeline/sql/sync_news.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT news.identifier, news.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , alternative_headline, headline -FROM aiod.news -INNER JOIN aiod.aiod_entry ON aiod.news.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.news.description_identifier=aiod.text.identifier -WHERE aiod.news.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_organisation.sql b/logstash/pipeline/sql/sync_organisation.sql deleted file mode 100644 index aa7886ca..00000000 --- a/logstash/pipeline/sql/sync_organisation.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT organisation.identifier, organisation.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , legal_name -FROM aiod.organisation -INNER JOIN aiod.aiod_entry ON aiod.organisation.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.organisation.description_identifier=aiod.text.identifier -WHERE aiod.organisation.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_project.sql b/logstash/pipeline/sql/sync_project.sql deleted file mode 100644 index 42828608..00000000 --- a/logstash/pipeline/sql/sync_project.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT project.identifier, project.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified -FROM aiod.project -INNER JOIN aiod.aiod_entry ON aiod.project.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.project.description_identifier=aiod.text.identifier -WHERE aiod.project.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_publication.sql b/logstash/pipeline/sql/sync_publication.sql deleted file mode 100644 index 2592886c..00000000 --- a/logstash/pipeline/sql/sync_publication.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT publication.identifier, publication.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , isbn, issn -FROM aiod.publication -INNER JOIN aiod.aiod_entry ON aiod.publication.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.publication.description_identifier=aiod.text.identifier -WHERE aiod.publication.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/logstash/pipeline/sql/sync_service.sql b/logstash/pipeline/sql/sync_service.sql deleted file mode 100644 index 0839b31d..00000000 --- a/logstash/pipeline/sql/sync_service.sql +++ /dev/null @@ -1,8 +0,0 @@ --- This file has been generated by `generate_logstash_config_files.py` --- file, placed in `src/setup/logstash_setup` --- ------------------------------------------------------------------- -SELECT service.identifier, service.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified , slogan -FROM aiod.service -INNER JOIN aiod.aiod_entry ON aiod.service.aiod_entry_identifier=aiod.aiod_entry.identifier -LEFT JOIN aiod.text ON aiod.service.description_identifier=aiod.text.identifier -WHERE aiod.service.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value \ No newline at end of file diff --git a/src/setup/es_setup/generate_elasticsearch_indices.py b/src/setup/es_setup/generate_elasticsearch_indices.py index 181df10b..6aec8a69 100755 --- a/src/setup/es_setup/generate_elasticsearch_indices.py +++ b/src/setup/es_setup/generate_elasticsearch_indices.py @@ -13,6 +13,7 @@ from routers.search_routers import router_list from definitions import BASE_MAPPING + def add_field(base_mapping, field): new_mapping = copy.deepcopy(base_mapping) new_mapping["mappings"]["properties"][field] = { diff --git a/src/setup/logstash_setup/file_generated_comment.py b/src/setup/logstash_setup/file_generated_comment.py index eb259425..de26912b 100755 --- a/src/setup/logstash_setup/file_generated_comment.py +++ b/src/setup/logstash_setup/file_generated_comment.py @@ -1,5 +1,5 @@ -FILE_IS_GENERATED_COMMENT = """{{comment_tag}} This file has been generated by `generate_logstash_config_files.py` -{{comment_tag}} file, placed in `src/setup/logstash_setup` +FILE_IS_GENERATED_COMMENT = """{{comment_tag}} This file has been generated by `{{file}}` +{{comment_tag}} file, placed in `{{path}}` {{comment_tag}} ------------------------------------------------------------------- """ diff --git a/src/setup/logstash_setup/generate_logstash_config_files.py b/src/setup/logstash_setup/generate_logstash_config_files.py index c7b3a945..852135f4 100755 --- a/src/setup/logstash_setup/generate_logstash_config_files.py +++ b/src/setup/logstash_setup/generate_logstash_config_files.py @@ -28,6 +28,7 @@ def generate_file(file_path, template, file_data): f.write(Template(FILE_IS_GENERATED_COMMENT).render(file_data)) f.write(Template(template).render(file_data)) + def main(): base_path = "/logstash" db_user = "root" @@ -45,36 +46,56 @@ def main(): pipeline_sql_path = os.path.join(base_path, "pipeline", "sql") os.makedirs(pipeline_sql_path, exist_ok=True) config_file_data = { - 'comment_tag': "#", - 'es_user': es_user, - 'es_pass': es_pass + "file": os.path.basename(__file__), + "path": os.path.dirname(__file__).replace("/app", "src"), + "comment_tag": "#", + "es_user": es_user, + "es_pass": es_pass, } config_file_path = os.path.join(config_path, "logstash.yml") generate_file(config_file_path, CONFIG_FILE_TEMPLATE, config_file_data) pipeline_config_files_data = { - 'comment_tag': "#", - 'es_user': es_user, - 'es_pass': es_pass, - 'db_user': db_user, - 'db_pass': db_pass, - 'entities': entities.keys() + "file": os.path.basename(__file__), + "path": os.path.dirname(__file__).replace("/app", "src"), + "comment_tag": "#", + "es_user": es_user, + "es_pass": es_pass, + "db_user": db_user, + "db_pass": db_pass, + "entities": entities.keys(), } pipeline_config_init_file_path = os.path.join(pipeline_config_path, "init_table.conf") - generate_file(pipeline_config_init_file_path, PIPELINE_CONFIG_INIT_FILE_TEMPLATE, pipeline_config_files_data) + generate_file( + pipeline_config_init_file_path, + PIPELINE_CONFIG_INIT_FILE_TEMPLATE, + pipeline_config_files_data, + ) pipeline_config_sync_file_path = os.path.join(pipeline_config_path, "sync_table.conf") - generate_file(pipeline_config_sync_file_path, PIPELINE_CONFIG_SYNC_FILE_TEMPLATE, pipeline_config_files_data) + generate_file( + pipeline_config_sync_file_path, + PIPELINE_CONFIG_SYNC_FILE_TEMPLATE, + pipeline_config_files_data, + ) for entity, extra_fields in entities.items(): pipeline_sql_files_data = { - 'comment_tag': "--", - 'entity_name': entity, - 'extra_fields': ", " + ", ".join(extra_fields) if extra_fields else "" + "file": os.path.basename(__file__), + "path": os.path.dirname(__file__).replace("/app", "src"), + "comment_tag": "--", + "entity_name": entity, + "extra_fields": ",\n " + ",\n ".join(extra_fields) if extra_fields else "", } pipeline_sql_init_file_path = os.path.join(pipeline_sql_path, f"init_{entity}.sql") - generate_file(pipeline_sql_init_file_path, PIPELINE_SQL_INIT_FILE_TEMPLATE, pipeline_sql_files_data) + generate_file( + pipeline_sql_init_file_path, PIPELINE_SQL_INIT_FILE_TEMPLATE, pipeline_sql_files_data + ) pipeline_sql_sync_file_path = os.path.join(pipeline_sql_path, f"sync_{entity}.sql") - generate_file(pipeline_sql_sync_file_path, PIPELINE_SQL_SYNC_FILE_TEMPLATE, pipeline_sql_files_data) + generate_file( + pipeline_sql_sync_file_path, PIPELINE_SQL_SYNC_FILE_TEMPLATE, pipeline_sql_files_data + ) pipeline_sql_rm_file_path = os.path.join(pipeline_sql_path, f"rm_{entity}.sql") - generate_file(pipeline_sql_rm_file_path, PIPELINE_SQL_RM_FILE_TEMPLATE, pipeline_sql_files_data) + generate_file( + pipeline_sql_rm_file_path, PIPELINE_SQL_RM_FILE_TEMPLATE, pipeline_sql_files_data + ) if __name__ == "__main__": diff --git a/src/setup/logstash_setup/pipeline_sql_init_file_template.py b/src/setup/logstash_setup/pipeline_sql_init_file_template.py index 4bb39341..d079e2aa 100755 --- a/src/setup/logstash_setup/pipeline_sql_init_file_template.py +++ b/src/setup/logstash_setup/pipeline_sql_init_file_template.py @@ -1,5 +1,11 @@ -PIPELINE_SQL_INIT_FILE_TEMPLATE = """SELECT {{entity_name}}.identifier, {{entity_name}}.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified {{extra_fields}} +PIPELINE_SQL_INIT_FILE_TEMPLATE = """SELECT + {{entity_name}}.identifier, + {{entity_name}}.name, + text.plain as 'plain', + text.html as 'html', + aiod_entry.date_modified{{extra_fields}} FROM aiod.{{entity_name}} INNER JOIN aiod.aiod_entry ON aiod.{{entity_name}}.aiod_entry_identifier=aiod.aiod_entry.identifier LEFT JOIN aiod.text ON aiod.{{entity_name}}.description_identifier=aiod.text.identifier -WHERE aiod.{{entity_name}}.date_deleted IS NULL""" +WHERE aiod.{{entity_name}}.date_deleted IS NULL +""" diff --git a/src/setup/logstash_setup/pipeline_sql_rm_file_template.py b/src/setup/logstash_setup/pipeline_sql_rm_file_template.py index c02f54da..10640db5 100755 --- a/src/setup/logstash_setup/pipeline_sql_rm_file_template.py +++ b/src/setup/logstash_setup/pipeline_sql_rm_file_template.py @@ -1,4 +1,5 @@ PIPELINE_SQL_RM_FILE_TEMPLATE = """SELECT {{entity_name}}.identifier FROM aiod.{{entity_name}} -WHERE aiod.{{entity_name}}.date_deleted IS NOT NULL AND aiod.{{entity_name}}.date_deleted > :sql_last_value +WHERE aiod.{{entity_name}}.date_deleted IS NOT NULL +AND aiod.{{entity_name}}.date_deleted > :sql_last_value """ diff --git a/src/setup/logstash_setup/pipeline_sql_sync_file_template.py b/src/setup/logstash_setup/pipeline_sql_sync_file_template.py index 00e05160..b02fa9cb 100755 --- a/src/setup/logstash_setup/pipeline_sql_sync_file_template.py +++ b/src/setup/logstash_setup/pipeline_sql_sync_file_template.py @@ -1,5 +1,11 @@ -PIPELINE_SQL_SYNC_FILE_TEMPLATE = """SELECT {{entity_name}}.identifier, {{entity_name}}.name, text.plain as 'plain', text.html as 'html', aiod_entry.date_modified {{extra_fields}} +PIPELINE_SQL_SYNC_FILE_TEMPLATE = """SELECT + {{entity_name}}.identifier, + {{entity_name}}.name, + text.plain as 'plain', + text.html as 'html', + aiod_entry.date_modified{{extra_fields}} FROM aiod.{{entity_name}} INNER JOIN aiod.aiod_entry ON aiod.{{entity_name}}.aiod_entry_identifier=aiod.aiod_entry.identifier LEFT JOIN aiod.text ON aiod.{{entity_name}}.description_identifier=aiod.text.identifier -WHERE aiod.{{entity_name}}.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value""" +WHERE aiod.{{entity_name}}.date_deleted IS NULL AND aiod.aiod_entry.date_modified > :sql_last_value +""" From ca5c137152ff76cf8b726c26138c66a6d6165272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Sat, 25 Nov 2023 02:38:41 +0100 Subject: [PATCH 66/79] Second round of pull request comments --- .gitignore | 6 + README.md | 2 +- logstash/config/logstash.yml | 9 +- src/routers/search_router.py | 42 +--- .../search_routers/search_router_datasets.py | 4 +- .../search_routers/search_router_events.py | 4 +- .../search_router_experiments.py | 4 +- .../search_routers/search_router_ml_models.py | 4 +- .../search_routers/search_router_news.py | 4 +- .../search_router_organisations.py | 4 +- .../search_routers/search_router_projects.py | 4 +- .../search_router_publications.py | 4 +- .../search_routers/search_router_services.py | 4 +- .../generate_elasticsearch_indices.py | 44 ++-- .../generate_logstash_config_files.py | 109 ++++----- .../{ => templates}/__init__.py | 0 .../{ => templates}/config_file_template.py | 0 .../{ => templates}/file_generated_comment.py | 0 .../pipeline_config_init_file_template.py | 0 .../pipeline_config_sync_file_template.py | 0 .../pipeline_sql_init_file_template.py | 0 .../pipeline_sql_rm_file_template.py | 0 .../pipeline_sql_sync_file_template.py | 0 .../search_routers/test_search_routers.py | 213 +++++++++--------- 24 files changed, 206 insertions(+), 255 deletions(-) rename src/setup/logstash_setup/{ => templates}/__init__.py (100%) rename src/setup/logstash_setup/{ => templates}/config_file_template.py (100%) rename src/setup/logstash_setup/{ => templates}/file_generated_comment.py (100%) rename src/setup/logstash_setup/{ => templates}/pipeline_config_init_file_template.py (100%) rename src/setup/logstash_setup/{ => templates}/pipeline_config_sync_file_template.py (100%) rename src/setup/logstash_setup/{ => templates}/pipeline_sql_init_file_template.py (100%) rename src/setup/logstash_setup/{ => templates}/pipeline_sql_rm_file_template.py (100%) rename src/setup/logstash_setup/{ => templates}/pipeline_sql_sync_file_template.py (100%) diff --git a/.gitignore b/.gitignore index a38142c5..8d42f315 100644 --- a/.gitignore +++ b/.gitignore @@ -115,6 +115,12 @@ env.bak/ venv.bak/ **.DS_Store +# Logstash configuration +logstash/pipeline/conf/* +!logstash/pipeline/conf/.gitkeep +logstash/pipeline/sql/* +!logstash/pipeline/sql/.gitkeep + # Spyder project settings .spyderproject .spyproject diff --git a/README.md b/README.md index 322804c6..7cd7fafa 100644 --- a/README.md +++ b/README.md @@ -60,7 +60,7 @@ For development: ## Production environment -For production environments elasticsearch recomends -Xss4G and -Xmx8G for the JVM settings.\ +For production environments elasticsearch recommends -Xss4G and -Xmx8G for the JVM settings.\ This parameters can be defined in the .env file. See the [elasticsearch guide](https://www.elastic.co/guide/en/logstash/current/jvm-settings.html). diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml index 8b137891..4d6ca384 100644 --- a/logstash/config/logstash.yml +++ b/logstash/config/logstash.yml @@ -1 +1,8 @@ - +# This file has been generated by `generate_logstash_config_files.py` +# file, placed in `src/setup/logstash_setup` +# ------------------------------------------------------------------- +http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] +xpack.monitoring.enabled: true +xpack.monitoring.elasticsearch.username: elastic +xpack.monitoring.elasticsearch.password: changeme \ No newline at end of file diff --git a/src/routers/search_router.py b/src/routers/search_router.py index 5e30b89d..f7a4ef39 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -18,7 +18,6 @@ LIMIT_MAX = 1000 RESOURCE = TypeVar("RESOURCE", bound=AIoDConcept) -# RESOURCE = TypeVar("RESOURCE") class SearchResult(BaseModel, Generic[RESOURCE]): @@ -58,7 +57,7 @@ def resource_class(self) -> RESOURCE: @property @abc.abstractmethod - def match_fields(self) -> set: + def indexed_fields(self) -> set[str]: """The set of indexed fields""" def create(self, url_prefix: str) -> APIRouter: @@ -77,15 +76,11 @@ def search( f""" Search for {self.resource_name_plural}. """ - - # Parameter correctness - # ----------------------------------------------------------------- - try: with DbSession() as session: query = select(Platform) database_platforms = session.scalars(query).all() - platform_names = set([p.name for p in database_platforms]) + platform_names = {p.name for p in database_platforms} except Exception as e: raise _wrap_as_http_exception(e) @@ -94,70 +89,45 @@ def search( status_code=status.HTTP_400_BAD_REQUEST, detail=f"The available platforms are: {platform_names}", ) - - fields = search_fields if search_fields else self.match_fields - if not set(fields).issubset(self.match_fields): + fields = search_fields if search_fields else self.indexed_fields + if not set(fields).issubset(self.indexed_fields): raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=f"The available search fields for this entity " - f"are: {self.match_fields}", + f"are: {self.indexed_fields}", ) - if limit > LIMIT_MAX: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=f"The limit should be maximum {LIMIT_MAX}. " f"If you want more results, use pagination.", ) - if offset < 0: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="The offset should be greater or equal than 0.", ) - - # Prepare query - # ----------------------------------------------------------------- - - # Matches of the search concept for each field query_matches = [{"match": {f: search_query}} for f in fields] - - # Must match search concept on at least one field query = {"bool": {"should": query_matches, "minimum_should_match": 1}} if platforms: - - # Matches of the platform field for each selected platform platform_matches = [{"match": {"platform": p}} for p in platforms] - - # Must match platform and search query on at least one field query["bool"]["must"] = { "bool": {"should": platform_matches, "minimum_should_match": 1} } - - # Launch search query - # ----------------------------------------------------------------- - result = ElasticsearchSingleton().client.search( index=self.es_index, query=query, from_=offset, size=limit, sort=SORT ) - total_hits = result["hits"]["total"]["value"] if get_all: - - # Launch database query resources: list[SQLModel] = [ self._db_query(read_class, self.resource_class, hit["_source"]["identifier"]) for hit in result["hits"]["hits"] ] - else: - - # Return just the elasticsearch contents resources: list[Type[RESOURCE]] = [ # type: ignore self._cast_resource(read_class, hit["_source"]) for hit in result["hits"]["hits"] ] - return SearchResult[RESOURCE]( # type: ignore total_hits=total_hits, resources=resources, @@ -194,7 +164,7 @@ def _cast_resource( for key, val in resource_dict.items() if key != "type" and not key.startswith("@") } - resource = read_class(**kwargs) # type: ignore + resource = read_class(**kwargs) resource.aiod_entry = AIoDEntryRead(date_modified=resource_dict["date_modified"]) resource.description = {"plain": resource_dict["plain"], "html": resource_dict["html"]} return self._clean_structure(dict(resource)) diff --git a/src/routers/search_routers/search_router_datasets.py b/src/routers/search_routers/search_router_datasets.py index a0bcb06c..b5a6d8cb 100644 --- a/src/routers/search_routers/search_router_datasets.py +++ b/src/routers/search_routers/search_router_datasets.py @@ -16,5 +16,5 @@ def resource_class(self): return Dataset @property - def match_fields(self): - return set(["name", "plain", "html", "issn"]) + def indexed_fields(self): + return {"name", "plain", "html", "issn"} diff --git a/src/routers/search_routers/search_router_events.py b/src/routers/search_routers/search_router_events.py index d6f0996a..242407fb 100644 --- a/src/routers/search_routers/search_router_events.py +++ b/src/routers/search_routers/search_router_events.py @@ -16,5 +16,5 @@ def resource_class(self): return Event @property - def match_fields(self): - return set(["name", "plain", "html"]) + def indexed_fields(self): + return {"name", "plain", "html"} diff --git a/src/routers/search_routers/search_router_experiments.py b/src/routers/search_routers/search_router_experiments.py index 78a2258d..9c9954a3 100644 --- a/src/routers/search_routers/search_router_experiments.py +++ b/src/routers/search_routers/search_router_experiments.py @@ -16,5 +16,5 @@ def resource_class(self): return Experiment @property - def match_fields(self): - return set(["name", "plain", "html"]) + def indexed_fields(self): + return {"name", "plain", "html"} diff --git a/src/routers/search_routers/search_router_ml_models.py b/src/routers/search_routers/search_router_ml_models.py index be4e2391..bc380e41 100644 --- a/src/routers/search_routers/search_router_ml_models.py +++ b/src/routers/search_routers/search_router_ml_models.py @@ -16,5 +16,5 @@ def resource_class(self): return MLModel @property - def match_fields(self): - return set(["name", "plain", "html"]) + def indexed_fields(self): + return {"name", "plain", "html"} diff --git a/src/routers/search_routers/search_router_news.py b/src/routers/search_routers/search_router_news.py index 6f080c64..c1f44bbc 100644 --- a/src/routers/search_routers/search_router_news.py +++ b/src/routers/search_routers/search_router_news.py @@ -16,5 +16,5 @@ def resource_class(self): return News @property - def match_fields(self): - return set(["name", "plain", "html", "headline", "alternative_headline"]) + def indexed_fields(self): + return {"name", "plain", "html", "headline", "alternative_headline"} diff --git a/src/routers/search_routers/search_router_organisations.py b/src/routers/search_routers/search_router_organisations.py index 10db82b4..922162f9 100644 --- a/src/routers/search_routers/search_router_organisations.py +++ b/src/routers/search_routers/search_router_organisations.py @@ -16,5 +16,5 @@ def resource_class(self): return Organisation @property - def match_fields(self): - return set(["name", "legal_name", "plain", "html"]) + def indexed_fields(self): + return {"name", "legal_name", "plain", "html"} diff --git a/src/routers/search_routers/search_router_projects.py b/src/routers/search_routers/search_router_projects.py index 67cfce02..e8f8fe71 100644 --- a/src/routers/search_routers/search_router_projects.py +++ b/src/routers/search_routers/search_router_projects.py @@ -16,5 +16,5 @@ def resource_class(self): return Project @property - def match_fields(self): - return set(["name", "plain", "html"]) + def indexed_fields(self): + return {"name", "plain", "html"} diff --git a/src/routers/search_routers/search_router_publications.py b/src/routers/search_routers/search_router_publications.py index 074e344a..f4f71675 100644 --- a/src/routers/search_routers/search_router_publications.py +++ b/src/routers/search_routers/search_router_publications.py @@ -16,5 +16,5 @@ def resource_class(self): return Publication @property - def match_fields(self): - return set(["name", "plain", "html", "issn", "isbn"]) + def indexed_fields(self): + return {"name", "plain", "html", "issn", "isbn"} diff --git a/src/routers/search_routers/search_router_services.py b/src/routers/search_routers/search_router_services.py index 9e3ac8cf..58dd1748 100644 --- a/src/routers/search_routers/search_router_services.py +++ b/src/routers/search_routers/search_router_services.py @@ -16,5 +16,5 @@ def resource_class(self): return Service @property - def match_fields(self): - return set(["name", "plain", "html", "slogan"]) + def indexed_fields(self): + return {"name", "plain", "html", "slogan"} diff --git a/src/setup/es_setup/generate_elasticsearch_indices.py b/src/setup/es_setup/generate_elasticsearch_indices.py index 6aec8a69..bd6248ce 100755 --- a/src/setup/es_setup/generate_elasticsearch_indices.py +++ b/src/setup/es_setup/generate_elasticsearch_indices.py @@ -6,42 +6,34 @@ Launched by the es_logstash_setup container in the docker-compose file. """ -import os import copy -from elasticsearch import Elasticsearch +from routers.search_routers.elasticsearch import ElasticsearchSingleton from routers.search_routers import router_list from definitions import BASE_MAPPING -def add_field(base_mapping, field): - new_mapping = copy.deepcopy(base_mapping) - new_mapping["mappings"]["properties"][field] = { - "type": "text", - "fields": {"keyword": {"type": "keyword"}}, - } - return new_mapping - - -def generate_mapping(entity, fields): - mapping = BASE_MAPPING - for field in fields: - mapping = add_field(mapping, field) +def generate_mapping(fields): + mapping = copy.deepcopy(BASE_MAPPING) + for field_name in fields: + mapping["mappings"]["properties"][field_name] = { + "type": "text", + "fields": {"keyword": {"type": "keyword"}}, + } return mapping def main(): - es_user = os.environ["ES_USER"] - es_password = os.environ["ES_PASSWORD"] - es_client = Elasticsearch("http://elasticsearch:9200", basic_auth=(es_user, es_password)) - global_fields = set(["name", "plain", "html"]) - entities = {} - for router in router_list: - extra_fields = list(router.match_fields ^ global_fields) - entities[router.es_index] = extra_fields - for entity, fields in entities.items(): - mapping = generate_mapping(entity, fields) - es_client.indices.create(index=entity, body=mapping, ignore=400) + es_client = ElasticsearchSingleton().client + global_fields = {"name", "plain", "html"} + entities = { + router.es_index: list(router.match_fields ^ global_fields) for router in router_list + } + for es_index, fields in entities.items(): + mapping = generate_mapping(fields) + + # ignore 400 cause by IndexAlreadyExistsException when creating an index + es_client.indices.create(index=es_index, body=mapping, ignore=400) if __name__ == "__main__": diff --git a/src/setup/logstash_setup/generate_logstash_config_files.py b/src/setup/logstash_setup/generate_logstash_config_files.py index 852135f4..8d114977 100755 --- a/src/setup/logstash_setup/generate_logstash_config_files.py +++ b/src/setup/logstash_setup/generate_logstash_config_files.py @@ -11,16 +11,28 @@ """ import os +from pathlib import Path from jinja2 import Template from routers.search_routers import router_list -from file_generated_comment import FILE_IS_GENERATED_COMMENT -from config_file_template import CONFIG_FILE_TEMPLATE -from pipeline_config_init_file_template import PIPELINE_CONFIG_INIT_FILE_TEMPLATE -from pipeline_config_sync_file_template import PIPELINE_CONFIG_SYNC_FILE_TEMPLATE -from pipeline_sql_init_file_template import PIPELINE_SQL_INIT_FILE_TEMPLATE -from pipeline_sql_sync_file_template import PIPELINE_SQL_SYNC_FILE_TEMPLATE -from pipeline_sql_rm_file_template import PIPELINE_SQL_RM_FILE_TEMPLATE +from templates.file_generated_comment import FILE_IS_GENERATED_COMMENT +from templates.config_file_template import CONFIG_FILE_TEMPLATE +from templates.pipeline_config_init_file_template import PIPELINE_CONFIG_INIT_FILE_TEMPLATE +from templates.pipeline_config_sync_file_template import PIPELINE_CONFIG_SYNC_FILE_TEMPLATE +from templates.pipeline_sql_init_file_template import PIPELINE_SQL_INIT_FILE_TEMPLATE +from templates.pipeline_sql_sync_file_template import PIPELINE_SQL_SYNC_FILE_TEMPLATE +from templates.pipeline_sql_rm_file_template import PIPELINE_SQL_RM_FILE_TEMPLATE + + +BASE_PATH = Path("/logstash") +CONFIG_PATH = BASE_PATH / "config" +PIPELINE_CONFIG_PATH = BASE_PATH / "pipeline" / "conf" +pipeline_sql_path = BASE_PATH / "pipeline" / "sql" +DB_USER = "root" +DB_PASS = os.environ["MYSQL_ROOT_PASSWORD"] +ES_PASS = os.environ["ES_USER"] +ES_USER = os.environ["ES_PASSWORD"] +GLOBAL_FIELDS = {"name", "plain", "html"} def generate_file(file_path, template, file_data): @@ -30,72 +42,39 @@ def generate_file(file_path, template, file_data): def main(): - base_path = "/logstash" - db_user = "root" - db_pass = os.environ["MYSQL_ROOT_PASSWORD"] - es_user = os.environ["ES_USER"] - es_pass = os.environ["ES_PASSWORD"] - global_fields = set(["name", "plain", "html"]) - entities = {} - for router in router_list: - entities[router.es_index] = list(router.match_fields ^ global_fields) - config_path = os.path.join(base_path, "config") - os.makedirs(config_path, exist_ok=True) - pipeline_config_path = os.path.join(base_path, "pipeline", "conf") - os.makedirs(pipeline_config_path, exist_ok=True) - pipeline_sql_path = os.path.join(base_path, "pipeline", "sql") - os.makedirs(pipeline_sql_path, exist_ok=True) - config_file_data = { - "file": os.path.basename(__file__), - "path": os.path.dirname(__file__).replace("/app", "src"), - "comment_tag": "#", - "es_user": es_user, - "es_pass": es_pass, + for path in (CONFIG_PATH, PIPELINE_CONFIG_PATH, pipeline_sql_path): + path.mkdir(parents=True, exist_ok=True) + entities = { + router.es_index: list(router.match_fields ^ GLOBAL_FIELDS) for router in router_list } - config_file_path = os.path.join(config_path, "logstash.yml") - generate_file(config_file_path, CONFIG_FILE_TEMPLATE, config_file_data) - pipeline_config_files_data = { + render_parameters = { "file": os.path.basename(__file__), "path": os.path.dirname(__file__).replace("/app", "src"), "comment_tag": "#", - "es_user": es_user, - "es_pass": es_pass, - "db_user": db_user, - "db_pass": db_pass, + "es_user": ES_USER, + "es_pass": ES_PASS, + "db_user": DB_USER, + "db_pass": DB_PASS, "entities": entities.keys(), } - pipeline_config_init_file_path = os.path.join(pipeline_config_path, "init_table.conf") - generate_file( - pipeline_config_init_file_path, - PIPELINE_CONFIG_INIT_FILE_TEMPLATE, - pipeline_config_files_data, - ) - pipeline_config_sync_file_path = os.path.join(pipeline_config_path, "sync_table.conf") - generate_file( - pipeline_config_sync_file_path, - PIPELINE_CONFIG_SYNC_FILE_TEMPLATE, - pipeline_config_files_data, - ) + config_file = os.path.join(CONFIG_PATH, "logstash.yml") + config_init_file = os.path.join(PIPELINE_CONFIG_PATH, "init_table.conf") + config_sync_file = os.path.join(PIPELINE_CONFIG_PATH, "sync_table.conf") + generate_file(config_file, CONFIG_FILE_TEMPLATE, render_parameters) + generate_file(config_init_file, PIPELINE_CONFIG_INIT_FILE_TEMPLATE, render_parameters) + generate_file(config_sync_file, PIPELINE_CONFIG_SYNC_FILE_TEMPLATE, render_parameters) + render_parameters["comment_tag"] = "--" for entity, extra_fields in entities.items(): - pipeline_sql_files_data = { - "file": os.path.basename(__file__), - "path": os.path.dirname(__file__).replace("/app", "src"), - "comment_tag": "--", - "entity_name": entity, - "extra_fields": ",\n " + ",\n ".join(extra_fields) if extra_fields else "", - } - pipeline_sql_init_file_path = os.path.join(pipeline_sql_path, f"init_{entity}.sql") - generate_file( - pipeline_sql_init_file_path, PIPELINE_SQL_INIT_FILE_TEMPLATE, pipeline_sql_files_data - ) - pipeline_sql_sync_file_path = os.path.join(pipeline_sql_path, f"sync_{entity}.sql") - generate_file( - pipeline_sql_sync_file_path, PIPELINE_SQL_SYNC_FILE_TEMPLATE, pipeline_sql_files_data - ) - pipeline_sql_rm_file_path = os.path.join(pipeline_sql_path, f"rm_{entity}.sql") - generate_file( - pipeline_sql_rm_file_path, PIPELINE_SQL_RM_FILE_TEMPLATE, pipeline_sql_files_data + render_parameters["entity_name"] = entity + render_parameters["extra_fields"] = ( + ",\n " + ",\n ".join(extra_fields) if extra_fields else "" ) + sql_init_file = os.path.join(pipeline_sql_path, f"init_{entity}.sql") + sql_sync_file = os.path.join(pipeline_sql_path, f"sync_{entity}.sql") + sql_rm_file = os.path.join(pipeline_sql_path, f"rm_{entity}.sql") + generate_file(sql_init_file, PIPELINE_SQL_INIT_FILE_TEMPLATE, render_parameters) + generate_file(sql_sync_file, PIPELINE_SQL_SYNC_FILE_TEMPLATE, render_parameters) + generate_file(sql_rm_file, PIPELINE_SQL_RM_FILE_TEMPLATE, render_parameters) if __name__ == "__main__": diff --git a/src/setup/logstash_setup/__init__.py b/src/setup/logstash_setup/templates/__init__.py similarity index 100% rename from src/setup/logstash_setup/__init__.py rename to src/setup/logstash_setup/templates/__init__.py diff --git a/src/setup/logstash_setup/config_file_template.py b/src/setup/logstash_setup/templates/config_file_template.py similarity index 100% rename from src/setup/logstash_setup/config_file_template.py rename to src/setup/logstash_setup/templates/config_file_template.py diff --git a/src/setup/logstash_setup/file_generated_comment.py b/src/setup/logstash_setup/templates/file_generated_comment.py similarity index 100% rename from src/setup/logstash_setup/file_generated_comment.py rename to src/setup/logstash_setup/templates/file_generated_comment.py diff --git a/src/setup/logstash_setup/pipeline_config_init_file_template.py b/src/setup/logstash_setup/templates/pipeline_config_init_file_template.py similarity index 100% rename from src/setup/logstash_setup/pipeline_config_init_file_template.py rename to src/setup/logstash_setup/templates/pipeline_config_init_file_template.py diff --git a/src/setup/logstash_setup/pipeline_config_sync_file_template.py b/src/setup/logstash_setup/templates/pipeline_config_sync_file_template.py similarity index 100% rename from src/setup/logstash_setup/pipeline_config_sync_file_template.py rename to src/setup/logstash_setup/templates/pipeline_config_sync_file_template.py diff --git a/src/setup/logstash_setup/pipeline_sql_init_file_template.py b/src/setup/logstash_setup/templates/pipeline_sql_init_file_template.py similarity index 100% rename from src/setup/logstash_setup/pipeline_sql_init_file_template.py rename to src/setup/logstash_setup/templates/pipeline_sql_init_file_template.py diff --git a/src/setup/logstash_setup/pipeline_sql_rm_file_template.py b/src/setup/logstash_setup/templates/pipeline_sql_rm_file_template.py similarity index 100% rename from src/setup/logstash_setup/pipeline_sql_rm_file_template.py rename to src/setup/logstash_setup/templates/pipeline_sql_rm_file_template.py diff --git a/src/setup/logstash_setup/pipeline_sql_sync_file_template.py b/src/setup/logstash_setup/templates/pipeline_sql_sync_file_template.py similarity index 100% rename from src/setup/logstash_setup/pipeline_sql_sync_file_template.py rename to src/setup/logstash_setup/templates/pipeline_sql_sync_file_template.py diff --git a/src/tests/routers/search_routers/test_search_routers.py b/src/tests/routers/search_routers/test_search_routers.py index 74915a45..c29a8aab 100644 --- a/src/tests/routers/search_routers/test_search_routers.py +++ b/src/tests/routers/search_routers/test_search_routers.py @@ -1,5 +1,6 @@ import os import json +import pytest from unittest.mock import Mock @@ -11,142 +12,138 @@ import routers.search_routers as sr -def test_search_happy_path(client: TestClient): +@pytest.mark.parametrize("search_router", sr.router_list) +def test_search_happy_path(client: TestClient, search_router): """Tests the search router""" mocked_elasticsearch = Elasticsearch("https://example.com:9200") ElasticsearchSingleton().patch(mocked_elasticsearch) - for search_router in sr.router_list: - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - resource_file = f"{search_router.es_index}_search.json" - mocked_file = os.path.join(resources_path, resource_file) - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - mocked_elasticsearch.search = Mock(return_value=mocked_results) - search_service = f"/search/{search_router.resource_name_plural}/v1" - params = {"search_query": "description", "get_all": False} - response = client.get(search_service, params=params) - - # Assert the correct execution and get the response - assert response.status_code == 200, response.json() - resource = response.json()["resources"][0] - - # Test the common responses - assert resource["identifier"] == 1 - assert resource["name"] == "A name." - assert resource["description"]["plain"] == "A plain text description." - assert resource["description"]["html"] == "An html description." - assert resource["aiod_entry"]["date_modified"] == "2023-09-01T00:00:00+00:00" - - # Test the extra fields - global_fields = {"name", "plain", "html"} - extra_fields = list(search_router.match_fields ^ global_fields) - for field in extra_fields: - assert resource[field] - - -def test_search_bad_platform(client: TestClient): + + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + resource_file = f"{search_router.es_index}_search.json" + mocked_file = os.path.join(resources_path, resource_file) + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + mocked_elasticsearch.search = Mock(return_value=mocked_results) + search_service = f"/search/{search_router.resource_name_plural}/v1" + params = {"search_query": "description", "get_all": False} + response = client.get(search_service, params=params) + + # Assert the correct execution and get the response + assert response.status_code == 200, response.json() + resource = response.json()["resources"][0] + + # Test the common responses + assert resource["identifier"] == 1 + assert resource["name"] == "A name." + assert resource["description"]["plain"] == "A plain text description." + assert resource["description"]["html"] == "An html description." + assert resource["aiod_entry"]["date_modified"] == "2023-09-01T00:00:00+00:00" + + # Test the extra fields + global_fields = {"name", "plain", "html"} + extra_fields = list(search_router.indexed_fields ^ global_fields) + for field in extra_fields: + assert resource[field] + + +@pytest.mark.parametrize("search_router", sr.router_list) +def test_search_bad_platform(client: TestClient, search_router): """Tests the search router bad platform error""" mocked_elasticsearch = Elasticsearch("https://example.com:9200") ElasticsearchSingleton().patch(mocked_elasticsearch) - for search_router in sr.router_list: - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - resource_file = f"{search_router.es_index}_search.json" - mocked_file = os.path.join(resources_path, resource_file) - with open(mocked_file, "r") as f: - mocked_results = json.load(f) + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + resource_file = f"{search_router.es_index}_search.json" + mocked_file = os.path.join(resources_path, resource_file) + with open(mocked_file, "r") as f: + mocked_results = json.load(f) - # Mock and launch - mocked_elasticsearch.search = Mock(return_value=mocked_results) - search_service = f"/search/{search_router.resource_name_plural}/v1" - params = {"search_query": "description", "platforms": ["bad_platform"]} - response = client.get(search_service, params=params) + # Mock and launch + mocked_elasticsearch.search = Mock(return_value=mocked_results) + search_service = f"/search/{search_router.resource_name_plural}/v1" + params = {"search_query": "description", "platforms": ["bad_platform"]} + response = client.get(search_service, params=params) - # Assert the platform error - assert response.status_code == 400, response.json() - err_msg = "The available platforms are" - assert response.json()["detail"][: len(err_msg)] == err_msg + # Assert the platform error + assert response.status_code == 400, response.json() + err_msg = "The available platforms are" + assert response.json()["detail"][: len(err_msg)] == err_msg -def test_search_bad_fields(client: TestClient): +@pytest.mark.parametrize("search_router", sr.router_list) +def test_search_bad_fields(client: TestClient, search_router): """Tests the search router bad fields error""" mocked_elasticsearch = Elasticsearch("https://example.com:9200") ElasticsearchSingleton().patch(mocked_elasticsearch) - for search_router in sr.router_list: + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + resource_file = f"{search_router.es_index}_search.json" + mocked_file = os.path.join(resources_path, resource_file) + with open(mocked_file, "r") as f: + mocked_results = json.load(f) - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - resource_file = f"{search_router.es_index}_search.json" - mocked_file = os.path.join(resources_path, resource_file) - with open(mocked_file, "r") as f: - mocked_results = json.load(f) + # Mock and launch + mocked_elasticsearch.search = Mock(return_value=mocked_results) + search_service = f"/search/{search_router.resource_name_plural}/v1" + params = {"search_query": "description", "search_fields": ["bad_field"]} + response = client.get(search_service, params=params) - # Mock and launch - mocked_elasticsearch.search = Mock(return_value=mocked_results) - search_service = f"/search/{search_router.resource_name_plural}/v1" - params = {"search_query": "description", "search_fields": ["bad_field"]} - response = client.get(search_service, params=params) + # Assert the platform error + assert response.status_code == 400, response.json() + err_msg = "The available search fields for this entity are" + assert response.json()["detail"][: len(err_msg)] == err_msg - # Assert the platform error - assert response.status_code == 400, response.json() - err_msg = "The available search fields for this entity are" - assert response.json()["detail"][: len(err_msg)] == err_msg - -def test_search_bad_limit(client: TestClient): +@pytest.mark.parametrize("search_router", sr.router_list) +def test_search_bad_limit(client: TestClient, search_router): """Tests the search router bad fields error""" mocked_elasticsearch = Elasticsearch("https://example.com:9200") ElasticsearchSingleton().patch(mocked_elasticsearch) - for search_router in sr.router_list: - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - resource_file = f"{search_router.es_index}_search.json" - mocked_file = os.path.join(resources_path, resource_file) - with open(mocked_file, "r") as f: - mocked_results = json.load(f) + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + resource_file = f"{search_router.es_index}_search.json" + mocked_file = os.path.join(resources_path, resource_file) + with open(mocked_file, "r") as f: + mocked_results = json.load(f) - # Mock and launch - mocked_elasticsearch.search = Mock(return_value=mocked_results) - search_service = f"/search/{search_router.resource_name_plural}/v1" - params = {"search_query": "description", "limit": 1001} - response = client.get(search_service, params=params) + # Mock and launch + mocked_elasticsearch.search = Mock(return_value=mocked_results) + search_service = f"/search/{search_router.resource_name_plural}/v1" + params = {"search_query": "description", "limit": 1001} + response = client.get(search_service, params=params) - # Assert the platform error - assert response.status_code == 400, response.json() - err_msg = "The limit should be maximum 1000." - assert response.json()["detail"][: len(err_msg)] == err_msg + # Assert the platform error + assert response.status_code == 400, response.json() + err_msg = "The limit should be maximum 1000." + assert response.json()["detail"][: len(err_msg)] == err_msg -def test_search_bad_offset(client: TestClient): +@pytest.mark.parametrize("search_router", sr.router_list) +def test_search_bad_offset(client: TestClient, search_router): """Tests the search router bad fields error""" mocked_elasticsearch = Elasticsearch("https://example.com:9200") ElasticsearchSingleton().patch(mocked_elasticsearch) - for search_router in sr.router_list: - - # Get the mocker results to test - resources_path = os.path.join(path_test_resources(), "elasticsearch") - resource_file = f"{search_router.es_index}_search.json" - mocked_file = os.path.join(resources_path, resource_file) - with open(mocked_file, "r") as f: - mocked_results = json.load(f) - - # Mock and launch - mocked_elasticsearch.search = Mock(return_value=mocked_results) - search_service = f"/search/{search_router.resource_name_plural}/v1" - params = {"search_query": "description", "offset": -1} - response = client.get(search_service, params=params) - - # Assert the platform error - assert response.status_code == 400, response.json() - err_msg = "The offset should be greater or equal than 0." - assert response.json()["detail"][: len(err_msg)] == err_msg + # Get the mocker results to test + resources_path = os.path.join(path_test_resources(), "elasticsearch") + resource_file = f"{search_router.es_index}_search.json" + mocked_file = os.path.join(resources_path, resource_file) + with open(mocked_file, "r") as f: + mocked_results = json.load(f) + + # Mock and launch + mocked_elasticsearch.search = Mock(return_value=mocked_results) + search_service = f"/search/{search_router.resource_name_plural}/v1" + params = {"search_query": "description", "offset": -1} + response = client.get(search_service, params=params) + + # Assert the platform error + assert response.status_code == 400, response.json() + err_msg = "The offset should be greater or equal than 0." + assert response.json()["detail"][: len(err_msg)] == err_msg From 4f17532f03f1f9e061141f2f73e9ccd7fd350da5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Sat, 25 Nov 2023 02:55:05 +0100 Subject: [PATCH 67/79] Second round of pull request comments --- logstash/config/logstash.yml | 9 +-------- src/setup/es_setup/generate_elasticsearch_indices.py | 2 +- .../logstash_setup/generate_logstash_config_files.py | 6 +++--- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml index 4d6ca384..8b137891 100644 --- a/logstash/config/logstash.yml +++ b/logstash/config/logstash.yml @@ -1,8 +1 @@ -# This file has been generated by `generate_logstash_config_files.py` -# file, placed in `src/setup/logstash_setup` -# ------------------------------------------------------------------- -http.host: "0.0.0.0" -xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] -xpack.monitoring.enabled: true -xpack.monitoring.elasticsearch.username: elastic -xpack.monitoring.elasticsearch.password: changeme \ No newline at end of file + diff --git a/src/setup/es_setup/generate_elasticsearch_indices.py b/src/setup/es_setup/generate_elasticsearch_indices.py index bd6248ce..5416982e 100755 --- a/src/setup/es_setup/generate_elasticsearch_indices.py +++ b/src/setup/es_setup/generate_elasticsearch_indices.py @@ -27,7 +27,7 @@ def main(): es_client = ElasticsearchSingleton().client global_fields = {"name", "plain", "html"} entities = { - router.es_index: list(router.match_fields ^ global_fields) for router in router_list + router.es_index: list(router.indexed_fields ^ global_fields) for router in router_list } for es_index, fields in entities.items(): mapping = generate_mapping(fields) diff --git a/src/setup/logstash_setup/generate_logstash_config_files.py b/src/setup/logstash_setup/generate_logstash_config_files.py index 8d114977..5818a371 100755 --- a/src/setup/logstash_setup/generate_logstash_config_files.py +++ b/src/setup/logstash_setup/generate_logstash_config_files.py @@ -30,8 +30,8 @@ pipeline_sql_path = BASE_PATH / "pipeline" / "sql" DB_USER = "root" DB_PASS = os.environ["MYSQL_ROOT_PASSWORD"] -ES_PASS = os.environ["ES_USER"] -ES_USER = os.environ["ES_PASSWORD"] +ES_USER = os.environ["ES_USER"] +ES_PASS = os.environ["ES_PASSWORD"] GLOBAL_FIELDS = {"name", "plain", "html"} @@ -45,7 +45,7 @@ def main(): for path in (CONFIG_PATH, PIPELINE_CONFIG_PATH, pipeline_sql_path): path.mkdir(parents=True, exist_ok=True) entities = { - router.es_index: list(router.match_fields ^ GLOBAL_FIELDS) for router in router_list + router.es_index: list(router.indexed_fields ^ GLOBAL_FIELDS) for router in router_list } render_parameters = { "file": os.path.basename(__file__), From b60c9aaa43ebe3e5db6c2db96c835c60bce48237 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Sat, 25 Nov 2023 04:00:01 +0100 Subject: [PATCH 68/79] Second round of pull request comments --- logstash/config/logstash.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml index 8b137891..4d6ca384 100644 --- a/logstash/config/logstash.yml +++ b/logstash/config/logstash.yml @@ -1 +1,8 @@ - +# This file has been generated by `generate_logstash_config_files.py` +# file, placed in `src/setup/logstash_setup` +# ------------------------------------------------------------------- +http.host: "0.0.0.0" +xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] +xpack.monitoring.enabled: true +xpack.monitoring.elasticsearch.username: elastic +xpack.monitoring.elasticsearch.password: changeme \ No newline at end of file From 7c2106f186d29f0da0aa83739e004873622f168f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Sat, 25 Nov 2023 04:02:53 +0100 Subject: [PATCH 69/79] Second round of pull request comments --- logstash/pipeline/conf/.gitkeep | 0 logstash/pipeline/sql/.gitkeep | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 logstash/pipeline/conf/.gitkeep create mode 100644 logstash/pipeline/sql/.gitkeep diff --git a/logstash/pipeline/conf/.gitkeep b/logstash/pipeline/conf/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/logstash/pipeline/sql/.gitkeep b/logstash/pipeline/sql/.gitkeep new file mode 100644 index 00000000..e69de29b From 0e138528dae15130dc565cf641bd967c71812390 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n?= Date: Sat, 25 Nov 2023 04:03:33 +0100 Subject: [PATCH 70/79] Second round of pull request comments --- logstash/config/logstash.yml | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml index 4d6ca384..8b137891 100644 --- a/logstash/config/logstash.yml +++ b/logstash/config/logstash.yml @@ -1,8 +1 @@ -# This file has been generated by `generate_logstash_config_files.py` -# file, placed in `src/setup/logstash_setup` -# ------------------------------------------------------------------- -http.host: "0.0.0.0" -xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] -xpack.monitoring.enabled: true -xpack.monitoring.elasticsearch.username: elastic -xpack.monitoring.elasticsearch.password: changeme \ No newline at end of file + From 453b1cd5a66c6f8d335d107e3173fbe8f6ac63e2 Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Mon, 27 Nov 2023 14:44:22 +0100 Subject: [PATCH 71/79] Created data/elasticsearch/.gitkeep to make sure it exists with the right permissions --- data/elasticsearch/.gitkeep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 data/elasticsearch/.gitkeep diff --git a/data/elasticsearch/.gitkeep b/data/elasticsearch/.gitkeep new file mode 100644 index 00000000..e69de29b From 676713623760e39eeaaec80aa55e46f87543b397 Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Mon, 27 Nov 2023 14:58:19 +0100 Subject: [PATCH 72/79] Deleted autogenerated file logstash/config/logstash.yml --- .gitignore | 2 ++ logstash/config/.gitkeep | 0 logstash/config/logstash.yml | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 logstash/config/.gitkeep delete mode 100644 logstash/config/logstash.yml diff --git a/.gitignore b/.gitignore index 8d42f315..e7b8f77e 100644 --- a/.gitignore +++ b/.gitignore @@ -116,6 +116,8 @@ venv.bak/ **.DS_Store # Logstash configuration +logstash/config/* +!logstash/config/.gitkeep logstash/pipeline/conf/* !logstash/pipeline/conf/.gitkeep logstash/pipeline/sql/* diff --git a/logstash/config/.gitkeep b/logstash/config/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml deleted file mode 100644 index 8b137891..00000000 --- a/logstash/config/logstash.yml +++ /dev/null @@ -1 +0,0 @@ - From defc1003b23a8a61609f7407fe3fe1940cc059ab Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Mon, 27 Nov 2023 14:58:54 +0100 Subject: [PATCH 73/79] cleanup --- src/setup/__init__.py | 0 src/setup/logstash_setup/__init__.py | 0 .../generate_logstash_config_files.py | 18 +++++++++++------- 3 files changed, 11 insertions(+), 7 deletions(-) create mode 100644 src/setup/__init__.py create mode 100644 src/setup/logstash_setup/__init__.py diff --git a/src/setup/__init__.py b/src/setup/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/setup/logstash_setup/__init__.py b/src/setup/logstash_setup/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/setup/logstash_setup/generate_logstash_config_files.py b/src/setup/logstash_setup/generate_logstash_config_files.py index 5818a371..4e427388 100755 --- a/src/setup/logstash_setup/generate_logstash_config_files.py +++ b/src/setup/logstash_setup/generate_logstash_config_files.py @@ -27,11 +27,13 @@ BASE_PATH = Path("/logstash") CONFIG_PATH = BASE_PATH / "config" PIPELINE_CONFIG_PATH = BASE_PATH / "pipeline" / "conf" -pipeline_sql_path = BASE_PATH / "pipeline" / "sql" +PIPELINE_SQL_PATH = BASE_PATH / "pipeline" / "sql" + DB_USER = "root" DB_PASS = os.environ["MYSQL_ROOT_PASSWORD"] ES_USER = os.environ["ES_USER"] ES_PASS = os.environ["ES_PASSWORD"] + GLOBAL_FIELDS = {"name", "plain", "html"} @@ -42,7 +44,7 @@ def generate_file(file_path, template, file_data): def main(): - for path in (CONFIG_PATH, PIPELINE_CONFIG_PATH, pipeline_sql_path): + for path in (CONFIG_PATH, PIPELINE_CONFIG_PATH, PIPELINE_SQL_PATH): path.mkdir(parents=True, exist_ok=True) entities = { router.es_index: list(router.indexed_fields ^ GLOBAL_FIELDS) for router in router_list @@ -63,15 +65,17 @@ def main(): generate_file(config_file, CONFIG_FILE_TEMPLATE, render_parameters) generate_file(config_init_file, PIPELINE_CONFIG_INIT_FILE_TEMPLATE, render_parameters) generate_file(config_sync_file, PIPELINE_CONFIG_SYNC_FILE_TEMPLATE, render_parameters) + render_parameters["comment_tag"] = "--" - for entity, extra_fields in entities.items(): - render_parameters["entity_name"] = entity + for es_index, extra_fields in entities.items(): + render_parameters["entity_name"] = es_index render_parameters["extra_fields"] = ( ",\n " + ",\n ".join(extra_fields) if extra_fields else "" ) - sql_init_file = os.path.join(pipeline_sql_path, f"init_{entity}.sql") - sql_sync_file = os.path.join(pipeline_sql_path, f"sync_{entity}.sql") - sql_rm_file = os.path.join(pipeline_sql_path, f"rm_{entity}.sql") + + sql_init_file = os.path.join(PIPELINE_SQL_PATH, f"init_{es_index}.sql") + sql_sync_file = os.path.join(PIPELINE_SQL_PATH, f"sync_{es_index}.sql") + sql_rm_file = os.path.join(PIPELINE_SQL_PATH, f"rm_{es_index}.sql") generate_file(sql_init_file, PIPELINE_SQL_INIT_FILE_TEMPLATE, render_parameters) generate_file(sql_sync_file, PIPELINE_SQL_SYNC_FILE_TEMPLATE, render_parameters) generate_file(sql_rm_file, PIPELINE_SQL_RM_FILE_TEMPLATE, render_parameters) From 5f79f0d4b6bb792d15103b631a2d0bbc01465f7f Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Mon, 27 Nov 2023 15:49:07 +0100 Subject: [PATCH 74/79] Making sure docker compose up works even if generated files do not exist; added logging; simplified file names --- .gitignore | 12 ++-- docker-compose.yaml | 18 +----- .../{pipeline => config/config}/pipelines.yml | 0 logstash/config/{ => pipeline}/.gitkeep | 0 .../{pipeline/conf => config/sql}/.gitkeep | 0 logstash/pipeline/sql/.gitkeep | 0 src/connectors/synchronization.py | 7 +-- src/main.py | 2 + .../generate_elasticsearch_indices.py | 9 ++- .../generate_logstash_config_files.py | 59 ++++++++++--------- .../{config_file_template.py => config.py} | 2 +- ...le_generated_comment.py => file_header.py} | 0 ...ig_init_file_template.py => init_table.py} | 2 +- ..._sql_init_file_template.py => sql_init.py} | 2 +- ...line_sql_rm_file_template.py => sql_rm.py} | 2 +- ..._sql_sync_file_template.py => sql_sync.py} | 2 +- ...ig_sync_file_template.py => sync_table.py} | 2 +- src/setup_logger.py | 9 +++ 18 files changed, 66 insertions(+), 62 deletions(-) rename logstash/{pipeline => config/config}/pipelines.yml (100%) rename logstash/config/{ => pipeline}/.gitkeep (100%) rename logstash/{pipeline/conf => config/sql}/.gitkeep (100%) delete mode 100644 logstash/pipeline/sql/.gitkeep rename src/setup/logstash_setup/templates/{config_file_template.py => config.py} (81%) rename src/setup/logstash_setup/templates/{file_generated_comment.py => file_header.py} (100%) rename src/setup/logstash_setup/templates/{pipeline_config_init_file_template.py => init_table.py} (95%) rename src/setup/logstash_setup/templates/{pipeline_sql_init_file_template.py => sql_init.py} (90%) rename src/setup/logstash_setup/templates/{pipeline_sql_rm_file_template.py => sql_rm.py} (66%) rename src/setup/logstash_setup/templates/{pipeline_sql_sync_file_template.py => sql_sync.py} (91%) rename src/setup/logstash_setup/templates/{pipeline_config_sync_file_template.py => sync_table.py} (97%) create mode 100644 src/setup_logger.py diff --git a/.gitignore b/.gitignore index e7b8f77e..cc99bb6f 100644 --- a/.gitignore +++ b/.gitignore @@ -116,12 +116,12 @@ venv.bak/ **.DS_Store # Logstash configuration -logstash/config/* -!logstash/config/.gitkeep -logstash/pipeline/conf/* -!logstash/pipeline/conf/.gitkeep -logstash/pipeline/sql/* -!logstash/pipeline/sql/.gitkeep +logstash/config/config/* +!logstash/config/config/pipelines.yml +logstash/config/pipeline/* +!logstash/config/pipeline/.gitkeep +logstash/config/sql/* +!logstash/config/sql/.gitkeep # Spyder project settings .spyderproject diff --git a/docker-compose.yaml b/docker-compose.yaml index 8c84d372..330c5a1a 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -225,23 +225,9 @@ services: - 9600:9600 volumes: - type: bind - source: ./logstash/config/logstash.yml - target: /usr/share/logstash/config/logstash.yml - read_only: true - - type: bind - source: ./logstash/pipeline/pipelines.yml - target: /usr/share/logstash/config/pipelines.yml - read_only: true - - type: bind - source: ./logstash/pipeline/conf - target: /usr/share/logstash/pipeline - read_only: true - - type: bind - source: ./logstash/pipeline/sql - target: /usr/share/logstash/sql + source: ./logstash/config + target: /usr/share/logstash/ read_only: true depends_on: - fill-db-with-examples: - condition: service_completed_successfully es_logstash_setup: condition: service_completed_successfully diff --git a/logstash/pipeline/pipelines.yml b/logstash/config/config/pipelines.yml similarity index 100% rename from logstash/pipeline/pipelines.yml rename to logstash/config/config/pipelines.yml diff --git a/logstash/config/.gitkeep b/logstash/config/pipeline/.gitkeep similarity index 100% rename from logstash/config/.gitkeep rename to logstash/config/pipeline/.gitkeep diff --git a/logstash/pipeline/conf/.gitkeep b/logstash/config/sql/.gitkeep similarity index 100% rename from logstash/pipeline/conf/.gitkeep rename to logstash/config/sql/.gitkeep diff --git a/logstash/pipeline/sql/.gitkeep b/logstash/pipeline/sql/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/src/connectors/synchronization.py b/src/connectors/synchronization.py index 5bd223d2..4d4ae8ec 100644 --- a/src/connectors/synchronization.py +++ b/src/connectors/synchronization.py @@ -17,6 +17,7 @@ from database.session import DbSession from database.setup import _create_or_fetch_related_objects, _get_existing_resource from routers import ResourceRouter, resource_routers, enum_routers +from setup_logger import setup_logger RELATIVE_PATH_STATE_JSON = pathlib.Path("state.json") RELATIVE_PATH_ERROR_CSV = pathlib.Path("errors.csv") @@ -125,11 +126,7 @@ def main(): shutil.rmtree(working_dir) working_dir.mkdir(parents=True, exist_ok=True) - logging.basicConfig( - level=logging.INFO, - format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - ) + setup_logger() sys.excepthook = exception_handler module_path = ".".join(args.connector.split(".")[0:-1]) diff --git a/src/main.py b/src/main.py index 4dfc8af4..8beaa096 100644 --- a/src/main.py +++ b/src/main.py @@ -23,6 +23,7 @@ from database.setup import drop_or_create_database from routers import resource_routers, parent_routers, enum_routers from routers import search_routers +from setup_logger import setup_logger def _parse_args() -> argparse.Namespace: @@ -89,6 +90,7 @@ def counts() -> dict: def create_app() -> FastAPI: """Create the FastAPI application, complete with routes.""" + setup_logger() args = _parse_args() app = FastAPI( openapi_url=f"{args.url_prefix}/openapi.json", diff --git a/src/setup/es_setup/generate_elasticsearch_indices.py b/src/setup/es_setup/generate_elasticsearch_indices.py index 5416982e..60d59ab5 100755 --- a/src/setup/es_setup/generate_elasticsearch_indices.py +++ b/src/setup/es_setup/generate_elasticsearch_indices.py @@ -7,10 +7,12 @@ """ import copy +import logging -from routers.search_routers.elasticsearch import ElasticsearchSingleton -from routers.search_routers import router_list from definitions import BASE_MAPPING +from routers.search_routers import router_list +from routers.search_routers.elasticsearch import ElasticsearchSingleton +from setup_logger import setup_logger def generate_mapping(fields): @@ -24,16 +26,19 @@ def generate_mapping(fields): def main(): + setup_logger() es_client = ElasticsearchSingleton().client global_fields = {"name", "plain", "html"} entities = { router.es_index: list(router.indexed_fields ^ global_fields) for router in router_list } + logging.info("Generating indices...") for es_index, fields in entities.items(): mapping = generate_mapping(fields) # ignore 400 cause by IndexAlreadyExistsException when creating an index es_client.indices.create(index=es_index, body=mapping, ignore=400) + logging.info("Generating indices completed.") if __name__ == "__main__": diff --git a/src/setup/logstash_setup/generate_logstash_config_files.py b/src/setup/logstash_setup/generate_logstash_config_files.py index 4e427388..38624b5b 100755 --- a/src/setup/logstash_setup/generate_logstash_config_files.py +++ b/src/setup/logstash_setup/generate_logstash_config_files.py @@ -9,25 +9,25 @@ Launched by the es_logstash_setup container in the docker-compose file. """ - +import logging import os from pathlib import Path from jinja2 import Template from routers.search_routers import router_list -from templates.file_generated_comment import FILE_IS_GENERATED_COMMENT -from templates.config_file_template import CONFIG_FILE_TEMPLATE -from templates.pipeline_config_init_file_template import PIPELINE_CONFIG_INIT_FILE_TEMPLATE -from templates.pipeline_config_sync_file_template import PIPELINE_CONFIG_SYNC_FILE_TEMPLATE -from templates.pipeline_sql_init_file_template import PIPELINE_SQL_INIT_FILE_TEMPLATE -from templates.pipeline_sql_sync_file_template import PIPELINE_SQL_SYNC_FILE_TEMPLATE -from templates.pipeline_sql_rm_file_template import PIPELINE_SQL_RM_FILE_TEMPLATE - - -BASE_PATH = Path("/logstash") -CONFIG_PATH = BASE_PATH / "config" -PIPELINE_CONFIG_PATH = BASE_PATH / "pipeline" / "conf" -PIPELINE_SQL_PATH = BASE_PATH / "pipeline" / "sql" +from setup.logstash_setup.templates.config import TEMPLATE_CONFIG +from setup.logstash_setup.templates.file_header import FILE_IS_GENERATED_COMMENT +from setup.logstash_setup.templates.init_table import TEMPLATE_INIT_TABLE +from setup.logstash_setup.templates.sql_init import TEMPLATE_SQL_INIT +from setup.logstash_setup.templates.sql_rm import TEMPLATE_SQL_RM +from setup.logstash_setup.templates.sql_sync import TEMPLATE_SQL_SYNC +from setup.logstash_setup.templates.sync_table import TEMPLATE_SYNC_TABLE +from setup_logger import setup_logger + +PATH_BASE = Path("/logstash/config") +PATH_CONFIG = PATH_BASE / "config" +PATH_PIPELINE = PATH_BASE / "pipeline" +PATH_SQL = PATH_BASE / "sql" DB_USER = "root" DB_PASS = os.environ["MYSQL_ROOT_PASSWORD"] @@ -44,7 +44,8 @@ def generate_file(file_path, template, file_data): def main(): - for path in (CONFIG_PATH, PIPELINE_CONFIG_PATH, PIPELINE_SQL_PATH): + setup_logger() + for path in (PATH_CONFIG, PATH_PIPELINE, PATH_SQL): path.mkdir(parents=True, exist_ok=True) entities = { router.es_index: list(router.indexed_fields ^ GLOBAL_FIELDS) for router in router_list @@ -59,26 +60,30 @@ def main(): "db_pass": DB_PASS, "entities": entities.keys(), } - config_file = os.path.join(CONFIG_PATH, "logstash.yml") - config_init_file = os.path.join(PIPELINE_CONFIG_PATH, "init_table.conf") - config_sync_file = os.path.join(PIPELINE_CONFIG_PATH, "sync_table.conf") - generate_file(config_file, CONFIG_FILE_TEMPLATE, render_parameters) - generate_file(config_init_file, PIPELINE_CONFIG_INIT_FILE_TEMPLATE, render_parameters) - generate_file(config_sync_file, PIPELINE_CONFIG_SYNC_FILE_TEMPLATE, render_parameters) + logging.info("Generating configuration files...") + config_file = os.path.join(PATH_CONFIG, "logstash.yml") + config_init_file = os.path.join(PATH_PIPELINE, "init_table.conf") + config_sync_file = os.path.join(PATH_PIPELINE, "sync_table.conf") + generate_file(config_file, TEMPLATE_CONFIG, render_parameters) + generate_file(config_init_file, TEMPLATE_INIT_TABLE, render_parameters) + generate_file(config_sync_file, TEMPLATE_SYNC_TABLE, render_parameters) render_parameters["comment_tag"] = "--" + logging.info("Generating configuration files completed.") + logging.info("Generating sql files...") for es_index, extra_fields in entities.items(): render_parameters["entity_name"] = es_index render_parameters["extra_fields"] = ( ",\n " + ",\n ".join(extra_fields) if extra_fields else "" ) - sql_init_file = os.path.join(PIPELINE_SQL_PATH, f"init_{es_index}.sql") - sql_sync_file = os.path.join(PIPELINE_SQL_PATH, f"sync_{es_index}.sql") - sql_rm_file = os.path.join(PIPELINE_SQL_PATH, f"rm_{es_index}.sql") - generate_file(sql_init_file, PIPELINE_SQL_INIT_FILE_TEMPLATE, render_parameters) - generate_file(sql_sync_file, PIPELINE_SQL_SYNC_FILE_TEMPLATE, render_parameters) - generate_file(sql_rm_file, PIPELINE_SQL_RM_FILE_TEMPLATE, render_parameters) + sql_init_file = os.path.join(PATH_SQL, f"init_{es_index}.sql") + sql_sync_file = os.path.join(PATH_SQL, f"sync_{es_index}.sql") + sql_rm_file = os.path.join(PATH_SQL, f"rm_{es_index}.sql") + generate_file(sql_init_file, TEMPLATE_SQL_INIT, render_parameters) + generate_file(sql_sync_file, TEMPLATE_SQL_SYNC, render_parameters) + generate_file(sql_rm_file, TEMPLATE_SQL_RM, render_parameters) + logging.info("Generating configuration files completed.") if __name__ == "__main__": diff --git a/src/setup/logstash_setup/templates/config_file_template.py b/src/setup/logstash_setup/templates/config.py similarity index 81% rename from src/setup/logstash_setup/templates/config_file_template.py rename to src/setup/logstash_setup/templates/config.py index fed78c63..661d1619 100755 --- a/src/setup/logstash_setup/templates/config_file_template.py +++ b/src/setup/logstash_setup/templates/config.py @@ -1,4 +1,4 @@ -CONFIG_FILE_TEMPLATE = """http.host: "0.0.0.0" +TEMPLATE_CONFIG = """http.host: "0.0.0.0" xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ] xpack.monitoring.enabled: true xpack.monitoring.elasticsearch.username: {{es_user}} diff --git a/src/setup/logstash_setup/templates/file_generated_comment.py b/src/setup/logstash_setup/templates/file_header.py similarity index 100% rename from src/setup/logstash_setup/templates/file_generated_comment.py rename to src/setup/logstash_setup/templates/file_header.py diff --git a/src/setup/logstash_setup/templates/pipeline_config_init_file_template.py b/src/setup/logstash_setup/templates/init_table.py similarity index 95% rename from src/setup/logstash_setup/templates/pipeline_config_init_file_template.py rename to src/setup/logstash_setup/templates/init_table.py index cff6878b..09ca42a6 100755 --- a/src/setup/logstash_setup/templates/pipeline_config_init_file_template.py +++ b/src/setup/logstash_setup/templates/init_table.py @@ -1,4 +1,4 @@ -PIPELINE_CONFIG_INIT_FILE_TEMPLATE = """ +TEMPLATE_INIT_TABLE = """ input { {% for entity in entities %} jdbc { diff --git a/src/setup/logstash_setup/templates/pipeline_sql_init_file_template.py b/src/setup/logstash_setup/templates/sql_init.py similarity index 90% rename from src/setup/logstash_setup/templates/pipeline_sql_init_file_template.py rename to src/setup/logstash_setup/templates/sql_init.py index d079e2aa..b1c47528 100755 --- a/src/setup/logstash_setup/templates/pipeline_sql_init_file_template.py +++ b/src/setup/logstash_setup/templates/sql_init.py @@ -1,4 +1,4 @@ -PIPELINE_SQL_INIT_FILE_TEMPLATE = """SELECT +TEMPLATE_SQL_INIT = """SELECT {{entity_name}}.identifier, {{entity_name}}.name, text.plain as 'plain', diff --git a/src/setup/logstash_setup/templates/pipeline_sql_rm_file_template.py b/src/setup/logstash_setup/templates/sql_rm.py similarity index 66% rename from src/setup/logstash_setup/templates/pipeline_sql_rm_file_template.py rename to src/setup/logstash_setup/templates/sql_rm.py index 10640db5..335f9da3 100755 --- a/src/setup/logstash_setup/templates/pipeline_sql_rm_file_template.py +++ b/src/setup/logstash_setup/templates/sql_rm.py @@ -1,4 +1,4 @@ -PIPELINE_SQL_RM_FILE_TEMPLATE = """SELECT {{entity_name}}.identifier +TEMPLATE_SQL_RM = """SELECT {{entity_name}}.identifier FROM aiod.{{entity_name}} WHERE aiod.{{entity_name}}.date_deleted IS NOT NULL AND aiod.{{entity_name}}.date_deleted > :sql_last_value diff --git a/src/setup/logstash_setup/templates/pipeline_sql_sync_file_template.py b/src/setup/logstash_setup/templates/sql_sync.py similarity index 91% rename from src/setup/logstash_setup/templates/pipeline_sql_sync_file_template.py rename to src/setup/logstash_setup/templates/sql_sync.py index b02fa9cb..c4cb552b 100755 --- a/src/setup/logstash_setup/templates/pipeline_sql_sync_file_template.py +++ b/src/setup/logstash_setup/templates/sql_sync.py @@ -1,4 +1,4 @@ -PIPELINE_SQL_SYNC_FILE_TEMPLATE = """SELECT +TEMPLATE_SQL_SYNC = """SELECT {{entity_name}}.identifier, {{entity_name}}.name, text.plain as 'plain', diff --git a/src/setup/logstash_setup/templates/pipeline_config_sync_file_template.py b/src/setup/logstash_setup/templates/sync_table.py similarity index 97% rename from src/setup/logstash_setup/templates/pipeline_config_sync_file_template.py rename to src/setup/logstash_setup/templates/sync_table.py index cab9fd3e..1ebda66c 100755 --- a/src/setup/logstash_setup/templates/pipeline_config_sync_file_template.py +++ b/src/setup/logstash_setup/templates/sync_table.py @@ -1,4 +1,4 @@ -PIPELINE_CONFIG_SYNC_FILE_TEMPLATE = """ +TEMPLATE_SYNC_TABLE = """ input { {% for entity in entities %} jdbc { diff --git a/src/setup_logger.py b/src/setup_logger.py new file mode 100644 index 00000000..d22930af --- /dev/null +++ b/src/setup_logger.py @@ -0,0 +1,9 @@ +import logging + + +def setup_logger(): + logging.basicConfig( + level=logging.INFO, + format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) From 58b25146004130a2ebadd7b9334cf7dbd6beb0da Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Mon, 27 Nov 2023 15:58:00 +0100 Subject: [PATCH 75/79] Make sure data folders are always created with correct permissions (this was by accident removed in commit cc8c22f291930b0940c82664e3ce1e91d5ff5509) --- .gitignore | 8 +++----- data/connectors/.gitkeep | 0 data/deletion/.gitkeep | 0 data/mysql/.gitkeep | 0 4 files changed, 3 insertions(+), 5 deletions(-) create mode 100644 data/connectors/.gitkeep create mode 100644 data/deletion/.gitkeep create mode 100644 data/mysql/.gitkeep diff --git a/.gitignore b/.gitignore index cc99bb6f..4e3927f0 100644 --- a/.gitignore +++ b/.gitignore @@ -118,10 +118,8 @@ venv.bak/ # Logstash configuration logstash/config/config/* !logstash/config/config/pipelines.yml -logstash/config/pipeline/* -!logstash/config/pipeline/.gitkeep -logstash/config/sql/* -!logstash/config/sql/.gitkeep +logstash/config/pipeline +logstash/config/sql # Spyder project settings .spyderproject @@ -144,4 +142,4 @@ dmypy.json # Pyre type checker .pyre/ -.vscode \ No newline at end of file +.vscode diff --git a/data/connectors/.gitkeep b/data/connectors/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/data/deletion/.gitkeep b/data/deletion/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/data/mysql/.gitkeep b/data/mysql/.gitkeep new file mode 100644 index 00000000..e69de29b From 0d6bd3b4a823d6e76678023258d6edabbb529ae4 Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Mon, 27 Nov 2023 17:09:21 +0100 Subject: [PATCH 76/79] Added default logstash configuration --- .gitignore | 17 +- docker-compose.yaml | 409 +++++++++--------- logstash/config/config/jvm.options | 72 +++ logstash/config/config/log4j2.file.properties | 147 +++++++ logstash/config/config/log4j2.properties | 16 + logstash/config/config/logstash-sample.conf | 17 + logstash/config/config/startup.options | 53 +++ 7 files changed, 520 insertions(+), 211 deletions(-) create mode 100644 logstash/config/config/jvm.options create mode 100644 logstash/config/config/log4j2.file.properties create mode 100644 logstash/config/config/log4j2.properties create mode 100644 logstash/config/config/logstash-sample.conf create mode 100644 logstash/config/config/startup.options diff --git a/.gitignore b/.gitignore index 4e3927f0..85c76209 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,18 @@ # Project Specific + # data/ is intended for database data from the mysql container data/ +# Generated Logstash configuration +logstash/config/config/logstash.yml +logstash/config/config/pipelines.yml +logstash/config/pipeline +logstash/config/sql + + + + + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] @@ -115,12 +126,6 @@ env.bak/ venv.bak/ **.DS_Store -# Logstash configuration -logstash/config/config/* -!logstash/config/config/pipelines.yml -logstash/config/pipeline -logstash/config/sql - # Spyder project settings .spyderproject .spyproject diff --git a/docker-compose.yaml b/docker-compose.yaml index 330c5a1a..be498928 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,214 +1,214 @@ version: '3.9' services: - app: - build: - context: ./ - dockerfile: Dockerfile - image: ai4eu_server - container_name: apiserver - env_file: .env - environment: - - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - ports: - - 8000:8000 - volumes: - - ./src:/app:ro - command: > - python main.py - --rebuild-db only-if-empty - --reload - healthcheck: - test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8000')"] - start_interval: 1s - start_period: 30s - interval: 5s - timeout: 120s - retries: 24 - depends_on: - sqlserver: - condition: service_healthy + # app: + # build: + # context: ./ + # dockerfile: Dockerfile + # image: ai4eu_server + # container_name: apiserver + # env_file: .env + # environment: + # - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + # ports: + # - 8000:8000 + # volumes: + # - ./src:/app:ro + # command: > + # python main.py + # --rebuild-db only-if-empty + # --reload + # healthcheck: + # test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8000')"] + # start_interval: 1s + # start_period: 30s + # interval: 5s + # timeout: 120s + # retries: 24 + # depends_on: + # sqlserver: + # condition: service_healthy - fill-db-with-examples: - profiles: ["examples"] - image: ai4eu_server - container_name: fill-db-with-examples - env_file: .env - environment: - - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - volumes: - - ./src:/app:ro - - ./data/connectors:/opt/connectors/data - - ./connectors:/opt/connectors/script:ro - command: > - /bin/bash -c "/opt/connectors/script/fill-examples.sh" - depends_on: - app: - condition: service_healthy + # fill-db-with-examples: + # profiles: ["examples"] + # image: ai4eu_server + # container_name: fill-db-with-examples + # env_file: .env + # environment: + # - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + # volumes: + # - ./src:/app:ro + # - ./data/connectors:/opt/connectors/data + # - ./connectors:/opt/connectors/script:ro + # command: > + # /bin/bash -c "/opt/connectors/script/fill-examples.sh" + # depends_on: + # app: + # condition: service_healthy - deletion: - build: - context: deletion - dockerfile: Dockerfile - image: ai4eu_deletion - container_name: deletion - env_file: .env - volumes: - - ./src:/app - - ./data/deletion:/opt/deletion/data - command: > - /bin/bash -c "/opt/deletion/script/entry.sh" - depends_on: - app: - condition: service_healthy + # deletion: + # build: + # context: deletion + # dockerfile: Dockerfile + # image: ai4eu_deletion + # container_name: deletion + # env_file: .env + # volumes: + # - ./src:/app + # - ./data/deletion:/opt/deletion/data + # command: > + # /bin/bash -c "/opt/deletion/script/entry.sh" + # depends_on: + # app: + # condition: service_healthy - huggingface-dataset-connector: - profiles: ["huggingface-datasets"] - image: ai4eu_server - container_name: huggingface-dataset-connector - env_file: .env - environment: - - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - volumes: - - ./src:/app:ro - - ./data/connectors:/opt/connectors/data - - ./connectors/huggingface/:/opt/connectors/script:ro - command: > - /bin/bash -c "/opt/connectors/script/datasets.sh" - depends_on: - app: - condition: service_healthy + # huggingface-dataset-connector: + # profiles: ["huggingface-datasets"] + # image: ai4eu_server + # container_name: huggingface-dataset-connector + # env_file: .env + # environment: + # - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + # volumes: + # - ./src:/app:ro + # - ./data/connectors:/opt/connectors/data + # - ./connectors/huggingface/:/opt/connectors/script:ro + # command: > + # /bin/bash -c "/opt/connectors/script/datasets.sh" + # depends_on: + # app: + # condition: service_healthy - openml-dataset-connector: - profiles: ["openml-datasets"] - build: - context: connectors/openml - dockerfile: Dockerfile - image: ai4eu_openml_connector - container_name: openml-dataset-connector - env_file: .env - environment: - - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - volumes: - - ./src:/app:ro - - ./data/connectors:/opt/connectors/data - - ./connectors/openml/:/opt/connectors/script:ro - command: > - /bin/bash -c "/opt/connectors/script/entry.sh" - depends_on: - app: - condition: service_healthy + # openml-dataset-connector: + # profiles: ["openml-datasets"] + # build: + # context: connectors/openml + # dockerfile: Dockerfile + # image: ai4eu_openml_connector + # container_name: openml-dataset-connector + # env_file: .env + # environment: + # - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + # volumes: + # - ./src:/app:ro + # - ./data/connectors:/opt/connectors/data + # - ./connectors/openml/:/opt/connectors/script:ro + # command: > + # /bin/bash -c "/opt/connectors/script/entry.sh" + # depends_on: + # app: + # condition: service_healthy - zenodo-dataset-connector: - profiles: ["zenodo-datasets"] - build: - context: connectors/zenodo - dockerfile: Dockerfile - image: ai4eu_zenodo_connector - container_name: zenodo-dataset-connector - env_file: .env - environment: - - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - volumes: - - ./src:/app - - ./data/connectors:/opt/connectors/data - - ./connectors/zenodo/:/opt/connectors/script - command: > - /bin/bash -c "/opt/connectors/script/entry.sh" - depends_on: - app: - condition: service_healthy + # zenodo-dataset-connector: + # profiles: ["zenodo-datasets"] + # build: + # context: connectors/zenodo + # dockerfile: Dockerfile + # image: ai4eu_zenodo_connector + # container_name: zenodo-dataset-connector + # env_file: .env + # environment: + # - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + # volumes: + # - ./src:/app + # - ./data/connectors:/opt/connectors/data + # - ./connectors/zenodo/:/opt/connectors/script + # command: > + # /bin/bash -c "/opt/connectors/script/entry.sh" + # depends_on: + # app: + # condition: service_healthy - sqlserver: - image: mysql - container_name: sqlserver - env_file: .env - environment: - - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} - volumes: - - ./data/mysql:/var/lib/mysql - healthcheck: - test: ["CMD", "mysqladmin", "-uroot", "-p$MYSQL_ROOT_PASSWORD", "ping", "-h", "localhost", "--protocol","tcp"] - start_interval: 1s - start_period: 10s - interval: 5s - timeout: 30s - retries: 30 + # sqlserver: + # image: mysql + # container_name: sqlserver + # env_file: .env + # environment: + # - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} + # volumes: + # - ./data/mysql:/var/lib/mysql + # healthcheck: + # test: ["CMD", "mysqladmin", "-uroot", "-p$MYSQL_ROOT_PASSWORD", "ping", "-h", "localhost", "--protocol","tcp"] + # start_interval: 1s + # start_period: 10s + # interval: 5s + # timeout: 30s + # retries: 30 - keycloak: - image: quay.io/keycloak/keycloak - container_name: keycloak - env_file: .env - environment: - - REDIRECT_URIS=$REDIRECT_URIS - - POST_LOGOUT_REDIRECT_URIS=$POST_LOGOUT_REDIRECT_URIS - ports: - - 8080:8080 - volumes: - - ./quay-keycloak:/opt/keycloak/data/import:ro - command: > - start-dev - --hostname-url http://${HOSTNAME}/aiod-auth - --hostname-admin-url http://${HOSTNAME}/aiod-auth - --http-relative-path=/aiod-auth - --http-enabled=true - --hostname-strict-https=false - --import-realm + # keycloak: + # image: quay.io/keycloak/keycloak + # container_name: keycloak + # env_file: .env + # environment: + # - REDIRECT_URIS=$REDIRECT_URIS + # - POST_LOGOUT_REDIRECT_URIS=$POST_LOGOUT_REDIRECT_URIS + # ports: + # - 8080:8080 + # volumes: + # - ./quay-keycloak:/opt/keycloak/data/import:ro + # command: > + # start-dev + # --hostname-url http://${HOSTNAME}/aiod-auth + # --hostname-admin-url http://${HOSTNAME}/aiod-auth + # --http-relative-path=/aiod-auth + # --http-enabled=true + # --hostname-strict-https=false + # --import-realm - nginx: - image: nginx - container_name: nginx - restart: unless-stopped - volumes: - - ./nginx:/etc/nginx/conf.d:ro - ports: - - 80:80 - depends_on: - app: - condition: service_healthy + # nginx: + # image: nginx + # container_name: nginx + # restart: unless-stopped + # volumes: + # - ./nginx:/etc/nginx/conf.d:ro + # ports: + # - 80:80 + # depends_on: + # app: + # condition: service_healthy - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.8.2 - container_name: elasticsearch - env_file: .env - environment: - - ES_JAVA_OPTS=$ES_JAVA_OPTS - - ELASTIC_USER=$ES_USER - - ELASTIC_PASSWORD=$ES_PASSWORD - - discovery.type=$ES_DISCOVERY_TYPE - ports: - - 9200:9200 - - 9300:9300 - volumes: - - type: bind - source: ./es/elasticsearch.yml - target: /usr/share/elasticsearch/config/elasticsearch.yml - read_only: true - - ./data/elasticsearch:/usr/share/elasticsearch/data - healthcheck: - test: ["CMD-SHELL", "curl -u $ES_USER:$ES_PASSWORD --silent --fail localhost:9200/_cluster/health || exit 1"] - interval: 5s - timeout: 30s - retries: 30 + # elasticsearch: + # image: docker.elastic.co/elasticsearch/elasticsearch:8.8.2 + # container_name: elasticsearch + # env_file: .env + # environment: + # - ES_JAVA_OPTS=$ES_JAVA_OPTS + # - ELASTIC_USER=$ES_USER + # - ELASTIC_PASSWORD=$ES_PASSWORD + # - discovery.type=$ES_DISCOVERY_TYPE + # ports: + # - 9200:9200 + # - 9300:9300 + # volumes: + # - type: bind + # source: ./es/elasticsearch.yml + # target: /usr/share/elasticsearch/config/elasticsearch.yml + # read_only: true + # - ./data/elasticsearch:/usr/share/elasticsearch/data + # healthcheck: + # test: ["CMD-SHELL", "curl -u $ES_USER:$ES_PASSWORD --silent --fail localhost:9200/_cluster/health || exit 1"] + # interval: 5s + # timeout: 30s + # retries: 30 - es_logstash_setup: - image: ai4eu_server - container_name: es_logstash_setup - env_file: .env - environment: - - MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD - - ES_USER=$ES_USER - - ES_PASSWORD=$ES_PASSWORD - volumes: - - ./src:/app - - ./logstash:/logstash - command: > - /bin/bash -c "python setup/logstash_setup/generate_logstash_config_files.py && - python setup/es_setup/generate_elasticsearch_indices.py" - restart: "no" - depends_on: - elasticsearch: - condition: service_healthy + # es_logstash_setup: + # image: ai4eu_server + # container_name: es_logstash_setup + # env_file: .env + # environment: + # - MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD + # - ES_USER=$ES_USER + # - ES_PASSWORD=$ES_PASSWORD + # volumes: + # - ./src:/app + # - ./logstash:/logstash + # command: > + # /bin/bash -c "python setup/logstash_setup/generate_logstash_config_files.py && + # python setup/es_setup/generate_elasticsearch_indices.py" + # restart: "no" + # depends_on: + # elasticsearch: + # condition: service_healthy logstash: build: @@ -224,10 +224,9 @@ services: - 5000:5000/udp - 9600:9600 volumes: - - type: bind - source: ./logstash/config - target: /usr/share/logstash/ - read_only: true - depends_on: - es_logstash_setup: - condition: service_completed_successfully + - ./logstash/config:/jos + # volumes: + # - ./logstash/config:/usr/share/logstash:ro + # depends_on: + # es_logstash_setup: + # condition: service_completed_successfully diff --git a/logstash/config/config/jvm.options b/logstash/config/config/jvm.options new file mode 100644 index 00000000..9b1b6616 --- /dev/null +++ b/logstash/config/config/jvm.options @@ -0,0 +1,72 @@ +## JVM configuration + +# Xms represents the initial size of total heap space +# Xmx represents the maximum size of total heap space + +-Xms1g +-Xmx1g + +################################################################ +## Expert settings +################################################################ +## +## All settings below this section are considered +## expert settings. Don't tamper with them unless +## you understand what you are doing +## +################################################################ + +## GC configuration +11-13:-XX:+UseConcMarkSweepGC +11-13:-XX:CMSInitiatingOccupancyFraction=75 +11-13:-XX:+UseCMSInitiatingOccupancyOnly + +## Locale +# Set the locale language +#-Duser.language=en + +# Set the locale country +#-Duser.country=US + +# Set the locale variant, if any +#-Duser.variant= + +## basic + +# set the I/O temp directory +#-Djava.io.tmpdir=$HOME + +# set to headless, just in case +-Djava.awt.headless=true + +# ensure UTF-8 encoding by default (e.g. filenames) +-Dfile.encoding=UTF-8 + +# use our provided JNA always versus the system one +#-Djna.nosys=true + +# Turn on JRuby invokedynamic +-Djruby.compile.invokedynamic=true + +## heap dumps + +# generate a heap dump when an allocation from the Java heap fails +# heap dumps are created in the working directory of the JVM +-XX:+HeapDumpOnOutOfMemoryError + +# specify an alternative path for heap dumps +# ensure the directory exists and has sufficient space +#-XX:HeapDumpPath=${LOGSTASH_HOME}/heapdump.hprof + +## GC logging +#-Xlog:gc*,gc+age=trace,safepoint:file=@loggc@:utctime,pid,tags:filecount=32,filesize=64m + +# log GC status to a file with time stamps +# ensure the directory exists +#-Xloggc:${LS_GC_LOG_FILE} + +# Entropy source for randomness +-Djava.security.egd=file:/dev/urandom + +# Copy the logging context from parent threads to children +-Dlog4j2.isThreadContextMapInheritable=true \ No newline at end of file diff --git a/logstash/config/config/log4j2.file.properties b/logstash/config/config/log4j2.file.properties new file mode 100644 index 00000000..234b23db --- /dev/null +++ b/logstash/config/config/log4j2.file.properties @@ -0,0 +1,147 @@ +status = error +name = LogstashPropertiesConfig + +appender.console.type = Console +appender.console.name = plain_console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n + +appender.json_console.type = Console +appender.json_console.name = json_console +appender.json_console.layout.type = JSONLayout +appender.json_console.layout.compact = true +appender.json_console.layout.eventEol = true + +appender.rolling.type = RollingFile +appender.rolling.name = plain_rolling +appender.rolling.fileName = ${sys:ls.logs}/logstash-plain.log +appender.rolling.filePattern = ${sys:ls.logs}/logstash-plain-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling.policies.size.size = 100MB +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.max = 30 +appender.rolling.avoid_pipelined_filter.type = PipelineRoutingFilter + +appender.json_rolling.type = RollingFile +appender.json_rolling.name = json_rolling +appender.json_rolling.fileName = ${sys:ls.logs}/logstash-json.log +appender.json_rolling.filePattern = ${sys:ls.logs}/logstash-json-%d{yyyy-MM-dd}-%i.log.gz +appender.json_rolling.policies.type = Policies +appender.json_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.json_rolling.policies.time.interval = 1 +appender.json_rolling.policies.time.modulate = true +appender.json_rolling.layout.type = JSONLayout +appender.json_rolling.layout.compact = true +appender.json_rolling.layout.eventEol = true +appender.json_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.json_rolling.policies.size.size = 100MB +appender.json_rolling.strategy.type = DefaultRolloverStrategy +appender.json_rolling.strategy.max = 30 +appender.json_rolling.avoid_pipelined_filter.type = PipelineRoutingFilter + +appender.routing.type = PipelineRouting +appender.routing.name = pipeline_routing_appender +appender.routing.pipeline.type = RollingFile +appender.routing.pipeline.name = appender-${ctx:pipeline.id} +appender.routing.pipeline.fileName = ${sys:ls.logs}/pipeline_${ctx:pipeline.id}.log +appender.routing.pipeline.filePattern = ${sys:ls.logs}/pipeline_${ctx:pipeline.id}.%i.log.gz +appender.routing.pipeline.layout.type = PatternLayout +appender.routing.pipeline.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n +appender.routing.pipeline.policy.type = SizeBasedTriggeringPolicy +appender.routing.pipeline.policy.size = 100MB +appender.routing.pipeline.strategy.type = DefaultRolloverStrategy +appender.routing.pipeline.strategy.max = 30 + +rootLogger.level = ${sys:ls.log.level} +rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console +rootLogger.appenderRef.rolling.ref = ${sys:ls.log.format}_rolling +rootLogger.appenderRef.routing.ref = pipeline_routing_appender + +# Slowlog + +appender.console_slowlog.type = Console +appender.console_slowlog.name = plain_console_slowlog +appender.console_slowlog.layout.type = PatternLayout +appender.console_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +appender.json_console_slowlog.type = Console +appender.json_console_slowlog.name = json_console_slowlog +appender.json_console_slowlog.layout.type = JSONLayout +appender.json_console_slowlog.layout.compact = true +appender.json_console_slowlog.layout.eventEol = true + +appender.rolling_slowlog.type = RollingFile +appender.rolling_slowlog.name = plain_rolling_slowlog +appender.rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-plain.log +appender.rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-plain-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling_slowlog.policies.type = Policies +appender.rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling_slowlog.policies.time.interval = 1 +appender.rolling_slowlog.policies.time.modulate = true +appender.rolling_slowlog.layout.type = PatternLayout +appender.rolling_slowlog.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n +appender.rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling_slowlog.policies.size.size = 100MB +appender.rolling_slowlog.strategy.type = DefaultRolloverStrategy +appender.rolling_slowlog.strategy.max = 30 + +appender.json_rolling_slowlog.type = RollingFile +appender.json_rolling_slowlog.name = json_rolling_slowlog +appender.json_rolling_slowlog.fileName = ${sys:ls.logs}/logstash-slowlog-json.log +appender.json_rolling_slowlog.filePattern = ${sys:ls.logs}/logstash-slowlog-json-%d{yyyy-MM-dd}-%i.log.gz +appender.json_rolling_slowlog.policies.type = Policies +appender.json_rolling_slowlog.policies.time.type = TimeBasedTriggeringPolicy +appender.json_rolling_slowlog.policies.time.interval = 1 +appender.json_rolling_slowlog.policies.time.modulate = true +appender.json_rolling_slowlog.layout.type = JSONLayout +appender.json_rolling_slowlog.layout.compact = true +appender.json_rolling_slowlog.layout.eventEol = true +appender.json_rolling_slowlog.policies.size.type = SizeBasedTriggeringPolicy +appender.json_rolling_slowlog.policies.size.size = 100MB +appender.json_rolling_slowlog.strategy.type = DefaultRolloverStrategy +appender.json_rolling_slowlog.strategy.max = 30 + +logger.slowlog.name = slowlog +logger.slowlog.level = trace +logger.slowlog.appenderRef.console_slowlog.ref = ${sys:ls.log.format}_console_slowlog +logger.slowlog.appenderRef.rolling_slowlog.ref = ${sys:ls.log.format}_rolling_slowlog +logger.slowlog.additivity = false + +logger.licensereader.name = logstash.licensechecker.licensereader +logger.licensereader.level = error + +# Silence http-client by default +logger.apache_http_client.name = org.apache.http +logger.apache_http_client.level = fatal + +# Deprecation log +appender.deprecation_rolling.type = RollingFile +appender.deprecation_rolling.name = deprecation_plain_rolling +appender.deprecation_rolling.fileName = ${sys:ls.logs}/logstash-deprecation.log +appender.deprecation_rolling.filePattern = ${sys:ls.logs}/logstash-deprecation-%d{yyyy-MM-dd}-%i.log.gz +appender.deprecation_rolling.policies.type = Policies +appender.deprecation_rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.deprecation_rolling.policies.time.interval = 1 +appender.deprecation_rolling.policies.time.modulate = true +appender.deprecation_rolling.layout.type = PatternLayout +appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 100MB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 30 + +logger.deprecation.name = org.logstash.deprecation, deprecation +logger.deprecation.level = WARN +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_plain_rolling +logger.deprecation.additivity = false + +logger.deprecation_root.name = deprecation +logger.deprecation_root.level = WARN +logger.deprecation_root.appenderRef.deprecation_rolling.ref = deprecation_plain_rolling +logger.deprecation_root.additivity = false diff --git a/logstash/config/config/log4j2.properties b/logstash/config/config/log4j2.properties new file mode 100644 index 00000000..663a0158 --- /dev/null +++ b/logstash/config/config/log4j2.properties @@ -0,0 +1,16 @@ +status = error +name = LogstashPropertiesConfig + +appender.console.type = Console +appender.console.name = plain_console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n + +appender.json_console.type = Console +appender.json_console.name = json_console +appender.json_console.layout.type = JSONLayout +appender.json_console.layout.compact = true +appender.json_console.layout.eventEol = true + +rootLogger.level = ${sys:ls.log.level} +rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console diff --git a/logstash/config/config/logstash-sample.conf b/logstash/config/config/logstash-sample.conf new file mode 100644 index 00000000..2fa9229d --- /dev/null +++ b/logstash/config/config/logstash-sample.conf @@ -0,0 +1,17 @@ +# Sample Logstash configuration for creating a simple +# Beats -> Logstash -> Elasticsearch pipeline. + +input { + beats { + port => 5044 + } +} + +output { + elasticsearch { + hosts => ["http://localhost:9200"] + index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}" + #user => "elastic" + #password => "changeme" + } +} diff --git a/logstash/config/config/startup.options b/logstash/config/config/startup.options new file mode 100644 index 00000000..3829fdb9 --- /dev/null +++ b/logstash/config/config/startup.options @@ -0,0 +1,53 @@ +################################################################################ +# These settings are ONLY used by $LS_HOME/bin/system-install to create a custom +# startup script for Logstash and is not used by Logstash itself. It should +# automagically use the init system (systemd, upstart, sysv, etc.) that your +# Linux distribution uses. +# +# After changing anything here, you need to re-run $LS_HOME/bin/system-install +# as root to push the changes to the init script. +################################################################################ + +# Override Java location +#JAVACMD=/usr/bin/java + +# Set a home directory +LS_HOME=/usr/share/logstash + +# logstash settings directory, the path which contains logstash.yml +LS_SETTINGS_DIR=/etc/logstash + +# Arguments to pass to logstash +LS_OPTS="--path.settings ${LS_SETTINGS_DIR}" + +# Arguments to pass to java +LS_JAVA_OPTS="" + +# pidfiles aren't used the same way for upstart and systemd; this is for sysv users. +LS_PIDFILE=/var/run/logstash.pid + +# user and group id to be invoked as +LS_USER=logstash +LS_GROUP=logstash + +# Enable GC logging by uncommenting the appropriate lines in the GC logging +# section in jvm.options +LS_GC_LOG_FILE=/var/log/logstash/gc.log + +# Open file limit +LS_OPEN_FILES=16384 + +# Nice level +LS_NICE=19 + +# Change these to have the init script named and described differently +# This is useful when running multiple instances of Logstash on the same +# physical box or vm +SERVICE_NAME="logstash" +SERVICE_DESCRIPTION="logstash" + +# If you need to run a command or script before launching Logstash, put it +# between the lines beginning with `read` and `EOM`, and uncomment those lines. +### +## read -r -d '' PRESTART << EOM +## EOM From 82b049a14518542d1067cdd59b53f727e80fb4f7 Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Mon, 27 Nov 2023 17:32:31 +0100 Subject: [PATCH 77/79] Fixed docker compose --- docker-compose.yaml | 410 ++++++++++++++++++++++---------------------- 1 file changed, 205 insertions(+), 205 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index be498928..74e720c0 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,215 +1,215 @@ + version: '3.9' services: - # app: - # build: - # context: ./ - # dockerfile: Dockerfile - # image: ai4eu_server - # container_name: apiserver - # env_file: .env - # environment: - # - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - # ports: - # - 8000:8000 - # volumes: - # - ./src:/app:ro - # command: > - # python main.py - # --rebuild-db only-if-empty - # --reload - # healthcheck: - # test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8000')"] - # start_interval: 1s - # start_period: 30s - # interval: 5s - # timeout: 120s - # retries: 24 - # depends_on: - # sqlserver: - # condition: service_healthy + app: + build: + context: ./ + dockerfile: Dockerfile + image: ai4eu_server + container_name: apiserver + env_file: .env + environment: + - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + ports: + - 8000:8000 + volumes: + - ./src:/app:ro + command: > + python main.py + --rebuild-db only-if-empty + --reload + healthcheck: + test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8000')"] + start_interval: 1s + start_period: 30s + interval: 5s + timeout: 120s + retries: 24 + depends_on: + sqlserver: + condition: service_healthy - # fill-db-with-examples: - # profiles: ["examples"] - # image: ai4eu_server - # container_name: fill-db-with-examples - # env_file: .env - # environment: - # - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - # volumes: - # - ./src:/app:ro - # - ./data/connectors:/opt/connectors/data - # - ./connectors:/opt/connectors/script:ro - # command: > - # /bin/bash -c "/opt/connectors/script/fill-examples.sh" - # depends_on: - # app: - # condition: service_healthy + fill-db-with-examples: + profiles: ["examples"] + image: ai4eu_server + container_name: fill-db-with-examples + env_file: .env + environment: + - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + volumes: + - ./src:/app:ro + - ./data/connectors:/opt/connectors/data + - ./connectors:/opt/connectors/script:ro + command: > + /bin/bash -c "/opt/connectors/script/fill-examples.sh" + depends_on: + app: + condition: service_healthy - # deletion: - # build: - # context: deletion - # dockerfile: Dockerfile - # image: ai4eu_deletion - # container_name: deletion - # env_file: .env - # volumes: - # - ./src:/app - # - ./data/deletion:/opt/deletion/data - # command: > - # /bin/bash -c "/opt/deletion/script/entry.sh" - # depends_on: - # app: - # condition: service_healthy + deletion: + build: + context: deletion + dockerfile: Dockerfile + image: ai4eu_deletion + container_name: deletion + env_file: .env + volumes: + - ./src:/app + - ./data/deletion:/opt/deletion/data + command: > + /bin/bash -c "/opt/deletion/script/entry.sh" + depends_on: + app: + condition: service_healthy - # huggingface-dataset-connector: - # profiles: ["huggingface-datasets"] - # image: ai4eu_server - # container_name: huggingface-dataset-connector - # env_file: .env - # environment: - # - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - # volumes: - # - ./src:/app:ro - # - ./data/connectors:/opt/connectors/data - # - ./connectors/huggingface/:/opt/connectors/script:ro - # command: > - # /bin/bash -c "/opt/connectors/script/datasets.sh" - # depends_on: - # app: - # condition: service_healthy + huggingface-dataset-connector: + profiles: ["huggingface-datasets"] + image: ai4eu_server + container_name: huggingface-dataset-connector + env_file: .env + environment: + - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + volumes: + - ./src:/app:ro + - ./data/connectors:/opt/connectors/data + - ./connectors/huggingface/:/opt/connectors/script:ro + command: > + /bin/bash -c "/opt/connectors/script/datasets.sh" + depends_on: + app: + condition: service_healthy - # openml-dataset-connector: - # profiles: ["openml-datasets"] - # build: - # context: connectors/openml - # dockerfile: Dockerfile - # image: ai4eu_openml_connector - # container_name: openml-dataset-connector - # env_file: .env - # environment: - # - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - # volumes: - # - ./src:/app:ro - # - ./data/connectors:/opt/connectors/data - # - ./connectors/openml/:/opt/connectors/script:ro - # command: > - # /bin/bash -c "/opt/connectors/script/entry.sh" - # depends_on: - # app: - # condition: service_healthy + openml-dataset-connector: + profiles: ["openml-datasets"] + build: + context: connectors/openml + dockerfile: Dockerfile + image: ai4eu_openml_connector + container_name: openml-dataset-connector + env_file: .env + environment: + - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + volumes: + - ./src:/app:ro + - ./data/connectors:/opt/connectors/data + - ./connectors/openml/:/opt/connectors/script:ro + command: > + /bin/bash -c "/opt/connectors/script/entry.sh" + depends_on: + app: + condition: service_healthy - # zenodo-dataset-connector: - # profiles: ["zenodo-datasets"] - # build: - # context: connectors/zenodo - # dockerfile: Dockerfile - # image: ai4eu_zenodo_connector - # container_name: zenodo-dataset-connector - # env_file: .env - # environment: - # - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET - # volumes: - # - ./src:/app - # - ./data/connectors:/opt/connectors/data - # - ./connectors/zenodo/:/opt/connectors/script - # command: > - # /bin/bash -c "/opt/connectors/script/entry.sh" - # depends_on: - # app: - # condition: service_healthy + zenodo-dataset-connector: + profiles: ["zenodo-datasets"] + build: + context: connectors/zenodo + dockerfile: Dockerfile + image: ai4eu_zenodo_connector + container_name: zenodo-dataset-connector + env_file: .env + environment: + - KEYCLOAK_CLIENT_SECRET=$KEYCLOAK_CLIENT_SECRET + volumes: + - ./src:/app + - ./data/connectors:/opt/connectors/data + - ./connectors/zenodo/:/opt/connectors/script + command: > + /bin/bash -c "/opt/connectors/script/entry.sh" + depends_on: + app: + condition: service_healthy - # sqlserver: - # image: mysql - # container_name: sqlserver - # env_file: .env - # environment: - # - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} - # volumes: - # - ./data/mysql:/var/lib/mysql - # healthcheck: - # test: ["CMD", "mysqladmin", "-uroot", "-p$MYSQL_ROOT_PASSWORD", "ping", "-h", "localhost", "--protocol","tcp"] - # start_interval: 1s - # start_period: 10s - # interval: 5s - # timeout: 30s - # retries: 30 + sqlserver: + image: mysql + container_name: sqlserver + env_file: .env + environment: + - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD} + volumes: + - ./data/mysql:/var/lib/mysql + healthcheck: + test: ["CMD", "mysqladmin", "-uroot", "-p$MYSQL_ROOT_PASSWORD", "ping", "-h", "localhost", "--protocol","tcp"] + start_interval: 1s + start_period: 10s + interval: 5s + timeout: 30s + retries: 30 - # keycloak: - # image: quay.io/keycloak/keycloak - # container_name: keycloak - # env_file: .env - # environment: - # - REDIRECT_URIS=$REDIRECT_URIS - # - POST_LOGOUT_REDIRECT_URIS=$POST_LOGOUT_REDIRECT_URIS - # ports: - # - 8080:8080 - # volumes: - # - ./quay-keycloak:/opt/keycloak/data/import:ro - # command: > - # start-dev - # --hostname-url http://${HOSTNAME}/aiod-auth - # --hostname-admin-url http://${HOSTNAME}/aiod-auth - # --http-relative-path=/aiod-auth - # --http-enabled=true - # --hostname-strict-https=false - # --import-realm + keycloak: + image: quay.io/keycloak/keycloak + container_name: keycloak + env_file: .env + environment: + - REDIRECT_URIS=$REDIRECT_URIS + - POST_LOGOUT_REDIRECT_URIS=$POST_LOGOUT_REDIRECT_URIS + ports: + - 8080:8080 + volumes: + - ./quay-keycloak:/opt/keycloak/data/import:ro + command: > + start-dev + --hostname-url http://${HOSTNAME}/aiod-auth + --hostname-admin-url http://${HOSTNAME}/aiod-auth + --http-relative-path=/aiod-auth + --http-enabled=true + --hostname-strict-https=false + --import-realm - # nginx: - # image: nginx - # container_name: nginx - # restart: unless-stopped - # volumes: - # - ./nginx:/etc/nginx/conf.d:ro - # ports: - # - 80:80 - # depends_on: - # app: - # condition: service_healthy + nginx: + image: nginx + container_name: nginx + restart: unless-stopped + volumes: + - ./nginx:/etc/nginx/conf.d:ro + ports: + - 80:80 + depends_on: + app: + condition: service_healthy - # elasticsearch: - # image: docker.elastic.co/elasticsearch/elasticsearch:8.8.2 - # container_name: elasticsearch - # env_file: .env - # environment: - # - ES_JAVA_OPTS=$ES_JAVA_OPTS - # - ELASTIC_USER=$ES_USER - # - ELASTIC_PASSWORD=$ES_PASSWORD - # - discovery.type=$ES_DISCOVERY_TYPE - # ports: - # - 9200:9200 - # - 9300:9300 - # volumes: - # - type: bind - # source: ./es/elasticsearch.yml - # target: /usr/share/elasticsearch/config/elasticsearch.yml - # read_only: true - # - ./data/elasticsearch:/usr/share/elasticsearch/data - # healthcheck: - # test: ["CMD-SHELL", "curl -u $ES_USER:$ES_PASSWORD --silent --fail localhost:9200/_cluster/health || exit 1"] - # interval: 5s - # timeout: 30s - # retries: 30 - - # es_logstash_setup: - # image: ai4eu_server - # container_name: es_logstash_setup - # env_file: .env - # environment: - # - MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD - # - ES_USER=$ES_USER - # - ES_PASSWORD=$ES_PASSWORD - # volumes: - # - ./src:/app - # - ./logstash:/logstash - # command: > - # /bin/bash -c "python setup/logstash_setup/generate_logstash_config_files.py && - # python setup/es_setup/generate_elasticsearch_indices.py" - # restart: "no" - # depends_on: - # elasticsearch: - # condition: service_healthy + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.8.2 + container_name: elasticsearch + env_file: .env + environment: + - ES_JAVA_OPTS=$ES_JAVA_OPTS + - ELASTIC_USER=$ES_USER + - ELASTIC_PASSWORD=$ES_PASSWORD + - discovery.type=$ES_DISCOVERY_TYPE + ports: + - 9200:9200 + - 9300:9300 + volumes: + - type: bind + source: ./es/elasticsearch.yml + target: /usr/share/elasticsearch/config/elasticsearch.yml + read_only: true + - ./data/elasticsearch:/usr/share/elasticsearch/data + healthcheck: + test: ["CMD-SHELL", "curl -u $ES_USER:$ES_PASSWORD --silent --fail localhost:9200/_cluster/health || exit 1"] + interval: 5s + timeout: 30s + retries: 30 + es_logstash_setup: + image: ai4eu_server + container_name: es_logstash_setup + env_file: .env + environment: + - MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD + - ES_USER=$ES_USER + - ES_PASSWORD=$ES_PASSWORD + volumes: + - ./src:/app + - ./logstash:/logstash + command: > + /bin/bash -c "python setup/logstash_setup/generate_logstash_config_files.py && + python setup/es_setup/generate_elasticsearch_indices.py" + restart: "no" + depends_on: + elasticsearch: + condition: service_healthy logstash: build: context: logstash/ @@ -224,9 +224,9 @@ services: - 5000:5000/udp - 9600:9600 volumes: - - ./logstash/config:/jos - # volumes: - # - ./logstash/config:/usr/share/logstash:ro - # depends_on: - # es_logstash_setup: - # condition: service_completed_successfully + - ./logstash/config/config:/usr/share/logstash/config:ro + - ./logstash/config/pipeline:/usr/share/logstash/pipeline:ro + - ./logstash/config/sql:/usr/share/logstash/sql:ro + depends_on: + es_logstash_setup: + condition: service_completed_successfully From 85c76b5ca882d8ba485f8d75b12feab94e4cdd44 Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Mon, 27 Nov 2023 17:50:25 +0100 Subject: [PATCH 78/79] Using FastAPI input validation --- src/routers/search_router.py | 15 +------ .../search_routers/test_search_routers.py | 41 ++++++++----------- 2 files changed, 20 insertions(+), 36 deletions(-) diff --git a/src/routers/search_router.py b/src/routers/search_router.py index f7a4ef39..9dcfaab0 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -69,8 +69,8 @@ def search( platforms: Annotated[list[str] | None, Query()] = None, search_query: str = "", search_fields: Annotated[list[str] | None, Query()] = None, - limit: int = 10, - offset: int = 0, + limit: Annotated[int | None, Query(ge=1, le=LIMIT_MAX)] = 10, + offset: Annotated[int | None, Query(ge=0)] = 0, get_all: bool = True, ) -> SearchResult[read_class]: # type: ignore f""" @@ -96,17 +96,6 @@ def search( detail=f"The available search fields for this entity " f"are: {self.indexed_fields}", ) - if limit > LIMIT_MAX: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail=f"The limit should be maximum {LIMIT_MAX}. " - f"If you want more results, use pagination.", - ) - if offset < 0: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="The offset should be greater or equal than 0.", - ) query_matches = [{"match": {f: search_query}} for f in fields] query = {"bool": {"should": query_matches, "minimum_should_match": 1}} if platforms: diff --git a/src/tests/routers/search_routers/test_search_routers.py b/src/tests/routers/search_routers/test_search_routers.py index c29a8aab..719de408 100644 --- a/src/tests/routers/search_routers/test_search_routers.py +++ b/src/tests/routers/search_routers/test_search_routers.py @@ -18,31 +18,26 @@ def test_search_happy_path(client: TestClient, search_router): mocked_elasticsearch = Elasticsearch("https://example.com:9200") ElasticsearchSingleton().patch(mocked_elasticsearch) - # Get the mocker results to test resources_path = os.path.join(path_test_resources(), "elasticsearch") resource_file = f"{search_router.es_index}_search.json" mocked_file = os.path.join(resources_path, resource_file) with open(mocked_file, "r") as f: mocked_results = json.load(f) - # Mock and launch mocked_elasticsearch.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" params = {"search_query": "description", "get_all": False} response = client.get(search_service, params=params) - # Assert the correct execution and get the response assert response.status_code == 200, response.json() resource = response.json()["resources"][0] - # Test the common responses assert resource["identifier"] == 1 assert resource["name"] == "A name." assert resource["description"]["plain"] == "A plain text description." assert resource["description"]["html"] == "An html description." assert resource["aiod_entry"]["date_modified"] == "2023-09-01T00:00:00+00:00" - # Test the extra fields global_fields = {"name", "plain", "html"} extra_fields = list(search_router.indexed_fields ^ global_fields) for field in extra_fields: @@ -55,20 +50,17 @@ def test_search_bad_platform(client: TestClient, search_router): mocked_elasticsearch = Elasticsearch("https://example.com:9200") ElasticsearchSingleton().patch(mocked_elasticsearch) - # Get the mocker results to test resources_path = os.path.join(path_test_resources(), "elasticsearch") resource_file = f"{search_router.es_index}_search.json" mocked_file = os.path.join(resources_path, resource_file) with open(mocked_file, "r") as f: mocked_results = json.load(f) - # Mock and launch mocked_elasticsearch.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" params = {"search_query": "description", "platforms": ["bad_platform"]} response = client.get(search_service, params=params) - # Assert the platform error assert response.status_code == 400, response.json() err_msg = "The available platforms are" assert response.json()["detail"][: len(err_msg)] == err_msg @@ -80,20 +72,17 @@ def test_search_bad_fields(client: TestClient, search_router): mocked_elasticsearch = Elasticsearch("https://example.com:9200") ElasticsearchSingleton().patch(mocked_elasticsearch) - # Get the mocker results to test resources_path = os.path.join(path_test_resources(), "elasticsearch") resource_file = f"{search_router.es_index}_search.json" mocked_file = os.path.join(resources_path, resource_file) with open(mocked_file, "r") as f: mocked_results = json.load(f) - # Mock and launch mocked_elasticsearch.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" params = {"search_query": "description", "search_fields": ["bad_field"]} response = client.get(search_service, params=params) - # Assert the platform error assert response.status_code == 400, response.json() err_msg = "The available search fields for this entity are" assert response.json()["detail"][: len(err_msg)] == err_msg @@ -105,23 +94,26 @@ def test_search_bad_limit(client: TestClient, search_router): mocked_elasticsearch = Elasticsearch("https://example.com:9200") ElasticsearchSingleton().patch(mocked_elasticsearch) - # Get the mocker results to test resources_path = os.path.join(path_test_resources(), "elasticsearch") resource_file = f"{search_router.es_index}_search.json" mocked_file = os.path.join(resources_path, resource_file) with open(mocked_file, "r") as f: mocked_results = json.load(f) - # Mock and launch mocked_elasticsearch.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" params = {"search_query": "description", "limit": 1001} response = client.get(search_service, params=params) - # Assert the platform error - assert response.status_code == 400, response.json() - err_msg = "The limit should be maximum 1000." - assert response.json()["detail"][: len(err_msg)] == err_msg + assert response.status_code == 422, response.json() + assert response.json()["detail"] == [ + { + "ctx": {"limit_value": 1000}, + "loc": ["query", "limit"], + "msg": "ensure this value is less than or equal to 1000", + "type": "value_error.number.not_le", + } + ] @pytest.mark.parametrize("search_router", sr.router_list) @@ -130,20 +122,23 @@ def test_search_bad_offset(client: TestClient, search_router): mocked_elasticsearch = Elasticsearch("https://example.com:9200") ElasticsearchSingleton().patch(mocked_elasticsearch) - # Get the mocker results to test resources_path = os.path.join(path_test_resources(), "elasticsearch") resource_file = f"{search_router.es_index}_search.json" mocked_file = os.path.join(resources_path, resource_file) with open(mocked_file, "r") as f: mocked_results = json.load(f) - # Mock and launch mocked_elasticsearch.search = Mock(return_value=mocked_results) search_service = f"/search/{search_router.resource_name_plural}/v1" params = {"search_query": "description", "offset": -1} response = client.get(search_service, params=params) - # Assert the platform error - assert response.status_code == 400, response.json() - err_msg = "The offset should be greater or equal than 0." - assert response.json()["detail"][: len(err_msg)] == err_msg + assert response.status_code == 422, response.json() + assert response.json()["detail"] == [ + { + "ctx": {"limit_value": 0}, + "loc": ["query", "offset"], + "msg": "ensure this value is greater than or equal to 0", + "type": "value_error.number.not_ge", + } + ] From 3adc54be73b083dfdc27f925da3201d7740cf0a0 Mon Sep 17 00:00:00 2001 From: Jos van der Velde Date: Mon, 27 Nov 2023 17:56:38 +0100 Subject: [PATCH 79/79] Made status nullable, so that we can return an empty status in the search_router --- src/database/model/concept/aiod_entry.py | 8 ++++---- src/routers/search_router.py | 15 ++++----------- .../routers/search_routers/test_search_routers.py | 1 + 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/src/database/model/concept/aiod_entry.py b/src/database/model/concept/aiod_entry.py index d3b9f879..d7dded41 100644 --- a/src/database/model/concept/aiod_entry.py +++ b/src/database/model/concept/aiod_entry.py @@ -32,7 +32,7 @@ class AIoDEntryORM(AIoDEntryBase, table=True): # type: ignore [call-arg] link_model=many_to_many_link_factory("aiod_entry", "person", table_prefix="editor"), ) status_identifier: int | None = Field(foreign_key=Status.__tablename__ + ".identifier") - status: Status = Relationship() + status: Status | None = Relationship() # date_modified is updated in the resource_router date_modified: datetime | None = Field(default_factory=datetime.utcnow) @@ -40,7 +40,7 @@ class AIoDEntryORM(AIoDEntryBase, table=True): # type: ignore [call-arg] class RelationshipConfig: editor: list[int] = ManyToMany() # No deletion triggers: "orphan" Persons should be kept - status: str = ManyToOne( + status: str | None = ManyToOne( example="draft", identifier_name="status_identifier", deserializer=FindByNameDeserializer(Status), @@ -53,7 +53,7 @@ class AIoDEntryCreate(AIoDEntryBase): default_factory=list, schema_extra={"example": []}, ) - status: str = Field( + status: str | None = Field( description="Status of the entry (published, draft, rejected)", schema_extra={"example": "published"}, default="draft", @@ -66,7 +66,7 @@ class AIoDEntryRead(AIoDEntryBase): default_factory=list, schema_extra={"example": []}, ) - status: str = Field( + status: str | None = Field( description="Status of the entry (published, draft, rejected)", schema_extra={"example": "published"}, default="draft", diff --git a/src/routers/search_router.py b/src/routers/search_router.py index 9dcfaab0..1e0180b5 100644 --- a/src/routers/search_router.py +++ b/src/routers/search_router.py @@ -154,15 +154,8 @@ def _cast_resource( if key != "type" and not key.startswith("@") } resource = read_class(**kwargs) - resource.aiod_entry = AIoDEntryRead(date_modified=resource_dict["date_modified"]) + resource.aiod_entry = AIoDEntryRead( + date_modified=resource_dict["date_modified"], status=None + ) resource.description = {"plain": resource_dict["plain"], "html": resource_dict["html"]} - return self._clean_structure(dict(resource)) - - def _clean_structure(self, structure: dict): - new_structure = {} - for key, value in structure.items(): - if isinstance(value, dict): - value = self._clean_structure(value) - if value: - new_structure[key] = value - return new_structure + return resource diff --git a/src/tests/routers/search_routers/test_search_routers.py b/src/tests/routers/search_routers/test_search_routers.py index 719de408..404717f6 100644 --- a/src/tests/routers/search_routers/test_search_routers.py +++ b/src/tests/routers/search_routers/test_search_routers.py @@ -37,6 +37,7 @@ def test_search_happy_path(client: TestClient, search_router): assert resource["description"]["plain"] == "A plain text description." assert resource["description"]["html"] == "An html description." assert resource["aiod_entry"]["date_modified"] == "2023-09-01T00:00:00+00:00" + assert resource["aiod_entry"]["status"] is None global_fields = {"name", "plain", "html"} extra_fields = list(search_router.indexed_fields ^ global_fields)