From c9db063c95df47db9e193a0af29e177127755c0a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 15:52:18 -0700 Subject: [PATCH] chore(openchallenges): 2024-07-02 DB update (#2736) Co-authored-by: vpchung <9377970+vpchung@users.noreply.github.com> --- .../service/service/ChallengeAnalyticsService.java | 2 +- .../src/main/resources/db/challenges.csv | 9 +++++---- .../src/main/resources/db/incentives.csv | 1 + .../src/main/resources/db/submission_types.csv | 1 + 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/apps/openchallenges/challenge-service/src/main/java/org/sagebionetworks/openchallenges/challenge/service/service/ChallengeAnalyticsService.java b/apps/openchallenges/challenge-service/src/main/java/org/sagebionetworks/openchallenges/challenge/service/service/ChallengeAnalyticsService.java index 7113e9a870..b156cff9b2 100644 --- a/apps/openchallenges/challenge-service/src/main/java/org/sagebionetworks/openchallenges/challenge/service/service/ChallengeAnalyticsService.java +++ b/apps/openchallenges/challenge-service/src/main/java/org/sagebionetworks/openchallenges/challenge/service/service/ChallengeAnalyticsService.java @@ -19,7 +19,7 @@ public ChallengesPerYearDto getChallengesPerYear() { // The following line will be auto-updated by a script and should NOT be modified manually. 
List challengeCounts = /* AUTO-UPDATE MARKER */ - Arrays.asList(6, 9, 13, 17, 23, 29, 34, 41, 49, 59, 86, 97, 116, 135, 183, 242, 306, 343); + Arrays.asList(6, 9, 13, 17, 23, 29, 34, 41, 49, 59, 86, 97, 116, 135, 183, 242, 306, 344); Integer undatedChallengeCount = 171; // int currentYear = Year.now().getValue(); diff --git a/apps/openchallenges/challenge-service/src/main/resources/db/challenges.csv b/apps/openchallenges/challenge-service/src/main/resources/db/challenges.csv index 6f2f84ba6c..4238278934 100644 --- a/apps/openchallenges/challenge-service/src/main/resources/db/challenges.csv +++ b/apps/openchallenges/challenge-service/src/main/resources/db/challenges.csv @@ -187,7 +187,7 @@ "186","making-sense-of-electronic-health-record-ehr-race-and-ethnicity-data","Making Sense of Electronic Health Record (EHR) Race and Ethnicity Data","Make sense of electronic health record race and ethnicity data","The urgency of the coronavirus disease 2019 (COVID-19) pandemic has heightened interest in the use of real-world data (RWD) to obtain timely information about patients and populations and has focused attention on EHRs. The pandemic has also heightened awareness of long-standing racial and ethnic health disparities along a continuum from underlying social determinants of health, exposure to risk, access to insurance and care, quality of care, and responses to treatments. This highlighted the potential that EHRs can be used to describe and contribute to our understanding of racial and ethnic health disparities and their solutions. 
The OMB Revisions to the Standards for the Classification of Federal Data on Race and Ethnicity provides minimum standards for maintaining, collecting, and presenting data on race and ethnicity for all Federal reporting purposes, and defines the two separate constructs of race and ethnicity.","","https://precision.fda.gov/challenges/30","completed","6","","2023-05-31","2023-06-23","\N","2023-08-10 18:28:06","2023-11-14 19:34:58" "187","the-veterans-cardiac-health-and-ai-model-predictions-v-champs","The Veterans Cardiac Health and AI Model Predictions (V-CHAMPS)","Predict cardiovascular health related outcomes in veterans","To better understand the risk and protective factors in the Veteran population, the VHA IE and its collaborating partners are calling upon the public to develop AI/ML models to predict cardiovascular health outcomes, including readmission and mortality, using synthetically generated Veteran health records. The Challenge consists of two Phases-Phase 1 is focused on synthetic data. In this Phase of the Challenge, AI/ML models will be developed by Challenge participants and trained and tested on the synthetic data sets provided to them, with a view towards predicting outcome variables for Veterans who have been diagnosed with chronic heart failure (please note that in Phase 1, the data is synthetic Veteran health records). Phase 2 will focus on validating and further exploring the limits of the AI/ML models. During this Phase, high-performing AI/ML models from Phase 1 will be brought into the VA system and validated on the real-world Veterans health data within the VHA. 
These models...","","https://precision.fda.gov/challenges/31","completed","6","","2023-05-25","2023-08-02","\N","2023-08-10 21:41:10","2023-11-14 19:35:53" "188","predicting-high-risk-breast-cancer-phase-1","Predicting High Risk Breast Cancer - Phase 1","Predicting High Risk Breast Cancer-a Nightingale OS & AHLI data challenge","Every year, 40 million women get a mammogram; some go on to have an invasive biopsy to better examine a concerning area. Underneath these routine tests lies a deep—and disturbing—mystery. Since the 1990s, we have found far more ‘cancers'', which has in turn prompted vastly more surgical procedures and chemotherapy. But death rates from metastatic breast cancer have hardly changed. When a pathologist looks at a biopsy slide, she is looking for known signs of cancer-tubules, cells with atypical looking nuclei, evidence of rapid cell division. These features, first identified in 1928, still underlie critical decisions today-which women must receive urgent treatment with surgery and chemotherapy? And which can be prescribed “watchful waiting”, sparing them invasive procedures for cancers that would not harm them? There is already evidence that algorithms can predict which cancers will metastasize and harm patients on the basis of the biopsy image. Fascinatingly, these algorithms also ...","","https://app.nightingalescience.org/contests/3jmp2y128nxd","completed","15","","2022-06-01","2023-01-12","\N","2023-08-22 17:07:00","2023-10-12 17:55:10" -"189","predicting-high-risk-breast-cancer-phase-2","Predicting High Risk Breast Cancer - Phase 2","Predicting High Risk Breast Cancer-a Nightingale OS & AHLI data challenge","Every year, 40 million women get a mammogram; some go on to have an invasive biopsy to better examine a concerning area. Underneath these routine tests lies a deep—and disturbing—mystery. Since the 1990s, we have found far more ‘cancers'', which has in turn prompted vastly more surgical procedures and chemotherapy. 
But death rates from metastatic breast cancer have hardly changed. When a pathologist looks at a biopsy slide, she is looking for known signs of cancer-tubules, cells with atypical looking nuclei, evidence of rapid cell division. These features, first identified in 1928, still underlie critical decisions today-which women must receive urgent treatment with surgery and chemotherapy? And which can be prescribed “watchful waiting”, sparing them invasive procedures for cancers that would not harm them? There is already evidence that algorithms can predict which cancers will metastasize and harm patients on the basis of the biopsy image. Fascinatingly, these algorithms als...","","https://app.nightingalescience.org/contests/vd8g98zv9w0p","completed","15","","2023-02-03","2023-05-13","\N","2023-08-22 17:07:01","2023-10-12 17:55:08" +"189","predicting-high-risk-breast-cancer-phase-2","Predicting High Risk Breast Cancer - Phase 2","Predicting High Risk Breast Cancer-a Nightingale OS & AHLI data challenge","Every year, 40 million women get a mammogram; some go on to have an invasive biopsy to better examine a concerning area. Underneath these routine tests lies a deep—and disturbing—mystery. Since the 1990s, we have found far more ‘cancers'', which has in turn prompted vastly more surgical procedures and chemotherapy. But death rates from metastatic breast cancer have hardly changed. When a pathologist looks at a biopsy slide, she is looking for known signs of cancer-tubules, cells with atypical looking nuclei, evidence of rapid cell division. These features, first identified in 1928, still underlie critical decisions today-which women must receive urgent treatment with surgery and chemotherapy? And which can be prescribed “watchful waiting”, sparing them invasive procedures for cancers that would not harm them? There is already evidence that algorithms can predict which cancers will metastasize and harm patients on the basis of the biopsy image. 
Fascinatingly, these algorithms als...","","https://app.nightingalescience.org/contests/vd8g98zv9w0p","completed","15","","2023-02-03","2023-05-03","\N","2023-08-22 17:07:01","2024-07-02 22:45:16" "190","dream-2-in-silico-network-inference","DREAM 2 - In Silico Network Inference","Predict the connectivity and properties of in-silico networks","Three in-silico networks were created and endowed with a dynamics that simulate biological interactions. The challenge consists of predicting the connectivity and some of the properties of one or more of these three networks.","","https://www.synapse.org/#!Synapse:syn2825394/wiki/71150","completed","1","","2007-03-25","\N","\N","2023-08-24 18:54:05","2023-10-12 17:55:03" "191","dream-3-in-silico-network-challenge","DREAM 3 - In Silico Network Challenge","Reverse engineering of gene networks from biological data","The goal of the in silico challenges is the reverse engineering of gene networks from steady state and time series data. Participants are challenged to predict the directed unsigned network topology from the given in silico generated gene topic_3170sets.","","https://www.synapse.org/#!Synapse:syn2853594/wiki/71567","completed","1","https://doi.org/10.1089/cmb.2008.09TT","2008-06-09","\N","\N","2023-08-25 16:43:41","2023-11-14 19:35:58" "192","dream-4-in-silico-network-challenge","DREAM 4 - In Silico Network Challenge","Reverse engineer gene regulatory networks","The goal of the in silico network challenge is to reverse engineer gene regulation networks from simulated steady-state and time-series data. Participants are challenged to infer the network structure from the given in silico gene topic_3170sets. 
Optionally, participants may also predict the response of the networks to a set of novel perturbations that were not included in the provided datasets.","","https://www.synapse.org/#!Synapse:syn3049712/wiki/74628","completed","1","https://doi.org/10.1073/pnas.0913357107","2009-06-09","\N","\N","2023-08-25 16:43:42","2023-11-14 19:36:02" @@ -502,15 +502,15 @@ "501","isbi-bodymaps24-3d-atlas-of-human-body","ISBI BodyMaps24: 3D Atlas of Human Body","","Variations in organ sizes and shapes can indicate a range of medical conditions, from benign anomalies to life-threatening diseases. Precise organ volume measurement is fundamental for effective patient care, but manual organ contouring is extremely time-consuming and exhibits considerable variability among expert radiologists. Artificial Intelligence (AI) holds the promise of improving volume measurement accuracy and reducing manual contouring efforts. We formulate our challenge as a semantic segmentation task, which automatically identifies and delineates the boundary of various anatomical structures essential for numerous downstream applications such as disease diagnosis and treatment planning. Our primary goal is to promote the development of advanced AI algorithms and to benchmark the state of the art in this field. 
The BodyMaps challenge particularly focuses on assessing and improving the generalizability and efficiency of AI algorithms in medical segmentation across divers...","","https://codalab.lisn.upsaclay.fr/competitions/16919","completed","9","","2024-01-10","2024-04-15","\N","2024-03-06 20:12:50","2024-03-06 20:16:23" "502","precisionfda-automated-machine-learning-automl-app-a-thon","precisionFDA Automated Machine Learning (AutoML) App-a-thon","Unlock new insights into its potential applications in healthcare and medicine","Say goodbye to the days when machine learning (ML) access was the exclusive purview of data scientists and hello to automated ML (AutoML), a low-code ML technique designed to empower professionals without a data science background and enable their access to ML. Although ML and artificial intelligence (AI) have been highly discussed topics in healthcare and medicine, only 15% of hospitals are routinely using ML due to lack of ML expertise and a lengthy data provisioning process. Can AutoML help bridge this gap and expand ML throughout healthcare? The goal of this app-a-thon is to evaluate the effectiveness of AutoML when applied to biomedical datasets. This app-a-thon aligns with the new Executive Order on Safe, Secure, and Trustworthy Development and Use of AI, which calls for agencies to promote competition in AI. 
The results of this app-a-thon will be used to help inform regulatory science by evaluating whether AutoML can match or improve the performance of traditional, human-c...","","https://precision.fda.gov/challenges/32","completed","6","","2024-02-26","2024-04-26","\N","2024-03-11 22:58:43","2024-03-11 23:02:12" "503","dream-olfactory-mixtures-prediction","DREAM olfactory mixtures prediction","Predicting smell from molecule features","The goal of the DREAM Olfaction Challenge is to find models that can predict how close two mixtures of molecules are in the odor perceptual space (on a 0-1 scale, 0 is total overlap, 1 is the furthest away) using physical and chemical features. For this challenge, we are providing a large published training-set of 500 mixtures measurements obtained from 3 publications, mixtures have varying number of molecules and an unpublished test-set of 46 equi-intense mixtures of 10 molecules whose distance was rated by 35 human subjects.","","https://www.synapse.org/#!Synapse:syn53470621/wiki/626022","active","1","","2024-04-19","2024-08-01","2319","2024-04-22 18:21:54","2024-04-22 21:54:39" -"504","fets-2024","Federated Tumor Segmentation (FeTS) 2024 Challenge","Benchmarking weight aggregation methods for federated training","Contrary to previous years, this time we only focus on one task and invite participants to compete in “Federated Training” for effective weight aggregation methods for the creation of a consensus model given a pre-defined segmentation algorithm for training, while also (optionally) accounting for network outages. 
The same data is used as in FeTS 2022 challenge, but this year the epmhasis is on instance segmentation of brain tumors.","","https://www.synapse.org/fets2024","active","1","","2024-04-01","2024-07-01","\N","2024-04-22 22:07:18","2024-04-22 22:07:18" +"504","fets-2024","Federated Tumor Segmentation (FeTS) 2024 Challenge","Benchmarking weight aggregation methods for federated training","Contrary to previous years, this time we only focus on one task and invite participants to compete in “Federated Training” for effective weight aggregation methods for the creation of a consensus model given a pre-defined segmentation algorithm for training, while also (optionally) accounting for network outages. The same data is used as in FeTS 2022 challenge, but this year the emphasis is on instance segmentation of brain tumors.","","https://www.synapse.org/fets2024","completed","1","","2024-04-01","2024-07-01","\N","2024-04-22 22:07:18","2024-04-22 22:07:18"
This effectiveness is optimized by ensuring a short time between the diagnosis of the pathology and the start of treatment as well as by performing regular ch...","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/666/square_image_mario_t8tUYoc.png","https://www.codabench.org/competitions/2851/","active","10","","2024-04-01","2024-07-10","\N","2024-04-29 18:13:15","2024-05-10 16:48:04" "506","hntsmrg24","Head and Neck Tumor Segmentation for MR-Guided Applications","Head and Neck Tumor Segmentation","This challenge focuses on developing algorithms to automatically segment head and neck cancer gross tumor volumes on multi-timepoint MRI","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/745/logo_v0.png","https://hntsmrg24.grand-challenge.org/","active","5","","2024-05-01","2024-09-15","\N","2024-04-29 18:15:37","2024-05-20 16:37:46" "507","acouslic-ai","Abdominal Circumference Operator-agnostic UltraSound measurement","Fetal growth restriction prediction","Fetal growth restriction (FGR), affecting up to 10% of pregnancies, is a critical factor contributing to perinatal morbidity and mortality (1-3). Strongly linked to stillbirths, FGR can also lead to preterm labor, posing risks to the mother (4,5). This condition often results from an impediment to the fetus' genetic growth potential due to various maternal, fetal, and placental factors (6). Measurements of the fetal abdominal circumference (AC) as seen on prenatal ultrasound are a key aspect of monitoring fetal growth. When smaller than expected, these measurements can be indicative of FGR, a condition linked to approximately 60% of fetal deaths (4). FGR diagnosis relies on repeated measurements of either the fetal abdominal circumference (AC), the expected fetal weight, or both. These measurements must be taken at least twice, with a minimum interval of two weeks between them for a reliable diagnosis (7). 
Additionally, an AC measurement that falls below the third percentile is, b...","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/753/acouslicai-logo_tjZmpqL.png","https://acouslic-ai.grand-challenge.org/","active","5","","2024-05-05","2024-07-31","\N","2024-04-29 18:21:37","2024-05-20 16:38:17" "508","leopard","The LEOPARD Challenge","Uncover finer morphological features' prognostic value","Recently, deep learning was shown (H. Pinckaers et al., 2022; O. Eminaga et. al., 2024) to be able to predict the biochemical recurrence of prostate cancer. Hypothesizing that deep learning could uncover finer morphological features' prognostic value, we are organizing the LEarning biOchemical Prostate cAncer Recurrence from histopathology sliDes (LEOPARD) challenge. The goal of this challenge is to yield top-performance deep learning solutions to predict the time to biochemical recurrence from H&E-stained histopathological tissue sections, i.e. based on morphological features.","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/754/logo.png","https://leopard.grand-challenge.org/","active","5","","2024-04-10","2024-08-01","\N","2024-04-29 18:28:44","2024-05-20 16:38:34" -"509","autopet-iii","AutoPET III","Refine the automated segmentation of tumor lesions in PET/CT scans","We invite you to participate in the third autoPET Challenge. The focus of this year's challenge is to further refine the automated segmentation of tumor lesions in Positron Emission Tomography/Computed Tomography (PET/CT) scans in a multitracer multicenter setting. Over the past decades, PET/CT has emerged as a pivotal tool in oncological diagnostics, management and treatment planning. In clinical routine, medical experts typically rely on a qualitative analysis of the PET/CT images, although quantitative analysis would enable more precise and individualized tumor characterization and therapeutic decisions. 
A major barrier to clinical adoption is lesion segmentation, a necessary step for quantitative image analysis. Performed manually, it's tedious, time-consuming and costly. Machine Learning offers the potential for fast and fully automated quantitative analysis of PET/CT images, as previously demonstrated in the first two autoPET challenges. Building upon the insights gai...","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/755/autopet-2024.png","https://autopet-iii.grand-challenge.org/","upcoming","5","","2024-06-30","2024-09-15","\N","2024-04-29 18:29:47","2024-05-20 16:39:18" +"509","autopet-iii","AutoPET III","Refine the automated segmentation of tumor lesions in PET/CT scans","We invite you to participate in the third autoPET Challenge. The focus of this year's challenge is to further refine the automated segmentation of tumor lesions in Positron Emission Tomography/Computed Tomography (PET/CT) scans in a multitracer multicenter setting. Over the past decades, PET/CT has emerged as a pivotal tool in oncological diagnostics, management and treatment planning. In clinical routine, medical experts typically rely on a qualitative analysis of the PET/CT images, although quantitative analysis would enable more precise and individualized tumor characterization and therapeutic decisions. A major barrier to clinical adoption is lesion segmentation, a necessary step for quantitative image analysis. Performed manually, it's tedious, time-consuming and costly. Machine Learning offers the potential for fast and fully automated quantitative analysis of PET/CT images, as previously demonstrated in the first two autoPET challenges. 
Building upon the insights gai...","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/755/autopet-2024.png","https://autopet-iii.grand-challenge.org/","active","5","","2024-06-30","2024-09-15","\N","2024-04-29 18:29:47","2024-05-20 16:39:18" "510","ai4life-mdc24","AI4Life Microscopy Denoising Challenge","Unsupervised denoising of microscopy images","Wellcome to AI4Life-MDC24! In this challenge, we want to focus on an unsupervised denoising of microscopy images. By participating, researchers can contribute to a critical area of scientific research, aiding in interpreting microscopy images and potentially unlocking discoveries in biology and medicine.","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/756/Challenge_square.png","https://ai4life-mdc24.grand-challenge.org/","active","5","","2024-05-04","2024-10-06","\N","2024-04-29 18:32:57","2024-05-20 16:39:01" "511","isles-24","Ischemic Stroke Lesion Segmentation Challenge 2024","ischemic stroke prediction","Clinical decisions regarding the treatment of ischemic stroke patients depend on the accurate estimation of core (irreversibly damaged tissue) and penumbra (salvageable tissue) volumes (Albers et al. 2018). The clinical standard method for estimating perfusion volumes is deconvolution analysis, consisting of i) estimating perfusion maps through perfusion CT (CTP) deconvolution and ii) thresholding the perfusion maps (Lin et al. 2016). However, the different deconvolution algorithms, their technical implementations, and the variable thresholds used in software packages significantly impact the estimated lesions (Fahmi et al. 2012). Moreover, core tissue tends to expand over time due to irreversible damage of penumbral tissue, with infarct growth rates being patient-specific and dependent on diverse factors such as thrombus location and collateral circulation. 
Understanding the core's growth rate is clinically crucial for assessing the relevance of transferring a patient to a compre...","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/757/ISLES24_1_c8Cz4NN.png","https://isles-24.grand-challenge.org/","active","5","","2024-06-15","2024-08-15","\N","2024-04-29 18:34:37","2024-05-20 16:39:42" -"512","toothfairy2","ToothFairy2: Multi-Structure Segmentation in CBCT Volumes","Multi-Structure Segmentation in CBCT Volumes","This is the first edition of the ToothFairy challenge organized by the University of Modena and Reggio Emilia with the collaboration of Radboud University Medical Center. The challenge is hosted by grand-challenge and is part of MICCAI2024.","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/759/GrandChallenge-Logo.png","https://toothfairy2.grand-challenge.org/","upcoming","5","","2024-06-30","2024-08-16","\N","2024-04-29 18:36:08","2024-05-20 16:40:11" +"512","toothfairy2","ToothFairy2: Multi-Structure Segmentation in CBCT Volumes","Multi-Structure Segmentation in CBCT Volumes","This is the first edition of the ToothFairy challenge organized by the University of Modena and Reggio Emilia with the collaboration of Radboud University Medical Center. The challenge is hosted by grand-challenge and is part of MICCAI2024.","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/759/GrandChallenge-Logo.png","https://toothfairy2.grand-challenge.org/","active","5","","2024-06-30","2024-08-08","\N","2024-04-29 18:36:08","2024-07-02 22:42:59" "513","pengwin","Pelvic Bone Fragments with Injuries Segmentation Challenge","Pelvic fractures characterization","Pelvic fractures, typically resulting from high-energy traumas, are among the most severe injuries, characterized by a disability rate over 50% and a mortality rate over 13%, ranking them as the deadliest of all compound fractures. 
The complexity of pelvic anatomy, along with surrounding soft tissues, makes surgical interventions especially challenging. Recent years have seen a shift towards the use of robotic-assisted closed fracture reduction surgeries, which have shown improved surgical outcomes. Accurate segmentation of pelvic fractures is essential, serving as a critical step in trauma diagnosis and image-guided surgery. In 3D CT scans, fracture segmentation is crucial for fracture typing, pre-operative planning for fracture reduction, and screw fixation planning. For 2D X-ray images, segmentation plays a vital role in transferring the surgical plan to the operating room via registration, a key step for precise surgical navigation.","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/760/PENGWIN_qZTjVoC.jpg","https://pengwin.grand-challenge.org/","active","5","","2024-05-14","2024-07-31","\N","2024-04-29 18:37:01","2024-05-20 16:40:28" "514","aortaseg24","Multi-Class Segmentation of Aortic Branches and Zones in CTA","Aorta medical imaging","3D Segmentation of Aortic Branches and Zones on Computed Tomography Angiography (CTA)","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/761/Grand_Challenge_Logo.png","https://aortaseg24.grand-challenge.org/","active","5","","2024-05-16","2024-08-16","\N","2024-04-29 18:38:07","2024-05-20 16:41:36" "515","aims-tbi","Automated Identification of Mod-Sev TBI Lesions","Traumatic Brain Injury characterization","Moderate to Severe Traumatic Brain Injury (msTBI) is caused by external forces (eg: traffic accidents, falls, sports) causing the brain to move rapidly within the skull, resulting in complex pathophysiological changes. Multiple primary, secondary, and surgery related processes has the potential to cause structural deformation in the brain. Each patient with msTBI has a unique accumulation of these structural changes, contributing to extremely heterogeneous lesions, considered a hallmark of msTBI (Covington & Duff, 2021). 
These lesions differ from other common brain pathologies (stroke, MS, brain tumor) in that they can be both focal or diffuse, varying in size, number and laterality, extending through multiple tissue types (GM/WM/CSF), and can also occur in homologous regions of both hemispheres. Lesions such as these can complicate image registration, normalization, and are known to introduce both local and global errors in brain parcellation (Diamond et al., 2020; King et al., 2...","https://rumc-gcorg-p-public.s3.amazonaws.com/logos/challenge/762/AIMS-TBI_logo_G50gkm9.png","https://aims-tbi.grand-challenge.org/","active","5","","2024-05-15","2024-08-16","\N","2024-04-29 18:38:56","2024-05-20 16:41:55" @@ -520,3 +520,4 @@ "519","chaimeleon","Chaimeleon","Re-identify pseudonymized medical studies","Participants will be challenged to re-identify pseudonymized medical studies with two de-identification methods used in ongoing AI4HI projects.","","https://chaimeleon.eu/re-identification-challenge/","active","\N","","2024-05-01","2024-08-01","2869","2024-05-20 16:52:45","2024-05-20 16:52:45" "520","digitally-derived-endpoints-for-freezing-of-gait-detection-defogd-challenge","Digitally-Derived Endpoints for Freezing-of-Gait Detection (DEFoGD) Challenge","Digitally-Derived Endpoints for Freezing-of-Gait Detection in Parkinson''s (PD)","Digital health technologies (DHTs) have the potential to capture information about a person''s health, continuously in real time outside the clinic. This unlocks the ability to meet people where they are in their environment and at various stages along their health journey. 
When people are engaging with DHTs and the data are organized and analyzed to create new or better measures of health, there is opportunity for digitally-derived endpoints to extend clinical reach and facilitate early disease detection and prevention, or support disease treatment and management of care in the home.","","https://precision.fda.gov/challenges/33","active","6","","2024-05-28","2024-08-02","\N","2024-05-21 22:03:46","2024-06-11 21:28:57" "521","placental-clock-dream-challenge","Placental Clock DREAM Challenge","Develop a new clock to achieve greater accuracy in predicting gestational age!","Since 2011, three generations of epigenetic clocks have been developed to estimate biological age[1-3]. The first-generation of models were generated by using DNA methylation data in various tissues to predict chronological age (outcome)[1, 4-6]. Second-generation models, such as PhenoAge and GrimAge used health outcomes, including all-cause mortality, for a more accurate determination of the latent biological age[7-9]. The latest, third-generation clocks like DunedinPoAm use longitudinal data to estimate the rate of aging[10]. This generation also includes universal clocks applicable to multiple species, such as the universal pan-mammalian epigenetic clock[11]. Biological age, as captured by these DNA methylation clocks, can be influenced by environmental factors, including smoking, obesity, sleep patterns, diet and exercise, stress, as well as diseases like cancer, diabetes, and Down syndrome[12-18]. The role of epigenetic programming in fetal development is crucial [19-21]. Th...","","http://synapse.org/placentalclock","active","1","","2024-06-03","2024-08-27","\N","2024-06-03 16:59:54","2024-06-03 17:03:06" +"522","detecting-active-tuberculosis-bacilli-2024","Detecting Active Tuberculosis Bacilli - 2024","","Tuberculosis is one of the leading infectious causes of death worldwide 1. 
Each year, millions of individuals contract and develop active TB without knowing 2. Case identification and treatment are the primary methods for controlling spread as there is no effective TB vaccine for adults. Unfortunately, delays in diagnosis are common, especially in resource-limited settings, and can worsen individual outcomes and perpetuate transmission of the disease 3,4. Without a timely diagnosis, patients needing treatment would head home from a clinic without knowing they were positive. If they miss their follow up, they would not learn about their diagnosis and nor would they start their treatment. Automated TB diagnosis could play a role reducing the loss of follow up and get patients to be treated more timely. Automated digital microscopy has been proposed as a cost-effective solution 6,7. An automated algorithm that could reliably detect mycobacterium on samples from patients with suspecte...","","https://app.nightingalescience.org/contests/m3rl61qq21wo","completed","15","","2024-03-01","2024-04-01","\N","2024-07-02 22:45:34","2024-07-02 22:47:34" diff --git a/apps/openchallenges/challenge-service/src/main/resources/db/incentives.csv b/apps/openchallenges/challenge-service/src/main/resources/db/incentives.csv index 7761d0b091..a1a3b19097 100644 --- a/apps/openchallenges/challenge-service/src/main/resources/db/incentives.csv +++ b/apps/openchallenges/challenge-service/src/main/resources/db/incentives.csv @@ -645,3 +645,4 @@ "644","monetary","521","2024-06-03 16:59:54" "645","publication","521","2024-06-03 16:59:54" "646","speaking_engagement","521","2024-06-03 16:59:54" +"647","monetary","522","2024-07-02 22:45:34" diff --git a/apps/openchallenges/challenge-service/src/main/resources/db/submission_types.csv b/apps/openchallenges/challenge-service/src/main/resources/db/submission_types.csv index c360f64316..a71cfe8775 100644 --- a/apps/openchallenges/challenge-service/src/main/resources/db/submission_types.csv +++ 
b/apps/openchallenges/challenge-service/src/main/resources/db/submission_types.csv @@ -532,3 +532,4 @@ "531","notebook","520","2024-05-21 22:03:46" "532","container_image","521","2024-06-03 16:59:54" "533","other","521","2024-06-03 16:59:54" +"534","prediction_file","522","2024-07-02 22:45:34"