diff --git a/.github/workflows/ghpages.yml b/.github/workflows/ghpages.yml
new file mode 100644
index 0000000..3780672
--- /dev/null
+++ b/.github/workflows/ghpages.yml
@@ -0,0 +1,21 @@
+name: Build and Deploy
+on:
+ push:
+ branches:
+ - main
+jobs:
+ build-and-publish-live-demo:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Install and Build
+ run: |
+ npm install
+ npm run build
+ - name: Deploy
+ uses: JamesIves/github-pages-deploy-action@v4
+ with:
+ branch: demo # The branch the action should deploy to.
+ folder: dist # The folder the action should deploy.
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a547bf3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,24 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..abd6c49
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,23 @@
+MIT License
+
+Copyright (c) 2021 neuroneural/brainchop
+
+Ported to NiiVue 2024 NiiVue developers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..1d8269e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,26 @@
+# NiiVue brainchop
+
+This is an experimental version of [brainchop](https://github.com/neuroneural/brainchop) that uses [NiiVue](https://github.com/niivue/niivue) to segment and visualize voxel-based images.
+
+## Live preview
+
+https://niivue.github.io/niivue-brainchop/
+
+## Getting started
+
+### To run
+
+```bash
+git clone https://github.com/niivue/niivue-brainchop
+cd niivue-brainchop
+npm install
+npm run dev
+```
+
+### To build
+
+```bash
+npm run build
+```
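+
+### To preview a production build
+
+Assuming a standard Vite setup (an assumption based on the `dist` output folder used by the deploy workflow and the `dist-ssr` entry in `.gitignore`), the production build can be served locally:
+
+```bash
+npx vite preview
+```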
+
+
diff --git a/brainchop.js b/brainchop.js
new file mode 100644
index 0000000..71e0b93
--- /dev/null
+++ b/brainchop.js
@@ -0,0 +1,2671 @@
+import { mat3, mat4, vec3, vec4 } from 'gl-matrix'
+import * as tf from '@tensorflow/tfjs'
+export { chop, inferenceModelsList }
+import {bwlabeler} from './bwlabels.js'
+
+var gOpts = {
+ // General settings for input shape [batchSize, batch_D, batch_H, batch_W, numOfChan]
+ batchSize: 1, // How many batches are used during each inference iteration
+ numOfChan: 1, // Number of channels in the input shape
+ isColorEnable: true, // If false, grayscale rendering is used
+ isAutoColors: true, // If false, manualColorsRange will be in use
+ bgLabelValue: 0, // Semantic segmentation background label value
+ drawBoundingVolume: false, // Plot the bounding volume used to crop the brain
+ isBrainCropMaskBased: true, // If true, the brain mask is used for cropping (and optionally shown); otherwise the brain tissue is used
+ showPhase1Output: false, // This will load the output of phase-1 (i.e. brain mask or brain tissue) into papaya
+ isPostProcessEnable: true, // If true, a 3D connected components filter is applied
+ isContoursViewEnable: false, // If true, 3D contours of the labeled regions are shown
+ browserArrayBufferMaxZDim: 30, // This value depends on available memory
+ telemetryFlag: false, // Ethical and transparent collection of browser usage while adhering to security and privacy standards
+ chartXaxisStepPercent: 10, // Percent of total labels shown on the X-axis
+ uiSampleName: "BC_UI_Sample", // Sample name used by the interface
+ atlasSelectedColorTable: "Fire" // Select from ["Hot-and-Cold", "Fire", "Grayscale", "Gold", "Spectrum"]
+}
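+
+// Illustrative (not part of the original code): since gOpts is a plain object,
+// per-run overrides can be shallow-merged into these defaults, e.g. to show the
+// crop bounding box; `myOpts` is a hypothetical name used only in this sketch.
+//   const myOpts = { ...gOpts, drawBoundingVolume: true }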
+
 // Inference models; ids must start from 1 and be sequential
+var inferenceModelsList = [
+ {
+ id: 1,
+ type: "Segmentation",
+ path: "./models/model5_gw_ae/model.json",
+ modelName: "+\u26A1 Tissue GWM (light)",
+ labelsPath: "./models/model5_gw_ae/labels.json",
+ colorsPath: "./models/model5_gw_ae/colorLUT.json",
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 2, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: null, // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "Gray and white matter segmentation model. Operates on the full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the subvolume model."
+ }
+
+ ,{
+ id: 2,
+ type: "Segmentation",
+ path:"./models/model20chan3cls/model.json",
+ modelName:"+\u{1F52A} Tissue GWM (High Acc)",
+ labelsPath: "./models/model20chan3cls/labels.json",
+ colorsPath: "./models/model20chan3cls/colorLUT.json",
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0.2, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: true, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.",
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "Gray and white matter segmentation model. Operates on the full T1 image in a single pass but needs a dedicated graphics card. Provides the best accuracy, with hard cropping for better speed."
+ }
+
+ ,{
+ id: 3,
+ type: "Segmentation",
+ path:"./models/model20chan3cls/model.json",
+ modelName:"-\u{1F52A} Tissue GWM (High Acc, Low Mem)",
+ labelsPath: "./models/model20chan3cls/labels.json",
+ colorsPath: "./models/model20chan3cls/colorLUT.json",
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0.2, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: true, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.",
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "Gray and white matter segmentation model. Operates on the full T1 image in a single pass but needs a dedicated graphics card. Provides high accuracy and fits in low available memory, but is slower."
+ }
+
+
+
+ ,{
+ id: 4,
+ type: "Atlas",
+ path:"./models/model30chan18cls/model.json",
+ modelName:"+\u{1FA93} Subcortical + GWM (High Mem, Fast)",
+ labelsPath: "./models/model30chan18cls/labels.json",
+ colorsPath: "./models/model30chan18cls/colorLUT.json",
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0.2, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.", // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle a range of data quality, including varying saturation and even clinical scans. It may work on infant brains, but your mileage may vary."
+ }
+
+ ,{
+ id: 5,
+ type: "Atlas",
+ path:"./models/model30chan18cls/model.json",
+ modelName:"-\u{1FA93} Subcortical + GWM (Low Mem, Slow)",
+ labelsPath: "./models/model30chan18cls/labels.json",
+ colorsPath: "./models/model30chan18cls/colorLUT.json",
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0.2, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.", // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle a range of data quality, including varying saturation and even clinical scans. It may work on infant brains, but your mileage may vary."
+ }
+
+ ,{
+ id: 6,
+ type: "Atlas",
+ path:"./models/model18cls/model.json",
+ modelName:"-\u{1FA93} Subcortical + GWM (Low Mem, Faster)",
+ labelsPath: "./models/model18cls/labels.json",
+ colorsPath: "./models/model18cls/colorLUT.json",
+ preModelId: null, // Model to run first, e.g. Brain_Extraction { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0.2, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.", // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle a range of data quality, including varying saturation and even clinical scans. It may work on infant brains, but your mileage may vary."
+ }
+
+ ,{
+ id: 7,
+ type: "Atlas",
+ path:"./models/model30chan18cls/model.json",
+ modelName:"-\u{1F52A}\u{1FA93} Subcortical + GWM (Failsafe, Less Acc)",
+ labelsPath: "./models/model30chan18cls/labels.json",
+ colorsPath: "./models/model30chan18cls/colorLUT.json",
+ preModelId: 1, // Model to run first, e.g. Brain_Extraction { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.", // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is not a robust model; it may still work on low-quality data, including varying saturation and even clinical scans. It may also work on infant brains, but your mileage may vary."
+ }
+
+ ,{
+ id: 8,
+ type: "Atlas",
+ path:"./models/model30chan50cls/model.json",
+ modelName:"-\u{1F52A} Aparc+Aseg 50 (High Mem, Fast)",
+ labelsPath: "./models/model30chan50cls/labels.json",
+ colorsPath: "./models/model30chan50cls/colorLUT.json",
+ preModelId: 1, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: true, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.", // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "This is a 50-class model that segments the brain into the Aparc+Aseg FreeSurfer atlas, but one where cortical homologues are merged into a single class."
+ }
+
+ ,{
+ id: 9,
+ type: "Atlas",
+ path:"./models/model30chan50cls/model.json",
+ modelName:"-\u{1F52A} Aparc+Aseg 50 (Low Mem, Slow)",
+ labelsPath: "./models/model30chan50cls/labels.json",
+ colorsPath: "./models/model30chan50cls/colorLUT.json",
+ preModelId: 1, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: true, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.", // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "This is a 50-class model that segments the brain into the Aparc+Aseg FreeSurfer atlas, but one where cortical homologues are merged into a single class. The model uses sequential convolution for inference to overcome browser memory limitations, which leads to longer computation time."
+ }
+
+
+ ,{
+ id: 10,
+ type: "Brain_Extraction",
+ path: "./models/model5_gw_ae/model.json",
+ modelName:"+\u26A1 Extract the Brain (FAST)",
+ labelsPath: null,
+ colorsPath: null,
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 2, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: null, // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "This fast brain-extraction model operates on the full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the failsafe version."
+ }
+
+ ,{
+ id: 11,
+ type: "Brain_Extraction",
+ path: "./models/model11_gw_ae/model.json",
+ modelName:"-\u{1F52A} Extract the Brain (High Acc, Slow)",
+ labelsPath: null,
+ colorsPath: null,
+ preModelId: 1, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 2, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.",
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "This high-accuracy brain-extraction model operates on the full T1 image in a single pass, but uses only 11 filters per layer. Works on dedicated graphics cards. Still more accurate than the fast version."
+ }
+
+ ,{
+ id: 12,
+ type: "Brain_Masking",
+ path: "./models/model5_gw_ae/model.json",
+ modelName:"+\u26A1 Brain Mask (FAST)",
+ labelsPath: null,
+ colorsPath: null,
+ preModelId: null, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 2, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: null, // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "This fast masking model operates on the full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the failsafe version."
+ }
+
+ ,{
+ id: 13,
+ type: "Brain_Masking",
+ path: "./models/model11_gw_ae/model.json",
+ modelName:"-\u{1F52A} Brain Mask (High Acc, Low Mem)",
+ labelsPath: null,
+ colorsPath: null,
+ preModelId: 1, // Model to run first, e.g. to crop the brain { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 0, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 2, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.",
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "This masking model operates on the full T1 image in a single pass, but uses 11 filters per layer. Works on dedicated graphics cards. Still more accurate than the fast version."
+ }
+
+ ,{
+ id: 14,
+ type: "Atlas",
+ path:"./models/model21_104class/model.json",
+ modelName:"-\u{1F52A} Aparc+Aseg 104 (High Mem, Fast)",
+ labelsPath: "./models/model21_104class/labels.json",
+ colorsPath: "./models/model21_104class/colorLUT.json",
+ preModelId: 1, // Model to run first, e.g. Brain_Extraction { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: false, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.", // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "The FreeSurfer aparc+aseg atlas parcellates the brain into 104 regions. It combines the Desikan-Killiany atlas for cortical areas with segmentation of subcortical regions."
+ }
+
+ ,{
+ id: 15,
+ type: "Atlas",
+ path:"./models/model21_104class/model.json",
+ modelName:"-\u{1F52A} Aparc+Aseg 104 (Low Mem, Slow)",
+ labelsPath: "./models/model21_104class/labels.json",
+ colorsPath: "./models/model21_104class/colorLUT.json",
+ preModelId: 1, // Model to run first, e.g. Brain_Extraction { null, 1, 2, .. }
+ preModelPostProcess: false, // If true, postprocess to remove noisy regions after the preModel inference generates its output
+ isBatchOverlapEnable: false, // Create extra overlapping batches for inference
+ numOverlapBatches: 200, // Number of extra overlapping batches for inference
+ enableTranspose: true, // Keras and tfjs input orientations may need a transposing step to match
+ enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use
+ cropPadding: 0, // Padding size added to the cropped brain
+ autoThreshold: 0, // Threshold between 0 and 1, used when there is no preModel and the tensor is normalized either min-max or by quantiles; removes noisy voxels around the brain
+ enableQuantileNorm: false, // Some models need quantile normalization
+ filterOutWithPreMask: false, // Can be used to multiply the final output with the preModel output mask to clean noisy areas
+ enableSeqConv: true, // For low-memory, low-end systems, enable sequential convolution for the last layer
+ textureSize: 0, // Requested texture size for the model; use 0 if unknown
+ warning: "This model may need a dedicated graphics card. For more info, please check Browser Resources.", // Warning message to show when the model is selected
+ inferenceDelay: 100, // Delay in ms between layer applications while looping
+ description: "The FreeSurfer aparc+aseg atlas parcellates the brain into 104 regions. It combines the Desikan-Killiany atlas for cortical areas with segmentation of subcortical regions. The model uses sequential convolution for inference to overcome browser memory limitations, which leads to longer computation time."
+ }
+] // inferenceModelsList
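+
+// Illustrative lookup (hypothetical usage, not part of the original API):
+// ids start at 1 and are sequential, so an entry can be fetched defensively
+// by filtering on its id rather than by array index:
+//   const entry = inferenceModelsList.find((m) => m.id === 2)
+//   console.log(entry.modelName, entry.enableSeqConv)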
+
+async function checkZero(timeValue) {
+ return timeValue < 10 ? "0" + timeValue : timeValue
+}
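+// Example (illustrative): await checkZero(7) -> "07"; await checkZero(12) -> 12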
+
+async function detectBrowser() {
+ if ( navigator.userAgent.indexOf("OPR/") > -1) {
+ return "Opera"
+ } else if (navigator.userAgent.indexOf("Edg/") > -1) {
+ return "Edge"
+ } else if (navigator.userAgent.indexOf("Falkon/") > -1) {
+ return "Falkon"
+ } else if (navigator.userAgent.indexOf("Chrome/") > -1) {
+ return "Chrome"
+ } else if (navigator.userAgent.indexOf("Firefox/") > -1) {
+ return "Firefox"
+ } else if (navigator.userAgent.indexOf("Safari/") > -1) {
+ return "Safari"
+ } else if (navigator.userAgent.indexOf("MSIE/") > -1 || navigator.userAgent.indexOf("rv:") > -1) {
+ return "IExplorer"
+ } else {
+ return "Unknown"
+ }
+}
+
+async function detectBrowserVersion() {
+ if ( navigator.userAgent.indexOf("OPR/") > -1) {
+ return parseInt(navigator.userAgent.split('OPR/')[1])
+ } else if (navigator.userAgent.indexOf("Edg/") > -1) {
+ return parseInt(navigator.userAgent.split('Edg/')[1])
+ } else if (navigator.userAgent.indexOf("Falkon/") > -1) {
+ return parseInt(navigator.userAgent.split('Falkon/')[1])
+ } else if (navigator.userAgent.indexOf("Chrome/") > -1) {
+ return parseInt(navigator.userAgent.split('Chrome/')[1])
+ } else if (navigator.userAgent.indexOf("Firefox/") > -1) {
+ return parseInt(navigator.userAgent.split('Firefox/')[1])
+ } else if (navigator.userAgent.indexOf("Safari/") > -1) {
+ return parseInt(navigator.userAgent.split('Safari/')[1])
+ } else if (navigator.userAgent.indexOf("MSIE/") > -1 || navigator.userAgent.indexOf("rv:") > -1) {
+ return parseInt(navigator.userAgent.split('MSIE/')[1])
+ } else {
+ return Infinity
+ }
+}
+
+async function detectOperatingSys() {
+ if (navigator.userAgent.indexOf("Win") > -1) {
+ return "Windows"
+ } else if (navigator.userAgent.indexOf("Mac") > -1) {
+ return "MacOS"
+ } else if (navigator.userAgent.indexOf("Linux") > -1) {
+ return "Linux"
+ } else if (navigator.userAgent.indexOf("UNIX") > -1) {
+ return "UNIX"
+ } else {
+ return "Unknown"
+ }
+}
+
+async function checkWebGl2(callbackUI) {
+ const gl = document.createElement('canvas').getContext('webgl2')
+ if (!gl) {
+ if (typeof WebGL2RenderingContext !== 'undefined') {
+ let msg = 'WebGL2 may be disabled. Please try updating video card drivers'
+ callbackUI(msg, -1, msg)
+ } else {
+ console.log('WebGL2 is not supported')
+ }
+ return false
+ } else {
+ console.log('WebGL2 is enabled')
+ return true
+ }
+}
+
+async function detectGPUVendor() {
+ let gl = document.createElement('canvas').getContext('webgl')
+ let debugInfo
+ if(gl) {
+ debugInfo = gl.getExtension('WEBGL_debug_renderer_info')
+ if (debugInfo) {
+ let result = gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL)
+ //--e.g. : NVIDIA Corporation
+ if( (result.indexOf( "(" ) > -1) && (result.indexOf( ")" ) > -1) ) {
+ return result.substring( result.indexOf( '(' ) + 1, result.indexOf( ')' ) )
+ }
+ return result
+ }
+ }
+ return null
+}
+
+async function detectGPUVendor_v0() {
+ let gl = document.createElement('canvas').getContext('webgl')
+
+ if(gl) {
+ let debugInfo = gl.getExtension('WEBGL_debug_renderer_info')
+ return debugInfo ? gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL) : null
+
+ } else {
+ return null
+ }
+}
+
+async function detectGPUCardType_v0() {
+ let gl = document.createElement('canvas').getContext('webgl')
+ if(gl) {
+ if ((await detectBrowser()) === "Firefox") {
+ //-- return e.g: "GeForce GTX 980/PCIe/SSE2"
+ return gl.getParameter(gl.RENDERER)
+
+ }
+
+ let debugInfo = gl.getExtension('WEBGL_debug_renderer_info')
+ return debugInfo ? gl.getParameter(debugInfo.UNMASKED_RENDERER_WEBGL) : null
+
+ } else {
+ return null
+ }
+ }
+
+async function detectGPUCardType() {
+ let gl = document.createElement('canvas').getContext('webgl')
+ let debugInfo
+
+ if(gl) {
+ if ((await detectBrowser()) === "Firefox") {
+ //-- return e.g: "GeForce GTX 980/PCIe/SSE2"
+ return gl.getParameter(gl.RENDERER)
+
+ }
+
+ debugInfo = gl.getExtension('WEBGL_debug_renderer_info')
+
+ if (debugInfo) {
+
+ let result = gl.getParameter(debugInfo.UNMASKED_RENDERER_WEBGL)
+ //--e.g. : ANGLE (NVIDIA Corporation, GeForce GTX 1050 Ti/PCIe/SSE2, OpenGL 4.5.0 NVIDIA 390.144) as with Chrome
+ // Or: GeForce GTX 1050 Ti/PCIe/SSE2 as with fireFox
+
+ if( (result.indexOf( "(" ) > -1) && (result.indexOf( ")" ) > -1) && (result.indexOf( "(R)" ) == -1) ) {
+
+ result = result.substring( result.indexOf( '(' ) + 1, result.indexOf( ')' ) )
+
+ if ( result.split(',').length == 3) {
+ return result.split(',')[1].trim()
+ }
+
+ }
+
+ return result
+
+ }
+ }
+ return null
+}
+
+async function getCPUNumCores() {
+ return navigator.hardwareConcurrency
+}
+
+async function isChrome() {
+ return /Chrome/.test(navigator.userAgent) && /Google Inc/.test(navigator.vendor)
+}
+
+async function submitTiming2GoogleSheet(dataObj, isOnline = false) {
+ if (isOnline) {
+ // -- Fill form with data to submit
+ Object.keys(dataObj).forEach(dataKey =>{
+ document.getElementById(dataKey).value = dataObj[dataKey];
+ })
+ //-- Settings of submission
+ const scriptURL = 'https://script.google.com/macros/s/AKfycbwn-Ix6IVGOwUSU1VBU8hFcABT9PqwCwN90UxfK_fXp5CEfxvIoQHZXs2XQRZQo_N8I/exec'
+ const form = document.forms['google-sheet']
+ //-- Add event handler to the form.
+ form.addEventListener('submit', e => {
+ e.preventDefault()
+ fetch(scriptURL, { method: 'POST', body: new FormData(form)})
+ .then(response => console.log("------Done------"))
+ .catch(error => console.error('Error!', error.message))
+ })
+ //-- Submit the form
+ document.getElementById("SubmitStatisticalData").click();
+ } else {
+ console.log(" Offline Mode ")
+ }
+}
+
+async function getModelNumParameters( modelObj) {
+ let numParameters = 0
+ for(let layerIdx = 0; layerIdx < modelObj.layers.length; layerIdx ++ ) {
+ numParameters += modelObj.layers[layerIdx].countParams()
+ }
+ return numParameters
+}
+
+async function getModelNumLayers( modelObj) {
+ return modelObj.layers.length
+}
+
+async function load_model ( modelUrl) {
+ return await tf.loadLayersModel(modelUrl)
+}
+
+async function minMaxNormalizeVolumeData (volumeData) {
+ //Normalize the data to the range 0 - 1 using min-max scaling
+ const volumeData_Max = volumeData.max()
+ const volumeData_Min = volumeData.min()
+ const normalizedSlices_3d = volumeData.sub(volumeData_Min).div(volumeData_Max.sub(volumeData_Min))
+ return normalizedSlices_3d
+}
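+
+// Worked example (illustrative): for a tensor holding [2, 4, 6], min = 2 and
+// max = 6, so (x - min) / (max - min) yields [0, 0.5, 1]:
+//   const t = tf.tensor1d([2, 4, 6])
+//   const normalized = await minMaxNormalizeVolumeData(t) // -> [0, 0.5, 1]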
+
+async function addZeroPaddingTo3dTensor (tensor3d, rowPadArr = [1, 1], colPadArr = [1, 1], depthPadArr = [1, 1]) {
+ if (tensor3d.rank !== 3) {
+ throw new Error('Tensor must be 3D')
+ }
+ return tensor3d.pad([rowPadArr, colPadArr, depthPadArr])
+}
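+
+// Illustrative: with the default [1, 1] padding on each axis, a
+// [256, 256, 256] tensor becomes [258, 258, 258]:
+//   const padded = await addZeroPaddingTo3dTensor(tf.zeros([256, 256, 256]))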
+
+async function findArrayMax(array){
+ return array.reduce( (e1, e2) => {
+ return ( e1 > e2 ? e1 : e2 )
+ })
+}
+
+async function removeZeroPaddingFrom3dTensor(tensor3d, rowPad = 1, colPad = 1, depthPad = 1){
+ if (tensor3d.rank !== 3) {
+ throw new Error('Tensor must be 3D')
+ }
+ const [h, w, d] = tensor3d.shape
+ return tensor3d.slice([rowPad, colPad, depthPad], [h - 2 * rowPad, w - 2 * colPad, d - 2 * depthPad])
+}
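+
+// Illustrative round trip: slicing off the same pad that was added restores
+// the original shape, e.g. [258, 258, 258] -> [256, 256, 256]:
+//   const restored = await removeZeroPaddingFrom3dTensor(padded, 1, 1, 1)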
+
+async function applyMriThreshold(tensor, percentage) {
+ // Perform asynchronous operations outside of tf.tidy
+ const maxTensor = tensor.max();
+ const thresholdTensor = maxTensor.mul(percentage);
+ const threshold = await thresholdTensor.data(); // Extracts the threshold value
+
+ // Dispose tensors not needed anymore
+ maxTensor.dispose();
+ thresholdTensor.dispose();
+
+ // Use tf.tidy for synchronous operations
+ return tf.tidy(() => {
+ const dataForProcessing = tensor.clone();
+
+ // Thresholding (assuming background has very low values compared to the head)
+ const mask = dataForProcessing.greater(threshold[0]);
+ //-- const denoisedMriData = dataForProcessing.mul(mask);
+
+ // No need to manually dispose dataForProcessing and mask, as tf.tidy() will dispose them auto.
+ return mask;
+ });
+
+ //-- return denoisedMriData;
+}
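+
+// Illustrative usage (values are assumptions): with percentage = 0.2, voxels
+// below 20% of the volume's maximum intensity are masked out as background:
+//   const mask = await applyMriThreshold(slices_3d, 0.2) // boolean mask tensor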
+
+async function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad, dilationRate, sliceSize) {
+ const batchSize = input.shape[0];
+ const depth = input.shape[1];
+ const height = input.shape[2];
+ const width = input.shape[3];
+ const inChannels = input.shape[4];
+ const outChannels = filter.shape[4];
+
+ // Accumulator for the output channels (initialized lazily, concatenated along the channel axis)
+ let outputChannels = null;
+
+ // Slice the input tensor and process one output channel at a time
+ for (let channel = 0; channel < outChannels; channel++) {
+ const numSlices = Math.ceil(inChannels / sliceSize);
+ const biasesSlice = biases.slice([channel], [1]);
+ let outputChannel = null;
+
+ for (let i = 0; i < numSlices; i++) {
+ const startChannel = i * sliceSize;
+ const endChannel = Math.min((i + 1) * sliceSize, inChannels);
+
+ // Only proceed if there are channels to process
+ if (startChannel < inChannels) {
+ const resultSlice = tf.tidy(() => {
+ const inputSlice = input.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, endChannel - startChannel]);
+ const filterSlice = filter.slice([0, 0, 0, startChannel, channel], [-1, -1, -1, endChannel - startChannel, 1]);
+ // Perform the convolution for the current slice and output channel
+ return tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate);
+ });
+
+ if (outputChannel === null) {
+ outputChannel = resultSlice;
+ } else {
+ const updatedOutputChannel = outputChannel.add(resultSlice);
+ outputChannel.dispose();
+ resultSlice.dispose();
+ outputChannel = updatedOutputChannel;
+ }
+ }
+ }
+
+ // Add the biases to the accumulated convolutions for this channel
+ const biasedOutputChannel = outputChannel.add(biasesSlice);
+ outputChannel.dispose();
+ biasesSlice.dispose();
+
+ // Accumulate the channel to the output array
+ if (outputChannels == null){
+ outputChannels = biasedOutputChannel;
+ }else{
+ const updatedOutputChannels = tf.concat([outputChannels, biasedOutputChannel], 4);
+ biasedOutputChannel.dispose();
+ outputChannels.dispose();
+ outputChannels = updatedOutputChannels;
+ }
+ }
+
+ return outputChannels;
+}
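+
+// Illustrative call (variable names are assumptions): evaluate one conv3d
+// layer in slices of 3 input channels at a time to bound peak GPU memory,
+// mirroring how the inference loop below invokes this helper:
+//   const out = await convByOutputChannelAndInputSlicing(
+//     x, layer.getWeights()[0], layer.getWeights()[1],
+//     layer.strides, layer.padding, layer.dilationRate, 3)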
+
+class SequentialConvLayer {
+ constructor(model, chunkSize, isChannelLast) {
+ this.model = model;
+ this.outChannels = model.outputLayers[0].kernel.shape[4];
+ this.chunkSize = chunkSize;
+ this.isChannelLast = isChannelLast;
+ }
+
+ /**
+ * Apply sequential convolution layer
+ * @since 3.0.0
+ * @member SequentialConvLayer
+ * @param {tf.Tensor} inputTensor e.g. [ 1, 256, 256, 256, 5 ]
+ * @return {promise}
+ *
+ * convLayer.rank -> 3
+ * typeof(convLayer) -> "object"
+ * convLayer: Object { dataFormat: "channelsLast", dilationRate: Array(3) [ 1, 1, 1 ], inputSpec: Array [ {…} ],
+ * name: "output", padding: "same", strides: Array(3) [ 1, 1, 1 ], ...}
+ *
+ * weights.shape -> Array(5) [ 1, 1, 1, 5, 3 ]
+ * weights.print()
+ * //=> Tensor
+ * [[[[[0.146999 , -1.4474995, -2.8961499],
+ * [1.1067894, 0.6897876 , -0.7573005],
+ * [-0.38512 , -0.2812168, -0.8637539],
+ * [0.9341159, -0.0344299, -2.3668685],
+ * [0.1052373, 1.266812 , 0.6542516 ]]]]]
+ *
+ * biases.shape -> Array [ 3 ]
+ * biases.print()
+ * //=> Tensor
+ * [-0.7850812, -2.3238883, 2.1639345]
+ *
+ * for idx = 0 -> filterWeights.shape -> Array(5) [ 1, 1, 1, 5, 1 ]
+ * filterWeights.print()
+ * //=> Tensor
+ * [[[[[0.146999 ],
+ * [1.1067894],
+ * [-0.38512 ],
+ * [0.9341159],
+ * [0.1052373]]]]]
+ *
+ * for idx = 0 -> filterBiases.shape -> Array [1]
+ * filterBiases.print()
+ * //=> Tensor
+ * [-0.7850812]
+
+ */
+
+ async apply(inputTensor) {
+
+ let oldDeleteTextureThreshold = tf.ENV.get('WEBGL_DELETE_TEXTURE_THRESHOLD');
+ tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0);
+
+ const self = this;
+ // Important to avoid "undefined" class var members inside the timer.
+ // "this" has another meaning inside the timer.
+
+ //document.getElementById("progressBarChild").parentElement.style.visibility = "visible";
+
+ return new Promise((resolve, reject) => {
+
+ const startTime = performance.now();
+
+ const convLayer = self.model.layers[self.model.layers.length - 1];
+ const weights = convLayer.getWeights()[0]; //
+ const biases = convLayer.getWeights()[1];
+ const outputShape = self.isChannelLast ? inputTensor.shape.slice(1,-1) : inputTensor.shape.slice(2);
+ //-- e.g. outputShape : [256,256,256] or cropped Dim
+ //-- if inputTensor [ 1, D, H, W, 50 ], channelLast true -> outputShape : outputShape [D, H, W]
+ //-- if inputTensor [ 1, 50, D, H, W ], channelLast false -> outputShape : outputShape [D, H, W]
+
+ let outB = tf.mul(tf.ones(outputShape), -10000);
+ //-- e.g. outB.shape [256,256,256]
+ let outC = tf.zeros(outputShape);
+ //-- e.g. outC.shape [256,256,256]
+ let chIdx = 0;
+
+ // console.log("---------------------------------------------------------");
+ console.log(" channel loop");
+
+ let seqTimer = window.setInterval(async function() {
+
+ tf.engine().startScope(); // Start TensorFlow.js scope
+ console.log('=======================');
+ const memoryInfo0 = tf.memory();
+ console.log(`| Number of Tensors: ${memoryInfo0.numTensors}`);
+ console.log(`| Number of Data Buffers: ${memoryInfo0.numDataBuffers}`);
+ console.log("Channel : ", chIdx);
+
+ const result = tf.tidy(() => {
+ const filterWeights = weights.slice([0, 0, 0, 0, chIdx], [-1, -1, -1, -1, 1]);
+ // -- e.g. filterWeights.shape [ 1, 1, 1, 5, 1 ]
+ const filterBiases = biases.slice([chIdx], [1]);
+ //-- e.g. filterBiases.shape [1] -> Tensor [-0.7850812]
+ const outA = processTensorInChunks(inputTensor,
+ filterWeights,
+ Math.min(self.chunkSize, self.outChannels))
+ .add(filterBiases);
+ const greater = tf.greater(outA, outB);
+ const newoutB = tf.where(greater, outA, outB);
+ const newoutC = tf.where(greater, tf.fill(outC.shape, chIdx), outC);
+ // Dispose the old tensors before reassigning
+ tf.dispose([outB, outC, filterWeights, filterBiases, outA, greater]);
+ // Dummy operation to trigger cleanup
+ tf.tidy(() => tf.matMul(tf.ones([1, 1]), tf.ones([1, 1])));
+ return [newoutC, newoutB];
+ });
+
+ // -- await showMemStatus(chIdx, self.outChannels);
+
+ const memoryInfo1 = tf.memory();
+ console.log(`| Number of Tensors: ${memoryInfo1.numTensors}`);
+ console.log(`| Number of Data Buffers: ${memoryInfo1.numDataBuffers}`);
+ console.log('=======================');
+
+ // Log memory usage
+
+ const memoryInfo = tf.memory();
+ console.log(`Iteration ${chIdx}:`);
+ console.log(`Number of Tensors: ${memoryInfo.numTensors}`);
+ console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`);
+ console.log(`Bytes In Use: ${memoryInfo.numBytes}`);
+ console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`);
+ console.log(`Unreliable: ${memoryInfo.unreliable}`);
+
+ // Dispose of previous values before assigning new tensors to outC and outB
+ if (typeof outC !== 'undefined') outC.dispose();
+ if (typeof outB !== 'undefined') outB.dispose();
+ // Assign the new values to outC and outB
+ outC = tf.keep(result[0]);
+ outB = tf.keep(result[1]);
+ // // Assign the new values to outC and outB
+ // outC = result[0];
+ // outB = result[1];
+ tf.engine().endScope();
+
+ if(chIdx == (self.outChannels -1)) {
+
+ window.clearInterval( seqTimer );
+ document.getElementById("progressBarChild").style.width = 0 + "%";
+ tf.dispose(outB);
+ const endTime = performance.now();
+ const executionTime = endTime - startTime;
+ console.log(`Execution time for output layer: ${executionTime} milliseconds`);
+ tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', oldDeleteTextureThreshold);
+ resolve(outC);
+ } else {
+
+ chIdx++;
+
+
+ // the seemingly strange sequence of operations
+ // below prevents tfjs from uncontrollably
+ // grabbing buffers, even when all tensors have
+ // already been disposed
+
+ const outCShape = outC.shape;
+ const outCdata = outC.dataSync();
+ const outBShape = outB.shape;
+ const outBdata = outB.dataSync();
+ outC.dispose();
+ outB.dispose();
+ //tf.disposeVariables()
+ outC = tf.tensor(outCdata, outCShape);
+ outB = tf.tensor(outBdata, outBShape);
+
+ document.getElementById("progressBarChild").style.width = (chIdx + 1) * 100 / self.outChannels + "%";
+
+ }
+
+ // Artificially introduce a pause to allow for garbage collection to catch up
+ await new Promise(resolve => setTimeout(resolve, 300));
+
+
+ }, 0);
+ });
+
+
+ }
+
+
+
+} // <<<< End of class
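+
+// Illustrative usage (tensor names are assumptions): wrap the loaded model
+// and compute the final argmax label map one output channel at a time; a
+// larger chunkSize is faster but uses more GPU memory:
+//   const seq = new SequentialConvLayer(model, 10, true) // true = channels-last
+//   const labelMap = await seq.apply(lastHiddenTensor) // e.g. shape [D, H, W]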
+
+async function inferenceFullVolumeSeqCovLayerPhase2 (opts, modelEntry, model, slices_3d, num_of_slices, slice_height, slice_width, pipeline1_out, callbackUI, statData) {
+
+ //-- Phase 2: after removing the skull, locate the brain volume and run inference
+ console.log(" ---- Start FullVolume Inference with Sequential Conv Layer for phase-II ---- ");
+ let quantileNorm = modelEntry.enableQuantileNorm;
+
+ if(quantileNorm) {
+ // Quantile normalization requires specific models trained with it
+ console.log("preModel Quantile normalization enabled");
+ slices_3d = await quantileNormalizeVolumeData(slices_3d);
+ } else {
+ // Min-max normalize MRI data to the range 0 to 1
+ console.log("preModel Min Max normalization enabled");
+ slices_3d = await minMaxNormalizeVolumeData(slices_3d);
+ }
+
+
+
+ let mask_3d;
+
+ if(pipeline1_out == null) { // preModel is null
+
+ // Check if thresholding the MRI to remove noisy voxels for better cropping is needed.
+ let autoThresholdValue = modelEntry.autoThreshold;
+
+ if( (autoThresholdValue > 0) && (autoThresholdValue <= 1) ) {
+
+ // Filter out noisy voxels below autoThresholdValue from the MRI
+ mask_3d = await applyMriThreshold(slices_3d, autoThresholdValue);
+ } else {
+ console.log("No valid crop threshold value");
+ // binarize original image
+ mask_3d = slices_3d.greater([0]).asType('bool');
+ }
+
+ } else {
+
+ mask_3d = pipeline1_out.greater([0]).asType('bool');
+ //-- pipeline1_out.dispose();
+
+ }
+
+ console.log(" mask_3d shape : ", mask_3d.shape);
+
+ const coords = await tf.whereAsync(mask_3d);
+ //-- Get each voxel coords (x, y, z)
+
+ mask_3d.dispose();
+
+ const coordsArr = coords.arraySync();
+
+ let row_min = slice_height, row_max = 0, col_min = slice_width, col_max = 0, depth_min = num_of_slices, depth_max = 0;
+
+ for(let i = 0; i < coordsArr.length; i++) {
+
+ if ( row_min > coordsArr[i][0] ) {
+ row_min = coordsArr[i][0];
+ } else if(row_max < coordsArr[i][0]) {
+ row_max = coordsArr[i][0];
+ }
+
+ if ( col_min > coordsArr[i][1] ) {
+ col_min = coordsArr[i][1];
+ } else if(col_max < coordsArr[i][1]) {
+ col_max = coordsArr[i][1];
+ }
+
+ if ( depth_min > coordsArr[i][2] ) {
+ depth_min = coordsArr[i][2];
+ } else if(depth_max < coordsArr[i][2]) {
+ depth_max = coordsArr[i][2];
+ }
+ }
+
+
+ console.log( "row min and max :", row_min, row_max);
+ console.log( "col min and max :", col_min, col_max);
+ console.log( "depth min and max :", depth_min, depth_max);
+
+ //-- Reference voxel where the cropped volume starts
+ let refVoxel = [row_min, col_min, depth_min];
+ //-- Starting from refVoxel, the size of the bounding volume
+ let boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1];
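+
+ // Worked example (illustrative): if the mask spans rows 40..215,
+ // cols 30..220 and depths 50..200, then refVoxel = [40, 30, 50] and
+ // boundVolSizeArr = [176, 191, 151], i.e. max - min + 1 per axis.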
+
+ coords.dispose();
+
+ //-- Extract 3d object (e.g. brain)
+ let cropped_slices_3d = slices_3d.slice([row_min, col_min, depth_min], [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] )
+
+ slices_3d.dispose();
+
+ //-- Padding size added to the cropped brain
+ let pad = modelEntry.cropPadding;
+
+ // Create margin around the bounding volume
+ let cropped_slices_3d_w_pad = await addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad] , [pad, pad], [pad, pad]);
+ console.log(" cropped slices_3d with padding shape: ", cropped_slices_3d_w_pad.shape);
+
+ cropped_slices_3d.dispose();
+
+
+ if(opts.drawBoundingVolume) {
+
+ let testVol = await removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad);
+ console.log(" outLabelVolume without padding shape : ", testVol.shape);
+
+ testVol = resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr );
+ console.log(" outLabelVolume final shape after resizing : ", testVol.shape);
+
+ draw3dObjBoundingVolume(tf.unstack(testVol));
+ testVol.dispose();
+
+ return 0;
+ }
+
+
+ statData["Brainchop_Ver"] = "FullVolume";
+ let res = await model
+ try {
+ let startTime = performance.now();
+ let inferenceStartTime = performance.now();
+ // maxLabelPredicted in whole volume of the brain
+ let maxLabelPredicted = 0;
+ let transpose = modelEntry.enableTranspose;
+ let delay = modelEntry.inferenceDelay;
+ console.log("Inference delay :", delay);
+
+ if(transpose) {
+ cropped_slices_3d_w_pad = cropped_slices_3d_w_pad.transpose()
+ console.log("Input transposed for pre-model");
+ } else {
+ console.log("Transpose not enabled for pre-model");
+ }
+
+ let i = 1;
+ let layersLength = res.layers.length;
+ console.log("res.layers.length ", layersLength);
+
+ let isChannelLast = isModelChnlLast(res);
+ const batchSize = opts.batchSize;
+ const numOfChan = opts.numOfChan;
+ let adjusted_input_shape
+ //-- Adjust model input shape
+ if(isChannelLast) {
+
+ res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0];
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1];
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2];
+
+ adjusted_input_shape = [batchSize, res.layers[0].batchInputShape[1],
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ numOfChan];
+
+ } else {
+
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0];
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1];
+ res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2];
+
+ adjusted_input_shape = [batchSize, numOfChan,
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ res.layers[0].batchInputShape[4]];
+
+ }
+
+ console.log(" Model batch input shape : ", res.layers[0].batchInputShape);
+ // -- batchInputShape {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W]
+
+ statData["Input_Shape"] = JSON.stringify(res.layers[0].batchInputShape);
+ statData["Output_Shape"] = JSON.stringify(res.output.shape);
+ statData["Channel_Last"] = isChannelLast;
+ statData["Model_Param"] = getModelNumParameters(res);
+ statData["Model_Layers"] = getModelNumLayers(res);
+ statData["Model"] = modelEntry.modelName;
+ statData["Extra_Info"] = null;
+
+
+ // Determine the number of output channels in the last layer of the model
+ // e.g. 3, 50, 104
+ const outputLayer = res.layers[res.layers.length - 1];
+ console.log("Output Layer : ", outputLayer);
+
+ const expected_Num_labels = isChannelLast ?
+ outputLayer.outputShape[outputLayer.outputShape.length - 1]:
+ outputLayer.outputShape[1];
+ console.log("Num of output channels : ", expected_Num_labels);
+
+
+
+ let curTensor = [];
+ curTensor[0] = cropped_slices_3d_w_pad.reshape(adjusted_input_shape);
+ // console.log("curTensor[0] :", curTensor[0].dataSync());
+
+ // let curProgBar = parseInt(document.getElementById("progressBar").style.width);
+
+ let timer = window.setInterval(async function() {
+
+ try {
+ if (res.layers[i].activation.getClassName() !== 'linear') {
+ curTensor[i] = res.layers[i].apply( curTensor[i-1]);
+ } else {
+
+ curTensor[i] = await convByOutputChannelAndInputSlicing(curTensor[i-1],
+ res.layers[i].getWeights()[0],
+ res.layers[i].getWeights()[1],
+ res.layers[i].strides,
+ res.layers[i].padding,
+ res.layers[i].dilationRate,
+ 3); // important for memory use
+ }
+
+
+ // // Log memory usage
+ // const memoryInfo = tf.memory();
+ // console.log(`Iteration ${i}:`);
+ // console.log(`Number of Tensors: ${memoryInfo.numTensors}`);
+ // console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`);
+ // console.log(`Bytes In Use: ${memoryInfo.numBytes}`);
+ // console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`);
+ // console.log(`Unreliable: ${memoryInfo.unreliable}`);
+
+
+ tf.dispose(curTensor[i-1]);
+
+ } catch(err) {
+
+            if( err.message === "Failed to compile fragment shader.") {
+                const errTxt = "Context lost due to limited memory available; please check current browser resources and verify GPU availability for this model"
+                callbackUI(errTxt, -1, errTxt)
+            } else {
+                callbackUI(err.message, -1, err.message)
+            }
+
+ window.clearInterval( timer );
+ tf.engine().endScope();
+ tf.engine().disposeVariables();
+
+ statData["Inference_t"] = Infinity;
+ statData["Postprocess_t"] = Infinity;
+ statData["Status"] = "Fail";
+ statData["Error_Type"] = err.message;
+ statData["Extra_Err_Info"] = "Failed while model layer " + i + " apply";
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData);
+ }
+
+ return 0;
+ }
+
+ console.log("layer ", i);
+ console.log("layer output Tensor shape : ", curTensor[i].shape);
+ console.log("layer count params ", res.layers[i].countParams());
+
+ res.layers[i].dispose();
+ curTensor[i-1].dispose();
+
+ callbackUI("Layer " + i.toString(), (i+1)/layersLength)
+ if (tf.memory().unreliable) {
+ const unreliableReasons = "unreliable reasons :" + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+ if( i == layersLength - 2) { //Stop before the last layer or classification layer.
+
+ window.clearInterval( timer );
+
+
+                   // Create an instance of SequentialConvLayer.
+                   // The second argument controls how many output channels are processed per step:
+                   // larger values run faster but use more GPU memory.
+                   let seqConvLayer = new SequentialConvLayer(res, 10, isChannelLast);
+
+
+ // Apply the last output tensor to the seq. instance
+ let outputTensor = null;
+
+ const profileInfo = await tf.profile(async() => {
+ // Your tensor operations here
+ outputTensor = await seqConvLayer.apply(curTensor[i]);
+ });
+
+ console.log("profileInfo : ",profileInfo);
+
+ //-- document.getElementById("progressBarChild").style.width = 0 + "%";;
+
+ // Dispose the previous layer input tensor
+ tf.dispose(curTensor[i]);
+ // delete the used class
+ //? delete seqConvLayer;
+
+ // You can now use 'outputTensor' as needed
+ console.log(outputTensor);
+ console.log(" Output tensor shape : ", outputTensor.shape);
+ // Array(3) [ 256, 256, 256 ]
+
+                   if(outputTensor.shape.length != 3) {
+                        const errTxt = "Output tensor shape should be 3 dims but it is " + outputTensor.shape.length
+                        callbackUI(errTxt, -1, errTxt)
+                   }
+
+
+ let Inference_t = ((performance.now() - startTime)/1000).toFixed(4);
+
+ console.log(" find array max ");
+ let curBatchMaxLabel = findArrayMax(Array.from(outputTensor.dataSync()));
+
+ if( maxLabelPredicted < curBatchMaxLabel ) {
+ maxLabelPredicted = curBatchMaxLabel;
+ }
+
+ let numSegClasses = maxLabelPredicted + 1;
+ console.log("Predicted num of segmentation classes", numSegClasses);
+ statData["Actual_Labels"] = numSegClasses;
+ statData["Expect_Labels"] = expected_Num_labels;
+ statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false;
+
+                   if( numSegClasses != expected_Num_labels ) {
+                        const errTxt = "expected " + expected_Num_labels + " labels, but the model predicted " + numSegClasses + ". For possible solutions please refer to the FAQ."
+                        callbackUI(errTxt, -1, errTxt)
+                        console.log(errTxt);
+                   }
+
+                   //-- Reshape into a 3D label volume matching the cropped, padded input
+ let outLabelVolume = outputTensor.reshape([cropped_slices_3d_w_pad.shape[0], cropped_slices_3d_w_pad.shape[1], cropped_slices_3d_w_pad.shape[2]]);
+ tf.dispose(outputTensor);
+
+                   // Transpose back so the output matches the original MRI orientation
+ if(transpose) {
+ console.log("outLabelVolume transposed");
+ outLabelVolume = outLabelVolume.transpose();
+ }
+
+ outLabelVolume = removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad);
+ console.log(" outLabelVolume without padding shape : ", outLabelVolume.shape);
+ outLabelVolume = resizeWithZeroPadding(outLabelVolume, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr );
+ console.log(" outLabelVolume final shape after resizing : ", outLabelVolume.shape);
+
+                   let filterOutWithPreMask = modelEntry.filterOutWithPreMask;
+
+                   // Clean the skull area wrongly segmented in phase-2.
+ if(pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) {
+ outLabelVolume = outLabelVolume.mul(binarizeVolumeDataTensor(pipeline1_out));
+ }
+
+
+ startTime = performance.now();
+ // Generate output volume or slices
+ console.log("Generating correct output");
+
+ try {
+ const img = new Uint32Array(outLabelVolume.dataSync());
+ const Vshape = outLabelVolume.shape;
+ const Vtype = outLabelVolume.dtype;
+ tf.dispose(outLabelVolume);
+ tf.engine().endScope();
+ tf.engine().disposeVariables();
+                        generateOutputSlicesV2(img, Vshape, Vtype, num_of_slices, numSegClasses, slice_height, slice_width, modelEntry, opts, niftiImage);
+ console.log(" Phase-2 num of tensors after generateOutputSlicesV2: " , tf.memory().numTensors );
+
+ } catch (error) {
+
+ //-- Timing data to collect
+ tf.engine().endScope();
+ tf.engine().disposeVariables();
+ console.log("Error while generating output: ", error)
+
+ webix.alert("Failed while generating output due to limited browser memory available");
+
+ statData["Inference_t"] = Inference_t;
+ statData["Postprocess_t"] = Infinity;
+ statData["Status"] = "Fail";
+ statData["Error_Type"] = error.message;
+ statData["Extra_Err_Info"] = "Failed while generating output";
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData);
+ }
+
+ return 0;
+ }
+
+ let Postprocess_t = ((performance.now() - startTime)/1000).toFixed(4);
+
+                   //? document.getElementById("progressBar").style.width = 0;
+                   //webix.message.hide("waitMessage");
+
+                   //? $$("downloadBtn").enable();
+                   //? $$("segmentBtn").enable();
+                   // $$("imageUploader").enable();
+ tf.engine().disposeVariables();
+
+ console.log("Processing the whole brain volume in tfjs for multi-class output mask took : ",
+ ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds");
+
+
+ //-- Timing data to collect
+ statData["Inference_t"] = Inference_t;
+ statData["Postprocess_t"] = Postprocess_t;
+ statData["Status"] = "OK";
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData);
+ }
+
+ } else {
+
+ i++;
+ }
+
+ }, delay);
+
+ } catch(err) {
+ callbackUI(err.message, -1, err.message)
+          console.log(
+              "If the WebGL context is lost, try restoring the context and rerunning inference"
+          );
+
+
+          //? document.getElementById("webGl2Status").style.backgroundColor = isWebGL2ContextLost() ? "Red" : "Green";
+
+          //? document.getElementById("memoryStatus").style.backgroundColor = tf.memory().unreliable ? "Red" : "Green";
+ }
+ //});
+
+ }
+
+
+async function calculateQuantiles(tensor, lowerQuantile = 0.01, upperQuantile = 0.99) {
+ // Flatten the tensor
+ const flatTensor = tensor.flatten();
+
+ // Convert the flattened tensor to an array to sort it
+ const flatArray = await flatTensor.array();
+ flatArray.sort((a, b) => a - b); // Sort the array in ascending order
+
+ // Convert the sorted array back to a tensor
+ const sortedTensor = tf.tensor1d(flatArray);
+
+ // Calculate the indices for the quantiles
+ const numElements = sortedTensor.shape[0];
+ const lowIndex = Math.floor(numElements * lowerQuantile);
+ const highIndex = Math.ceil(numElements * upperQuantile) - 1; // Subtract 1 because indices are 0-based
+
+ // Slice the sorted tensor to get qmin and qmax
+ const qmin = sortedTensor.slice(lowIndex, 1); // Get the value at the low index
+ const qmax = sortedTensor.slice(highIndex, 1); // Get the value at the high index
+
+ // Get the actual values from the tensors
+ const qminValue = (await qmin.array())[0];
+ const qmaxValue = (await qmax.array())[0];
+
+ // Clean up tensors to free memory
+ flatTensor.dispose();
+ sortedTensor.dispose();
+ qmin.dispose();
+ qmax.dispose();
+
+ return { qmin: qminValue, qmax: qmaxValue };
+}
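+// Worked example (illustrative only): for 100 flattened values sorted as 1..100 with the
+// default quantiles, lowIndex = floor(100 * 0.01) = 1 and highIndex = ceil(100 * 0.99) - 1 = 98,
+// so qmin = sorted[1] = 2 and qmax = sorted[98] = 99, trimming a single outlier at each tail.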
+
+async function quantileNormalizeVolumeData(tensor, lowerQuantile = 0.05, upperQuantile = 0.95) {
+ // Call calculateQuantiles and wait for the result
+ const { qmin, qmax } = await calculateQuantiles(tensor, lowerQuantile, upperQuantile);
+
+ // Convert qmin and qmax back to scalars
+ const qminScalar = tf.scalar(qmin);
+ const qmaxScalar = tf.scalar(qmax);
+
+ // Perform the operation: (tensor - qmin) / (qmax - qmin)
+ const resultTensor = tensor.sub(qminScalar).div(qmaxScalar.sub(qminScalar));
+
+ // Dispose of the created scalars to free memory
+ qminScalar.dispose();
+ qmaxScalar.dispose();
+
+ // Return the resulting tensor
+ return resultTensor;
+}
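+// Usage sketch (hedged): slices_3d = await quantileNormalizeVolumeData(slices_3d)
+// maps the 5th percentile to 0 and the 95th percentile to 1 by default; the result
+// is not clipped, so extreme voxels fall outside [0, 1].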
+
+async function resizeWithZeroPadding(croppedTensor3d, newDepth, newHeight, newWidth, refVoxel, boundVolSizeArr){
+  let row_pad_before = refVoxel[0]
+  let col_pad_before = refVoxel[1]
+  let depth_pad_before = refVoxel[2]
+  // last and lowest voxel of the bounding volume
+  let row_max = row_pad_before + boundVolSizeArr[0] - 1 // size [2, 2, 2] means 2 voxels total in each dim
+  let col_max = col_pad_before + boundVolSizeArr[1] - 1
+  let depth_max = depth_pad_before + boundVolSizeArr[2] - 1
+
+  let row_pad_after = (newHeight - row_max - 1) > 0 ? (newHeight - row_max - 1) : 0
+  let col_pad_after = (newWidth - col_max - 1) > 0 ? (newWidth - col_max - 1) : 0
+  let depth_pad_after = (newDepth - depth_max - 1) > 0 ? (newDepth - depth_max - 1) : 0
+
+  return croppedTensor3d.pad([ [row_pad_before, row_pad_after], [col_pad_before, col_pad_after], [depth_pad_before, depth_pad_after] ])
+}
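+// Worked example (illustrative): restoring a cropped 150x150x150 brain into a
+// 256x256x256 volume with refVoxel = [50, 60, 70] pads 50, 60 and 70 voxels before
+// each axis and 256 - (50 + 150) = 56, 256 - (60 + 150) = 46 and 256 - (70 + 150) = 36
+// voxels after, so the labels land back at their original coordinates.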
+
+async function generateOutputSlicesV2 (img, OutVolumeTensorShape, OutVolumeTensorType, num_of_slices, numSegClasses, slice_height, slice_width, modelEntry, opts, niftiImage) {
+  // Post-process the label volume and build the 1D output array for the selected model type
+ let allOutputSlices3DCC = []
+ let allOutputSlices3DContours = []
+ if(opts.isPostProcessEnable) {
+ const bwInstance = new bwlabeler()
+ const dim = new Uint32Array(OutVolumeTensorShape)
+      const conn = 26 // 26-connectivity: faces, edges and corners all count as neighbors
+ const binarize = true
+ const onlyLargestClusterPerClass = true
+ const [labelCount, labeledImage] = bwInstance.bwlabel(img,
+ dim,
+ conn,
+ binarize,
+ onlyLargestClusterPerClass)
+ for (let i = 0; i < img.length; i++) {
+ img[i] *= labeledImage[i]
+ }
+ } // if isPostProcessEnable
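+  // With binarize = true, bwlabel thresholds the volume to foreground/background and
+  // labeledImage is 1 only inside the largest connected foreground component, so the
+  // in-place multiply above erases small disconnected islands from the label map.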
+  // Note: a dtype-matched constructor could be chosen from OutVolumeTensorType
+  // (e.g. Float32Array for 'float32'), but labels are small integers, so a
+  // Uint8Array copy of img is sufficient here.
+  let allOutputSlices3DCC1DimArray = new Uint8Array(img);
+
+
+ let maskBrainExtraction = false;
+
+ let labelArrayBuffer;
+ let modelType = modelEntry.type
+
+ //return img
+ switch ( modelType) {
+ case 'Brain_Masking':
+ {
+ const brainMask = new Uint8Array(allOutputSlices3DCC1DimArray.length);
+ for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) {
+ brainMask[i] = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0;
+ }
+ //labelArrayBuffer = createNiftiOutArrayBuffer(rawNiftiData, brainMask);
+ //allOutputSlices3DCC1DimArray = brainMask;
+ // --labelsHistogramMap = null;
+ //maskBrainExtraction = true;
+ return brainMask
+ //break;
+ }
+ case 'Brain_Extraction':
+ {
+ const maskedData = new Uint8Array(allOutputSlices3DCC1DimArray.length);
+ //const brainData = nifti2data(rawNiftiData);
+
+ for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) {
+ // Create the mask - 1 where the value is non-zero, 0 where it is zero.
+ const maskValue = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0;
+ // Apply the mask to the data - multiply by the mask value.
+ maskedData[i] = niftiImage[i] * maskValue;
+ }
+ //labelArrayBuffer = createNiftiOutArrayBuffer(rawNiftiData, maskedData);
+
+ // Update `allOutputSlices3DCC1DimArray` if needed.
+ //allOutputSlices3DCC1DimArray = maskedData;
+
+ // Other operations...
+ //maskBrainExtraction = true;
+ return maskedData
+ //break;
+ }
+ default:
+ {
+ //labelArrayBuffer = createNiftiOutArrayBuffer(rawNiftiData, allOutputSlices3DCC1DimArray);
+ //break;
+ return img
+ }
+ }
+
+}
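+// Summary of the switch above (hedged reading): 'Brain_Masking' models return a
+// binary Uint8Array mask, 'Brain_Extraction' models return the input intensities
+// with non-brain voxels zeroed, and every other model type returns the label map
+// unchanged.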
+
+
+async function inferenceFullVolumePhase2 (model, slices_3d, num_of_slices, slice_height, slice_width, pipeline1_out, modelEntry, statData, opts, callbackImg, callbackUI, niftiImage) {
+ let outimg = []
+   //-- Phase-2: after removing the skull, locate the brain volume and run inference
+ console.log(" ---- Start FullVolume inference phase-II ---- ")
+ let quantileNorm = modelEntry.enableQuantileNorm
+ if(quantileNorm) {
+ // Quantile normalize function needs specific models to be used
+ console.log("preModel Quantile normalization enabled")
+ slices_3d = await quantileNormalizeVolumeData(slices_3d)
+ } else {
+      // Min-max normalize MRI data to the range 0 to 1
+ console.log("preModel Min Max normalization enabled")
+ slices_3d = await minMaxNormalizeVolumeData(slices_3d)
+ }
+
+ let mask_3d
+
+ if(pipeline1_out == null) { // preModel is null
+
+ // Check if thresholding the MRI to remove noisy voxels for better cropping is needed.
+ let autoThresholdValue = modelEntry.autoThreshold
+
+ if( (autoThresholdValue > 0) && (autoThresholdValue <= 1) ) {
+
+ // Filtered MRI from noisy voxel below autoThresholdValue
+ mask_3d = await applyMriThreshold(slices_3d, autoThresholdValue)
+ } else {
+ console.log("No valid crop threshold value")
+ // binarize original image
+ mask_3d = slices_3d.greater([0]).asType('bool')
+ }
+ } else {
+ mask_3d = pipeline1_out.greater([0]).asType('bool')
+ //-- pipeline1_out.dispose()
+ }
+ console.log(" mask_3d shape : ", mask_3d.shape)
+ const coords = await tf.whereAsync(mask_3d)
+ //-- Get each voxel coords (x, y, z)
+ mask_3d.dispose()
+ const coordsArr = coords.arraySync()
+
+ let row_min = slice_height, row_max = 0, col_min = slice_width, col_max = 0, depth_min = num_of_slices, depth_max = 0
+
+    for(let i = 0; i < coordsArr.length; i++) {
+
+        // use independent checks (not else-if): a single voxel can update both min and max
+        if ( row_min > coordsArr[i][0] ) {
+            row_min = coordsArr[i][0]
+        }
+        if (row_max < coordsArr[i][0]) {
+            row_max = coordsArr[i][0]
+        }
+
+        if ( col_min > coordsArr[i][1] ) {
+            col_min = coordsArr[i][1]
+        }
+        if (col_max < coordsArr[i][1]) {
+            col_max = coordsArr[i][1]
+        }
+
+        if ( depth_min > coordsArr[i][2] ) {
+            depth_min = coordsArr[i][2]
+        }
+        if (depth_max < coordsArr[i][2]) {
+            depth_max = coordsArr[i][2]
+        }
+    }
+
+
+ console.log( "row min and max :", row_min, row_max)
+ console.log( "col min and max :", col_min, col_max)
+ console.log( "depth min and max :", depth_min, depth_max)
+
+ //-- Reference voxel that cropped volume started slice with it
+ let refVoxel = [row_min, col_min, depth_min]
+ console.log("refVoxel :", refVoxel)
+
+    // -- Starting from refVoxel, the size of the bounding volume
+ let boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
+
+ console.log("boundVolSizeArr :", boundVolSizeArr)
+
+ coords.dispose()
+
+ //-- Extract 3d object (e.g. brain)
+ let cropped_slices_3d = slices_3d.slice([row_min, col_min, depth_min], [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] )
+
+ slices_3d.dispose()
+
+ //-- Padding size add to cropped brain
+ let pad = modelEntry.cropPadding
+
+ // Create margin around the bounding volume
+ let cropped_slices_3d_w_pad = await addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad] , [pad, pad], [pad, pad])
+ console.log(" cropped slices_3d with padding shape: ", cropped_slices_3d_w_pad.shape)
+
+ cropped_slices_3d.dispose()
+
+
+ //-- Test dim after padding ..
+ // for (let i = 0; i < cropped_slices_3d_w_pad.rank; i++) {
+ // if(cropped_slices_3d_w_pad.shape[i] > 256) {
+ // console.log(" cropped_slices_3d_w_pad > 256 ")
+ // }
+
+ // }
+
+
+
+ if(opts.drawBoundingVolume) {
+
+ let testVol = await removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad)
+ console.log(" outLabelVolume without padding shape : ", testVol.shape)
+
+ testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr )
+ console.log(" outLabelVolume final shape after resizing : ", testVol.shape)
+
+ draw3dObjBoundingVolume(tf.unstack(testVol))
+ testVol.dispose()
+
+ return 0
+ }
+
+
+ statData["Brainchop_Ver"] = "FullVolume"
+ let startTime = performance.now()
+ let adjusted_input_shape = []
+ let res = await model
+ //?
+ //model.then(function (res) {
+ // try {
+ startTime = performance.now()
+ let inferenceStartTime = performance.now()
+ // maxLabelPredicted in whole volume of the brain
+ let maxLabelPredicted = 0
+ let transpose = modelEntry.enableTranspose
+ let delay = modelEntry.inferenceDelay
+ console.log("Inference delay :", delay)
+
+ if(transpose) {
+ cropped_slices_3d_w_pad = cropped_slices_3d_w_pad.transpose()
+ console.log("Input transposed for pre-model")
+ } else {
+ console.log("Transpose not enabled for pre-model")
+ }
+
+ let i = 1
+ let layersLength = res.layers.length
+ console.log("res.layers.length ", layersLength)
+
+ let isChannelLast = isModelChnlLast(res)
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+
+ //-- Adjust model input shape
+ if(isChannelLast) {
+
+ res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0]
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1]
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2]
+
+ adjusted_input_shape = [batchSize, res.layers[0].batchInputShape[1],
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ numOfChan]
+
+ } else {
+
+ res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0]
+ res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1]
+ res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2]
+
+ adjusted_input_shape = [batchSize, numOfChan,
+ res.layers[0].batchInputShape[2],
+ res.layers[0].batchInputShape[3],
+ res.layers[0].batchInputShape[4]]
+
+ }
+
+ console.log(" Model batch input shape : ", res.layers[0].batchInputShape)
+ // -- batchInputShape {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W]
+
+ statData["Input_Shape"] = JSON.stringify(res.layers[0].batchInputShape)
+ statData["Output_Shape"] = JSON.stringify(res.output.shape)
+ statData["Channel_Last"] = isChannelLast
+ statData["Model_Param"] = getModelNumParameters(res)
+ statData["Model_Layers"] = getModelNumLayers(res)
+ statData["Model"] = modelEntry.modelName
+ statData["Extra_Info"] = null
+
+
+ let curTensor = []
+ curTensor[0] = cropped_slices_3d_w_pad.reshape(adjusted_input_shape)
+ // console.log("curTensor[0] :", curTensor[0].dataSync())
+
+ //? let curProgBar = parseInt(document.getElementById("progressBar").style.width)
+
+    let mytimer = window.setInterval(async function() { // setInterval returns an id synchronously; no await needed
+ try {
+ //-- curTensor[i] = res.layers[i].apply( curTensor[i-1])
+ curTensor[i] = res.layers[i].apply( curTensor[i-1])
+
+ } catch(err) {
+ callbackUI(err.message, -1, err.message)
+ window.clearInterval( mytimer )
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData["Inference_t"] = Infinity
+ statData["Postprocess_t"] = Infinity
+ statData["Status"] = "Fail"
+ statData["Error_Type"] = err.message
+ statData["Extra_Err_Info"] = "Failed while model layer " + i + " apply"
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData)
+ }
+
+ return 0
+ }
+ callbackUI("Layer " + i.toString(), (i+1)/layersLength)
+ console.log("layer output Tensor shape : ", curTensor[i].shape)
+ console.log("layer count params ", res.layers[i].countParams())
+ res.layers[i].dispose()
+ curTensor[i-1].dispose()
+ if (tf.memory().unreliable) {
+ const unreliableReasons = "unreliable reasons :" + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+ //? document.getElementById("memoryStatus").style.backgroundColor = memStatus
+
+
+ if( i == layersLength - 1) {
+ window.clearInterval( mytimer )
+
+ // prediction = res.layers[res.layers.length-1].apply(curTensor[i])
+ // curTensor[i].print()
+ //outputDataBeforArgmx = Array.from(curTensor[i].dataSync())
+
+ let axis = isChannelLast ? -1 : 1
+ console.log(" find argmax ")
+ console.log("last Tensor shape : ", curTensor[i].shape)
+ //-- curTensor[i].shape e.g. [ 1, 256, 256, 256, 3 ]
+ let expected_Num_labels = isChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1]
+ let prediction_argmax
+
+ // Try for argMax with model output tensor.
+
+ try {
+ let argMaxTime = performance.now()
+ console.log(" Try tf.argMax for fullVolume ..")
+ prediction_argmax = tf.argMax(curTensor[i], axis)
+ console.log("tf.argMax for fullVolume takes : ", ((performance.now() - argMaxTime)/1000).toFixed(4) )
+
+ } catch(err1) {
+ // if channel last
+ if(axis == -1) {
+
+ try {
+ let argMaxLargeTime = performance.now()
+ console.log(" tf.argMax failed .. try argMaxLarge ..")
+ let modelOutBuffer = tensor2LightBuffer(curTensor[i].reshape([cropped_slices_3d_w_pad.shape[0], cropped_slices_3d_w_pad.shape[1], cropped_slices_3d_w_pad.shape[2], expected_Num_labels]), 'float16')
+ prediction_argmax = argMaxLarge(modelOutBuffer, cropped_slices_3d_w_pad.shape[0], cropped_slices_3d_w_pad.shape[1], cropped_slices_3d_w_pad.shape[2], expected_Num_labels, 'float16')
+ console.log("argMaxLarge for fullVolume takes : ", ((performance.now() - argMaxLargeTime)/1000).toFixed(4) )
+
+ } catch(err2) {
+
+ let errTxt = "argMax buffer couldn't be created due to limited memory resources."
+ callbackUI(errTxt, -1, errTxt)
+
+
+ window.clearInterval( mytimer )
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData["Inference_t"] = Infinity
+ statData["Postprocess_t"] = Infinity
+ statData["Status"] = "Fail"
+ statData["Error_Type"] = err2.message
+ statData["Extra_Err_Info"] = "prediction_argmax from argMaxLarge failed"
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData)
+ }
+
+ return 0
+
+ }
+
+                } else {
+                    // if channel first ..
+                    let errTxt = "argMax buffer couldn't be created due to limited memory resources."
+                    callbackUI(errTxt, -1, errTxt)
+
+                    // note: tf.argMax threw, so prediction_argmax was never assigned; nothing to dispose
+
+ window.clearInterval( mytimer )
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData["Inference_t"] = Infinity
+ statData["Postprocess_t"] = Infinity
+ statData["Status"] = "Fail"
+ statData["Error_Type"] = err1.message
+ statData["Extra_Err_Info"] = "prediction_argmax from argMaxLarge not support yet channel first"
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData)
+ }
+
+ return 0
+ }
+
+ }
+
+
+
+ console.log(" prediction_argmax shape : ", prediction_argmax.shape)
+ //-- prediction_argmax.shape : [ 1, 256, 256, 256]
+
+ let Inference_t = ((performance.now() - startTime)/1000).toFixed(4)
+
+ //outputDataBeforArgmx = Array.from(prediction_argmax.dataSync())
+ tf.dispose(curTensor[i])
+ // allPredictions.push({"id": allBatches[j].id, "coordinates": allBatches[j].coordinates, "data": Array.from(prediction_argmax.dataSync()) })
+ console.log(" find array max ")
+ //? await
+ let curBatchMaxLabel = await findArrayMax(Array.from(prediction_argmax.dataSync()))
+
+ if( maxLabelPredicted < curBatchMaxLabel ) {
+ maxLabelPredicted = curBatchMaxLabel
+ }
+
+ let numSegClasses = maxLabelPredicted + 1
+ console.log("numSegClasses", numSegClasses)
+ statData["Actual_Labels"] = numSegClasses
+ statData["Expect_Labels"] = expected_Num_labels
+ statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false
+
+
+ if( numSegClasses != expected_Num_labels ) {
+ //errTxt = "expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses + ". For possible solutions please refer to FAQ .", "alert-error"
+ let errTxt = "expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses
+ callbackUI(errTxt, -1, errTxt)
+ }
+
+
+            //-- Reshape into a 3D label volume matching the cropped, padded input
+ let outLabelVolume = prediction_argmax.reshape([cropped_slices_3d_w_pad.shape[0], cropped_slices_3d_w_pad.shape[1], cropped_slices_3d_w_pad.shape[2]])
+ tf.dispose(prediction_argmax)
+
+            // Transpose back so the output matches the original MRI orientation
+ if(transpose) {
+ console.log("outLabelVolume transposed")
+ outLabelVolume = outLabelVolume.transpose()
+ }
+ //? await
+ outLabelVolume = await removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad)
+ console.log(" outLabelVolume without padding shape : ", outLabelVolume.shape)
+ //? await
+ outLabelVolume = await resizeWithZeroPadding(outLabelVolume, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr )
+ console.log(" outLabelVolume final shape after resizing : ", outLabelVolume.shape)
+
+ let filterOutWithPreMask = modelEntry.filterOutWithPreMask
+ // To clean the skull area wrongly segmented in phase-2.
+ if(pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) {
+ outLabelVolume = outLabelVolume.mul(binarizeVolumeDataTensor(pipeline1_out))
+ }
+
+ startTime = performance.now()
+ // Generate output volume or slices
+ console.log("Generating correct output")
+
+ //try {
+ const img = new Uint32Array(outLabelVolume.dataSync())
+ const Vshape = outLabelVolume.shape
+ const Vtype = outLabelVolume.dtype
+ tf.dispose(outLabelVolume)
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+ outimg = await generateOutputSlicesV2(img, Vshape, Vtype, num_of_slices, numSegClasses, slice_height, slice_width, modelEntry, opts, niftiImage)
+ console.log(" Phase-2 num of tensors after generateOutputSlicesV2: " , tf.memory().numTensors )
+ /*} catch (error) {
+
+ //-- Timing data to collect
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ const errTxt = "Failed while generating output due to limited browser memory available"
+ callbackUI(errTxt, -1, errTxt)
+ statData["Inference_t"] = Inference_t
+ statData["Postprocess_t"] = Infinity
+ statData["Status"] = "Fail"
+ statData["Error_Type"] = error.message
+ statData["Extra_Err_Info"] = "Failed while generating output"
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData)
+ }
+
+ return 0
+ }*/
+
+ let Postprocess_t = ((performance.now() - startTime)/1000).toFixed(4)
+
+ //? document.getElementById("progressBar").style.width = 0
+ //webix.message.hide("waitMessage")
+
+ //? $$("downloadBtn").enable()
+ //? $$("segmentBtn").enable()
+ // $$("imageUploader").enable()
+ //tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ console.log("Processing the whole brain volume in tfjs for multi-class output mask took : ",
+ ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds")
+
+
+ //-- Timing data to collect
+ statData["Inference_t"] = Inference_t
+ statData["Postprocess_t"] = Postprocess_t
+ statData["Status"] = "OK"
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData)
+ }
+ clearInterval(mytimer)
+ callbackImg(outimg, opts, modelEntry)
+ callbackUI("Segmentation finished", 0)
+ }
+ i++
+ }, delay)
+ /* } catch(err) {
+
+ callbackUI(err.message, -1, err.message)
+ console.log(
+ "If webgl context is lost, try to restore webgl context by visit the link " +
+ 'here'
+ )
+ //? document.getElementById("webGl2Status").style.backgroundColor = isWebGL2ContextLost() ? "Red" : "Green"
+
+ //? document.getElementById("memoryStatus").style.backgroundColor = tf.memory().unreliable ? "Red" : "Green"
+ }*/
+// })
+ return mytimer
+}
+
+async function inferenceFullVolumePhase1 (model, slices_3d, num_of_slices, slice_height, slice_width, isModelFullVol, modelEntry, statData, opts, callbackImg, callbackUI, niftiImage) {
+ statData["No_SubVolumes"] = 1
+ let outimg = []
+ // load pre-model for inference first, can be null if no pre-model such as GWM models
+ if(modelEntry["preModelId"]) {
+
+ let preModel = load_model(inferenceModelsList[ modelEntry["preModelId"] - 1]['path'] )
+ let transpose = inferenceModelsList[ modelEntry["preModelId"] - 1]["enableTranspose"]
+ let quantileNorm = inferenceModelsList[ modelEntry["preModelId"] - 1]["enableQuantileNorm"]
+ let preModel_slices_3d = null
+
+ //-- If pre-model is not null then slices_3d mask will be generated..
+ //-- The mask is needed to remove the skull and set noise in background to 0, and get the brain bounding volume properly
+ let slices_3d_mask = null
+
+ if(quantileNorm) {
+ // Quantile normalize function needs specific models to be used
+ console.log("preModel Quantile normalization enabled")
+ preModel_slices_3d = await quantileNormalizeVolumeData(slices_3d)
+ } else {
+          // Min-max normalize MRI data to the range 0 to 1
+ console.log("preModel Min Max normalization enabled")
+ preModel_slices_3d = await minMaxNormalizeVolumeData(slices_3d)
+ }
+
+
+       //-- Transpose MRI data to match pytorch/keras input/output conventions
+ //-- Check if pre-model needs transpose..
+ if(transpose) {
+
+ preModel_slices_3d = preModel_slices_3d.transpose()
+ console.log("Input transposed for pre-model")
+
+ } else {
+ console.log("Transpose not enabled for pre-model")
+ }
+
+ statData["Brainchop_Ver"] = "PreModel_FV" ; // e.g. "PreModel_FV"
+
+ preModel.then(function (res) {
+
+ try {
+
+ let inferenceStartTime = performance.now()
+ let preModelObject = res
+
+ // read input shape from model.json object
+ let preModelBatchInputShape = preModelObject.layers[0].batchInputShape
+ console.log(" Pre-Model batch input shape : ", preModelBatchInputShape)
+
+ //-- Verify input shape
+ if(preModelBatchInputShape.length != 5) {
+ const errTxt = "The pre-model input shape must be 5D "
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+
+ let isPreModelChannelLast = isModelChnlLast(preModelObject)
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+ let batch_D, batch_H, batch_W
+ let preModel_input_shape
+ if(isPreModelChannelLast ) {
+ console.log("Pre-Model Channel Last")
+ if (isNaN(preModelBatchInputShape[4]) || (preModelBatchInputShape[4] !=1)) {
+ const errTxt = "The number of channels for pre-model input shape must be 1"
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+
+ batch_D = preModelBatchInputShape[1]
+ batch_H = preModelBatchInputShape[2]
+ batch_W = preModelBatchInputShape[3]
+
+ preModel_input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan]
+
+ } else {
+ console.log("Pre-Model Channel First")
+ if (isNaN(preModelBatchInputShape[1]) || (preModelBatchInputShape[1] !=1)) {
+ const errTxt = "The number of channels for pre-model input shape must be 1"
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+
+ batch_D = preModelBatchInputShape[2]
+ batch_H = preModelBatchInputShape[3]
+ batch_W = preModelBatchInputShape[4]
+
+ preModel_input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W]
+
+ }
+
+
+ statData["Input_Shape"] = JSON.stringify(preModel_input_shape)
+ statData["Output_Shape"] = JSON.stringify(preModelObject.output.shape)
+ statData["Channel_Last"] = isPreModelChannelLast
+ statData["Model_Param"] = getModelNumParameters(preModelObject)
+ statData["Model_Layers"] = getModelNumLayers(preModelObject)
+ //? statData["Model"] = inferenceModelsList[ modelEntry["preModelId"] - 1]["modelName"]
+ //? statData["Extra_Info"] = inferenceModelsList[$$("selectModel").getValue() - 1]["modelName"]
+
+
+ // maxLabelPredicted in whole volume of the brain
+ let maxLabelPredicted = 0
+ let delay = inferenceModelsList[ modelEntry["preModelId"] - 1]["inferenceDelay"]
+
+ let i = 1
+ let layersLength = res.layers.length
+
+ let curTensor = []
+ //-- reshape MRI to model input shape
+ curTensor[0] = preModel_slices_3d.reshape(preModel_input_shape)
+
+ //Dispose the volume
+ tf.dispose(preModel_slices_3d)
+
+ let timer = window.setInterval(async function() {
+
+ try {
+ curTensor[i] = res.layers[i].apply( curTensor[i-1])
+
+ } catch(err) {
+
+                  if( err.message === "Failed to compile fragment shader.") {
+                      const errTxt = "Context lost due to limited memory available; please check current browser resources and verify GPU availability for this model"
+                      callbackUI(errTxt, -1, errTxt)
+                  } else {
+                      callbackUI(err.message, -1, err.message)
+                  }
+
+ window.clearInterval( timer )
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData["Inference_t"] = Infinity
+ statData["Postprocess_t"] = Infinity
+ statData["Status"] = "Fail"
+ statData["Error_Type"] = err.message
+ statData["Extra_Err_Info"] = "PreModel Failed while model layer " + i + " apply"
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData)
+ }
+
+ return 0
+ }
+
+
+
+ res.layers[i].dispose()
+ curTensor[i-1].dispose()
+
+ callbackUI("Layer " + i.toString(), (i+1)/layersLength)
+ if (tf.memory().unreliable) {
+ const unreliableReasons = "unreliable reasons :" + tf.memory().reasons
+ callbackUI(unreliableReasons, NaN, unreliableReasons)
+ }
+
+ if( i == layersLength - 1) {
+ window.clearInterval( timer )
+
+ //-- prediction = res.layers[res.layers.length-1].apply(curTensor[i])
+ //-- curTensor[i].print()
+ //-- outputDataBeforArgmx = Array.from(curTensor[i].dataSync())
+
+ let axis = isPreModelChannelLast ? -1 : 1
+ console.log(" find argmax ")
+ console.log("last Tensor shape : ", curTensor[i].shape)
+ //-- curTensor[i].shape : [ 1, 256, 256, 256, 3 ]
+ let expected_Num_labels = isPreModelChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1]
+ let prediction_argmax
+
+ // Try for argMax with model output tensor.
+
+ try {
+ console.log(" Try tf.argMax for fullVolume ..")
+ prediction_argmax = tf.argMax(curTensor[i], axis)
+
+ } catch(err1) {
+ // if channel last
+ if(axis == -1) {
+
+ try {
+ let argMaxLargeTime = performance.now()
+ console.log(" tf.argMax failed .. try argMaxLarge ..")
+ let modelOutBuffer = tensor2LightBuffer(curTensor[i].reshape([num_of_slices, slice_height, slice_width, expected_Num_labels]), 'float16')
+ prediction_argmax = argMaxLarge(modelOutBuffer, num_of_slices, slice_height, slice_width, expected_Num_labels, 'float16')
+ console.log("argMaxLarge for fullVolume takes : ", ((performance.now() - argMaxLargeTime)/1000).toFixed(4) )
+
+                          } catch(err2) {
+
+                              let errTxt = "argMax buffer couldn't be created due to limited memory resources."
+                              callbackUI(errTxt, -1, errTxt)
+
+                              // note: argMaxLarge threw, so prediction_argmax was never assigned; nothing to dispose
+
+ window.clearInterval( timer )
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData["Inference_t"] = Infinity
+ statData["Postprocess_t"] = Infinity
+ statData["Status"] = "Fail"
+ statData["Error_Type"] = err2.message
+ statData["Extra_Err_Info"] = "preModel prediction_argmax from argMaxLarge failed"
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData)
+ }
+
+ return 0
+
+ }
+
+                      } else {
+                          // if channel first ..
+                          let errTxt = "argMax buffer couldn't be created due to limited memory resources."
+                          callbackUI(errTxt, -1, errTxt)
+
+                          // note: tf.argMax threw, so prediction_argmax was never assigned; nothing to dispose
+
+ window.clearInterval( timer )
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ statData["Inference_t"] = Infinity
+ statData["Postprocess_t"] = Infinity
+ statData["Status"] = "Fail"
+ statData["Error_Type"] = err1.message
+ statData["Extra_Err_Info"] = "preModel prediction_argmax from argMaxLarge not support yet channel first"
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData)
+ }
+
+ return 0
+ }
+
+ }
+
+
+
+ console.log(" Pre-model prediction_argmax shape : ", prediction_argmax.shape)
+ //-- prediction_argmax.shape : [ 1, 256, 256, 256]
+
+ let Inference_t = ((performance.now() - inferenceStartTime)/1000).toFixed(4)
+
+ tf.dispose(curTensor[i])
+
+ console.log(" Pre-model find array max ")
+ let curBatchMaxLabel = await findArrayMax(Array.from(prediction_argmax.dataSync()))
+
+ if( maxLabelPredicted < curBatchMaxLabel ) {
+ maxLabelPredicted = curBatchMaxLabel
+ }
+
+ let numSegClasses = maxLabelPredicted + 1
+ console.log("Pre-model numSegClasses", numSegClasses)
+
+ statData["Actual_Labels"] = numSegClasses
+ statData["Expect_Labels"] = expected_Num_labels
+ statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false
+
+                  //-- Reshape into a 3D label volume matching the input dimensions
+ let outLabelVolume = prediction_argmax.reshape([num_of_slices, slice_height, slice_width])
+ tf.dispose(prediction_argmax)
+
+                  // Transpose back so the output matches the original MRI orientation
+ if(transpose) {
+ console.log("Pre-model outLabelVolume transposed")
+ outLabelVolume = outLabelVolume.transpose()
+ }
+
+
+ let startTime = performance.now()
+ // Generate output volume or slices
+ console.log("Generating pre-model output")
+
+ try {
+ slices_3d_mask = tf.tidy(() => {
+ let unstackOutVolumeTensor = tf.unstack(outLabelVolume)
+ tf.dispose(outLabelVolume)
+ return generateBrainMask(unstackOutVolumeTensor, num_of_slices, slice_height, slice_width)
+ })
+
+ console.log(" Phase-1 num of tensors after generateBrainMask: " , tf.memory().numTensors )
+
+ } catch (error) {
+
+ //-- Timing data to collect
+ tf.engine().endScope()
+ tf.engine().disposeVariables()
+
+ const errTxt = "Failed while generating pre-model output due to limited browser memory available"
+ callbackUI(errTxt, -1, errTxt)
+
+ statData["Inference_t"] = Inference_t
+ statData["Postprocess_t"] = Infinity
+ statData["Status"] = "Fail"
+ statData["Error_Type"] = error.message
+ statData["Extra_Err_Info"] = "Pre-model failed while generating output"
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData)
+ }
+
+ return 0
+ }
+
+ let Postprocess_t = ((performance.now() - startTime)/1000).toFixed(4)
+
+
+ console.log("Pre-model processing the whole brain volume in tfjs tooks for multi-class output mask : ",
+ ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds")
+
+
+ //-- Timing data to collect
+ statData["Inference_t"] = Inference_t
+ statData["Postprocess_t"] = Postprocess_t
+ statData["Status"] = "OK"
+
+ if(opts.telemetryFlag) {
+ await submitTiming2GoogleSheet(statData)
+ }
+
+
+                  if(slices_3d_mask == null) {
+
+                     const errTxt = "slices_3d_mask failed ..."
+                     console.log(errTxt)
+                     callbackUI(errTxt, -1, errTxt)
+                     return 0
+
+ } else {
+
+                    //-- Phase-2: after removing the skull, locate the brain volume and run inference
+ console.log("--- pre-model done ---")
+ // --mask_3d = slices_3d_mask.greater([0]).asType('bool')
+ // --slices_3d_mask.dispose()
+
+ if(isModelFullVol) {
+
+ if(modelEntry["enableSeqConv"]) {
+ // Mask cropping & seq conv
+ // Non-Atlas model (e.g. GWM) needs sequential convolution layer.
+ // Sequential convolution layer to be used after cropping - slow but reliable on most machines
+ console.log("------ Mask Cropping & Seq Convoluton ------")
+ await inferenceFullVolumeSeqCovLayerPhase2(opts, modelEntry, model, slices_3d, num_of_slices, slice_height, slice_width, slices_3d_mask, modelEntry, callbackUI, statData)
+ // inferenceFullVolumeSeqCovLayerPhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask)
+ } else {
+ // Mask cropping BUT no seq conv
+ console.log("------ Mask Cropping - NO Seq Convoluton ------")
+ //? await
+ outimg = await inferenceFullVolumePhase2(model, slices_3d, num_of_slices, slice_height, slice_width, slices_3d_mask, modelEntry, statData, opts, callbackImg, callbackUI, niftiImage)
+ // inferenceFullVolumePhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask)
+ }
+
+ } else {
+                        // -- In version 3.0.0 this function is not used
+ inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width, slices_3d_mask)
+ //inferenceSubVolumes(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask)
+ }
+
+ }
+
+ }
+ i++
+
+ }, delay)
+
+ } catch(err) {
+ callbackUI(err.message, -1, err.message)
+            console.log(
+                "If the WebGL context is lost, try restoring the context and rerunning inference"
+            )
+
+
+ //document.getElementById("webGl2Status").style.backgroundColor = isWebGL2ContextLost() ? "Red" : "Green"
+
+ //document.getElementById("memoryStatus").style.backgroundColor = tf.memory().unreliable ? "Red" : "Green"
+ }
+ })
+
+ //-- if(...) end
+ } else { // No preModel
+
+       //-- Phase-2: after removing the skull, locate the brain volume and run inference
+ console.log("--- No pre-model is selected ---")
+ console.log("------ Run voxel cropping ------")
+ //-- mask_3d = slices_3d.greater([0]).asType('bool')
+
+ if(isModelFullVol) {
+
+ if(modelEntry["enableSeqConv"]) {
+ // Voxel cropping & seq conv
+ // Non-Atlas model (e.g. GWM) needs sequential convolution layer.
+ // Sequential convolution layer to be used after cropping - slow but reliable on most machines
+ console.log("------ Seq Convoluton ------")
+ await inferenceFullVolumeSeqCovLayerPhase2(opts, modelEntry, model, slices_3d, num_of_slices, slice_height, slice_width, null, callbackUI, statData)
+ } else {
+ // Voxel cropping BUT no seq conv
+                outimg = await inferenceFullVolumePhase2(model, slices_3d, num_of_slices, slice_height, slice_width, null, modelEntry, statData, opts, callbackImg, callbackUI, niftiImage)
+ }
+
+ } else {
+            // -- In version 3.0.0 this function is not used
+ inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width, null)
+ }
+ }
+}
+
+async function enableProductionMode (textureF16Flag = true) {
+ //-- tf.setBackend('cpu')
+ //-- tf.removeBackend('cpu')
+ //-- Calling enableProdMode() method
+ await tf.enableProdMode()
+ //-- Setting debug mode of the environment
+ tf.env().set('DEBUG', false)
+ tf.env().set('WEBGL_FORCE_F16_TEXTURES', textureF16Flag)
+ //-- set this flag so that textures are deleted when tensors are disposed.
+ tf.env().set("WEBGL_DELETE_TEXTURE_THRESHOLD", 0)
+ //-- tf.env().set('WEBGL_PACK', false)
+ //-- Put ready after sets above
+ await tf.ready()
+ //-- Printing output
+ console.log(tf.env().flags)
+ console.log("tf env() features :", tf.env().features)
+ console.log("tf env total features: ", Object.keys(tf.env().features).length)
+ console.log(tf.getBackend())
+}
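+// Note (hedged): WEBGL_FORCE_F16_TEXTURES stores tensor data in half-precision
+// textures, roughly halving GPU memory at some cost in numeric precision, while
+// WEBGL_DELETE_TEXTURE_THRESHOLD = 0 frees textures as soon as their tensors are
+// disposed instead of caching them for reuse.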
+
+async function isModelChnlLast(modelObj) {
+ for(let layerIdx = 0; layerIdx < modelObj.layers.length; layerIdx ++ ) {
+ if(modelObj.layersByDepth[layerIdx][0]["dataFormat"]) {
+         return modelObj.layersByDepth[layerIdx][0]["dataFormat"] === "channelsLast"
+ }
+ }
+}
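+// Example (illustrative): a channels-last model reports batchInputShape
+// [null, 256, 256, 256, 1] ([batch, D, H, W, C]), while a channels-first model
+// reports [null, 1, 256, 256, 256] ([batch, C, D, H, W]); this helper checks the
+// dataFormat attribute of the first layer that declares one.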
+
+async function getSliceData1D (sliceIdx, niftiHeader, niftiImage) {
+ // Get nifti dimensions
+ let cols = niftiHeader.dims[1]; // Slice width
+ let rows = niftiHeader.dims[2]; // Slice height
+
+ let typedData
+
+ if (niftiHeader.datatypeCode === 2) { //enum from nvimage/utils DT_UINT8 = 2
+ typedData = new Uint8Array(niftiImage)
+ } else if (niftiHeader.datatypeCode === 4) { //DT_INT16 = 4
+ typedData = new Int16Array(niftiImage)
+ } else if (niftiHeader.datatypeCode === 8) { // DT_INT32 = 8
+ typedData = new Int32Array(niftiImage)
+ } else if (niftiHeader.datatypeCode === 16) { // DT_FLOAT32 = 16
+ typedData = new Float32Array(niftiImage)
+ } else if (niftiHeader.datatypeCode === 64) { //DT_FLOAT64 = 64
+ typedData = new Float64Array(niftiImage)
+ } else if (niftiHeader.datatypeCode === 256) { //DT_INT8 = 256
+ typedData = new Int8Array(niftiImage)
+ } else if (niftiHeader.datatypeCode === 512) { //DT_UINT16 = 512
+ typedData = new Uint16Array(niftiImage)
+ } else if (niftiHeader.datatypeCode === 768) { //DT_UINT32 = 768
+ typedData = new Uint32Array(niftiImage)
+  } else {
+    console.log("getSliceData1D: unsupported nifti datatypeCode " + niftiHeader.datatypeCode)
+    return
+ }
+ // offset to specified slice
+ let sliceSize = cols * rows
+ let sliceOffset = sliceSize * sliceIdx
+ let data1DimArr = []
+ // Draw pixels
+ for (let row = 0; row < rows; row++) {
+ let rowOffset = row * cols
+ for (let col = 0; col < cols; col++) {
+ let offset = sliceOffset + rowOffset + col
+ let value = typedData[offset]
+      // Create 1Dim Array of pixel value, this 1 dim represents one channel
+      // NB: the 0xFF mask keeps only the low byte, so intensities above 255 wrap around;
+      // consider removing it for 16/32-bit data
+      data1DimArr[(rowOffset + col)] = value & 0xFF
+ }
+ }
+
+ return data1DimArr
+}
+
+async function getAllSlices2D (allSlices, slice_height, slice_width) {
+ let allSlices_2D = []
+ for(let sliceIdx = 0; sliceIdx < allSlices.length; sliceIdx ++){
+ allSlices_2D.push(tf.tensor(allSlices[sliceIdx], [slice_height, slice_width]))
+ }
+ return allSlices_2D
+}
+
+async function getSlices3D (allSlices_2D) {
+ return tf.stack(allSlices_2D)
+}
+
+async function getAllSlicesData1D (num_of_slices, niftiHeader, niftiImage) {
+ let allSlices = []
+ for(let sliceIdx = 0; sliceIdx < num_of_slices; sliceIdx++) {
+ let slice = await getSliceData1D(sliceIdx, niftiHeader, niftiImage)
+ allSlices.push(slice)
+ }
+ return allSlices
+}
+
+async function runInference(opts, modelEntry, niftiHeader, niftiImage, callbackImg, callbackUI) {
+ let startTime = performance.now()
+ const batchSize = opts.batchSize
+ const numOfChan = opts.numOfChan
+ if (isNaN(batchSize) || batchSize != 1) {
+ errTxt = "The batch Size for input shape must be 1"
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ if (isNaN(numOfChan) || (numOfChan != 1)) {
+ errTxt = "The number of channels for input shape must be 1"
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ tf.engine().startScope()
+ console.log("Batch size: ", batchSize)
+ console.log("Num of Channels: ", numOfChan)
+ let model = await load_model(modelEntry["path"])
+ await enableProductionMode(true)
+ let modelObject = model
+ let batchInputShape = []
+ // free global variable of 16777216 voxel
+ // allOutputSlices3DCC1DimArray = []
+ // outputSceneRendered = false
+ // read input shape from model.json object
+ batchInputShape = modelObject.layers[0].batchInputShape
+ console.log(" Model batch input shape : ", batchInputShape)
+ //-- Verify input shape
+ if(batchInputShape.length != 5) {
+ const errTxt = "The model input shape must be 5D"
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ let batch_D, batch_H, batch_W
+ let input_shape
+ let slice_width = niftiHeader.dims[1]
+ let slice_height = niftiHeader.dims[2]
+ let num_of_slices = niftiHeader.dims[3]
+ let isChannelLast = await isModelChnlLast(modelObject)
+ if(isChannelLast) {
+ console.log("Model Channel Last")
+ if (isNaN(batchInputShape[4]) || (batchInputShape[4] !=1)) {
+ const errTxt = "The number of channels for input shape must be 1"
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ batch_D = batchInputShape[1]
+ batch_H = batchInputShape[2]
+ batch_W = batchInputShape[3]
+
+ input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan]
+
+ } else {
+ console.log("Model Channel First")
+ if (isNaN(batchInputShape[1]) || (batchInputShape[1] !=1)) {
+ const errTxt = "The number of channels for input shape must be 1"
+ callbackUI(errTxt, -1, errTxt)
+ return 0
+ }
+ batch_D = batchInputShape[2]
+ batch_H = batchInputShape[3]
+ batch_W = batchInputShape[4]
+ input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W]
+ }
+ // //-- Atlas version check
+ // if ( (batch_D > 30) && (batch_H == 256) && (batch_W == 256) ) {
+ // const errTxt = "The subvolume dimension in z-axis shouldn't exceed 30 number of slices for browser limitation"
+ // callbackUI(errTxt, -1, errTxt)
+ // return 0
+ // }
+ //--Check whether the model will make inference at once as FullVolumeModel
+ let isModelFullVol
+ if ( (batch_D == 256) && (batch_H == 256) && (batch_W == 256) ) {
+ isModelFullVol = true
+ } else {
+ isModelFullVol = false
+ }
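+  // Example (illustrative): a model whose batchInputShape is [null, 256, 256, 256, 1]
+  // is treated as full-volume; smaller shapes, e.g. [null, 64, 64, 64, 1], would fall
+  // back to the deprecated subvolume path below.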
+ let modelNumLayers = modelObject.layers.length
+ // Model output number of segmentations
+ let outLabels = modelObject.layers[ modelNumLayers - 1 ].bias.shape[0]
+ let allSlices = await getAllSlicesData1D(num_of_slices, niftiHeader, niftiImage)
+ let allSlices_2D = await getAllSlices2D(allSlices, slice_height, slice_width)
+ // free array from mem
+ allSlices = null
+ // Get slices_3d tensor
+ let slices_3d = await getSlices3D(allSlices_2D)
+ // free tensor from mem
+ tf.dispose(allSlices_2D)
+  let statData = {} // collected as key/value pairs, so use an object rather than an array
+ if (opts.telemetryFlag) {
+ let Preprocess_t = ((performance.now() - startTime)/1000).toFixed(4)
+ //-- Timing data to collect
+ let today = new Date()
+ if(isModelFullVol) {
+ statData["Brainchop_Ver"] = "FullVolume"
+ } else {
+ statData["Brainchop_Ver"] = "SubVolumes"
+
+ }
+
+
+ /*let geoData = getBrowserLocationInfo()
+ if(geoData) {
+ statData["Country"] = geoData["Country"]
+ statData["State"] = geoData["Region"]
+ statData["City"] = geoData["City"]
+ } else {
+ statData["Country"] = ""
+ statData["State"] = ""
+ statData["City"] = ""
+ }*/
+
+
+
+ statData["Date"] = parseInt(today.getMonth() + 1) + "/" + today.getDate() + "/" + today.getFullYear()
+ statData["Time"] = await checkZero(today.getHours()) + ":" + checkZero(today.getMinutes()) + ":" + checkZero(today.getSeconds())
+ //? statData["File_Name"] = refFileName == "" ? opts.uiSampleName: refFileName
+ statData["Input_Shape"] = JSON.stringify(batchInputShape)
+ statData["Output_Shape"] = JSON.stringify(modelObject.output.shape)
+ statData["Channel_Last"] = isChannelLast
+ statData["Model_Param"] = await getModelNumParameters(modelObject)
+ statData["Model_Layers"] = await getModelNumLayers(modelObject)
+
+ statData["Preprocess_t"] = Preprocess_t
+ statData["Model"] = modelEntry.modelName
+ statData["Browser"] = await detectBrowser()
+ statData["Browser_Ver"] = await detectBrowserVersion()
+ statData["OS"] = await detectOperatingSys()
+ //? NiiVue requires WebGL2, all contemporary browsers support it statData["WebGL1"] = checkWebGl1()
+ statData["WebGL2"] = await checkWebGl2(callbackUI)
+ statData["GPU_Vendor"] = await detectGPUVendor()
+ statData["GPU_Card"] = await detectGPUCardType()
+ statData["GPU_Vendor_Full"] = await detectGPUVendor_v0()
+ statData["GPU_Card_Full"] = await detectGPUCardType_v0()
+ statData["CPU_Cores"] = await getCPUNumCores()
+ statData["TF_Backend"] = tf.getBackend()
+
+ statData["Which_Brainchop"] = "latest"
+ //? statData["Seq_Conv"] = inferenceModelsList[$$("selectModel").getValue() - 1]["enableSeqConv"]
+ statData["Seq_Conv"] = modelEntry.enableSeqConv
+
+ //-- Init
+ statData["Actual_Labels"] = Infinity
+ statData["Expect_Labels"] = Infinity
+ statData["NumLabels_Match"] = null
+ statData["Inference_t"] = Infinity
+ statData["Merge_t"] = Infinity
+ statData["Postprocess_t"] = Infinity
+ statData["Status"] = null
+ statData["Error_Type"] = null
+ statData["Extra_Err_Info"] = null
+ statData["Extra_Info"] = null
+
+
+ if(isChrome()) {
+ statData["Heap_Size_MB"] = window.performance.memory["totalJSHeapSize"]/(1024*1024).toFixed(2)
+ statData["Used_Heap_MB"] = window.performance.memory["usedJSHeapSize"]/(1024*1024).toFixed(2)
+ statData["Heap_Limit_MB"] = window.performance.memory["jsHeapSizeLimit"]/(1024*1024).toFixed(2)
+ }
+    let gl = checkWebGl2() ? document.createElement('canvas').getContext('webgl2') : null
+
+    if(gl) {
+      console.log("MAX_TEXTURE_SIZE :", gl.getParameter(gl.MAX_TEXTURE_SIZE))
+      console.log("MAX_RENDERBUFFER_SIZE :", gl.getParameter(gl.MAX_RENDERBUFFER_SIZE))
+
+      //-- check to see if machine has two graphics cards: one built in (e.g. Intel Iris Pro), the other discrete (e.g. NVIDIA GeForce GT 750M)
+      //-- check which one the browser uses; if debugInfo is null the installed GPU is not used
+      let debugInfo = gl.getExtension('WEBGL_debug_renderer_info')
+      if (debugInfo) {
+        console.log("VENDOR WEBGL:", gl.getParameter(debugInfo.UNMASKED_VENDOR_WEBGL) )
+      }
+      statData["Texture_Size"] = gl.getParameter(gl.MAX_TEXTURE_SIZE) //--returns the maximum dimension the GPU can address
+    } else {
+      statData["Texture_Size"] = null
+    }
+ } //if telemetryFlag
+ let transpose = modelEntry.enableTranspose
+ let enableCrop = modelEntry.enableCrop
+ if (isModelFullVol) {
+ if( enableCrop) {
+ // FullVolume with Crop option before inference ..
+ // pre-model to mask the volume, can also be null and the cropping will be on the MRI.
+ await inferenceFullVolumePhase1(model, slices_3d, num_of_slices, slice_height, slice_width, isModelFullVol, modelEntry, statData, opts, callbackImg, callbackUI, niftiImage)
+ } else {
+      // Transpose MRI data to match pytorch/keras input/output conventions
+ console.log("Cropping Disabled")
+
+ if(transpose) {
+ slices_3d = slices_3d.transpose()
+ console.log("Input transposed")
+ } else {
+ console.log("Transpose NOT Enabled")
+ }
+
+      let enableSeqConv = modelEntry.enableSeqConv
+
+ if(enableSeqConv) {
+ console.log("Seq Convoluton Enabled")
+ await inferenceFullVolumeSeqCovLayer(model, slices_3d, input_shape, isChannelLast, num_of_slices, slice_height, slice_width)
+ } else {
+ console.log("Seq Convoluton Disabled")
+ await inferenceFullVolume(model, slices_3d, input_shape, isChannelLast, num_of_slices, slice_height, slice_width)
+ }
+ }
+ }
+}
+
+function chop(modelEntry, niftiHeader, niftiImage, callbackImg, callbackUI) { // entry point for node.js, which does not have a GUI
+   runInference(gOpts, modelEntry, niftiHeader, niftiImage, callbackImg, callbackUI)
+}
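+// Usage sketch (hedged; the model index and loaded NIfTI data are illustrative):
+//   const modelEntry = inferenceModelsList[0]
+//   chop(modelEntry, niftiHeader, niftiImage,
+//     (outimg, opts, entry) => { /* render the returned label array */ },
+//     (message, progress) => console.log(message, progress))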
diff --git a/bwlabels.js b/bwlabels.js
new file mode 100644
index 0000000..1cb498c
--- /dev/null
+++ b/bwlabels.js
@@ -0,0 +1,276 @@
+export class bwlabeler {
+ // port of https://github.com/rordenlab/niimath/blob/master/src/bwlabel.c
+ // return voxel address given row A, column B, and slice C
+ idx(A, B, C, DIM) {
+ return C * DIM[0] * DIM[1] + B * DIM[0] + A;
+ } // idx()
+ // determine if voxels below candidate voxel have already been assigned a label
+ check_previous_slice(bw, il, r, c, sl, dim, conn, tt) {
+ // const nabo: number[] = [];
+ const nabo = new Uint32Array(27);
+ let nr_set = 0;
+ if (!sl) {
+ return 0;
+ }
+ const val = bw[this.idx(r, c, sl, dim)];
+ if (conn >= 6) {
+ const idx = this.idx(r, c, sl - 1, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ if (conn >= 18) {
+ if (r) {
+ const idx = this.idx(r - 1, c, sl - 1, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ if (c) {
+ const idx = this.idx(r, c - 1, sl - 1, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ if (r < dim[0] - 1) {
+ const idx = this.idx(r + 1, c, sl - 1, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ if (c < dim[1] - 1) {
+ const idx = this.idx(r, c + 1, sl - 1, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ }
+ if (conn === 26) {
+ if (r && c) {
+ const idx = this.idx(r - 1, c - 1, sl - 1, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ if (r < dim[0] - 1 && c) {
+ const idx = this.idx(r + 1, c - 1, sl - 1, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ if (r && c < dim[1] - 1) {
+ const idx = this.idx(r - 1, c + 1, sl - 1, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ if (r < dim[0] - 1 && c < dim[1] - 1) {
+ const idx = this.idx(r + 1, c + 1, sl - 1, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ }
+ if (nr_set) {
+ this.fill_tratab(tt, nabo, nr_set);
+ return nabo[0];
+ }
+ else {
+ return 0;
+ }
+ } // check_previous_slice()
+ // provisionally label all voxels in volume
+ do_initial_labelling(bw, dim, conn) {
+ let label = 1;
+ const kGrowArrayBy = 8192;
+ let ttn = kGrowArrayBy;
+ let tt = new Uint32Array(ttn).fill(0);
+ const il = new Uint32Array(dim[0] * dim[1] * dim[2]).fill(0);
+ const nabo = new Uint32Array(27);
+ for (let sl = 0; sl < dim[2]; sl++) {
+ for (let c = 0; c < dim[1]; c++) {
+ for (let r = 0; r < dim[0]; r++) {
+ let nr_set = 0;
+ const val = bw[this.idx(r, c, sl, dim)];
+ if (val === 0) {
+ continue;
+ }
+ nabo[0] = this.check_previous_slice(bw, il, r, c, sl, dim, conn, tt);
+ if (nabo[0]) {
+ nr_set += 1;
+ }
+ if (conn >= 6) {
+ if (r) {
+ const idx = this.idx(r - 1, c, sl, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ if (c) {
+ const idx = this.idx(r, c - 1, sl, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ }
+ if (conn >= 18) {
+ if (c && r) {
+ const idx = this.idx(r - 1, c - 1, sl, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ if (c && r < dim[0] - 1) {
+ const idx = this.idx(r + 1, c - 1, sl, dim);
+ if (val === bw[idx]) {
+ nabo[nr_set++] = il[idx];
+ }
+ }
+ }
+ if (nr_set) {
+ il[this.idx(r, c, sl, dim)] = nabo[0];
+ this.fill_tratab(tt, nabo, nr_set);
+ }
+ else {
+ il[this.idx(r, c, sl, dim)] = label;
+ if (label >= ttn) {
+ ttn += kGrowArrayBy;
+ const ext = new Uint32Array(ttn);
+ ext.set(tt);
+ tt = ext;
+ }
+ tt[label - 1] = label;
+ label++;
+ }
+ }
+ }
+ }
+ for (let i = 0; i < label - 1; i++) {
+ let j = i;
+ while (tt[j] !== j + 1) {
+ j = tt[j] - 1;
+ }
+ tt[i] = j + 1;
+ }
+ return [label - 1, tt, il];
+ } // do_initial_labelling()
+ // translation table unifies a region that has been assigned multiple classes
+ fill_tratab(tt, nabo, nr_set) {
+ let cntr = 0;
+ const tn = new Uint32Array(nr_set + 5).fill(0);
+ const INT_MAX = 2147483647;
+ let ltn = INT_MAX;
+ for (let i = 0; i < nr_set; i++) {
+ let j = nabo[i];
+ cntr = 0;
+ while (tt[j - 1] !== j) {
+ j = tt[j - 1];
+ cntr++;
+ if (cntr > 100) {
+ console.log('fill_tratab: chain longer than 100 links, possible cycle in translation table');
+ break;
+ }
+ }
+ tn[i] = j;
+ ltn = Math.min(ltn, j);
+ }
+ for (let i = 0; i < nr_set; i++) {
+ tt[tn[i] - 1] = ltn;
+ }
+ } // fill_tratab()
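+ // Illustrative example (not in the original source): if a voxel touches
+ // provisional labels {2, 5} whose current roots are 2 and 4, ltn becomes 2
+ // and both tt[1] and tt[3] are set to 2, merging the two chains into one
+ // equivalence class.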
+ // remove any residual gaps so label numbers are dense rather than sparse
+ translate_labels(il, dim, tt, ttn) {
+ const nvox = dim[0] * dim[1] * dim[2];
+ let ml = 0;
+ const l = new Uint32Array(nvox).fill(0);
+ for (let i = 0; i < ttn; i++) {
+ ml = Math.max(ml, tt[i]);
+ }
+ const fl = new Uint32Array(ml).fill(0);
+ let cl = 0;
+ for (let i = 0; i < nvox; i++) {
+ if (il[i]) {
+ if (!fl[tt[il[i] - 1] - 1]) {
+ cl += 1;
+ fl[tt[il[i] - 1] - 1] = cl;
+ }
+ l[i] = fl[tt[il[i] - 1] - 1];
+ }
+ }
+ return [cl, l];
+ } // translate_labels()
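+ // Illustrative example (not in the original source): if the surviving roots
+ // are {2, 7, 9}, they are renumbered 1, 2, 3 in order of first appearance,
+ // so the output map is dense with cl = 3 distinct labels.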
+ // retain only the largest cluster for each region
+ largest_original_cluster_labels(bw, cl, ls) {
+ const nvox = bw.length;
+ const ls2bw = new Uint32Array(cl + 1).fill(0);
+ const sumls = new Uint32Array(cl + 1).fill(0);
+ for (let i = 0; i < nvox; i++) {
+ const bwVal = bw[i];
+ const lsVal = ls[i];
+ ls2bw[lsVal] = bwVal;
+ sumls[lsVal]++;
+ }
+ let mxbw = 0;
+ for (let i = 0; i < cl + 1; i++) {
+ const bwVal = ls2bw[i];
+ mxbw = Math.max(mxbw, bwVal);
+ // see if this is largest cluster of this bw-value
+ for (let j = 0; j < cl + 1; j++) {
+ if (j === i) {
+ continue;
+ }
+ if (bwVal !== ls2bw[j]) {
+ continue;
+ }
+ if (sumls[i] < sumls[j]) {
+ ls2bw[i] = 0;
+ }
+ else if (sumls[i] === sumls[j] && i < j) {
+ ls2bw[i] = 0;
+ } // ties: arbitrary winner
+ }
+ }
+ const vxs = new Uint32Array(nvox).fill(0);
+ for (let i = 0; i < nvox; i++) {
+ vxs[i] = ls2bw[ls[i]];
+ }
+ return [mxbw, vxs];
+ }
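+ // Illustrative note (not in the original source): ls2bw maps each cluster
+ // label back to its original intensity and sumls counts its voxels; clusters
+ // that share an intensity with a larger cluster are zeroed, so only one
+ // winning cluster per intensity survives in vxs.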
+ // given a 3D image, return a clustered label map
+ // for an explanation and optimized C code see
+ // https://github.com/seung-lab/connected-components-3d
+ bwlabel(img, dim, conn = 26, binarize = false, onlyLargestClusterPerClass = false) {
+ const start = Date.now();
+ const nvox = dim[0] * dim[1] * dim[2];
+ const bw = new Uint32Array(nvox).fill(0);
+ if (![6, 18, 26].includes(conn)) {
+ console.log('bwlabel: conn must be 6, 18 or 26.');
+ return [0, bw];
+ }
+ if (dim[0] < 2 || dim[1] < 2 || dim[2] < 1) {
+ console.log('bwlabel: img must be 2 or 3-dimensional');
+ return [0, bw];
+ }
+ if (binarize) {
+ for (let i = 0; i < nvox; i++) {
+ if (img[i] !== 0.0) {
+ bw[i] = 1;
+ }
+ }
+ }
+ else {
+ bw.set(img);
+ }
+ let [ttn, tt, il] = this.do_initial_labelling(bw, dim, conn);
+ if (tt === undefined) {
+ tt = new Uint32Array(0);
+ }
+ const [cl, ls] = this.translate_labels(il, dim, tt, ttn);
+ console.log(conn + ' neighbor clustering into ' + cl + ' regions in ' + (Date.now() - start) + 'ms');
+ if (onlyLargestClusterPerClass) {
+ const [nbw, bwMx] = this.largest_original_cluster_labels(bw, cl, ls);
+ return [nbw, bwMx];
+ }
+ return [cl, ls];
+ } // bwlabel()
+}
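+// Usage sketch (illustrative, not part of the original file; assumes the
+// enclosing class is the exported bwlabeler):
+//   const labeler = new bwlabeler()
+//   const dims = [256, 256, 256]
+//   // binarize the input, then keep only the largest 26-connected cluster per class
+//   const [nLabels, labels] = labeler.bwlabel(mask, dims, 26, true, true)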
diff --git a/index.html b/index.html
new file mode 100644
index 0000000..1839105
--- /dev/null
+++ b/index.html
@@ -0,0 +1,45 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8" />
+<meta name="viewport" content="width=device-width, initial-scale=1.0" />
+<link rel="stylesheet" href="./niivue.css" />
+<title>Niivue brain chop</title>
+</head>
+<body>
+<!-- markup below is a reconstruction: the original file body was lost in extraction; element IDs are inferred from main.js -->
+<header>
+<select id="modelSelect"></select>
+<button id="aboutBtn">About</button>
+<button id="saveBtn">Save</button>
+<label class="slidecontainer">Opacity
+<input type="range" min="0" max="255" value="255" id="opacitySlider" />
+</label>
+<progress id="modelProgress" value="0" max="100"></progress>
+</header>
+<main>
+<canvas id="gl1"></canvas>
+</main>
+<footer>
+<span id="location"></span>
+<span id="memstatus"></span>
+</footer>
+<script type="module" src="./main.js"></script>
+</body>
+</html>
\ No newline at end of file
diff --git a/main.js b/main.js
new file mode 100644
index 0000000..2bfa285
--- /dev/null
+++ b/main.js
@@ -0,0 +1,90 @@
+import { Niivue } from "@niivue/niivue"
+import { chop, inferenceModelsList } from "./brainchop.js"
+
+async function main() {
+ let defaults = {
+ backColor: [0.4, 0.4, 0.4, 1],
+ show3Dcrosshair: true,
+ onLocationChange: handleLocationChange,
+ }
+ let nv1 = new Niivue(defaults)
+ nv1.attachToCanvas(gl1)
+ nv1.opts.dragMode = nv1.dragModes.pan
+ nv1.opts.multiplanarForceRender = true
+ nv1.opts.yoke3Dto2DZoom = true
+ await nv1.loadVolumes([{ url: "./t1_crop.nii.gz" }])
+
+ aboutBtn.onclick = function () {
+ window.alert("BrainChop models https://github.com/neuroneural/brainchop")
+ }
+ opacitySlider.oninput = function () {
+ nv1.setOpacity(1, opacitySlider.value / 255)
+ }
+
+ async function ensureConformed() {
+ let nii = nv1.volumes[0]
+ let isConformed = ((nii.dims[1] === 256) && (nii.dims[2] === 256) && (nii.dims[3] === 256))
+ if ((nii.permRAS[0] !== -1) || (nii.permRAS[1] !== 3) || (nii.permRAS[2] !== -2))
+ isConformed = false
+ if (isConformed)
+ return
+ let nii2 = await nv1.conform(nii, false)
+ nv1.removeVolume(nv1.volumes[0])
+ nv1.addVolume(nii2)
+ }
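+ // Note (illustrative, not in the original source): nv1.conform() resamples
+ // the volume onto the canonical 256^3 RAS grid that the brainchop models
+ // expect; the dims and permRAS checks above skip the resample when the
+ // loaded volume is already conformed.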
+
+ modelSelect.onchange = async function () {
+ await ensureConformed()
+ let model = inferenceModelsList[this.selectedIndex]
+ chop(model, nv1.volumes[0].hdr, nv1.volumes[0].img, callbackImg, callbackUI)
+ }
+
+ saveBtn.onclick = function () {
+ nv1.volumes[1].saveToDisk("Custom.nii")
+ }
+
+ async function callbackImg(img, opts, modelEntry) {
+
+ while (nv1.volumes.length > 1) {
+ nv1.removeVolume(nv1.volumes[1])
+ }
+
+ let overlayVolume = await nv1.volumes[0].clone()
+ overlayVolume.zeroImage()
+ overlayVolume.hdr.scl_inter = 0
+ overlayVolume.hdr.scl_slope = 1
+ overlayVolume.img = new Uint8Array(img)
+ let colormap = opts.atlasSelectedColorTable.toLowerCase()
+ const cmaps = nv1.colormaps()
+ if (!cmaps.includes(colormap))
+ colormap = 'actc'
+ overlayVolume.colormap = colormap
+ overlayVolume.opacity = opacitySlider.value / 255
+ nv1.addVolume(overlayVolume)
+ }
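+ // Note (illustrative, not in the original source): the segmentation returned
+ // by the model replaces any previous overlay; the requested colormap is used
+ // when NiiVue recognizes it, otherwise the overlay falls back to 'actc'.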
+ function callbackUI(message = "", progressFrac = -1, modalMessage = "") {
+ console.log(message)
+ document.getElementById("location").innerHTML = message
+ if (isNaN(progressFrac)) { // NaN signals a memory issue
+ memstatus.style.color = "red"
+ memstatus.innerHTML = "Memory Issue"
+ } else if (progressFrac >= 0) {
+ modelProgress.value = progressFrac * modelProgress.max
+ }
+ if (modalMessage !== "") {
+ window.alert(modalMessage)
+ }
+ }
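+ // Note (illustrative, not in the original source): progressFrac drives the
+ // UI state: NaN flags a memory problem, values in [0, 1] update the progress
+ // bar, and negative values leave the bar unchanged.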
+ function handleLocationChange(data) {
+ document.getElementById("location").innerHTML = " " + data.string
+ }
+ for (let i = 0; i < inferenceModelsList.length; i++) {
+ const option = document.createElement("option")
+ option.text = inferenceModelsList[i].modelName
+ option.value = inferenceModelsList[i].id.toString()
+ modelSelect.appendChild(option)
+ }
+ modelSelect.selectedIndex = -1
+}
+
+main()
\ No newline at end of file
diff --git a/niivue.css b/niivue.css
new file mode 100644
index 0000000..5fa840c
--- /dev/null
+++ b/niivue.css
@@ -0,0 +1,109 @@
+html {
+ height: auto;
+ min-height: 100%;
+ margin: 0;
+}
+body {
+ display: flex;
+ flex-direction: column;
+ margin: 0;
+ min-height: 100%;
+ width: 100%;
+ position: absolute;
+ font-family: system-ui, Arial, Helvetica, sans-serif;
+ user-select: none; /* Standard syntax */
+ color: white;
+ background: #303030;
+}
+header {
+ margin: 10px;
+}
+main {
+ flex: 1;
+ background: #000000;
+ position: relative;
+}
+footer {
+ margin: 10px;
+}
+canvas {
+ position: absolute;
+ cursor: crosshair;
+}
+canvas:focus {
+ outline: 0px;
+}
+div {
+ display: table-row;
+ background-color: blue;
+}
+.dropdown {
+ float: left;
+ overflow: hidden;
+}
+.dropdown .dropbtn {
+ font-size: 16px;
+ border: none;
+ outline: none;
+ color: white;
+ padding: 12px 12px;
+ background-color: #303030;
+ font-family: inherit;
+ margin: 0;
+}
+.dropdown:hover .dropbtn {
+ background-color: #9a9;
+}
+.dropdown-content {
+ display: none;
+ position: absolute;
+ background-color: #303030;
+ min-width: 160px;
+ border-radius: 5px;
+ box-shadow: 0px 8px 16px 0px rgba(0, 0, 0, 0.2);
+ z-index: 1;
+}
+.dropdown-content a {
+ float: none;
+ color: white;
+ padding: 12px 16px;
+ text-decoration: none;
+ display: block;
+ text-align: left;
+ line-height: 6px;
+}
+.dropdown-content a:hover {
+ background-color: #aba;
+}
+.dropdown:hover .dropdown-content {
+ display: block;
+}
+.dropdown-item-checked::before {
+ position: absolute;
+ left: 0.2rem;
+ content: "\2022"; /* or '✓' */
+ font-weight: 600;
+}
+.divider {
+ border-top: 1px solid grey;
+}
+.vertical-divider {
+ border-left: 1px solid grey;
+ height: 40px;
+}
+.help-text {
+ margin: auto;
+ max-width: 150px;
+ padding: 0 10px;
+}
+.slidecontainer {
+ padding: 10px 10px;
+ white-space: normal;
+ word-break: break-word;
+ display: flex;
+ align-items: center;
+ flex: 0 0 auto;
+}
+
+div.footer {
+ width: 100%;
+ display: block;
+ background: #303030;
+}
+table.footer {
+ width: 100%;
+ height: 100%;
+ table-layout: fixed;
+}
\ No newline at end of file
diff --git a/package-lock.json b/package-lock.json
new file mode 100644
index 0000000..846d3c1
--- /dev/null
+++ b/package-lock.json
@@ -0,0 +1,1428 @@
+{
+ "name": "niivue_brain_chop",
+ "version": "0.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "niivue_brain_chop",
+ "version": "0.0.0",
+ "dependencies": {
+ "@niivue/niivue": "^0.42.0",
+ "@tensorflow/tfjs": "3.9.0",
+ "gl-matrix": "^3.4.3"
+ },
+ "devDependencies": {
+ "vite": "^5.2.0"
+ }
+ },
+ "node_modules/@esbuild/aix-ppc64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.20.2.tgz",
+ "integrity": "sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "aix"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.20.2.tgz",
+ "integrity": "sha512-t98Ra6pw2VaDhqNWO2Oph2LXbz/EJcnLmKLGBJwEwXX/JAN83Fym1rU8l0JUWK6HkIbWONCSSatf4sf2NBRx/w==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-arm64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.20.2.tgz",
+ "integrity": "sha512-mRzjLacRtl/tWU0SvD8lUEwb61yP9cqQo6noDZP/O8VkwafSYwZ4yWy24kan8jE/IMERpYncRt2dw438LP3Xmg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/android-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.20.2.tgz",
+ "integrity": "sha512-btzExgV+/lMGDDa194CcUQm53ncxzeBrWJcncOBxuC6ndBkKxnHdFJn86mCIgTELsooUmwUm9FkhSp5HYu00Rg==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-arm64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.20.2.tgz",
+ "integrity": "sha512-4J6IRT+10J3aJH3l1yzEg9y3wkTDgDk7TSDFX+wKFiWjqWp/iCfLIYzGyasx9l0SAFPT1HwSCR+0w/h1ES/MjA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/darwin-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.20.2.tgz",
+ "integrity": "sha512-tBcXp9KNphnNH0dfhv8KYkZhjc+H3XBkF5DKtswJblV7KlT9EI2+jeA8DgBjp908WEuYll6pF+UStUCfEpdysA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-arm64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.20.2.tgz",
+ "integrity": "sha512-d3qI41G4SuLiCGCFGUrKsSeTXyWG6yem1KcGZVS+3FYlYhtNoNgYrWcvkOoaqMhwXSMrZRl69ArHsGJ9mYdbbw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/freebsd-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.20.2.tgz",
+ "integrity": "sha512-d+DipyvHRuqEeM5zDivKV1KuXn9WeRX6vqSqIDgwIfPQtwMP4jaDsQsDncjTDDsExT4lR/91OLjRo8bmC1e+Cw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "freebsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.20.2.tgz",
+ "integrity": "sha512-VhLPeR8HTMPccbuWWcEUD1Az68TqaTYyj6nfE4QByZIQEQVWBB8vup8PpR7y1QHL3CpcF6xd5WVBU/+SBEvGTg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-arm64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.20.2.tgz",
+ "integrity": "sha512-9pb6rBjGvTFNira2FLIWqDk/uaf42sSyLE8j1rnUpuzsODBq7FvpwHYZxQ/It/8b+QOS1RYfqgGFNLRI+qlq2A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ia32": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.20.2.tgz",
+ "integrity": "sha512-o10utieEkNPFDZFQm9CoP7Tvb33UutoJqg3qKf1PWVeeJhJw0Q347PxMvBgVVFgouYLGIhFYG0UGdBumROyiig==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-loong64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.20.2.tgz",
+ "integrity": "sha512-PR7sp6R/UC4CFVomVINKJ80pMFlfDfMQMYynX7t1tNTeivQ6XdX5r2XovMmha/VjR1YN/HgHWsVcTRIMkymrgQ==",
+ "cpu": [
+ "loong64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-mips64el": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.20.2.tgz",
+ "integrity": "sha512-4BlTqeutE/KnOiTG5Y6Sb/Hw6hsBOZapOVF6njAESHInhlQAghVVZL1ZpIctBOoTFbQyGW+LsVYZ8lSSB3wkjA==",
+ "cpu": [
+ "mips64el"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-ppc64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.20.2.tgz",
+ "integrity": "sha512-rD3KsaDprDcfajSKdn25ooz5J5/fWBylaaXkuotBDGnMnDP1Uv5DLAN/45qfnf3JDYyJv/ytGHQaziHUdyzaAg==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-riscv64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.20.2.tgz",
+ "integrity": "sha512-snwmBKacKmwTMmhLlz/3aH1Q9T8v45bKYGE3j26TsaOVtjIag4wLfWSiZykXzXuE1kbCE+zJRmwp+ZbIHinnVg==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-s390x": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.20.2.tgz",
+ "integrity": "sha512-wcWISOobRWNm3cezm5HOZcYz1sKoHLd8VL1dl309DiixxVFoFe/o8HnwuIwn6sXre88Nwj+VwZUvJf4AFxkyrQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/linux-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.20.2.tgz",
+ "integrity": "sha512-1MdwI6OOTsfQfek8sLwgyjOXAu+wKhLEoaOLTjbijk6E2WONYpH9ZU2mNtR+lZ2B4uwr+usqGuVfFT9tMtGvGw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/netbsd-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.20.2.tgz",
+ "integrity": "sha512-K8/DhBxcVQkzYc43yJXDSyjlFeHQJBiowJ0uVL6Tor3jGQfSGHNNJcWxNbOI8v5k82prYqzPuwkzHt3J1T1iZQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "netbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/openbsd-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.20.2.tgz",
+ "integrity": "sha512-eMpKlV0SThJmmJgiVyN9jTPJ2VBPquf6Kt/nAoo6DgHAoN57K15ZghiHaMvqjCye/uU4X5u3YSMgVBI1h3vKrQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "openbsd"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/sunos-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.20.2.tgz",
+ "integrity": "sha512-2UyFtRC6cXLyejf/YEld4Hajo7UHILetzE1vsRcGL3earZEW77JxrFjH4Ez2qaTiEfMgAXxfAZCm1fvM/G/o8w==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "sunos"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-arm64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.20.2.tgz",
+ "integrity": "sha512-GRibxoawM9ZCnDxnP3usoUDO9vUkpAxIIZ6GQI+IlVmr5kP3zUq+l17xELTHMWTWzjxa2guPNyrpq1GWmPvcGQ==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-ia32": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.20.2.tgz",
+ "integrity": "sha512-HfLOfn9YWmkSKRQqovpnITazdtquEW8/SoHW7pWpuEeguaZI4QnCRW6b+oZTztdBnZOS2hqJ6im/D5cPzBTTlQ==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@esbuild/win32-x64": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.20.2.tgz",
+ "integrity": "sha512-N49X4lJX27+l9jbLKSqZ6bKNjzQvHaT8IIFUy+YIqmXQdjYCToGWwOItDrfby14c78aDd5NHQl29xingXfCdLQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@lukeed/csprng": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@lukeed/csprng/-/csprng-1.1.0.tgz",
+ "integrity": "sha512-Z7C/xXCiGWsg0KuKsHTKJxbWhpI3Vs5GwLfOean7MGyVFGqdRgBbAjOCh6u4bbjPc/8MJ2pZmK/0DLdCbivLDA==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@lukeed/uuid": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/@lukeed/uuid/-/uuid-2.0.1.tgz",
+ "integrity": "sha512-qC72D4+CDdjGqJvkFMMEAtancHUQ7/d/tAiHf64z8MopFDmcrtbcJuerDtFceuAfQJ2pDSfCKCtbqoGBNnwg0w==",
+ "dependencies": {
+ "@lukeed/csprng": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@niivue/niivue": {
+ "version": "0.42.0",
+ "resolved": "https://registry.npmjs.org/@niivue/niivue/-/niivue-0.42.0.tgz",
+ "integrity": "sha512-aHuSF78MU0GHKFdXecJDmvZOC0/pmV8yqfyfsqeHCJDe5vaCueO8WwGTIUDfoxCQ0uLqtFvRtbcJvehxlxInDw==",
+ "dependencies": {
+ "@lukeed/uuid": "^2.0.1",
+ "@ungap/structured-clone": "^1.2.0",
+ "array-equal": "^1.0.2",
+ "daikon": "^1.2.46",
+ "fflate": "^0.8.2",
+ "gl-matrix": "^3.4.3",
+ "nifti-reader-js": "^0.6.8",
+ "rxjs": "^7.8.1"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-linux-x64-gnu": "^4.13.2"
+ }
+ },
+ "node_modules/@rollup/rollup-android-arm-eabi": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.16.4.tgz",
+ "integrity": "sha512-GkhjAaQ8oUTOKE4g4gsZ0u8K/IHU1+2WQSgS1TwTcYvL+sjbaQjNHFXbOJ6kgqGHIO1DfUhI/Sphi9GkRT9K+Q==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-android-arm64": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.16.4.tgz",
+ "integrity": "sha512-Bvm6D+NPbGMQOcxvS1zUl8H7DWlywSXsphAeOnVeiZLQ+0J6Is8T7SrjGTH29KtYkiY9vld8ZnpV3G2EPbom+w==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "android"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-arm64": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.16.4.tgz",
+ "integrity": "sha512-i5d64MlnYBO9EkCOGe5vPR/EeDwjnKOGGdd7zKFhU5y8haKhQZTN2DgVtpODDMxUr4t2K90wTUJg7ilgND6bXw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-darwin-x64": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.16.4.tgz",
+ "integrity": "sha512-WZupV1+CdUYehaZqjaFTClJI72fjJEgTXdf4NbW69I9XyvdmztUExBtcI2yIIU6hJtYvtwS6pkTkHJz+k08mAQ==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-gnueabihf": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.16.4.tgz",
+ "integrity": "sha512-ADm/xt86JUnmAfA9mBqFcRp//RVRt1ohGOYF6yL+IFCYqOBNwy5lbEK05xTsEoJq+/tJzg8ICUtS82WinJRuIw==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm-musleabihf": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.16.4.tgz",
+ "integrity": "sha512-tJfJaXPiFAG+Jn3cutp7mCs1ePltuAgRqdDZrzb1aeE3TktWWJ+g7xK9SNlaSUFw6IU4QgOxAY4rA+wZUT5Wfg==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-gnu": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.16.4.tgz",
+ "integrity": "sha512-7dy1BzQkgYlUTapDTvK997cgi0Orh5Iu7JlZVBy1MBURk7/HSbHkzRnXZa19ozy+wwD8/SlpJnOOckuNZtJR9w==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-arm64-musl": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.16.4.tgz",
+ "integrity": "sha512-zsFwdUw5XLD1gQe0aoU2HVceI6NEW7q7m05wA46eUAyrkeNYExObfRFQcvA6zw8lfRc5BHtan3tBpo+kqEOxmg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-powerpc64le-gnu": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.16.4.tgz",
+ "integrity": "sha512-p8C3NnxXooRdNrdv6dBmRTddEapfESEUflpICDNKXpHvTjRRq1J82CbU5G3XfebIZyI3B0s074JHMWD36qOW6w==",
+ "cpu": [
+ "ppc64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-riscv64-gnu": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.16.4.tgz",
+ "integrity": "sha512-Lh/8ckoar4s4Id2foY7jNgitTOUQczwMWNYi+Mjt0eQ9LKhr6sK477REqQkmy8YHY3Ca3A2JJVdXnfb3Rrwkng==",
+ "cpu": [
+ "riscv64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-s390x-gnu": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.16.4.tgz",
+ "integrity": "sha512-1xwwn9ZCQYuqGmulGsTZoKrrn0z2fAur2ujE60QgyDpHmBbXbxLaQiEvzJWDrscRq43c8DnuHx3QorhMTZgisQ==",
+ "cpu": [
+ "s390x"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-gnu": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.16.4.tgz",
+ "integrity": "sha512-LuOGGKAJ7dfRtxVnO1i3qWc6N9sh0Em/8aZ3CezixSTM+E9Oq3OvTsvC4sm6wWjzpsIlOCnZjdluINKESflJLA==",
+ "cpu": [
+ "x64"
+ ],
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-linux-x64-musl": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.16.4.tgz",
+ "integrity": "sha512-ch86i7KkJKkLybDP2AtySFTRi5fM3KXp0PnHocHuJMdZwu7BuyIKi35BE9guMlmTpwwBTB3ljHj9IQXnTCD0vA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "linux"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-arm64-msvc": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.16.4.tgz",
+ "integrity": "sha512-Ma4PwyLfOWZWayfEsNQzTDBVW8PZ6TUUN1uFTBQbF2Chv/+sjenE86lpiEwj2FiviSmSZ4Ap4MaAfl1ciF4aSA==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-ia32-msvc": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.16.4.tgz",
+ "integrity": "sha512-9m/ZDrQsdo/c06uOlP3W9G2ENRVzgzbSXmXHT4hwVaDQhYcRpi9bgBT0FTG9OhESxwK0WjQxYOSfv40cU+T69w==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@rollup/rollup-win32-x64-msvc": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.16.4.tgz",
+ "integrity": "sha512-YunpoOAyGLDseanENHmbFvQSfVL5BxW3k7hhy0eN4rb3gS/ct75dVD0EXOWIqFT/nE8XYW6LP6vz6ctKRi0k9A==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
+ "node_modules/@tensorflow/tfjs": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/@tensorflow/tfjs/-/tfjs-3.9.0.tgz",
+ "integrity": "sha512-TyykXiZ6r9rMoXbQZaAkOKJJUrJHQVAjH/K6XRCPpOG//Hf15ZW97ZODskEByj77yNMw4smFUWCFhprhY2PgDQ==",
+ "dependencies": {
+ "@tensorflow/tfjs-backend-cpu": "3.9.0",
+ "@tensorflow/tfjs-backend-webgl": "3.9.0",
+ "@tensorflow/tfjs-converter": "3.9.0",
+ "@tensorflow/tfjs-core": "3.9.0",
+ "@tensorflow/tfjs-data": "3.9.0",
+ "@tensorflow/tfjs-layers": "3.9.0",
+ "argparse": "^1.0.10",
+ "chalk": "^4.1.0",
+ "core-js": "3",
+ "regenerator-runtime": "^0.13.5",
+ "yargs": "^16.0.3"
+ },
+ "bin": {
+ "tfjs-custom-module": "dist/tools/custom_module/cli.js"
+ }
+ },
+ "node_modules/@tensorflow/tfjs-backend-cpu": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-backend-cpu/-/tfjs-backend-cpu-3.9.0.tgz",
+ "integrity": "sha512-PUv5B3wdQsA8cysk+oUhA0NqMoo/lwP8EazC/axQc8/72Dc6kU8uw/5qZtE5P4xXSqkNSlh2ifFm+8nH/6B+iA==",
+ "dependencies": {
+ "@types/seedrandom": "2.4.27",
+ "seedrandom": "2.4.3"
+ },
+ "engines": {
+ "yarn": ">= 1.3.2"
+ },
+ "peerDependencies": {
+ "@tensorflow/tfjs-core": "3.9.0"
+ }
+ },
+ "node_modules/@tensorflow/tfjs-backend-cpu/node_modules/seedrandom": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-2.4.3.tgz",
+ "integrity": "sha512-2CkZ9Wn2dS4mMUWQaXLsOAfGD+irMlLEeSP3cMxpGbgyOOzJGFa+MWCOMTOCMyZinHRPxyOj/S/C57li/1to6Q=="
+ },
+ "node_modules/@tensorflow/tfjs-backend-webgl": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-backend-webgl/-/tfjs-backend-webgl-3.9.0.tgz",
+ "integrity": "sha512-oUnyQFF9aCnNZpul9AnJwrt8noDJdMmxgq2+e/0DpEMBERcywtVj9qkKCccMaVFsdQV1lQxpV3kjC3vbFMDWKg==",
+ "dependencies": {
+ "@tensorflow/tfjs-backend-cpu": "3.9.0",
+ "@types/offscreencanvas": "~2019.3.0",
+ "@types/seedrandom": "2.4.27",
+ "@types/webgl-ext": "0.0.30",
+ "@types/webgl2": "0.0.5",
+ "seedrandom": "2.4.3"
+ },
+ "engines": {
+ "yarn": ">= 1.3.2"
+ },
+ "peerDependencies": {
+ "@tensorflow/tfjs-core": "3.9.0"
+ }
+ },
+ "node_modules/@tensorflow/tfjs-backend-webgl/node_modules/seedrandom": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-2.4.3.tgz",
+ "integrity": "sha512-2CkZ9Wn2dS4mMUWQaXLsOAfGD+irMlLEeSP3cMxpGbgyOOzJGFa+MWCOMTOCMyZinHRPxyOj/S/C57li/1to6Q=="
+ },
+ "node_modules/@tensorflow/tfjs-converter": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-converter/-/tfjs-converter-3.9.0.tgz",
+ "integrity": "sha512-ftegwQlGkyDCxZGhAVfMyWWXqpNhnyESvNY3oFAUV4eN6i/mmBTCSOQ5AX5VR5lr7PNYPWGO5sJ10Q5HeTPfgw==",
+ "peerDependencies": {
+ "@tensorflow/tfjs-core": "3.9.0"
+ }
+ },
+ "node_modules/@tensorflow/tfjs-core": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-core/-/tfjs-core-3.9.0.tgz",
+ "integrity": "sha512-wQ+VMsbvCne2OsogiNtRP8Mc01LnRGvAYQ0SGaDa4+1uwY2jsMk5GZjG66JQvf/Ppw8wyvKF170eh0yyCBgfcg==",
+ "dependencies": {
+ "@types/long": "^4.0.1",
+ "@types/offscreencanvas": "~2019.3.0",
+ "@types/seedrandom": "2.4.27",
+ "@types/webgl-ext": "0.0.30",
+ "long": "4.0.0",
+ "node-fetch": "~2.6.1",
+ "seedrandom": "2.4.3"
+ },
+ "engines": {
+ "yarn": ">= 1.3.2"
+ }
+ },
+ "node_modules/@tensorflow/tfjs-core/node_modules/seedrandom": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-2.4.3.tgz",
+ "integrity": "sha512-2CkZ9Wn2dS4mMUWQaXLsOAfGD+irMlLEeSP3cMxpGbgyOOzJGFa+MWCOMTOCMyZinHRPxyOj/S/C57li/1to6Q=="
+ },
+ "node_modules/@tensorflow/tfjs-data": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-data/-/tfjs-data-3.9.0.tgz",
+ "integrity": "sha512-1/H9VlYlfEX/LflzobSB5sx3FCavWGmzqRnAyyn5ChjgCzIUa+RtJ7nYgK2+6RC2MIDgKt1jmu36mkKZrwPD3w==",
+ "dependencies": {
+ "@types/node-fetch": "^2.1.2",
+ "node-fetch": "~2.6.1"
+ },
+ "peerDependencies": {
+ "@tensorflow/tfjs-core": "3.9.0",
+ "seedrandom": "~2.4.3"
+ }
+ },
+ "node_modules/@tensorflow/tfjs-layers": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/@tensorflow/tfjs-layers/-/tfjs-layers-3.9.0.tgz",
+ "integrity": "sha512-25I20Oy17YZ3y0x/pabeiN6/vai0vqMQ85/Bp0GLOpcN2kmOLcItdWOAqFW5YPI2nrTqnpNQyk9zhmIh8f6X4w==",
+ "peerDependencies": {
+ "@tensorflow/tfjs-core": "3.9.0"
+ }
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz",
+ "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==",
+ "dev": true
+ },
+ "node_modules/@types/long": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.2.tgz",
+ "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA=="
+ },
+ "node_modules/@types/node": {
+ "version": "20.12.7",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.7.tgz",
+ "integrity": "sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg==",
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "node_modules/@types/node-fetch": {
+ "version": "2.6.11",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz",
+ "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==",
+ "dependencies": {
+ "@types/node": "*",
+ "form-data": "^4.0.0"
+ }
+ },
+ "node_modules/@types/offscreencanvas": {
+ "version": "2019.3.0",
+ "resolved": "https://registry.npmjs.org/@types/offscreencanvas/-/offscreencanvas-2019.3.0.tgz",
+ "integrity": "sha512-esIJx9bQg+QYF0ra8GnvfianIY8qWB0GBx54PK5Eps6m+xTj86KLavHv6qDhzKcu5UUOgNfJ2pWaIIV7TRUd9Q=="
+ },
+ "node_modules/@types/seedrandom": {
+ "version": "2.4.27",
+ "resolved": "https://registry.npmjs.org/@types/seedrandom/-/seedrandom-2.4.27.tgz",
+ "integrity": "sha512-YvMLqFak/7rt//lPBtEHv3M4sRNA+HGxrhFZ+DQs9K2IkYJbNwVIb8avtJfhDiuaUBX/AW0jnjv48FV8h3u9bQ=="
+ },
+ "node_modules/@types/webgl-ext": {
+ "version": "0.0.30",
+ "resolved": "https://registry.npmjs.org/@types/webgl-ext/-/webgl-ext-0.0.30.tgz",
+ "integrity": "sha512-LKVgNmBxN0BbljJrVUwkxwRYqzsAEPcZOe6S2T6ZaBDIrFp0qu4FNlpc5sM1tGbXUYFgdVQIoeLk1Y1UoblyEg=="
+ },
+ "node_modules/@types/webgl2": {
+ "version": "0.0.5",
+ "resolved": "https://registry.npmjs.org/@types/webgl2/-/webgl2-0.0.5.tgz",
+ "integrity": "sha512-oGaKsBbxQOY5+aJFV3KECDhGaXt+yZJt2y/OZsnQGLRkH6Fvr7rv4pCt3SRH1somIHfej/c4u7NSpCyd9x+1Ow=="
+ },
+ "node_modules/@ungap/structured-clone": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz",
+ "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ=="
+ },
+ "node_modules/@wearemothership/dicom-character-set": {
+ "version": "1.0.4-opt.1",
+ "resolved": "https://registry.npmjs.org/@wearemothership/dicom-character-set/-/dicom-character-set-1.0.4-opt.1.tgz",
+ "integrity": "sha512-stqhnpawYHY2UZKj4RHTF71ab3q3z8S1SO9ToQKjsHQwowUdFVo6YFea93psFux3yqNbRlQjwoCdPjHcD0YQzw==",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/array-equal": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/array-equal/-/array-equal-1.0.2.tgz",
+ "integrity": "sha512-gUHx76KtnhEgB3HOuFYiCm3FIdEs6ocM2asHvNTkfu/Y09qQVrrVVaOKENmS2KkSaGoxgXNqC+ZVtR/n0MOkSA==",
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
+ },
+ "node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/cliui": {
+ "version": "7.0.4",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz",
+ "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==",
+ "dependencies": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.0",
+ "wrap-ansi": "^7.0.0"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="
+ },
+ "node_modules/core-js": {
+ "version": "3.37.0",
+ "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.37.0.tgz",
+ "integrity": "sha512-fu5vHevQ8ZG4og+LXug8ulUtVxjOcEYvifJr7L5Bfq9GOztVqsKd9/59hUk2ZSbCrS3BqUr3EpaYGIYzq7g3Ug==",
+ "hasInstallScript": true,
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/core-js"
+ }
+ },
+ "node_modules/cssfilter": {
+ "version": "0.0.10",
+ "resolved": "https://registry.npmjs.org/cssfilter/-/cssfilter-0.0.10.tgz",
+ "integrity": "sha512-FAaLDaplstoRsDR8XGYH51znUN0UY7nMc6Z9/fvE8EXGwvJE9hu7W2vHwx1+bd6gCYnln9nLbzxFTrcO9YQDZw=="
+ },
+ "node_modules/daikon": {
+ "version": "1.2.46",
+ "resolved": "https://registry.npmjs.org/daikon/-/daikon-1.2.46.tgz",
+ "integrity": "sha512-S8dTTlsWYTH3LQztjTW9KnNvxDeL2mr2cau0auLdYMJe4TrocYP1PmidHizO3rXUs+gXpBWI1PQ2qvB4b21QFw==",
+ "dependencies": {
+ "@wearemothership/dicom-character-set": "^1.0.4-opt.1",
+ "fflate": "*",
+ "jpeg-lossless-decoder-js": "2.0.7",
+ "pako": "^2.1",
+ "xss": "1.0.14"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "node_modules/esbuild": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.20.2.tgz",
+ "integrity": "sha512-WdOOppmUNU+IbZ0PaDiTst80zjnrOkyJNHoKupIcVyU8Lvla3Ugx94VzkQ32Ijqd7UhHJy75gNWDMUekcrSJ6g==",
+ "dev": true,
+ "hasInstallScript": true,
+ "bin": {
+ "esbuild": "bin/esbuild"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "optionalDependencies": {
+ "@esbuild/aix-ppc64": "0.20.2",
+ "@esbuild/android-arm": "0.20.2",
+ "@esbuild/android-arm64": "0.20.2",
+ "@esbuild/android-x64": "0.20.2",
+ "@esbuild/darwin-arm64": "0.20.2",
+ "@esbuild/darwin-x64": "0.20.2",
+ "@esbuild/freebsd-arm64": "0.20.2",
+ "@esbuild/freebsd-x64": "0.20.2",
+ "@esbuild/linux-arm": "0.20.2",
+ "@esbuild/linux-arm64": "0.20.2",
+ "@esbuild/linux-ia32": "0.20.2",
+ "@esbuild/linux-loong64": "0.20.2",
+ "@esbuild/linux-mips64el": "0.20.2",
+ "@esbuild/linux-ppc64": "0.20.2",
+ "@esbuild/linux-riscv64": "0.20.2",
+ "@esbuild/linux-s390x": "0.20.2",
+ "@esbuild/linux-x64": "0.20.2",
+ "@esbuild/netbsd-x64": "0.20.2",
+ "@esbuild/openbsd-x64": "0.20.2",
+ "@esbuild/sunos-x64": "0.20.2",
+ "@esbuild/win32-arm64": "0.20.2",
+ "@esbuild/win32-ia32": "0.20.2",
+ "@esbuild/win32-x64": "0.20.2"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz",
+ "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/fflate": {
+ "version": "0.8.2",
+ "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz",
+ "integrity": "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A=="
+ },
+ "node_modules/form-data": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
+ "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "engines": {
+ "node": "6.* || 8.* || >= 10.*"
+ }
+ },
+ "node_modules/gl-matrix": {
+ "version": "3.4.3",
+ "resolved": "https://registry.npmjs.org/gl-matrix/-/gl-matrix-3.4.3.tgz",
+ "integrity": "sha512-wcCp8vu8FT22BnvKVPjXa/ICBWRq/zjFfdofZy1WSpQZpphblv12/bOQLBC1rMM7SGOFS9ltVmKOHil5+Ml7gA=="
+ },
+ "node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/jpeg-lossless-decoder-js": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/jpeg-lossless-decoder-js/-/jpeg-lossless-decoder-js-2.0.7.tgz",
+ "integrity": "sha512-tbZlhFkKmx+JaqVMkq47SKWGuXLkIaV8fTbnhO39dYEnQrSShLGuLCGb0n6ntXjtmk6oAWGiIriWOLwj9od0yQ=="
+ },
+ "node_modules/long": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
+ "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA=="
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.7",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz",
+ "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/nifti-reader-js": {
+ "version": "0.6.8",
+ "resolved": "https://registry.npmjs.org/nifti-reader-js/-/nifti-reader-js-0.6.8.tgz",
+ "integrity": "sha512-yIKNVzYFiUcSHazoR+sd6Ka7sUmZTabaVqJRFxbdlAKR1hnPBuNP71g3AyApo37nJ3k41c632QPij5q7gF1YPQ==",
+ "dependencies": {
+ "fflate": "*"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.6.13",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.13.tgz",
+ "integrity": "sha512-StxNAxh15zr77QvvkmveSQ8uCQ4+v5FkvNTj0OESmiHu+VRi/gXArXtkWMElOsOUNLtUEvI4yS+rdtOHZTwlQA==",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/pako": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/pako/-/pako-2.1.0.tgz",
+ "integrity": "sha512-w+eufiZ1WuJYgPXbV/PO3NCMEc3xqylkKHzp8bxp1uW4qaSNQUkwmLLEc3kKsfz8lpV1F8Ht3U1Cm+9Srog2ug=="
+ },
+ "node_modules/picocolors": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
+ "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==",
+ "dev": true
+ },
+ "node_modules/postcss": {
+ "version": "8.4.38",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz",
+ "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "nanoid": "^3.3.7",
+ "picocolors": "^1.0.0",
+ "source-map-js": "^1.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/regenerator-runtime": {
+ "version": "0.13.11",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz",
+ "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg=="
+ },
+ "node_modules/require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/rollup": {
+ "version": "4.16.4",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.16.4.tgz",
+ "integrity": "sha512-kuaTJSUbz+Wsb2ATGvEknkI12XV40vIiHmLuFlejoo7HtDok/O5eDDD0UpCVY5bBX5U5RYo8wWP83H7ZsqVEnA==",
+ "dev": true,
+ "dependencies": {
+ "@types/estree": "1.0.5"
+ },
+ "bin": {
+ "rollup": "dist/bin/rollup"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0"
+ },
+ "optionalDependencies": {
+ "@rollup/rollup-android-arm-eabi": "4.16.4",
+ "@rollup/rollup-android-arm64": "4.16.4",
+ "@rollup/rollup-darwin-arm64": "4.16.4",
+ "@rollup/rollup-darwin-x64": "4.16.4",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.16.4",
+ "@rollup/rollup-linux-arm-musleabihf": "4.16.4",
+ "@rollup/rollup-linux-arm64-gnu": "4.16.4",
+ "@rollup/rollup-linux-arm64-musl": "4.16.4",
+ "@rollup/rollup-linux-powerpc64le-gnu": "4.16.4",
+ "@rollup/rollup-linux-riscv64-gnu": "4.16.4",
+ "@rollup/rollup-linux-s390x-gnu": "4.16.4",
+ "@rollup/rollup-linux-x64-gnu": "4.16.4",
+ "@rollup/rollup-linux-x64-musl": "4.16.4",
+ "@rollup/rollup-win32-arm64-msvc": "4.16.4",
+ "@rollup/rollup-win32-ia32-msvc": "4.16.4",
+ "@rollup/rollup-win32-x64-msvc": "4.16.4",
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/rxjs": {
+ "version": "7.8.1",
+ "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz",
+ "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==",
+ "dependencies": {
+ "tslib": "^2.1.0"
+ }
+ },
+ "node_modules/seedrandom": {
+ "version": "2.4.4",
+ "resolved": "https://registry.npmjs.org/seedrandom/-/seedrandom-2.4.4.tgz",
+ "integrity": "sha512-9A+PDmgm+2du77B5i0Ip2cxOqqHjgNxnBgglxLcX78A2D6c2rTo61z4jnVABpF4cKeDMDG+cmXXvdnqse2VqMA==",
+ "peer": true
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz",
+ "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g=="
+ },
+ "node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
+ },
+ "node_modules/tslib": {
+ "version": "2.6.2",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz",
+ "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q=="
+ },
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="
+ },
+ "node_modules/vite": {
+ "version": "5.2.10",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-5.2.10.tgz",
+ "integrity": "sha512-PAzgUZbP7msvQvqdSD+ErD5qGnSFiGOoWmV5yAKUEI0kdhjbH6nMWVyZQC/hSc4aXwc0oJ9aEdIiF9Oje0JFCw==",
+ "dev": true,
+ "dependencies": {
+ "esbuild": "^0.20.1",
+ "postcss": "^8.4.38",
+ "rollup": "^4.13.0"
+ },
+ "bin": {
+ "vite": "bin/vite.js"
+ },
+ "engines": {
+ "node": "^18.0.0 || >=20.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/vitejs/vite?sponsor=1"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.3"
+ },
+ "peerDependencies": {
+ "@types/node": "^18.0.0 || >=20.0.0",
+ "less": "*",
+ "lightningcss": "^1.21.0",
+ "sass": "*",
+ "stylus": "*",
+ "sugarss": "*",
+ "terser": "^5.4.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "less": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ },
+ "sass": {
+ "optional": true
+ },
+ "stylus": {
+ "optional": true
+ },
+ "sugarss": {
+ "optional": true
+ },
+ "terser": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ },
+ "node_modules/wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/xss": {
+ "version": "1.0.14",
+ "resolved": "https://registry.npmjs.org/xss/-/xss-1.0.14.tgz",
+ "integrity": "sha512-og7TEJhXvn1a7kzZGQ7ETjdQVS2UfZyTlsEdDOqvQF7GoxNfY+0YLCzBy1kPdsDDx4QuNAonQPddpsn6Xl/7sw==",
+ "dependencies": {
+ "commander": "^2.20.3",
+ "cssfilter": "0.0.10"
+ },
+ "bin": {
+ "xss": "bin/xss"
+ },
+ "engines": {
+ "node": ">= 0.10.0"
+ }
+ },
+ "node_modules/y18n": {
+ "version": "5.0.8",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
+ "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/yargs": {
+ "version": "16.2.0",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz",
+ "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==",
+ "dependencies": {
+ "cliui": "^7.0.2",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.0",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^20.2.2"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/yargs-parser": {
+ "version": "20.2.9",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz",
+ "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==",
+ "engines": {
+ "node": ">=10"
+ }
+ }
+ }
+}
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..24c8b06
--- /dev/null
+++ b/package.json
@@ -0,0 +1,19 @@
+{
+ "name": "niivue-brainchop",
+ "private": true,
+ "version": "0.1.0",
+ "type": "module",
+ "scripts": {
+ "dev": "vite",
+ "build": "vite build",
+ "preview": "vite preview"
+ },
+ "dependencies": {
+ "@niivue/niivue":"^0.42.0",
+ "@tensorflow/tfjs": "3.9.0",
+ "gl-matrix": "^3.4.3"
+ },
+ "devDependencies": {
+ "vite": "^5.2.0"
+ }
+}
diff --git a/public/favicon.ico b/public/favicon.ico
new file mode 100644
index 0000000..21e6155
Binary files /dev/null and b/public/favicon.ico differ
diff --git a/public/models/GT/labels.json b/public/models/GT/labels.json
new file mode 100644
index 0000000..35ee231
--- /dev/null
+++ b/public/models/GT/labels.json
@@ -0,0 +1 @@
+{"0": "background", "1": "Grey Matter", "2": "White Matter"}
diff --git a/public/models/mnm_tfjs_me_test/colorLUT.json b/public/models/mnm_tfjs_me_test/colorLUT.json
new file mode 100644
index 0000000..6da374a
--- /dev/null
+++ b/public/models/mnm_tfjs_me_test/colorLUT.json
@@ -0,0 +1 @@
+{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"}
\ No newline at end of file
diff --git a/public/models/mnm_tfjs_me_test/group1-shard1of1.bin b/public/models/mnm_tfjs_me_test/group1-shard1of1.bin
new file mode 100644
index 0000000..210906a
Binary files /dev/null and b/public/models/mnm_tfjs_me_test/group1-shard1of1.bin differ
diff --git a/public/models/mnm_tfjs_me_test/labels.json b/public/models/mnm_tfjs_me_test/labels.json
new file mode 100644
index 0000000..4885a94
--- /dev/null
+++ b/public/models/mnm_tfjs_me_test/labels.json
@@ -0,0 +1 @@
+{"0": "background", "1": "White Matter", "2": "Grey Matter"}
diff --git a/public/models/mnm_tfjs_me_test/model.json b/public/models/mnm_tfjs_me_test/model.json
new file mode 100644
index 0000000..e288b78
--- /dev/null
+++ b/public/models/mnm_tfjs_me_test/model.json
@@ -0,0 +1 @@
+{"format": "layers-model", "generatedBy": "keras v2.4.0", "convertedBy": "TensorFlow.js Converter v3.2.0", "modelTopology": {"keras_version": "2.4.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 38, 38, 38, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "17", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "17", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "18", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "18", "inbound_nodes": [[["17", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "19", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "19", "inbound_nodes": [[["18", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "20", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "20", "inbound_nodes": [[["19", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["20", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["21", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", 
"config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 3, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["30", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "17/kernel", "shape": [3, 3, 3, 1, 21], "dtype": "float32"}, {"name": "17/bias", "shape": [21], "dtype": "float32"}, {"name": "19/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "19/bias", "shape": [21], "dtype": "float32"}, {"name": "21/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "21/bias", "shape": [21], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "23/bias", "shape": [21], "dtype": "float32"}, {"name": "25/kernel", 
"shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "25/bias", "shape": [21], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "27/bias", "shape": [21], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "29/bias", "shape": [21], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 21, 3], "dtype": "float32"}, {"name": "output/bias", "shape": [3], "dtype": "float32"}]}]}
\ No newline at end of file
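
The patch model above declares an input of [batch, 38, 38, 38, 1] and a 3-channel per-voxel output. A minimal sketch of driving such a layers-model with @tensorflow/tfjs follows; `modelUrl` and the helper name are assumptions, not code from brainchop.js:

```javascript
import * as tf from '@tensorflow/tfjs'

// Minimal sketch: classify one 38x38x38 patch with a layers-model like the
// one above. modelUrl is an assumption -- point it at the served model.json.
async function predictPatch(modelUrl, patchData) {
  const model = await tf.loadLayersModel(modelUrl)
  const input = tf.tensor5d(patchData, [1, 38, 38, 38, 1]) // [batch, D, H, W, channels]
  const scores = model.predict(input)                      // [1, 38, 38, 38, 3]
  const labels = tf.argMax(scores, -1)                     // per-voxel class index
  const out = await labels.data()
  tf.dispose([input, scores, labels])
  return out
}
```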
diff --git a/public/models/model11_50class/colorLUT.json b/public/models/model11_50class/colorLUT.json
new file mode 100644
index 0000000..99c0f7e
--- /dev/null
+++ b/public/models/model11_50class/colorLUT.json
@@ -0,0 +1,52 @@
+{
+ "0": "rgb(0,0,0)",
+ "1": "rgb(245,245,245)",
+ "2": "rgb(196,58,250)",
+ "3": "rgb(220,248,164)",
+ "4": "rgb(230,148,34)",
+ "5": "rgb(0,118,14)",
+ "6": "rgb(122,186,220)",
+ "7": "rgb(236,13,176)",
+ "8": "rgb(12,48,255)",
+ "9": "rgb(119,159,176)",
+ "10": "rgb(220,216,20)",
+ "11": "rgb(103,255,255)",
+ "12": "rgb(60,60,60)",
+ "13": "rgb(255,165,0)",
+ "14": "rgb(165,42,42)",
+ "15": "rgb(0,0,208)",
+ "16": "rgb(25,100,40)",
+ "17": "rgb(125,100,160)",
+ "18": "rgb(100,25,0)",
+ "19": "rgb(220,20,100)",
+ "20": "rgb(220,20,10)",
+ "21": "rgb(180,220,140)",
+ "22": "rgb(220,60,220)",
+ "23": "rgb(180,40,120)",
+ "24": "rgb(140,20,140)",
+ "25": "rgb(20,30,140)",
+ "26": "rgb(35,75,50)",
+ "27": "rgb(225,140,140)",
+ "28": "rgb(200,35,75)",
+ "29": "rgb(160,100,50)",
+ "30": "rgb(20,220,60)",
+ "31": "rgb(60,220,60)",
+ "32": "rgb(220,180,140)",
+ "33": "rgb(20,100,50)",
+ "34": "rgb(220,60,20)",
+ "35": "rgb(120,100,60)",
+ "36": "rgb(220,20,20)",
+ "37": "rgb(220,180,220)",
+ "38": "rgb(60,20,220)",
+ "39": "rgb(160,140,180)",
+ "40": "rgb(80,20,140)",
+ "41": "rgb(75,50,125)",
+ "42": "rgb(20,220,160)",
+ "43": "rgb(20,180,140)",
+ "44": "rgb(140,220,220)",
+ "45": "rgb(80,160,20)",
+ "46": "rgb(100,0,100)",
+ "47": "rgb(70,70,70)",
+ "48": "rgb(150,150,200)",
+ "49": "rgb(255,192,32)"
+}
\ No newline at end of file
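
Each colorLUT.json maps a class index to a CSS-style rgb() string. A small sketch (a hypothetical helper) converting the table above into a flat RGBA byte array, a common layout for viewer lookup tables; keeping the background transparent is an assumption:

```javascript
// Build a flat RGBA LUT (entries 4k..4k+3 hold class k's color) from a
// colorLUT.json object such as the one above.
function lutFromColorJson(colorJson) {
  const n = Object.keys(colorJson).length
  const lut = new Uint8Array(n * 4)
  for (const [key, rgb] of Object.entries(colorJson)) {
    const [r, g, b] = rgb.match(/\d+/g).map(Number) // "rgb(12,48,255)" -> [12, 48, 255]
    const k = Number(key)
    lut.set([r, g, b, k === 0 ? 0 : 255], k * 4)    // assumption: class 0 (background) stays transparent
  }
  return lut
}
```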
diff --git a/public/models/model11_50class/group1-shard1of1.bin b/public/models/model11_50class/group1-shard1of1.bin
new file mode 100644
index 0000000..216fc84
Binary files /dev/null and b/public/models/model11_50class/group1-shard1of1.bin differ
diff --git a/public/models/model11_50class/labels.json b/public/models/model11_50class/labels.json
new file mode 100644
index 0000000..58541ce
--- /dev/null
+++ b/public/models/model11_50class/labels.json
@@ -0,0 +1,52 @@
+{
+ "0": "BG",
+ "1": "Cerebral-White-Matter",
+ "2": "Ventricle",
+ "3": "Cerebellum-White-Matter",
+ "4": "Cerebellum",
+ "5": "Thalamus-Proper*",
+ "6": "Caudate",
+ "7": "Putamen",
+ "8": "Pallidum",
+ "9": "Brain-Stem",
+ "10": "Hippocampus",
+ "11": "Amygdala",
+ "12": "CSF",
+ "13": "Accumbens-area",
+ "14": "VentralDC",
+ "15": "CC_Posterior / CC_Mid_Posterior / CC_Central / CC_Mid_Anterior / CC_Anterior",
+ "16": "ctx-bankssts",
+ "17": "ctx-caudalanteriorcingulate",
+ "18": "ctx-caudalmiddlefrontal",
+ "19": "ctx-cuneus",
+ "20": "ctx-entorhinal",
+ "21": "ctx-fusiform",
+ "22": "ctx-inferiorparietal",
+ "23": "ctx-inferiortemporal",
+ "24": "ctx-isthmuscingulate",
+ "25": "ctx-lateraloccipital",
+ "26": "ctx-lateralorbitofrontal",
+ "27": "ctx-lingual",
+ "28": "ctx-medialorbitofrontal",
+ "29": "ctx-middletemporal",
+ "30": "ctx-parahippocampal",
+ "31": "ctx-paracentral",
+ "32": "ctx-parsopercularis",
+ "33": "ctx-parsorbitalis",
+ "34": "ctx-parstriangularis",
+ "35": "ctx-pericalcarine",
+ "36": "ctx-postcentral",
+ "37": "ctx-posteriorcingulate",
+ "38": "ctx-precentral",
+ "39": "ctx-precuneus",
+ "40": "ctx-rostralanteriorcingulate",
+ "41": "ctx-rostralmiddlefrontal",
+ "42": "ctx-superiorfrontal",
+ "43": "ctx-superiorparietal",
+ "44": "ctx-superiortemporal",
+ "45": "ctx-supramarginal",
+ "46": "ctx-frontalpole",
+ "47": "ctx-temporalpole",
+ "48": "ctx-transversetemporal",
+ "49": "ctx-insula"
+}
\ No newline at end of file
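
labels.json pairs each class index with a FreeSurfer-style structure name. A sketch (hypothetical helper) that reports which structures a finished segmentation contains; the fetch path assumes the public/ folder is served at the site root, as Vite does by default:

```javascript
// List the structures present in a segmentation, where labelVolume holds one
// class index per voxel, using the labels.json above.
async function describeSegmentation(labelVolume) {
  const resp = await fetch('/models/model11_50class/labels.json')
  const names = await resp.json()
  const present = [...new Set(labelVolume)].sort((a, b) => a - b)
  return present.map((k) => `${k}: ${names[k] ?? 'unknown'}`)
}
```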
diff --git a/public/models/model11_50class/labels.zip b/public/models/model11_50class/labels.zip
new file mode 100644
index 0000000..824f07b
Binary files /dev/null and b/public/models/model11_50class/labels.zip differ
diff --git a/public/models/model11_50class/labelsWithCompleteAnnot.json b/public/models/model11_50class/labelsWithCompleteAnnot.json
new file mode 100644
index 0000000..8735db8
--- /dev/null
+++ b/public/models/model11_50class/labelsWithCompleteAnnot.json
@@ -0,0 +1,52 @@
+{
+ "0": "BG",
+ "1": "Left-Cerebral-White-Matter / Right-Cerebral-White-Matter",
+ "2": "Left-Lateral-Ventricle / Left-Inf-Lat-Vent / Right-Lateral-Ventricle / Right-Inf-Lat-Vent / 3rd-Ventricle / 4th-Ventricle",
+ "3": "Left-Cerebellum-White-Matter / Right-Cerebellum-White-Matter",
+ "4": "Left-Cerebellum-Cortex / Right-Cerebellum-Cortex",
+ "5": "Left-Thalamus-Proper* / Right-Thalamus-Proper*",
+ "6": "Left-Caudate / Right-Caudate",
+ "7": "Left-Putamen / Right-Putamen",
+ "8": "Left-Pallidum / Right-Pallidum",
+ "9": "Brain-Stem",
+ "10": "Left-Hippocampus / Right-Hippocampus",
+ "11": "Left-Amygdala / Right-Amygdala",
+ "12": "CSF",
+ "13": "Left-Accumbens-area / Right-Accumbens-area",
+ "14": "Left-VentralDC / Right-VentralDC",
+ "15": "CC_Posterior / CC_Mid_Posterior / CC_Central / CC_Mid_Anterior / CC_Anterior",
+ "16": "ctx-lh-bankssts / ctx-rh-bankssts",
+ "17": "ctx-lh-caudalanteriorcingulate / ctx-rh-caudalanteriorcingulate",
+ "18": "ctx-lh-caudalmiddlefrontal / ctx-rh-caudalmiddlefrontal",
+ "19": "ctx-lh-cuneus / ctx-rh-cuneus",
+ "20": "ctx-lh-entorhinal / ctx-rh-entorhinal",
+ "21": "ctx-lh-fusiform / ctx-rh-fusiform",
+ "22": "ctx-lh-inferiorparietal / ctx-rh-inferiorparietal",
+ "23": "ctx-lh-inferiortemporal / ctx-rh-inferiortemporal",
+ "24": "ctx-lh-isthmuscingulate / ctx-rh-isthmuscingulate",
+ "25": "ctx-lh-lateraloccipital / ctx-rh-lateraloccipital",
+ "26": "ctx-lh-lateralorbitofrontal / ctx-rh-lateralorbitofrontal",
+ "27": "ctx-lh-lingual / ctx-rh-lingual",
+ "28": "ctx-lh-medialorbitofrontal / ctx-rh-medialorbitofrontal",
+ "29": "ctx-lh-middletemporal / ctx-rh-middletemporal",
+ "30": "ctx-lh-parahippocampal / ctx-rh-parahippocampal",
+ "31": "ctx-lh-paracentral / ctx-rh-paracentral",
+ "32": "ctx-lh-parsopercularis / ctx-rh-parsopercularis",
+ "33": "ctx-lh-parsorbitalis / ctx-rh-parsorbitalis",
+ "34": "ctx-lh-parstriangularis / ctx-rh-parstriangularis",
+ "35": "ctx-lh-pericalcarine / ctx-rh-pericalcarine",
+ "36": "ctx-lh-postcentral / ctx-rh-postcentral",
+ "37": "ctx-lh-posteriorcingulate / ctx-rh-posteriorcingulate",
+ "38": "ctx-lh-precentral / ctx-rh-precentral",
+ "39": "ctx-lh-precuneus / ctx-rh-precuneus",
+ "40": "ctx-lh-rostralanteriorcingulate / ctx-rh-rostralanteriorcingulate",
+ "41": "ctx-lh-rostralmiddlefrontal / ctx-rh-rostralmiddlefrontal",
+ "42": "ctx-lh-superiorfrontal / ctx-rh-superiorfrontal",
+ "43": "ctx-lh-superiorparietal / ctx-rh-superiorparietal",
+ "44": "ctx-lh-superiortemporal / ctx-rh-superiortemporal",
+ "45": "ctx-lh-supramarginal / ctx-rh-supramarginal",
+ "46": "ctx-lh-frontalpole / ctx-rh-frontalpole",
+ "47": "ctx-lh-temporalpole / ctx-rh-temporalpole",
+ "48": "ctx-lh-transversetemporal / ctx-rh-transversetemporal",
+ "49": "ctx-lh-insula / ctx-rh-insula"
+}
\ No newline at end of file
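
Each entry above folds several FreeSurfer labels (typically a left/right pair) into one string separated by " / ". A short sketch to expand them back into arrays when per-hemisphere names are needed:

```javascript
// Expand "A / B / C" annotation strings into arrays of FreeSurfer labels.
function expandAnnotations(labelsWithAnnot) {
  return Object.fromEntries(
    Object.entries(labelsWithAnnot).map(([k, merged]) => [k, merged.split(' / ')])
  )
}
// expandAnnotations({ "1": "Left-Cerebral-White-Matter / Right-Cerebral-White-Matter" })
// -> { "1": ["Left-Cerebral-White-Matter", "Right-Cerebral-White-Matter"] }
```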
diff --git a/public/models/model11_50class/model.json b/public/models/model11_50class/model.json
new file mode 100644
index 0000000..37f3f21
--- /dev/null
+++ b/public/models/model11_50class/model.json
@@ -0,0 +1 @@
+{"format": "layers-model", "generatedBy": "keras v2.7.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 256, 256, 256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "input.1", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.1", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["input.1", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.4", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.4", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["input.4", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.8", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.8", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["input.8", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.12", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.12", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", 
"inbound_nodes": [[["input.12", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.16", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.16", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["input.16", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.20", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.20", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["input.20", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.24", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.24", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["input.24", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.28", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.28", "inbound_nodes": [[["34", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "36", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "36", "inbound_nodes": [[["input.28", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.32", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, 
"bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.32", "inbound_nodes": [[["36", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "38", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "38", "inbound_nodes": [[["input.32", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 50, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["38", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "input.1/kernel", "shape": [3, 3, 3, 1, 11], "dtype": "float32"}, {"name": "input.1/bias", "shape": [11], "dtype": "float32"}, {"name": "input.12/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.12/bias", "shape": [11], "dtype": "float32"}, {"name": "input.16/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.16/bias", "shape": [11], "dtype": "float32"}, {"name": "input.20/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.20/bias", "shape": [11], "dtype": "float32"}, {"name": "input.24/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.24/bias", "shape": [11], "dtype": "float32"}, {"name": "input.28/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.28/bias", "shape": [11], "dtype": "float32"}, {"name": "input.32/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.32/bias", "shape": [11], "dtype": "float32"}, {"name": "input.4/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.4/bias", "shape": [11], "dtype": "float32"}, {"name": "input.8/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.8/bias", "shape": [11], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 11, 50], "dtype": "float32"}, {"name": "output/bias", "shape": [50], "dtype": "float32"}]}]}
\ No newline at end of file
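
The topology above is MeshNet-style: nine 3x3x3 convolutions whose dilation rates rise and fall as 1, 2, 4, 8, 16, 8, 4, 2, 1 before the 1x1x1 output conv. Each dilated 3^3 layer widens the receptive field by 2*dilation voxels per axis, so the final classifier sees roughly a 93^3 neighborhood around each voxel:

```javascript
// Receptive field of a stack of dilated (kernel^3) convolutions:
// each layer adds (kernel - 1) * dilation voxels per axis.
function receptiveField(dilations, kernel = 3) {
  return dilations.reduce((rf, d) => rf + (kernel - 1) * d, 1)
}
console.log(receptiveField([1, 2, 4, 8, 16, 8, 4, 2, 1])) // 93
```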
diff --git a/public/models/model11_gw_ae/colorLUT.json b/public/models/model11_gw_ae/colorLUT.json
new file mode 100644
index 0000000..6da374a
--- /dev/null
+++ b/public/models/model11_gw_ae/colorLUT.json
@@ -0,0 +1 @@
+{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"}
\ No newline at end of file
diff --git a/public/models/model11_gw_ae/group1-shard1of1.bin b/public/models/model11_gw_ae/group1-shard1of1.bin
new file mode 100644
index 0000000..4a52b2a
Binary files /dev/null and b/public/models/model11_gw_ae/group1-shard1of1.bin differ
diff --git a/public/models/model11_gw_ae/labels.json b/public/models/model11_gw_ae/labels.json
new file mode 100644
index 0000000..4885a94
--- /dev/null
+++ b/public/models/model11_gw_ae/labels.json
@@ -0,0 +1 @@
+{"0": "background", "1": "White Matter", "2": "Grey Matter"}
diff --git a/public/models/model11_gw_ae/model.json b/public/models/model11_gw_ae/model.json
new file mode 100644
index 0000000..e10ce2b
--- /dev/null
+++ b/public/models/model11_gw_ae/model.json
@@ -0,0 +1 @@
+{"_comment": "This model was train on 6000 T1 MRI images of the FreeSurfer labeled MRN data as available in MinfulTensors database Collection MRN for 10 epochs. Mongo database running on trendscn018.rs.gsu.edu . Then it was tuned on the HCP/hcp770 database for 5 epochs of HCP freesurfer data and subsequently trained for 5 epochs again on MRN collection. The final tuning was done on a single epoch of hcp770 with OneCycleLR and lr = 0.0002 ", "_model_location": "/home/users/splis/craft/meshnet/enmesh/logs11ae_gwm_after_hcp_plus1hcp/checkpoints/last.pth", "format": "layers-model", "generatedBy": "keras v2.7.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 256, 256, 256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "19", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "19", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "20", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "20", "inbound_nodes": [[["19", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["20", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["21", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], 
"groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "31", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "31", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["31", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "33", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "33", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, 
"name": "34", "inbound_nodes": [[["33", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 3, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["34", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "19/kernel", "shape": [3, 3, 3, 1, 11], "dtype": "float32"}, {"name": "19/bias", "shape": [11], "dtype": "float32"}, {"name": "21/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "21/bias", "shape": [11], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "23/bias", "shape": [11], "dtype": "float32"}, {"name": "25/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "25/bias", "shape": [11], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "27/bias", "shape": [11], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "29/bias", "shape": [11], "dtype": "float32"}, {"name": "31/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "31/bias", "shape": [11], "dtype": "float32"}, {"name": "33/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "33/bias", "shape": [11], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 11, 3], "dtype": "float32"}, {"name": "output/bias", "shape": [3], "dtype": "float32"}]}]}
diff --git a/public/models/model18cls/colorLUT.json b/public/models/model18cls/colorLUT.json
new file mode 100644
index 0000000..27d12d1
--- /dev/null
+++ b/public/models/model18cls/colorLUT.json
@@ -0,0 +1,21 @@
+{
+ "0": "rgb(0,0,0)",
+ "1": "rgb(245,245,245)",
+ "2": "rgb(205,62,78)",
+ "3": "rgb(120,18,134)",
+ "4": "rgb(196,58,250)",
+ "5": "rgb(220,248,164)",
+ "6": "rgb(230,148,34)",
+ "7": "rgb(0,118,14)",
+ "8": "rgb(122,186,220)",
+ "9": "rgb(236,13,176)",
+ "10": "rgb(12,48,255)",
+ "11": "rgb(204,182,142)",
+ "12": "rgb(42,204,164)",
+ "13": "rgb(119,159,176)",
+ "14": "rgb(220,216,20)",
+ "15": "rgb(103,255,255)",
+ "16": "rgb(255,165,0)",
+ "17": "rgb(165,42,42)"
+}
+
diff --git a/public/models/model18cls/labels.json b/public/models/model18cls/labels.json
new file mode 100644
index 0000000..d022502
--- /dev/null
+++ b/public/models/model18cls/labels.json
@@ -0,0 +1,20 @@
+{
+ "0": "Unknown",
+ "1": "Cerebral-White-Matter",
+ "2": "Cerebral-Cortex",
+ "3": "Lateral-Ventricle",
+ "4": "Inferior-Lateral-Ventricle",
+ "5": "Cerebellum-White-Matter",
+ "6": "Cerebellum-Cortex",
+ "7": "Thalamus",
+ "8": "Caudate",
+ "9": "Putamen",
+ "10": "Pallidum",
+ "11": "3rd-Ventricle",
+ "12": "4th-Ventricle",
+ "13": "Brain-Stem",
+ "14": "Hippocampus",
+ "15": "Amygdala",
+ "16": "Accumbens-area",
+ "17": "VentralDC"
+}
diff --git a/public/models/model18cls/model.bin b/public/models/model18cls/model.bin
new file mode 100644
index 0000000..085ee98
Binary files /dev/null and b/public/models/model18cls/model.bin differ
diff --git a/public/models/model18cls/model.json b/public/models/model18cls/model.json
new file mode 100644
index 0000000..79f01fe
--- /dev/null
+++ b/public/models/model18cls/model.json
@@ -0,0 +1,808 @@
+{
+ "format": "layers-model",
+ "generatedBy": "keras v2.7.0",
+ "convertedBy": "TensorFlow.js Converter v3.9.0",
+ "modelTopology": {
+ "keras_version": "2.6.0",
+ "backend": "tensorflow",
+ "model_config": {
+ "class_name": "Functional",
+ "config": {
+ "name": "model",
+ "layers": [
+ {
+ "class_name": "InputLayer",
+ "config": {
+ "batch_input_shape": [
+ null,
+ 256,
+ 256,
+ 256,
+ 1
+ ],
+ "dtype": "float32",
+ "sparse": false,
+ "ragged": false,
+ "name": "input"
+ },
+ "name": "input",
+ "inbound_nodes": []
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_0",
+ "trainable": false,
+ "filters": 21,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_0",
+ "inbound_nodes": [
+ [
+ [
+ "input",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_1",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_1",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_0",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_2",
+ "trainable": false,
+ "filters": 21,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 2,
+ 2,
+ 2
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_2",
+ "inbound_nodes": [
+ [
+ [
+ "activation_1",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_3",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_3",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_2",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_4",
+ "trainable": false,
+ "filters": 21,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 4,
+ 4,
+ 4
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_4",
+ "inbound_nodes": [
+ [
+ [
+ "activation_3",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_5",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_5",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_4",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_6",
+ "trainable": false,
+ "filters": 21,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 8,
+ 8,
+ 8
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_6",
+ "inbound_nodes": [
+ [
+ [
+ "activation_5",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_7",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_7",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_6",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_8",
+ "trainable": false,
+ "filters": 21,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 16,
+ 16,
+ 16
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_8",
+ "inbound_nodes": [
+ [
+ [
+ "activation_7",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_9",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_9",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_8",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_10",
+ "trainable": false,
+ "filters": 21,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 8,
+ 8,
+ 8
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_10",
+ "inbound_nodes": [
+ [
+ [
+ "activation_9",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_11",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_11",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_10",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_12",
+ "trainable": false,
+ "filters": 21,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 4,
+ 4,
+ 4
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_12",
+ "inbound_nodes": [
+ [
+ [
+ "activation_11",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_13",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_13",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_12",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_14",
+ "trainable": false,
+ "filters": 21,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 2,
+ 2,
+ 2
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_14",
+ "inbound_nodes": [
+ [
+ [
+ "activation_13",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_15",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_15",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_14",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_16",
+ "trainable": false,
+ "filters": 21,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_16",
+ "inbound_nodes": [
+ [
+ [
+ "activation_15",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_17",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_17",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_16",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "output",
+ "trainable": false,
+ "filters": 18,
+ "kernel_size": [
+ 1,
+ 1,
+ 1
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "output",
+ "inbound_nodes": [
+ [
+ [
+ "activation_17",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ }
+ ],
+ "input_layers": [
+ [
+ "input",
+ 0,
+ 0
+ ]
+ ],
+ "output_layers": [
+ [
+ "output",
+ 0,
+ 0
+ ]
+ ]
+ }
+ }
+ },
+ "weightsManifest": [
+ {
+ "paths": [
+ "model.bin"
+ ],
+ "weights": [
+ {
+ "name": "conv3d_0/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 1,
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_0/bias",
+ "shape": [
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_2/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 21,
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_2/bias",
+ "shape": [
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_4/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 21,
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_4/bias",
+ "shape": [
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_6/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 21,
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_6/bias",
+ "shape": [
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_8/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 21,
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_8/bias",
+ "shape": [
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_10/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 21,
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_10/bias",
+ "shape": [
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_12/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 21,
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_12/bias",
+ "shape": [
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_14/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 21,
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_14/bias",
+ "shape": [
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_16/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 21,
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_16/bias",
+ "shape": [
+ 21
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "output/kernel",
+ "shape": [
+ 1,
+ 1,
+ 1,
+ 21,
+ 18
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "output/bias",
+ "shape": [
+ 18
+ ],
+ "dtype": "float32"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
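
A model only makes sense alongside matching metadata: the output layer above declares 18 filters, and labels.json/colorLUT.json must list exactly those 18 classes. A small guard for that pairing (a hypothetical helper, not part of brainchop.js):

```javascript
// Verify that a model's output channel count matches its labels.json.
function checkClassCount(modelJson, labelsJson) {
  const layers = modelJson.modelTopology.model_config.config.layers
  const nClasses = layers.find((l) => l.name === 'output').config.filters
  const nLabels = Object.keys(labelsJson).length
  if (nClasses !== nLabels) {
    throw new Error(`model emits ${nClasses} classes but labels.json lists ${nLabels}`)
  }
  return nClasses
}
```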
diff --git a/public/models/model20chan3cls/colorLUT.json b/public/models/model20chan3cls/colorLUT.json
new file mode 100644
index 0000000..6da374a
--- /dev/null
+++ b/public/models/model20chan3cls/colorLUT.json
@@ -0,0 +1 @@
+{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"}
\ No newline at end of file
diff --git a/public/models/model20chan3cls/labels.json b/public/models/model20chan3cls/labels.json
new file mode 100644
index 0000000..4885a94
--- /dev/null
+++ b/public/models/model20chan3cls/labels.json
@@ -0,0 +1 @@
+{"0": "background", "1": "White Matter", "2": "Grey Matter"}
diff --git a/public/models/model20chan3cls/model.bin b/public/models/model20chan3cls/model.bin
new file mode 100644
index 0000000..abbfc8e
Binary files /dev/null and b/public/models/model20chan3cls/model.bin differ
diff --git a/public/models/model20chan3cls/model.json b/public/models/model20chan3cls/model.json
new file mode 100644
index 0000000..4ce0188
--- /dev/null
+++ b/public/models/model20chan3cls/model.json
@@ -0,0 +1,811 @@
+{
+ "_comment": "Normalize the data for this model with min = 5% quantile, max = 95% quantile",
+ "_model_location": "~/craft/meshnet/enmesh2/logs/tmp/curriculum_enmesh_20channels_gwm/model.last.pth",
+ "_wandb": "https://wandb.ai/neuroneural/curriculum_20_gwm",
+ "format": "layers-model",
+ "generatedBy": "keras v2.7.0",
+ "convertedBy": "TensorFlow.js Converter v3.9.0",
+ "modelTopology": {
+ "keras_version": "2.6.0",
+ "backend": "tensorflow",
+ "model_config": {
+ "class_name": "Functional",
+ "config": {
+ "name": "model",
+ "layers": [
+ {
+ "class_name": "InputLayer",
+ "config": {
+ "batch_input_shape": [
+ null,
+ 256,
+ 256,
+ 256,
+ 1
+ ],
+ "dtype": "float32",
+ "sparse": false,
+ "ragged": false,
+ "name": "input"
+ },
+ "name": "input",
+ "inbound_nodes": []
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_0",
+ "trainable": false,
+ "filters": 20,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_0",
+ "inbound_nodes": [
+ [
+ [
+ "input",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_1",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_1",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_0",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_2",
+ "trainable": false,
+ "filters": 20,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 2,
+ 2,
+ 2
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_2",
+ "inbound_nodes": [
+ [
+ [
+ "activation_1",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_3",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_3",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_2",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_4",
+ "trainable": false,
+ "filters": 20,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 4,
+ 4,
+ 4
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_4",
+ "inbound_nodes": [
+ [
+ [
+ "activation_3",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_5",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_5",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_4",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_6",
+ "trainable": false,
+ "filters": 20,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 8,
+ 8,
+ 8
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_6",
+ "inbound_nodes": [
+ [
+ [
+ "activation_5",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_7",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_7",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_6",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_8",
+ "trainable": false,
+ "filters": 20,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 16,
+ 16,
+ 16
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_8",
+ "inbound_nodes": [
+ [
+ [
+ "activation_7",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_9",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_9",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_8",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_10",
+ "trainable": false,
+ "filters": 20,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 8,
+ 8,
+ 8
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_10",
+ "inbound_nodes": [
+ [
+ [
+ "activation_9",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_11",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_11",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_10",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_12",
+ "trainable": false,
+ "filters": 20,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 4,
+ 4,
+ 4
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_12",
+ "inbound_nodes": [
+ [
+ [
+ "activation_11",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_13",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_13",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_12",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_14",
+ "trainable": false,
+ "filters": 20,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 2,
+ 2,
+ 2
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_14",
+ "inbound_nodes": [
+ [
+ [
+ "activation_13",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_15",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_15",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_14",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_16",
+ "trainable": false,
+ "filters": 20,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_16",
+ "inbound_nodes": [
+ [
+ [
+ "activation_15",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_17",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_17",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_16",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "output",
+ "trainable": false,
+ "filters": 3,
+ "kernel_size": [
+ 1,
+ 1,
+ 1
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "output",
+ "inbound_nodes": [
+ [
+ [
+ "activation_17",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ }
+ ],
+ "input_layers": [
+ [
+ "input",
+ 0,
+ 0
+ ]
+ ],
+ "output_layers": [
+ [
+ "output",
+ 0,
+ 0
+ ]
+ ]
+ }
+ }
+ },
+ "weightsManifest": [
+ {
+ "paths": [
+ "model.bin"
+ ],
+ "weights": [
+ {
+ "name": "conv3d_0/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 1,
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_0/bias",
+ "shape": [
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_2/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 20,
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_2/bias",
+ "shape": [
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_4/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 20,
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_4/bias",
+ "shape": [
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_6/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 20,
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_6/bias",
+ "shape": [
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_8/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 20,
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_8/bias",
+ "shape": [
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_10/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 20,
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_10/bias",
+ "shape": [
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_12/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 20,
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_12/bias",
+ "shape": [
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_14/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 20,
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_14/bias",
+ "shape": [
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_16/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 20,
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_16/bias",
+ "shape": [
+ 20
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "output/kernel",
+ "shape": [
+ 1,
+ 1,
+ 1,
+ 20,
+ 3
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "output/bias",
+ "shape": [
+ 3
+ ],
+ "dtype": "float32"
+ }
+ ]
+ }
+ ]
+}
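
Per the _comment at the top of this model.json, inputs should be rescaled so the 5% intensity quantile maps to 0 and the 95% quantile to 1. A direct, sort-based sketch of that normalization follows (a hypothetical helper; for full 256^3 volumes a histogram-based quantile estimate is cheaper):

```javascript
// Quantile normalization as the _comment requests: clamp-and-scale voxel
// intensities into [0, 1] between the lo and hi quantiles.
function quantileNormalize(voxels, lo = 0.05, hi = 0.95) {
  const sorted = Float32Array.from(voxels).sort() // typed-array sort is numeric
  const qLo = sorted[Math.floor(lo * (sorted.length - 1))]
  const qHi = sorted[Math.floor(hi * (sorted.length - 1))]
  const scale = qHi > qLo ? 1 / (qHi - qLo) : 1
  return voxels.map((v) => Math.min(1, Math.max(0, (v - qLo) * scale)))
}
```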
diff --git a/public/models/model21_104class/colorLUT.json b/public/models/model21_104class/colorLUT.json
new file mode 100644
index 0000000..b321ef3
--- /dev/null
+++ b/public/models/model21_104class/colorLUT.json
@@ -0,0 +1,106 @@
+{
+ "0": "rgb(0,0,0)",
+ "1": "rgb(25,100,40)",
+ "2": "rgb(125,100,160)",
+ "3": "rgb(100,25,0)",
+ "4": "rgb(220,20,100)",
+ "5": "rgb(220,20,10)",
+ "6": "rgb(180,220,140)",
+ "7": "rgb(220,60,220)",
+ "8": "rgb(180,40,120)",
+ "9": "rgb(140,20,140)",
+ "10": "rgb(20,30,140)",
+ "11": "rgb(35,75,50)",
+ "12": "rgb(225,140,140)",
+ "13": "rgb(200,35,75)",
+ "14": "rgb(160,100,50)",
+ "15": "rgb(20,220,60)",
+ "16": "rgb(60,220,60)",
+ "17": "rgb(220,180,140)",
+ "18": "rgb(20,100,50)",
+ "19": "rgb(220,60,20)",
+ "20": "rgb(120,100,60)",
+ "21": "rgb(220,20,20)",
+ "22": "rgb(220,180,220)",
+ "23": "rgb(60,20,220)",
+ "24": "rgb(160,140,180)",
+ "25": "rgb(80,20,140)",
+ "26": "rgb(75,50,125)",
+ "27": "rgb(20,220,160)",
+ "28": "rgb(20,180,140)",
+ "29": "rgb(140,220,220)",
+ "30": "rgb(80,160,20)",
+ "31": "rgb(100,0,100)",
+ "32": "rgb(70,70,70)",
+ "33": "rgb(150,150,200)",
+ "34": "rgb(255,192,32)",
+ "35": "rgb(25,100,40)",
+ "36": "rgb(125,100,160)",
+ "37": "rgb(100,25,0)",
+ "38": "rgb(220,20,100)",
+ "39": "rgb(220,20,10)",
+ "40": "rgb(180,220,140)",
+ "41": "rgb(220,60,220)",
+ "42": "rgb(180,40,120)",
+ "43": "rgb(140,20,140)",
+ "44": "rgb(20,30,140)",
+ "45": "rgb(35,75,50)",
+ "46": "rgb(225,140,140)",
+ "47": "rgb(200,35,75)",
+ "48": "rgb(160,100,50)",
+ "49": "rgb(20,220,60)",
+ "50": "rgb(60,220,60)",
+ "51": "rgb(220,180,140)",
+ "52": "rgb(20,100,50)",
+ "53": "rgb(220,60,20)",
+ "54": "rgb(120,100,60)",
+ "55": "rgb(220,20,20)",
+ "56": "rgb(220,180,220)",
+ "57": "rgb(60,20,220)",
+ "58": "rgb(160,140,180)",
+ "59": "rgb(80,20,140)",
+ "60": "rgb(75,50,125)",
+ "61": "rgb(20,220,160)",
+ "62": "rgb(20,180,140)",
+ "63": "rgb(140,220,220)",
+ "64": "rgb(80,160,20)",
+ "65": "rgb(100,0,100)",
+ "66": "rgb(70,70,70)",
+ "67": "rgb(150,150,200)",
+ "68": "rgb(255,192,32)",
+ "69": "rgb(0,118,14)",
+ "70": "rgb(0,118,14)",
+ "71": "rgb(122,186,220)",
+ "72": "rgb(122,186,220)",
+ "73": "rgb(236,13,176)",
+ "74": "rgb(236,13,176)",
+ "75": "rgb(12,48,255)",
+ "76": "rgb(13,48,255)",
+ "77": "rgb(220,216,20)",
+ "78": "rgb(220,216,20)",
+ "79": "rgb(103,255,255)",
+ "80": "rgb(103,255,255)",
+ "81": "rgb(255,165,0)",
+ "82": "rgb(255,165,0)",
+ "83": "rgb(165,42,42)",
+ "84": "rgb(165,42,42)",
+ "85": "rgb(245,245,245)",
+ "86": "rgb(245,245,245)",
+ "87": "rgb(120,18,134)",
+ "88": "rgb(196,58,250)",
+ "89": "rgb(120,18,134)",
+ "90": "rgb(196,58,250)",
+ "91": "rgb(204,182,142)",
+ "92": "rgb(42,204,164)",
+ "93": "rgb(60,60,60)",
+ "94": "rgb(119,159,176)",
+ "95": "rgb(220,248,164)",
+ "96": "rgb(220,248,164)",
+ "97": "rgb(230,148,34)",
+ "98": "rgb(230,148,34)",
+ "99": "rgb(0,0,64)",
+ "100": "rgb(0,0,112)",
+ "101": "rgb(0,0,160)",
+ "102": "rgb(0,0,208)",
+ "103": "rgb(0,0,255)"
+}
\ No newline at end of file
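
Note that this LUT deliberately reuses colors: left/right counterparts share an entry (classes 69 and 70 are both rgb(0,118,14); 1 and 35 share rgb(25,100,40)), so hemispheres are indistinguishable by color alone. A sketch that surfaces such groups:

```javascript
// Group class indices by color to find classes that render identically.
function classesByColor(colorJson) {
  const groups = new Map()
  for (const [key, rgb] of Object.entries(colorJson)) {
    if (!groups.has(rgb)) groups.set(rgb, [])
    groups.get(rgb).push(Number(key))
  }
  return [...groups].filter(([, keys]) => keys.length > 1)
}
```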
diff --git a/public/models/model21_104class/group1-shard1of1.bin b/public/models/model21_104class/group1-shard1of1.bin
new file mode 100644
index 0000000..da65df9
Binary files /dev/null and b/public/models/model21_104class/group1-shard1of1.bin differ
diff --git a/public/models/model21_104class/labels.json b/public/models/model21_104class/labels.json
new file mode 100644
index 0000000..1b69b32
--- /dev/null
+++ b/public/models/model21_104class/labels.json
@@ -0,0 +1,106 @@
+{
+ "0": "BG",
+ "1": "ctx-lh-bankssts",
+ "2": "ctx-lh-caudalanteriorcingulate",
+ "3": "ctx-lh-caudalmiddlefrontal",
+ "4": "ctx-lh-cuneus",
+ "5": "ctx-lh-entorhinal",
+ "6": "ctx-lh-fusiform",
+ "7": "ctx-lh-inferiorparietal",
+ "8": "ctx-lh-inferiortemporal",
+ "9": "ctx-lh-isthmuscingulate",
+ "10": "ctx-lh-lateraloccipital",
+ "11": "ctx-lh-lateralorbitofrontal",
+ "12": "ctx-lh-lingual",
+ "13": "ctx-lh-medialorbitofrontal",
+ "14": "ctx-lh-middletemporal",
+ "15": "ctx-lh-parahippocampal",
+ "16": "ctx-lh-paracentral",
+ "17": "ctx-lh-parsopercularis",
+ "18": "ctx-lh-parsorbitalis",
+ "19": "ctx-lh-parstriangularis",
+ "20": "ctx-lh-pericalcarine",
+ "21": "ctx-lh-postcentral",
+ "22": "ctx-lh-posteriorcingulate",
+ "23": "ctx-lh-precentral",
+ "24": "ctx-lh-precuneus",
+ "25": "ctx-lh-rostralanteriorcingulate",
+ "26": "ctx-lh-rostralmiddlefrontal",
+ "27": "ctx-lh-superiorfrontal",
+ "28": "ctx-lh-superiorparietal",
+ "29": "ctx-lh-superiortemporal",
+ "30": "ctx-lh-supramarginal",
+ "31": "ctx-lh-frontalpole",
+ "32": "ctx-lh-temporalpole",
+ "33": "ctx-lh-transversetemporal",
+ "34": "ctx-lh-insula",
+ "35": "ctx-rh-bankssts",
+ "36": "ctx-rh-caudalanteriorcingulate",
+ "37": "ctx-rh-caudalmiddlefrontal",
+ "38": "ctx-rh-cuneus",
+ "39": "ctx-rh-entorhinal",
+ "40": "ctx-rh-fusiform",
+ "41": "ctx-rh-inferiorparietal",
+ "42": "ctx-rh-inferiortemporal",
+ "43": "ctx-rh-isthmuscingulate",
+ "44": "ctx-rh-lateraloccipital",
+ "45": "ctx-rh-lateralorbitofrontal",
+ "46": "ctx-rh-lingual",
+ "47": "ctx-rh-medialorbitofrontal",
+ "48": "ctx-rh-middletemporal",
+ "49": "ctx-rh-parahippocampal",
+ "50": "ctx-rh-paracentral",
+ "51": "ctx-rh-parsopercularis",
+ "52": "ctx-rh-parsorbitalis",
+ "53": "ctx-rh-parstriangularis",
+ "54": "ctx-rh-pericalcarine",
+ "55": "ctx-rh-postcentral",
+ "56": "ctx-rh-posteriorcingulate",
+ "57": "ctx-rh-precentral",
+ "58": "ctx-rh-precuneus",
+ "59": "ctx-rh-rostralanteriorcingulate",
+ "60": "ctx-rh-rostralmiddlefrontal",
+ "61": "ctx-rh-superiorfrontal",
+ "62": "ctx-rh-superiorparietal",
+ "63": "ctx-rh-superiortemporal",
+ "64": "ctx-rh-supramarginal",
+ "65": "ctx-rh-frontalpole",
+ "66": "ctx-rh-temporalpole",
+ "67": "ctx-rh-transversetemporal",
+ "68": "ctx-rh-insula",
+ "69": "Left-Thalamus-Proper*",
+ "70": "Right-Thalamus-Proper*",
+ "71": "Left-Caudate",
+ "72": "Right-Caudate",
+ "73": "Left-Putamen",
+ "74": "Right-Putamen",
+ "75": "Left-Pallidum",
+ "76": "Right-Pallidum",
+ "77": "Left-Hippocampus",
+ "78": "Right-Hippocampus",
+ "79": "Left-Amygdala",
+ "80": "Right-Amygdala",
+ "81": "Left-Accumbens-area",
+ "82": "Right-Accumbens-area",
+ "83": "Left-VentralDC",
+ "84": "Right-VentralDC",
+ "85": "Left-Cerebral-White-Matter",
+ "86": "Right-Cerebral-White-Matter",
+ "87": "Left-Lateral-Ventricle",
+ "88": "Left-Inf-Lat-Vent",
+ "89": "Right-Lateral-Ventricle",
+ "90": "Right-Inf-Lat-Vent",
+ "91": "3rd-Ventricle",
+ "92": "4th-Ventricle",
+ "93": "CSF",
+ "94": "Brain-Stem",
+ "95": "Left-Cerebellum-White-Matter",
+ "96": "Right-Cerebellum-White-Matter",
+ "97": "Left-Cerebellum-Cortex",
+ "98": "Right-Cerebellum-Cortex",
+ "99": "CC_Posterior",
+ "100": "CC_Mid_Posterior",
+ "101": "CC_Central",
+ "102": "CC_Mid_Anterior",
+ "103": "CC_Anterior"
+}
\ No newline at end of file
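For each model, labels.json and colorLUT.json are parallel maps keyed by the same class index (for example, class "87" is "Left-Lateral-Ventricle" above and "rgb(120,18,134)" in the matching colorLUT.json). A minimal sketch of merging the two files into one per-class lookup; `buildClassLookup` is a hypothetical helper, not code from this diff:

```javascript
// Sketch: merge a model's labels.json and colorLUT.json into one lookup.
// Paths are relative to the public/ folder added in this diff.
async function buildClassLookup(modelDir) {
  const [labels, colors] = await Promise.all([
    fetch(`${modelDir}/labels.json`).then((r) => r.json()),
    fetch(`${modelDir}/colorLUT.json`).then((r) => r.json())
  ])
  return Object.keys(labels).map((key) => ({
    index: Number(key),
    label: labels[key], // e.g. "Left-Lateral-Ventricle" for "87"
    color: colors[key]  // e.g. "rgb(120,18,134)" for "87"
  }))
}
```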
diff --git a/public/models/model21_104class/model.json b/public/models/model21_104class/model.json
new file mode 100644
index 0000000..cac7c69
--- /dev/null
+++ b/public/models/model21_104class/model.json
@@ -0,0 +1 @@
+{"format": "layers-model", "generatedBy": "keras v2.6.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.6.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 256, 256, 256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "19", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "19", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "20", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "20", "inbound_nodes": [[["19", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["20", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["21", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", 
"config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "31", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "31", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["31", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "33", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "33", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["33", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 104, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, 
"kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["34", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "19/kernel", "shape": [3, 3, 3, 1, 21], "dtype": "float32"}, {"name": "19/bias", "shape": [21], "dtype": "float32"}, {"name": "21/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "21/bias", "shape": [21], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "23/bias", "shape": [21], "dtype": "float32"}, {"name": "25/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "25/bias", "shape": [21], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "27/bias", "shape": [21], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "29/bias", "shape": [21], "dtype": "float32"}, {"name": "31/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "31/bias", "shape": [21], "dtype": "float32"}, {"name": "33/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "33/bias", "shape": [21], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 21, 104], "dtype": "float32"}, {"name": "output/bias", "shape": [104], "dtype": "float32"}]}]}
\ No newline at end of file
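model.json is in the TensorFlow.js layers-model format ("format": "layers-model"), and its weightsManifest points at group1-shard1of1.bin, so the whole model loads in one call. A sketch, assuming the app serves public/ at the site root as Vite does:

```javascript
import * as tf from '@tensorflow/tfjs'

// Sketch: load the converted layers-model; tfjs fetches the weight shard
// named in weightsManifest (group1-shard1of1.bin) automatically.
const model = await tf.loadLayersModel('./models/model21_104class/model.json')
model.summary() // input [null,256,256,256,1] -> output [null,256,256,256,104]
```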
diff --git a/public/models/model21_104class/model_D95.json b/public/models/model21_104class/model_D95.json
new file mode 100644
index 0000000..669f199
--- /dev/null
+++ b/public/models/model21_104class/model_D95.json
@@ -0,0 +1 @@
+{"format": "layers-model", "generatedBy": "keras v2.6.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.6.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 95, 256, 256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "19", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "19", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "20", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "20", "inbound_nodes": [[["19", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["20", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["21", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", 
"config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "31", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "31", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["31", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "33", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "33", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["33", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 104, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, 
"kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["34", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "19/kernel", "shape": [3, 3, 3, 1, 21], "dtype": "float32"}, {"name": "19/bias", "shape": [21], "dtype": "float32"}, {"name": "21/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "21/bias", "shape": [21], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "23/bias", "shape": [21], "dtype": "float32"}, {"name": "25/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "25/bias", "shape": [21], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "27/bias", "shape": [21], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "29/bias", "shape": [21], "dtype": "float32"}, {"name": "31/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "31/bias", "shape": [21], "dtype": "float32"}, {"name": "33/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "33/bias", "shape": [21], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 21, 104], "dtype": "float32"}, {"name": "output/bias", "shape": [104], "dtype": "float32"}]}]}
\ No newline at end of file
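model_D95.json differs from model.json only in its input shape: [null, 95, 256, 256, 1] rather than a full [null, 256, 256, 256, 1] volume, so inference can run over 95-slice slabs and hold far less activation memory at once. A rough sketch of slab-wise inference under that assumption; the overlap handling here is a simplification, not this repo's logic:

```javascript
import * as tf from '@tensorflow/tfjs'

// Sketch: run the D95 variant over 95-slice slabs of a 256^3 volume.
// The final slab is aligned to the end of the volume, so slabs can overlap;
// a real implementation must reconcile the overlapping predictions.
function slabInference(model, volume /* tf.Tensor5D [1,256,256,256,1] */) {
  const sliceDepth = 95         // matches batch_input_shape in model_D95.json
  const depth = volume.shape[1] // 256
  const starts = []
  for (let z = 0; z + sliceDepth < depth; z += sliceDepth) starts.push(z)
  starts.push(depth - sliceDepth)
  return starts.map((z) =>
    tf.tidy(() => {
      const slab = volume.slice([0, z, 0, 0, 0], [1, sliceDepth, 256, 256, 1])
      return model.predict(slab).argMax(-1) // [1,95,256,256] class labels
    })
  )
}
```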
diff --git a/public/models/model21_3class/colorLUT.json b/public/models/model21_3class/colorLUT.json
new file mode 100644
index 0000000..6da374a
--- /dev/null
+++ b/public/models/model21_3class/colorLUT.json
@@ -0,0 +1 @@
+{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"}
\ No newline at end of file
diff --git a/public/models/model21_3class/group1-shard1of1.bin b/public/models/model21_3class/group1-shard1of1.bin
new file mode 100644
index 0000000..2ebba53
Binary files /dev/null and b/public/models/model21_3class/group1-shard1of1.bin differ
diff --git a/public/models/model21_3class/labels.json b/public/models/model21_3class/labels.json
new file mode 100644
index 0000000..4885a94
--- /dev/null
+++ b/public/models/model21_3class/labels.json
@@ -0,0 +1 @@
+{"0": "background", "1": "White Matter", "2": "Grey Matter"}
diff --git a/public/models/model21_3class/model.json b/public/models/model21_3class/model.json
new file mode 100644
index 0000000..4f6e028
--- /dev/null
+++ b/public/models/model21_3class/model.json
@@ -0,0 +1 @@
+{"format": "layers-model", "generatedBy": "keras v2.7.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 64, 64, 64, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "input.1", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.1", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["input.1", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.4", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.4", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["input.4", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.8", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.8", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["input.8", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.12", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.12", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", 
"inbound_nodes": [[["input.12", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.16", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.16", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["input.16", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.20", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.20", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["input.20", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.24", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.24", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["input.24", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.28", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.28", "inbound_nodes": [[["34", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "36", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "36", "inbound_nodes": [[["input.28", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.32", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, 
"bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.32", "inbound_nodes": [[["36", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "38", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "38", "inbound_nodes": [[["input.32", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 3, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["38", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "input.1/kernel", "shape": [3, 3, 3, 1, 21], "dtype": "float32"}, {"name": "input.1/bias", "shape": [21], "dtype": "float32"}, {"name": "input.12/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.12/bias", "shape": [21], "dtype": "float32"}, {"name": "input.16/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.16/bias", "shape": [21], "dtype": "float32"}, {"name": "input.20/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.20/bias", "shape": [21], "dtype": "float32"}, {"name": "input.24/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.24/bias", "shape": [21], "dtype": "float32"}, {"name": "input.28/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.28/bias", "shape": [21], "dtype": "float32"}, {"name": "input.32/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.32/bias", "shape": [21], "dtype": "float32"}, {"name": "input.4/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.4/bias", "shape": [21], "dtype": "float32"}, {"name": "input.8/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.8/bias", "shape": [21], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 21, 3], "dtype": "float32"}, {"name": "output/bias", "shape": [3], "dtype": "float32"}]}]}
\ No newline at end of file
diff --git a/public/models/model30chan18cls/colorLUT.json b/public/models/model30chan18cls/colorLUT.json
new file mode 100644
index 0000000..27d12d1
--- /dev/null
+++ b/public/models/model30chan18cls/colorLUT.json
@@ -0,0 +1,21 @@
+{
+ "0": "rgb(0,0,0)",
+ "1": "rgb(245,245,245)",
+ "2": "rgb(205,62,78)",
+ "3": "rgb(120,18,134)",
+ "4": "rgb(196,58,250)",
+ "5": "rgb(220,248,164)",
+ "6": "rgb(230,148,34)",
+ "7": "rgb(0,118,14)",
+ "8": "rgb(122,186,220)",
+ "9": "rgb(236,13,176)",
+ "10": "rgb(12,48,255)",
+ "11": "rgb(204,182,142)",
+ "12": "rgb(42,204,164)",
+ "13": "rgb(119,159,176)",
+ "14": "rgb(220,216,20)",
+ "15": "rgb(103,255,255)",
+ "16": "rgb(255,165,0)",
+ "17": "rgb(165,42,42)"
+}
+
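The colorLUT values are CSS-style "rgb(r,g,b)" strings, so they need parsing before they can feed a numeric colormap. A small sketch; the regex parser is an assumption, not code from this repo:

```javascript
// Sketch: parse a colorLUT entry such as "rgb(122,186,220)" into [r, g, b].
function parseRgb(str) {
  const m = /^rgb\((\d+),(\d+),(\d+)\)$/.exec(str.replace(/\s+/g, ''))
  if (!m) throw new Error(`unexpected colorLUT entry: ${str}`)
  return m.slice(1, 4).map(Number)
}
parseRgb('rgb(122,186,220)') // => [122, 186, 220] (class 8, Caudate, above)
```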
diff --git a/public/models/model30chan18cls/labels.json b/public/models/model30chan18cls/labels.json
new file mode 100644
index 0000000..d022502
--- /dev/null
+++ b/public/models/model30chan18cls/labels.json
@@ -0,0 +1,20 @@
+{
+ "0": "Unknown",
+ "1": "Cerebral-White-Matter",
+ "2": "Cerebral-Cortex",
+ "3": "Lateral-Ventricle",
+ "4": "Inferior-Lateral-Ventricle",
+ "5": "Cerebellum-White-Matter",
+ "6": "Cerebellum-Cortex",
+ "7": "Thalamus",
+ "8": "Caudate",
+ "9": "Putamen",
+ "10": "Pallidum",
+ "11": "3rd-Ventricle",
+ "12": "4th-Ventricle",
+ "13": "Brain-Stem",
+ "14": "Hippocampus",
+ "15": "Amygdala",
+ "16": "Accumbens-area",
+ "17": "VentralDC"
+}
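labels.json maps each argmax class index in the network output back to an anatomical name. A hedged sketch of reporting voxel counts per structure; `labelData` is assumed to be a flat typed array of class indices (e.g. from `tensor.data()`):

```javascript
// Sketch: tally voxels per class and name them via labels.json.
function voxelCounts(labelData, labels) {
  const counts = {}
  for (const v of labelData) counts[v] = (counts[v] || 0) + 1
  return Object.entries(counts).map(
    ([idx, n]) => `${labels[idx] ?? idx}: ${n} voxels`
  )
}
```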
diff --git a/public/models/model30chan18cls/model.bin b/public/models/model30chan18cls/model.bin
new file mode 100644
index 0000000..3459133
Binary files /dev/null and b/public/models/model30chan18cls/model.bin differ
diff --git a/public/models/model30chan18cls/model.json b/public/models/model30chan18cls/model.json
new file mode 100644
index 0000000..179715b
--- /dev/null
+++ b/public/models/model30chan18cls/model.json
@@ -0,0 +1,808 @@
+{
+ "format": "layers-model",
+ "generatedBy": "keras v2.7.0",
+ "convertedBy": "TensorFlow.js Converter v3.9.0",
+ "modelTopology": {
+ "keras_version": "2.6.0",
+ "backend": "tensorflow",
+ "model_config": {
+ "class_name": "Functional",
+ "config": {
+ "name": "model",
+ "layers": [
+ {
+ "class_name": "InputLayer",
+ "config": {
+ "batch_input_shape": [
+ null,
+ 256,
+ 256,
+ 256,
+ 1
+ ],
+ "dtype": "float32",
+ "sparse": false,
+ "ragged": false,
+ "name": "input"
+ },
+ "name": "input",
+ "inbound_nodes": []
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_0",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_0",
+ "inbound_nodes": [
+ [
+ [
+ "input",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_1",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_1",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_0",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_2",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 2,
+ 2,
+ 2
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_2",
+ "inbound_nodes": [
+ [
+ [
+ "activation_1",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_3",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_3",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_2",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_4",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 4,
+ 4,
+ 4
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_4",
+ "inbound_nodes": [
+ [
+ [
+ "activation_3",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_5",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_5",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_4",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_6",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 8,
+ 8,
+ 8
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_6",
+ "inbound_nodes": [
+ [
+ [
+ "activation_5",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_7",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_7",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_6",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_8",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 16,
+ 16,
+ 16
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_8",
+ "inbound_nodes": [
+ [
+ [
+ "activation_7",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_9",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_9",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_8",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_10",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 8,
+ 8,
+ 8
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_10",
+ "inbound_nodes": [
+ [
+ [
+ "activation_9",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_11",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_11",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_10",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_12",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 4,
+ 4,
+ 4
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_12",
+ "inbound_nodes": [
+ [
+ [
+ "activation_11",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_13",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_13",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_12",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_14",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 2,
+ 2,
+ 2
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_14",
+ "inbound_nodes": [
+ [
+ [
+ "activation_13",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_15",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_15",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_14",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_16",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_16",
+ "inbound_nodes": [
+ [
+ [
+ "activation_15",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_17",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_17",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_16",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "output",
+ "trainable": false,
+ "filters": 18,
+ "kernel_size": [
+ 1,
+ 1,
+ 1
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "output",
+ "inbound_nodes": [
+ [
+ [
+ "activation_17",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ }
+ ],
+ "input_layers": [
+ [
+ "input",
+ 0,
+ 0
+ ]
+ ],
+ "output_layers": [
+ [
+ "output",
+ 0,
+ 0
+ ]
+ ]
+ }
+ }
+ },
+ "weightsManifest": [
+ {
+ "paths": [
+ "model.bin"
+ ],
+ "weights": [
+ {
+ "name": "conv3d_0/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 1,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_0/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_2/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_2/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_4/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_4/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_6/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_6/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_8/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_8/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_10/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_10/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_12/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_12/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_14/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_14/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_16/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_16/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "output/kernel",
+ "shape": [
+ 1,
+ 1,
+ 1,
+ 30,
+ 18
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "output/bias",
+ "shape": [
+ 18
+ ],
+ "dtype": "float32"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
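The topology above is a MeshNet-style stack (compare the _model_location comment in the 50-class model.json below) of 3×3×3 convolutions whose dilation rates climb 1→2→4→8→16 and then fall back to 1. A 3×3×3 layer with dilation d widens the receptive field by 2d voxels per axis, so the full stack reaches 1 + 2×(1+2+4+8+16+8+4+2+1) = 93 voxels while keeping only 30 channels per layer. A quick check of that arithmetic:

```javascript
// Sketch: receptive-field arithmetic for the dilated conv3d stack above.
// Each 3x3x3 layer with dilation d adds 2*d voxels along every axis.
const dilations = [1, 2, 4, 8, 16, 8, 4, 2, 1]
const receptiveField = 1 + 2 * dilations.reduce((a, d) => a + d, 0)
console.log(receptiveField) // 93
```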
diff --git a/public/models/model30chan50cls/colorLUT.json b/public/models/model30chan50cls/colorLUT.json
new file mode 100644
index 0000000..99c0f7e
--- /dev/null
+++ b/public/models/model30chan50cls/colorLUT.json
@@ -0,0 +1,52 @@
+{
+ "0": "rgb(0,0,0)",
+ "1": "rgb(245,245,245)",
+ "2": "rgb(196,58,250)",
+ "3": "rgb(220,248,164)",
+ "4": "rgb(230,148,34)",
+ "5": "rgb(0,118,14)",
+ "6": "rgb(122,186,220)",
+ "7": "rgb(236,13,176)",
+ "8": "rgb(12,48,255)",
+ "9": "rgb(119,159,176)",
+ "10": "rgb(220,216,20)",
+ "11": "rgb(103,255,255)",
+ "12": "rgb(60,60,60)",
+ "13": "rgb(255,165,0)",
+ "14": "rgb(165,42,42)",
+ "15": "rgb(0,0,208)",
+ "16": "rgb(25,100,40)",
+ "17": "rgb(125,100,160)",
+ "18": "rgb(100,25,0)",
+ "19": "rgb(220,20,100)",
+ "20": "rgb(220,20,10)",
+ "21": "rgb(180,220,140)",
+ "22": "rgb(220,60,220)",
+ "23": "rgb(180,40,120)",
+ "24": "rgb(140,20,140)",
+ "25": "rgb(20,30,140)",
+ "26": "rgb(35,75,50)",
+ "27": "rgb(225,140,140)",
+ "28": "rgb(200,35,75)",
+ "29": "rgb(160,100,50)",
+ "30": "rgb(20,220,60)",
+ "31": "rgb(60,220,60)",
+ "32": "rgb(220,180,140)",
+ "33": "rgb(20,100,50)",
+ "34": "rgb(220,60,20)",
+ "35": "rgb(120,100,60)",
+ "36": "rgb(220,20,20)",
+ "37": "rgb(220,180,220)",
+ "38": "rgb(60,20,220)",
+ "39": "rgb(160,140,180)",
+ "40": "rgb(80,20,140)",
+ "41": "rgb(75,50,125)",
+ "42": "rgb(20,220,160)",
+ "43": "rgb(20,180,140)",
+ "44": "rgb(140,220,220)",
+ "45": "rgb(80,160,20)",
+ "46": "rgb(100,0,100)",
+ "47": "rgb(70,70,70)",
+ "48": "rgb(150,150,200)",
+ "49": "rgb(255,192,32)"
+}
\ No newline at end of file
diff --git a/public/models/model30chan50cls/labels.json b/public/models/model30chan50cls/labels.json
new file mode 100644
index 0000000..58541ce
--- /dev/null
+++ b/public/models/model30chan50cls/labels.json
@@ -0,0 +1,52 @@
+{
+ "0": "BG",
+ "1": "Cerebral-White-Matter",
+ "2": "Ventricle",
+ "3": "Cerebellum-White-Matter",
+ "4": "Cerebellum",
+ "5": "Thalamus-Proper*",
+ "6": "Caudate",
+ "7": "Putamen",
+ "8": "Pallidum",
+ "9": "Brain-Stem",
+ "10": "Hippocampus",
+ "11": "Amygdala",
+ "12": "CSF",
+ "13": "Accumbens-area",
+ "14": "VentralDC",
+ "15": "CC_Posterior / CC_Mid_Posterior / CC_Central / CC_Mid_Anterior / CC_Anterior",
+ "16": "ctx-bankssts",
+ "17": "ctx-caudalanteriorcingulate",
+ "18": "ctx-caudalmiddlefrontal",
+ "19": "ctx-cuneus",
+ "20": "ctx-entorhinal",
+ "21": "ctx-fusiform",
+ "22": "ctx-inferiorparietal",
+ "23": "ctx-inferiortemporal",
+ "24": "ctx-isthmuscingulate",
+ "25": "ctx-lateraloccipital",
+ "26": "ctx-lateralorbitofrontal",
+ "27": "ctx-lingual",
+ "28": "ctx-medialorbitofrontal",
+ "29": "ctx-middletemporal",
+ "30": "ctx-parahippocampal",
+ "31": "ctx-paracentral",
+ "32": "ctx-parsopercularis",
+ "33": "ctx-parsorbitalis",
+ "34": "ctx-parstriangularis",
+ "35": "ctx-pericalcarine",
+ "36": "ctx-postcentral",
+ "37": "ctx-posteriorcingulate",
+ "38": "ctx-precentral",
+ "39": "ctx-precuneus",
+ "40": "ctx-rostralanteriorcingulate",
+ "41": "ctx-rostralmiddlefrontal",
+ "42": "ctx-superiorfrontal",
+ "43": "ctx-superiorparietal",
+ "44": "ctx-superiortemporal",
+ "45": "ctx-supramarginal",
+ "46": "ctx-frontalpole",
+ "47": "ctx-temporalpole",
+ "48": "ctx-transversetemporal",
+ "49": "ctx-insula"
+}
\ No newline at end of file
diff --git a/public/models/model30chan50cls/model.bin b/public/models/model30chan50cls/model.bin
new file mode 100644
index 0000000..07e1df3
Binary files /dev/null and b/public/models/model30chan50cls/model.bin differ
diff --git a/public/models/model30chan50cls/model.json b/public/models/model30chan50cls/model.json
new file mode 100644
index 0000000..a49c024
--- /dev/null
+++ b/public/models/model30chan50cls/model.json
@@ -0,0 +1,811 @@
+{
+ "_comment": "Normalize the data for this model with min = 5% quantile, max = 95% quantile",
+ "_model_location": "~/craft/meshnet/enmesh2/logs/tmp/curriculum_enmesh_30channels_50/model.last.pth",
+ "_wandb": "https://wandb.ai/neuroneural/curriculum_30_50cls",
+ "format": "layers-model",
+ "generatedBy": "keras v2.7.0",
+ "convertedBy": "TensorFlow.js Converter v3.9.0",
+ "modelTopology": {
+ "keras_version": "2.6.0",
+ "backend": "tensorflow",
+ "model_config": {
+ "class_name": "Functional",
+ "config": {
+ "name": "model",
+ "layers": [
+ {
+ "class_name": "InputLayer",
+ "config": {
+ "batch_input_shape": [
+ null,
+ 256,
+ 256,
+ 256,
+ 1
+ ],
+ "dtype": "float32",
+ "sparse": false,
+ "ragged": false,
+ "name": "input"
+ },
+ "name": "input",
+ "inbound_nodes": []
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_0",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_0",
+ "inbound_nodes": [
+ [
+ [
+ "input",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_1",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_1",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_0",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_2",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 2,
+ 2,
+ 2
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_2",
+ "inbound_nodes": [
+ [
+ [
+ "activation_1",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_3",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_3",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_2",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_4",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 4,
+ 4,
+ 4
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_4",
+ "inbound_nodes": [
+ [
+ [
+ "activation_3",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_5",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_5",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_4",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_6",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 8,
+ 8,
+ 8
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_6",
+ "inbound_nodes": [
+ [
+ [
+ "activation_5",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_7",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_7",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_6",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_8",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 16,
+ 16,
+ 16
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_8",
+ "inbound_nodes": [
+ [
+ [
+ "activation_7",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_9",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_9",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_8",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_10",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 8,
+ 8,
+ 8
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_10",
+ "inbound_nodes": [
+ [
+ [
+ "activation_9",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_11",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_11",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_10",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_12",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 4,
+ 4,
+ 4
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_12",
+ "inbound_nodes": [
+ [
+ [
+ "activation_11",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_13",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_13",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_12",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_14",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 2,
+ 2,
+ 2
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_14",
+ "inbound_nodes": [
+ [
+ [
+ "activation_13",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_15",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_15",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_14",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_16",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_16",
+ "inbound_nodes": [
+ [
+ [
+ "activation_15",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_17",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_17",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_16",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "output",
+ "trainable": false,
+ "filters": 50,
+ "kernel_size": [
+ 1,
+ 1,
+ 1
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "output",
+ "inbound_nodes": [
+ [
+ [
+ "activation_17",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ }
+ ],
+ "input_layers": [
+ [
+ "input",
+ 0,
+ 0
+ ]
+ ],
+ "output_layers": [
+ [
+ "output",
+ 0,
+ 0
+ ]
+ ]
+ }
+ }
+ },
+ "weightsManifest": [
+ {
+ "paths": [
+ "model.bin"
+ ],
+ "weights": [
+ {
+ "name": "conv3d_0/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 1,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_0/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_2/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_2/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_4/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_4/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_6/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_6/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_8/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_8/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_10/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_10/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_12/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_12/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_14/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_14/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_16/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_16/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "output/kernel",
+ "shape": [
+ 1,
+ 1,
+ 1,
+ 30,
+ 50
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "output/bias",
+ "shape": [
+ 50
+ ],
+ "dtype": "float32"
+ }
+ ]
+ }
+ ]
+}
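The _comment field at the top of this model.json asks for input normalization with the 5% quantile as min and the 95% quantile as max. A sketch of that normalization; the sort-based quantile and the clipping to [0, 1] are assumptions, not this repo's implementation:

```javascript
// Sketch: min/max normalization using the 5% and 95% quantiles, per the
// _comment in model.json. Output values are clipped to [0, 1].
function quantileNormalize(data /* Float32Array */, lo = 0.05, hi = 0.95) {
  const sorted = Float32Array.from(data).sort() // TypedArray sort is numeric
  const qLo = sorted[Math.floor(lo * (sorted.length - 1))]
  const qHi = sorted[Math.floor(hi * (sorted.length - 1))]
  const scale = qHi - qLo || 1
  return data.map((v) => Math.min(1, Math.max(0, (v - qLo) / scale)))
}
```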
diff --git a/public/models/model5_gw_ae/colorLUT.json b/public/models/model5_gw_ae/colorLUT.json
new file mode 100644
index 0000000..6da374a
--- /dev/null
+++ b/public/models/model5_gw_ae/colorLUT.json
@@ -0,0 +1 @@
+{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"}
\ No newline at end of file
diff --git a/public/models/model5_gw_ae/group1-shard1of1.bin b/public/models/model5_gw_ae/group1-shard1of1.bin
new file mode 100644
index 0000000..1058e4f
Binary files /dev/null and b/public/models/model5_gw_ae/group1-shard1of1.bin differ
diff --git a/public/models/model5_gw_ae/labels.json b/public/models/model5_gw_ae/labels.json
new file mode 100644
index 0000000..4885a94
--- /dev/null
+++ b/public/models/model5_gw_ae/labels.json
@@ -0,0 +1 @@
+{"0": "background", "1": "White Matter", "2": "Grey Matter"}
diff --git a/public/models/model5_gw_ae/model.json b/public/models/model5_gw_ae/model.json
new file mode 100644
index 0000000..9399dd6
--- /dev/null
+++ b/public/models/model5_gw_ae/model.json
@@ -0,0 +1 @@
+{"format": "layers-model", "generatedBy": "keras v2.7.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 256, 256, 256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["21", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", 
"config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "31", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "31", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["31", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "33", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "33", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["33", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "35", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "35", "inbound_nodes": [[["34", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "36", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "36", "inbound_nodes": [[["35", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "37", "trainable": true, "dtype": "float32", "filters": 5, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": 
null, "bias_constraint": null}, "name": "37", "inbound_nodes": [[["36", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "38", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "38", "inbound_nodes": [[["37", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 3, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["38", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "21/kernel", "shape": [3, 3, 3, 1, 5], "dtype": "float32"}, {"name": "21/bias", "shape": [5], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "23/bias", "shape": [5], "dtype": "float32"}, {"name": "25/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "25/bias", "shape": [5], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "27/bias", "shape": [5], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "29/bias", "shape": [5], "dtype": "float32"}, {"name": "31/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "31/bias", "shape": [5], "dtype": "float32"}, {"name": "33/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "33/bias", "shape": [5], "dtype": "float32"}, {"name": "35/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "35/bias", "shape": [5], "dtype": "float32"}, {"name": "37/kernel", "shape": [3, 3, 3, 5, 5], "dtype": "float32"}, {"name": "37/bias", "shape": [5], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 5, 3], "dtype": "float32"}, {"name": "output/bias", "shape": [3], "dtype": "float32"}]}]}
\ No newline at end of file
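The topology above describes a MeshNet-style network: nine 3×3×3 Conv3D layers with five filters each, whose dilation rates expand and then contract (1, 2, 4, 8, 16, 8, 4, 2, 1) with a ReLU Activation layer after each, followed by a 1×1×1 convolution mapping to three output channels over a full 256×256×256 single-channel volume. Below is a minimal sketch of loading and applying such a layers-model with @tensorflow/tfjs; the model URL, the `segmentVolume` helper, and the argmax post-processing are illustrative assumptions, not code from this repository.

```javascript
import * as tf from '@tensorflow/tfjs'

// Hypothetical location of the model files; adjust to wherever
// model.json and group1-shard1of1.bin are actually served from.
const MODEL_URL = './models/model.json'

async function segmentVolume(voxels) {
  // voxels: Float32Array of length 256 * 256 * 256 (one single-channel volume)
  const model = await tf.loadLayersModel(MODEL_URL)
  // Shape matches the "batch_input_shape" of [null, 256, 256, 256, 1] above
  const input = tf.tensor5d(voxels, [1, 256, 256, 256, 1])
  // The final "output" layer is linear, so these are raw logits
  const logits = model.predict(input) // [1, 256, 256, 256, 3]
  // Assumed post-processing: take the most likely of the 3 classes per voxel
  const labels = tf.argMax(logits, -1)
  const out = await labels.data() // Int32Array of per-voxel class indices
  tf.dispose([input, logits, labels])
  model.dispose()
  return out
}
```

Because the "output" layer applies no softmax, argmax over the raw logits is sufficient for hard labels; a softmax would only be needed if per-class probabilities were required.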
diff --git a/public/t1_crop.nii.gz b/public/t1_crop.nii.gz
new file mode 100644
index 0000000..fc4c45c
Binary files /dev/null and b/public/t1_crop.nii.gz differ
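The bundled t1_crop.nii.gz gives the demo a default T1-weighted volume to display. As a minimal sketch of how NiiVue can load such a volume for viewing (assuming a canvas element with id "gl" is present on the page; the file path is illustrative):

```javascript
import { Niivue } from '@niivue/niivue'

// Assumes the page contains <canvas id="gl"></canvas>
const nv = new Niivue()
nv.attachTo('gl')
await nv.loadVolumes([{ url: './t1_crop.nii.gz' }])
```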