Clean train set #3682

Open · wants to merge 38 commits into base: main

Commits (38)
4408156  models (IdsAndriesse, May 12, 2024)
87d16e1  all results (IdsAndriesse, May 23, 2024)
612d8d2  pre epochs (IdsAndriesse, May 26, 2024)
af217a5  fixed epoch based (IdsAndriesse, May 27, 2024)
f3122f2  dataset generic now (IdsAndriesse, May 29, 2024)
ad7d69a  pre including all models in a generic way (IdsAndriesse, May 29, 2024)
5ae99ae  all config generic load with filtering (IdsAndriesse, May 29, 2024)
1ad013f  BFS search key made (IdsAndriesse, Jun 3, 2024)
da759cb  implemented BFS key change in apply dataset (IdsAndriesse, Jun 3, 2024)
76b4b53  fixed double entries in generate config list (IdsAndriesse, Jun 3, 2024)
a2923d2  AP using binary AP per class (IdsAndriesse, Jun 12, 2024)
23db809  fixed metric (IdsAndriesse, Jun 14, 2024)
acaa8b2  fixed dataset (IdsAndriesse, Jun 16, 2024)
93f3df7  fixed test_set (IdsAndriesse, Jun 18, 2024)
86fc0b1  test set fixed (IdsAndriesse, Jun 26, 2024)
5078702  scoredict revamp to score_list! (IdsAndriesse, Jul 12, 2024)
61ab857  selected all viable configs (IdsAndriesse, Jul 12, 2024)
e56471a  fixed custom iou (IdsAndriesse, Jul 15, 2024)
6a87f70  models selected (IdsAndriesse, Jul 18, 2024)
97d18dc  clean dir (IdsAndriesse, Aug 1, 2024)
b8739ec  results (IdsAndriesse, Aug 6, 2024)
b3daf50  models sorted (IdsAndriesse, Aug 6, 2024)
82cfad2  standardized selection (IdsAndriesse, Aug 9, 2024)
1446abf  test selection as subprocesses (IdsAndriesse, Aug 9, 2024)
31592a4  change station (IdsAndriesse, Aug 13, 2024)
1ee4863  new (IdsAndriesse, Sep 5, 2024)
04fdc2b  dataconverter (IdsAndriesse, Sep 11, 2024)
16ba0b6  arid20_cat added and tested (IdsAndriesse, Sep 18, 2024)
d0d56e0  zeroshot (IdsAndriesse, Sep 19, 2024)
795e06f  clean convert, now make conversion general (IdsAndriesse, Sep 19, 2024)
8037eed  general conversion (IdsAndriesse, Sep 19, 2024)
5de1855  fixed conversion tests (IdsAndriesse, Sep 19, 2024)
e50c944  made conversion test and experiment (IdsAndriesse, Sep 19, 2024)
f12a5b2  experiments (IdsAndriesse, Oct 14, 2024)
e19dc5d  made name maps for plotting (IdsAndriesse, Oct 14, 2024)
8f3354a  before restruct vis__pred (IdsAndriesse, Oct 27, 2024)
ab84803  before restruct vis__pred (IdsAndriesse, Oct 27, 2024)
5141b0c  acc-eff anal (IdsAndriesse, Oct 27, 2024)
Note: the diff below is too large to display in full; only the first 3000 changed files are loaded.
2 changes: 2 additions & 0 deletions .gitignore
@@ -118,3 +118,5 @@ mmseg/.mim
 
 # Pytorch
 *.pth
+
+*.png
2 changes: 1 addition & 1 deletion configs/_base_/datasets/ade20k.py
@@ -1,6 +1,6 @@
 # dataset settings
 dataset_type = 'ADE20KDataset'
-data_root = 'data/ade/ADEChallengeData2016'
+data_root = "/media/ids/Ubuntu files/data/ADEChallengeData2016/"
 crop_size = (512, 512)
 train_pipeline = [
     dict(type='LoadImageFromFile'),
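
Hard-coding an absolute `data_root` here ties the shared ADE20K base config to one machine. If the file must keep the absolute path, a minimal sketch of redirecting it at load time with mmengine's `Config` API (the relative target path is illustrative):

```python
from mmengine.config import Config

cfg = Config.fromfile('configs/_base_/datasets/ade20k.py')
# Point every dataloader at a local copy of the dataset instead of the
# absolute path baked into the file above:
for loader in (cfg.train_dataloader, cfg.val_dataloader, cfg.test_dataloader):
    loader.dataset.data_root = 'data/ade/ADEChallengeData2016'
```

The same override works for each of the dataset files added below.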
73 changes: 73 additions & 0 deletions configs/_base_/datasets/arid20_cat.py
@@ -0,0 +1,73 @@
dataset_type = "ARID20CATDataset"
data_root = "/media/ids/Ubuntu files/data/ARID20_CAT/"
crop_size = (640, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='RandomResize',
scale=(2048, 480),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PackSegInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 480), keep_ratio=True),
# add loading annotation after ``Resize`` because ground truth
# does not need to do resize data transform
dict(type='LoadAnnotations'),
dict(type='PackSegInputs')
]
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale_factor=r, keep_ratio=True)
for r in img_ratios
],
[
dict(type='RandomFlip', prob=0., direction='horizontal'),
dict(type='RandomFlip', prob=1., direction='horizontal')
], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]
])
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(
img_path='img_dir/train', seg_map_path='ann_dir/train'),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='img_dir/eval', seg_map_path='ann_dir/eval'),
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='img_dir/test', seg_map_path='ann_dir/test'),
pipeline=test_pipeline))

val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
test_evaluator = val_evaluator
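
For context, base dataset files like `arid20_cat.py` are consumed through mmengine's `_base_` inheritance rather than run directly. A minimal sketch of a full experiment config, assuming the stock mmsegmentation model and schedule bases (the file path and name are hypothetical):

```python
# configs/pspnet/pspnet_r50-d8_arid20_cat-640x480.py (hypothetical path)
_base_ = [
    '../_base_/models/pspnet_r50-d8.py',   # stock model definition
    '../_base_/datasets/arid20_cat.py',    # the dataset file added above
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py',
]
crop_size = (640, 480)  # keep the preprocessor in step with the dataset crop
data_preprocessor = dict(size=crop_size)
model = dict(data_preprocessor=data_preprocessor)
```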
73 changes: 73 additions & 0 deletions configs/_base_/datasets/hots_v1_640x480.py
@@ -0,0 +1,73 @@
dataset_type = "HOTSDataset"
data_root = "/media/ids/Ubuntu files/data/HOTS_v1/SemanticSegmentation/"
crop_size = (640, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='RandomResize',
scale=(2048, 480),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PackSegInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 480), keep_ratio=True),
# add loading annotation after ``Resize`` because ground truth
# does not need to do resize data transform
dict(type='LoadAnnotations'),
dict(type='PackSegInputs')
]
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale_factor=r, keep_ratio=True)
for r in img_ratios
],
[
dict(type='RandomFlip', prob=0., direction='horizontal'),
dict(type='RandomFlip', prob=1., direction='horizontal')
], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]
])
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(
img_path='img_dir/train', seg_map_path='ann_dir/train'),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='img_dir/eval', seg_map_path='ann_dir/eval'),
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='img_dir/test', seg_map_path='ann_dir/test'),
pipeline=test_pipeline))

val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
test_evaluator = val_evaluator
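
One note on the `tta_pipeline` shared by these files: `TestTimeAug` fans each test image out over every (scale, flip) combination and merges the predictions. A quick sketch of what that costs per image:

```python
from itertools import product

img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
flip_probs = [0., 1.]  # never flipped / always flipped
# TestTimeAug runs the model once per combination:
views = list(product(img_ratios, flip_probs))
print(len(views))  # 12 forward passes per test image
```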
73 changes: 73 additions & 0 deletions configs/_base_/datasets/hots_v1_cat.py
@@ -0,0 +1,73 @@
dataset_type = "HOTSCATDataset"
data_root = "/media/ids/Ubuntu files/data/HOTS_v1_cat/SemanticSegmentation/"
crop_size = (640, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='RandomResize',
scale=(2048, 480),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PackSegInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 480), keep_ratio=True),
# add loading annotation after ``Resize`` because ground truth
# does not need to do resize data transform
dict(type='LoadAnnotations'),
dict(type='PackSegInputs')
]
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale_factor=r, keep_ratio=True)
for r in img_ratios
],
[
dict(type='RandomFlip', prob=0., direction='horizontal'),
dict(type='RandomFlip', prob=1., direction='horizontal')
], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]
])
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(
img_path='img_dir/train', seg_map_path='ann_dir/train'),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='img_dir/eval', seg_map_path='ann_dir/eval'),
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='img_dir/test', seg_map_path='ann_dir/test'),
pipeline=test_pipeline))

val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
test_evaluator = val_evaluator
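
`HOTSCATDataset` (like the other `dataset_type` names above) has to exist in mmseg's dataset registry for these configs to resolve. The class itself is not part of this excerpt; a minimal sketch of what such a registration typically looks like in mmseg 1.x, with placeholder class names and palette:

```python
from mmseg.datasets import BaseSegDataset
from mmseg.registry import DATASETS


@DATASETS.register_module()
class HOTSCATDataset(BaseSegDataset):
    """Category-level HOTS split (class names below are placeholders)."""
    METAINFO = dict(
        classes=('background', 'food', 'tool'),         # placeholder labels
        palette=[[0, 0, 0], [0, 255, 0], [255, 0, 0]])  # one RGB per class

    def __init__(self, **kwargs) -> None:
        super().__init__(img_suffix='.png', seg_map_suffix='.png', **kwargs)
```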
73 changes: 73 additions & 0 deletions configs/_base_/datasets/irl_vision_sim_512x512.py
@@ -0,0 +1,73 @@
dataset_type = "IRLVisionSimDataset"
data_root = "/media/ids/Ubuntu files/data/irl_vision_sim/SemanticSegmentation/"
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='RandomResize',
scale=(2048, 512),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PackSegInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 512), keep_ratio=True),
# add loading annotation after ``Resize`` because ground truth
# does not need to do resize data transform
dict(type='LoadAnnotations'),
dict(type='PackSegInputs')
]
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale_factor=r, keep_ratio=True)
for r in img_ratios
],
[
dict(type='RandomFlip', prob=0., direction='horizontal'),
dict(type='RandomFlip', prob=1., direction='horizontal')
], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]
])
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(
img_path='img_dir/train', seg_map_path='ann_dir/train'),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='img_dir/eval', seg_map_path='ann_dir/eval'),
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='img_dir/test', seg_map_path='ann_dir/test'),
pipeline=test_pipeline))

val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
test_evaluator = val_evaluator
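
All of these dataset files end with the same `IoUMetric` evaluator. As a reference for what `iou_metrics=['mIoU']` reports, a plain-NumPy sketch of per-class IoU (an illustration, not the library implementation):

```python
import numpy as np


def per_class_iou(pred: np.ndarray, gt: np.ndarray,
                  num_classes: int) -> np.ndarray:
    """Intersection-over-union per class for one label map pair."""
    ious = np.full(num_classes, np.nan)
    for c in range(num_classes):
        inter = np.logical_and(pred == c, gt == c).sum()
        union = np.logical_or(pred == c, gt == c).sum()
        if union > 0:
            ious[c] = inter / union
    return ious  # mIoU = np.nanmean(ious), skipping absent classes
```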
73 changes: 73 additions & 0 deletions configs/_base_/datasets/irl_vision_sim_640x480.py
@@ -0,0 +1,73 @@
dataset_type = "IRLVisionSimDataset"
data_root = "/media/ids/Ubuntu files/data/irl_vision_sim/SemanticSegmentation/"
crop_size = (640, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='RandomResize',
scale=(2048, 480),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PackSegInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 480), keep_ratio=True),
# add loading annotation after ``Resize`` because ground truth
# does not need to do resize data transform
dict(type='LoadAnnotations'),
dict(type='PackSegInputs')
]
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
tta_pipeline = [
dict(type='LoadImageFromFile', backend_args=None),
dict(
type='TestTimeAug',
transforms=[
[
dict(type='Resize', scale_factor=r, keep_ratio=True)
for r in img_ratios
],
[
dict(type='RandomFlip', prob=0., direction='horizontal'),
dict(type='RandomFlip', prob=1., direction='horizontal')
], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]
])
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='InfiniteSampler', shuffle=True),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(
img_path='img_dir/train', seg_map_path='ann_dir/train'),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='img_dir/eval', seg_map_path='ann_dir/eval'),
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='img_dir/test', seg_map_path='ann_dir/test'),
pipeline=test_pipeline))

val_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])
test_evaluator = val_evaluator
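
Finally, a minimal sketch of launching training and testing against one of these dataset files through mmengine's `Runner`; the experiment config path here is hypothetical and assumed to inherit the dataset file via `_base_`:

```python
from mmengine.config import Config
from mmengine.runner import Runner

# Hypothetical experiment config built on one of the dataset files above:
cfg = Config.fromfile('configs/pspnet/pspnet_r50-d8_irl_vision_sim-640x480.py')
cfg.work_dir = 'work_dirs/irl_vision_sim'  # Runner requires a work_dir

runner = Runner.from_cfg(cfg)
runner.train()
runner.test()  # evaluates test_dataloader with the IoUMetric (mIoU) above
```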