Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Docker + smaller ConvNeXt-V2 models #88

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Editor/IDE and Python cache artifacts — never needed in the image.
*.swp
**/__pycache__/**
.idea/*
# Checkpoints and run logs.
ckpt/
*.pth
*.log
*.txt
# Re-include the requirements file: the blanket *.txt rule above would
# otherwise drop it from the build context and break the Dockerfile's
# `COPY /pretrain/requirements.txt /` step.
!pretrain/requirements.txt
.dockerignore
12 changes: 12 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Base image pins torch 1.10.0 + CUDA 11.3 + cuDNN 8 (devel variant so
# custom CUDA ops can be compiled inside the container).
FROM pytorch/pytorch:1.10.0-cuda11.3-cudnn8-devel

WORKDIR /

# Install dependencies first so this layer is cached independently of
# source changes. timm==0.5.4 is already pinned inside
# pretrain/requirements.txt (a separate `pip install timm==0.5.4` layer
# would be redundant).
COPY /pretrain/requirements.txt /
RUN pip install --no-cache-dir -r requirements.txt

# Copy the repository itself last to maximize layer-cache hits.
COPY . .

CMD ["bash"]
5 changes: 5 additions & 0 deletions downstream_imagenet/arg.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,11 @@

HP_DEFAULT_NAMES = ['bs', 'ep', 'wp_ep', 'opt', 'base_lr', 'lr_scale', 'wd', 'mixup', 'rep_aug', 'drop_path', 'ema']
HP_DEFAULT_VALUES = {
'convnext_atto': (1024, 400, 20, 'adam', 0.0002, 0.7, 0.01, 0.8, 3, 0.3, 0.9999),
'convnext_femto': (1024, 400, 20, 'adam', 0.0002, 0.7, 0.01, 0.8, 3, 0.3, 0.9999),
'convnext_pico': (512, 400, 20, 'adam', 0.0002, 0.7, 0.01, 0.8, 3, 0.3, 0.9999),
'convnext_nano': (512, 400, 20, 'adam', 0.0002, 0.7, 0.01, 0.8, 3, 0.3, 0.9999),
'convnext_tiny': (256, 400, 20, 'adam', 0.0002, 0.7, 0.01, 0.8, 3, 0.3, 0.9999),
'convnext_small': (4096, 400, 20, 'adam', 0.0002, 0.7, 0.01, 0.8, 3, 0.3, 0.9999),
'convnext_base': (4096, 400, 20, 'adam', 0.0001, 0.7, 0.01, 0.8, 3, 0.4, 0.9999),
'convnext_large': (4096, 200, 10, 'adam', 0.0001, 0.7, 0.02, 0.8, 3, 0.5, 0.9999),
Expand Down
36 changes: 36 additions & 0 deletions downstream_imagenet/models/convnext_official.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,6 +154,42 @@ def forward(self, x):
"convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
}

@register_model
def convnext_atto(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Atto: depths (2, 2, 6, 2), dims (40, 80, 160, 320).

    NOTE(review): assumes model_urls contains 'convnext_atto_1k' /
    'convnext_atto_22k' entries -- confirm, otherwise pretrained=True
    raises KeyError before any download happens.
    """
    net = ConvNeXt(depths=[2, 2, 6, 2], dims=[40, 80, 160, 320], **kwargs)
    if not pretrained:
        return net
    key = 'convnext_atto_22k' if in_22k else 'convnext_atto_1k'
    state = torch.hub.load_state_dict_from_url(
        url=model_urls[key], map_location="cpu", check_hash=True)
    net.load_state_dict(state["model"])
    return net

@register_model
def convnext_femto(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Femto: depths (2, 2, 6, 2), dims (48, 96, 192, 384).

    NOTE(review): assumes model_urls contains 'convnext_femto_1k' /
    'convnext_femto_22k' entries -- confirm before relying on pretrained=True.
    """
    net = ConvNeXt(depths=[2, 2, 6, 2], dims=[48, 96, 192, 384], **kwargs)
    if not pretrained:
        return net
    key = 'convnext_femto_22k' if in_22k else 'convnext_femto_1k'
    state = torch.hub.load_state_dict_from_url(
        url=model_urls[key], map_location="cpu", check_hash=True)
    net.load_state_dict(state["model"])
    return net

@register_model
def convnext_pico(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Pico: depths (2, 2, 6, 2), dims (64, 128, 256, 512).

    NOTE(review): assumes model_urls contains 'convnext_pico_1k' /
    'convnext_pico_22k' entries -- confirm before relying on pretrained=True.
    """
    net = ConvNeXt(depths=[2, 2, 6, 2], dims=[64, 128, 256, 512], **kwargs)
    if not pretrained:
        return net
    key = 'convnext_pico_22k' if in_22k else 'convnext_pico_1k'
    state = torch.hub.load_state_dict_from_url(
        url=model_urls[key], map_location="cpu", check_hash=True)
    net.load_state_dict(state["model"])
    return net

@register_model
def convnext_nano(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Nano: depths (2, 2, 8, 2), dims (80, 160, 320, 640).

    NOTE(review): assumes model_urls contains 'convnext_nano_1k' /
    'convnext_nano_22k' entries -- confirm before relying on pretrained=True.
    """
    net = ConvNeXt(depths=[2, 2, 8, 2], dims=[80, 160, 320, 640], **kwargs)
    if not pretrained:
        return net
    key = 'convnext_nano_22k' if in_22k else 'convnext_nano_1k'
    state = torch.hub.load_state_dict_from_url(
        url=model_urls[key], map_location="cpu", check_hash=True)
    net.load_state_dict(state["model"])
    return net

@register_model
def convnext_tiny(pretrained=False,in_22k=False, **kwargs):
model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
Expand Down
21 changes: 19 additions & 2 deletions pretrain/models/convnext.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,12 +99,29 @@ def get_classifier(self):
def extra_repr(self):
return f'drop_path_rate={self.drop_path_rate}, layer_scale_init_value={self.layer_scale_init_value:g}'

@register_model
def convnext_atto(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Atto backbone (depths 2-2-6-2, dims 40-80-160-320).

    `pretrained` and `in_22k` are accepted only so the signature matches the
    other registry entries; no checkpoint loading happens here.
    """
    return ConvNeXt(depths=[2, 2, 6, 2], dims=[40, 80, 160, 320], **kwargs)

@register_model
def convnext_femto(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Femto backbone (depths 2-2-6-2, dims 48-96-192-384).

    `pretrained` and `in_22k` are accepted only for signature compatibility
    with the registry; no checkpoint loading happens here.
    """
    # NOTE(review): reconstructed from diff residue -- the scraped span
    # interleaved deleted lines of the old convnext_tiny with this new def.
    model = ConvNeXt(depths=[2, 2, 6, 2], dims=[48, 96, 192, 384], **kwargs)
    return model

@register_model
def convnext_pico(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Pico backbone (depths 2-2-6-2, dims 64-128-256-512).

    `pretrained` and `in_22k` are accepted only for signature compatibility
    with the registry; no checkpoint loading happens here.
    """
    model = ConvNeXt(depths=[2, 2, 6, 2], dims=[64, 128, 256, 512], **kwargs)
    # Bug fix: the original fell off the end without `return`, so the
    # registered factory returned None instead of the model.
    return model

@register_model
def convnext_nano(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Nano backbone (depths 2-2-8-2, dims 80-160-320-640).

    `pretrained` and `in_22k` are accepted only so the signature matches the
    other registry entries; no checkpoint loading happens here.
    """
    return ConvNeXt(depths=[2, 2, 8, 2], dims=[80, 160, 320, 640], **kwargs)

@register_model
def convnext_tiny(pretrained=False, in_22k=False, **kwargs):
    """ConvNeXt-Tiny backbone (depths 3-3-9-3, dims 96-192-384-768).

    `pretrained` and `in_22k` are accepted only so the signature matches the
    other registry entries; no checkpoint loading happens here.
    """
    return ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)

@register_model
def convnext_small(pretrained=False, in_22k=False, **kwargs):
Expand Down
1 change: 1 addition & 0 deletions pretrain/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,4 @@ Pillow
typed-argument-parser
timm==0.5.4
tensorboardx
tensorboard