Update memory engagement
anwai98 committed Dec 22, 2024
1 parent e469aad commit be3bdb6
Showing 2 changed files with 17 additions and 7 deletions.
14 changes: 11 additions & 3 deletions finetuning/livecell_finetuning.py
@@ -57,9 +57,17 @@ def finetune_livecell(args):
     scheduler_kwargs = {"mode": "min", "factor": 0.9, "patience": 10, "verbose": True}
 
     # NOTE: memory req. for all vit_b models (compared on A100 80GB)
-    # QLoRA: ~60GB
-    # LoRA: ~59GB
-    # FFT: ~63GB
+    # vit_b
+    # freeze_encoder: ~ GB
+    # QLoRA: ~48.54 GB
+    # LoRA: ~48.62 GB
+    # FFT: ~49.56 GB
+
+    # vit_h
+    # freeze_encoder: ~36.05 GB
+    # QLoRA: ~65.68 GB
+    # LoRA: ~67.14 GB
+    # FFT: ~72.34 GB
 
     # Run training.
     sam_training.train_sam(
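
The figures above compare peak GPU memory of the fine-tuning modes on an A100. Below is a minimal sketch of how such a peak could be recorded with PyTorch; the helper name report_peak_gpu_memory and the idea of wrapping the training call are assumptions for illustration, not the repository's actual benchmarking setup (the reported numbers may equally have been read from nvidia-smi).

# Minimal sketch (assumption: not the repository's benchmarking code) of how the
# peak GPU memory of a fine-tuning run could be recorded on a CUDA device.
import torch


def report_peak_gpu_memory(run_finetuning, *args, **kwargs):
    """Run a training function and print the peak allocated GPU memory in GB."""
    torch.cuda.reset_peak_memory_stats()
    run_finetuning(*args, **kwargs)  # e.g. finetune_livecell(args)
    peak_gb = torch.cuda.max_memory_allocated() / 1e9
    print(f"Peak GPU memory: ~{peak_gb:.2f} GB")
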
10 changes: 6 additions & 4 deletions micro_sam/models/peft_sam.py
@@ -8,8 +8,9 @@
 
 try:
     import bitsandbytes as bnb
-except Exception:
-    bnb = None
+    _have_bnb = True
+except ImportError:
+    _have_bnb = False
 
 
 class LoRASurgery(nn.Module):
@@ -330,8 +331,9 @@ def __init__(
         # Whether to quantize the linear layers to 4 bit precision.
         # NOTE: This is currently supported for CUDA-supported devices only.
         if quantize:
-            assert bnb is not None, "Please install 'bitsandbytes'."
-            import bitsandbytes as bnb
+            if not _have_bnb:
+                raise ModuleNotFoundError("Please install 'bitsandbytes'.")
+
             for name, module in model.image_encoder.named_modules():
                 if isinstance(module, torch.nn.Linear):
                     *parent_path, layer_name = name.split(".")
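
The hunk above ends just as the loop over model.image_encoder.named_modules() begins. As a rough illustration of what a 4-bit replacement of the encoder's linear layers can look like with bitsandbytes (a sketch under the assumption that each torch.nn.Linear is swapped for a bnb.nn.Linear4bit; this is not necessarily the exact code that follows in peft_sam.py):

import torch
import bitsandbytes as bnb  # only needed when quantize=True


def _quantize_linear_layers(image_encoder: torch.nn.Module) -> None:
    """Sketch: replace every torch.nn.Linear in the encoder with a 4-bit layer."""
    for name, module in image_encoder.named_modules():
        if not isinstance(module, torch.nn.Linear):
            continue
        *parent_path, layer_name = name.split(".")
        # Walk down to the module that directly owns this linear layer.
        parent = image_encoder
        for attr in parent_path:
            parent = getattr(parent, attr)
        # Build the 4-bit replacement with matching shape and bias.
        quantized = bnb.nn.Linear4bit(
            module.in_features, module.out_features, bias=module.bias is not None,
        )
        # Reuse the original parameters; bitsandbytes quantizes the weight
        # once the model is moved to a CUDA device.
        quantized.weight = bnb.nn.Params4bit(data=module.weight, requires_grad=False)
        if module.bias is not None:
            quantized.bias = torch.nn.Parameter(module.bias)
        setattr(parent, layer_name, quantized)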
