fixes for code (#206)
* updated .gitignore

* removed unused GELU import

* fixed model_configs keys; ensured all tensors are placed on the same device

* removed unused tiktoken

* update

* updated hparam search

* removed redundant tokenizer argument

---------

Co-authored-by: rasbt <[email protected]>
d-kleine and rasbt authored Jun 12, 2024
1 parent 1a65020 commit dcbdc1d
Showing 12 changed files with 33 additions and 46 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -20,6 +20,7 @@ ch07/01_main-chapter-code/loss-plot.pdf

# Checkpoint files
appendix-A/01_main-chapter-code/model.pth

appendix-E/01_main-chapter-code/gpt2

ch05/01_main-chapter-code/gpt2/
@@ -33,6 +34,7 @@ ch06/02_bonus_additional-experiments/gpt2
ch06/03_bonus_imdb-classification/gpt2

ch07/01_main-chapter-code/gpt2-medium355M-sft.pth
ch07/01_main-chapter-code/gpt2/

# Datasets
appendix-E/01_main-chapter-code/sms_spam_collection.zip
3 changes: 1 addition & 2 deletions appendix-E/01_main-chapter-code/appendix-E.ipynb
@@ -1370,7 +1370,6 @@
"train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(\n",
" model, train_loader, val_loader, optimizer, device,\n",
" num_epochs=num_epochs, eval_freq=50, eval_iter=5,\n",
" tokenizer=tokenizer\n",
")\n",
"\n",
"end_time = time.time()\n",
@@ -1495,7 +1494,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
"version": "3.11.4"
}
},
"nbformat": 4,
2 changes: 1 addition & 1 deletion appendix-E/01_main-chapter-code/previous_chapters.py
@@ -484,7 +484,7 @@ def calc_loss_batch(input_batch, target_batch, model, device):

# Overall the same as `train_model_simple` in chapter 5
def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
eval_freq, eval_iter, tokenizer):
eval_freq, eval_iter):
# Initialize lists to track losses and tokens seen
train_losses, val_losses, train_accs, val_accs = [], [], [], []
examples_seen, global_step = 0, -1
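The tokenizer parameter removed above was never used inside train_classifier_simple, so both the function signature and every call site drop it. Below is a minimal sketch of the updated call; the model, data loaders, and hyperparameter values are illustrative placeholders rather than the book's exact objects.

import time

import torch


def run_classifier_training(model, train_loader, val_loader, device, train_classifier_simple):
    # Hypothetical wrapper: train_classifier_simple is the repo's function, passed in
    # here so the sketch stays self-contained.
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, weight_decay=0.1)

    start_time = time.time()
    # No tokenizer keyword anymore; passing tokenizer=... would now raise a TypeError.
    results = train_classifier_simple(
        model, train_loader, val_loader, optimizer, device,
        num_epochs=5, eval_freq=50, eval_iter=5,
    )
    print(f"Training completed in {(time.time() - start_time) / 60:.2f} minutes.")
    return results  # (train_losses, val_losses, train_accs, val_accs, examples_seen)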
8 changes: 4 additions & 4 deletions ch04/01_main-chapter-code/exercise-solutions.ipynb
@@ -262,7 +262,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 6,
"id": "5fee2cf5-61c3-4167-81b5-44ea155bbaf2",
"metadata": {},
"outputs": [],
@@ -282,13 +282,13 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 7,
"id": "5aa1b0c1-d78a-48fc-ad08-4802458b43f7",
"metadata": {},
"outputs": [],
"source": [
"import torch.nn as nn\n",
"from gpt import MultiHeadAttention, LayerNorm, GELU, FeedForward\n",
"from gpt import MultiHeadAttention, LayerNorm, FeedForward\n",
"\n",
"\n",
"class TransformerBlock(nn.Module):\n",
@@ -351,7 +351,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 8,
"id": "1d013d32-c275-4f42-be21-9010f1537227",
"metadata": {},
"outputs": [],
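The GELU import removed above was dead code in the exercise notebook: the activation is instantiated inside FeedForward, so a block that composes normalization and the feed-forward module never references GELU directly. The following is a simplified, self-contained sketch of that layering (toy classes, not the book's exact TransformerBlock).

import torch
import torch.nn as nn


class FeedForward(nn.Module):
    # The GELU activation lives inside the feed-forward module.
    def __init__(self, emb_dim):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(emb_dim, 4 * emb_dim),
            nn.GELU(),                        # the only place GELU is referenced
            nn.Linear(4 * emb_dim, emb_dim),
        )

    def forward(self, x):
        return self.layers(x)


class TransformerBlock(nn.Module):
    # Composes norm + feed-forward, so it needs FeedForward but never GELU itself.
    def __init__(self, emb_dim):
        super().__init__()
        self.norm = nn.LayerNorm(emb_dim)
        self.ff = FeedForward(emb_dim)

    def forward(self, x):
        return x + self.ff(self.norm(x))


block = TransformerBlock(emb_dim=16)
print(block(torch.randn(2, 4, 16)).shape)  # torch.Size([2, 4, 16])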
4 changes: 1 addition & 3 deletions ch04/02_performance-analysis/flops-analysis.ipynb
@@ -62,12 +62,10 @@
"from importlib.metadata import version\n",
"\n",
"import matplotlib\n",
"import tiktoken\n",
"import torch\n",
"\n",
"print(\"thop version:\", version(\"thop\"))\n",
"print(\"torch version:\", version(\"torch\"))\n",
"print(\"tiktoken version:\", version(\"tiktoken\"))"
"print(\"torch version:\", version(\"torch\"))"
]
},
{
@@ -65,9 +65,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
"numpy version: 1.25.2\n",
"torch version: 2.2.1\n",
"transformers version: 4.33.2\n"
"numpy version: 1.24.3\n",
"torch version: 2.3.0\n",
"transformers version: 4.41.2\n"
]
}
],
@@ -85,16 +85,6 @@
"id": "ffc17d7d-bcd8-42ee-82a9-04fd55acf15d",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
" torch.utils._pytree._register_pytree_node(\n",
"/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
" torch.utils._pytree._register_pytree_node(\n"
]
},
{
"data": {
"text/plain": [
@@ -162,10 +152,10 @@
"}\n",
"\n",
"model_configs = {\n",
" \"gpt2-small\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
" \"gpt2-medium\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
" \"gpt2-large\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
" \"gpt2-xl\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
" \"gpt2-small (124M)\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
" \"gpt2-medium (355M)\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
" \"gpt2-large (774M)\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
" \"gpt2-xl (1558M)\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
"}\n",
"\n",
"\n",
@@ -242,7 +232,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/var/folders/jg/tpqyh1fd5js5wsr1d138k3n40000gn/T/ipykernel_32618/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
"/tmp/ipykernel_9385/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
" return torch.nn.Parameter(torch.tensor(right))\n"
]
}
@@ -255,13 +245,12 @@
"gpt = GPTModel(BASE_CONFIG)\n",
"\n",
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
"load_weights(gpt, gpt_hf)\n",
"gpt.to(device);"
"load_weights(gpt, gpt_hf)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "4ddd0d51-3ade-4890-9bab-d63f141d095f",
"metadata": {},
"outputs": [
@@ -285,8 +274,8 @@
"tokenizer = tiktoken.get_encoding(\"gpt2\")\n",
"\n",
"token_ids = generate(\n",
" model=gpt,\n",
" idx=text_to_token_ids(\"Every effort moves\", tokenizer),\n",
" model=gpt.to(device),\n",
" idx=text_to_token_ids(\"Every effort moves\", tokenizer).to(device),\n",
" max_new_tokens=30,\n",
" context_size=BASE_CONFIG[\"context_length\"],\n",
" top_k=1,\n",
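Two fixes land in this notebook: the model_configs keys now include the parameter counts, so a CHOOSE_MODEL string such as "gpt2-small (124M)" resolves to a config, and the model plus the prompt token IDs are both moved to device at the generate call. Below is a toy sketch of the same pattern, using an embedding layer as a stand-in for the notebook's GPTModel; the BASE_CONFIG values shown are assumptions.

import torch

model_configs = {
    "gpt2-small (124M)":  {"emb_dim": 768,  "n_layers": 12, "n_heads": 12},
    "gpt2-medium (355M)": {"emb_dim": 1024, "n_layers": 24, "n_heads": 16},
    "gpt2-large (774M)":  {"emb_dim": 1280, "n_layers": 36, "n_heads": 20},
    "gpt2-xl (1558M)":    {"emb_dim": 1600, "n_layers": 48, "n_heads": 25},
}

CHOOSE_MODEL = "gpt2-small (124M)"                            # must match a key exactly
BASE_CONFIG = {"vocab_size": 50257, "context_length": 1024}   # assumed values
BASE_CONFIG.update(model_configs[CHOOSE_MODEL])

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Stand-in for GPTModel: weights and inputs must end up on the same device,
# otherwise the forward pass raises a cross-device RuntimeError on GPU machines.
toy_model = torch.nn.Embedding(BASE_CONFIG["vocab_size"], BASE_CONFIG["emb_dim"]).to(device)
token_ids = torch.tensor([[15496, 11, 995]]).to(device)       # stand-in for text_to_token_ids(...)
print(toy_model(token_ids).shape)                             # torch.Size([1, 3, 768])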
4 changes: 2 additions & 2 deletions ch05/05_bonus_hparam_tuning/hparam_search.py
@@ -53,8 +53,8 @@ def calc_loss_batch(input_batch, target_batch, model, device):
def evaluate_model(model, train_loader, val_loader, device, eval_iter):
model.eval()
with torch.no_grad():
train_loss = calc_loss_loader(train_loader, model, device, num_iters=eval_iter)
val_loss = calc_loss_loader(val_loader, model, device, num_iters=eval_iter)
train_loss = calc_loss_loader(train_loader, model, device, num_batches=eval_iter)
val_loss = calc_loss_loader(val_loader, model, device, num_batches=eval_iter)
model.train()
return train_loss, val_loss

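The evaluation helper now passes num_batches, the keyword that calc_loss_loader actually accepts; the old num_iters keyword would fail with a TypeError. Below is a simplified sketch of the loader-side signature that the fixed call assumes (the real implementation lives in the chapter code).

import torch


def calc_loss_loader(data_loader, model, device, num_batches=None):
    # Averages the per-batch loss over at most num_batches batches;
    # this keyword must match what evaluate_model passes.
    total_loss = 0.0
    if len(data_loader) == 0:
        return float("nan")
    if num_batches is None:
        num_batches = len(data_loader)
    else:
        num_batches = min(num_batches, len(data_loader))
    for i, (input_batch, target_batch) in enumerate(data_loader):
        if i >= num_batches:
            break
        input_batch = input_batch.to(device)
        target_batch = target_batch.to(device)
        logits = model(input_batch)
        loss = torch.nn.functional.cross_entropy(
            logits.flatten(0, 1), target_batch.flatten()
        )
        total_loss += loss.item()
    return total_loss / num_batches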
4 changes: 2 additions & 2 deletions ch05/05_bonus_hparam_tuning/previous_chapters.py
@@ -40,12 +40,12 @@ def __getitem__(self, idx):


def create_dataloader_v1(txt, batch_size=4, max_length=256,
stride=128, shuffle=True, drop_last=True):
stride=128, shuffle=True, drop_last=True, num_workers=0):
# Initialize the tokenizer
tokenizer = tiktoken.get_encoding("gpt2")

# Create dataset
dataset = GPTDatasetV1(txt, tokenizer, max_length, stride, num_workers=0)
dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)

# Create dataloader
dataloader = DataLoader(
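The dataloader fix moves num_workers to where it belongs: GPTDatasetV1 never accepted that argument, so it becomes a parameter of create_dataloader_v1 and is forwarded to DataLoader instead. Below is a condensed sketch of the corrected wiring, abbreviated from the chapter code (details approximate).

import tiktoken
import torch
from torch.utils.data import Dataset, DataLoader


class GPTDatasetV1(Dataset):
    # Minimal version: the dataset itself takes no num_workers argument.
    def __init__(self, txt, tokenizer, max_length, stride):
        token_ids = tokenizer.encode(txt)
        self.input_ids, self.target_ids = [], []
        for i in range(0, len(token_ids) - max_length, stride):
            self.input_ids.append(torch.tensor(token_ids[i:i + max_length]))
            self.target_ids.append(torch.tensor(token_ids[i + 1:i + max_length + 1]))

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, idx):
        return self.input_ids[idx], self.target_ids[idx]


def create_dataloader_v1(txt, batch_size=4, max_length=256,
                         stride=128, shuffle=True, drop_last=True, num_workers=0):
    tokenizer = tiktoken.get_encoding("gpt2")
    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)
    # num_workers belongs to the DataLoader, not the Dataset.
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                      drop_last=drop_last, num_workers=num_workers)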
5 changes: 2 additions & 3 deletions ch06/01_main-chapter-code/ch06.ipynb
@@ -1861,7 +1861,7 @@
"source": [
"# Overall the same as `train_model_simple` in chapter 5\n",
"def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,\n",
" eval_freq, eval_iter, tokenizer):\n",
" eval_freq, eval_iter):\n",
" # Initialize lists to track losses and examples seen\n",
" train_losses, val_losses, train_accs, val_accs = [], [], [], []\n",
" examples_seen, global_step = 0, -1\n",
@@ -1982,7 +1982,6 @@
"train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(\n",
" model, train_loader, val_loader, optimizer, device,\n",
" num_epochs=num_epochs, eval_freq=50, eval_iter=5,\n",
" tokenizer=tokenizer\n",
")\n",
"\n",
"end_time = time.time()\n",
@@ -2371,7 +2370,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
"version": "3.11.4"
}
},
"nbformat": 4,
@@ -235,7 +235,7 @@ def evaluate_model(model, train_loader, val_loader, device,


def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
eval_freq, eval_iter, tokenizer, max_steps=None, trainable_token_pos=-1,
eval_freq, eval_iter, max_steps=None, trainable_token_pos=-1,
accumulation_steps=1, ignore_index=-100):
# Initialize lists to track losses and tokens seen
train_losses, val_losses, train_accs, val_accs = [], [], [], []
@@ -565,7 +565,7 @@ def replace_linear_with_lora(model, rank, alpha):
train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
model, train_loader, val_loader, optimizer, device,
num_epochs=args.num_epochs, eval_freq=50, eval_iter=5,
tokenizer=tokenizer, max_steps=None, trainable_token_pos=args.trainable_token_pos,
max_steps=None, trainable_token_pos=args.trainable_token_pos,
accumulation_steps=args.accumulation_steps
)

4 changes: 2 additions & 2 deletions ch06/03_bonus_imdb-classification/train-bert-hf.py
@@ -110,7 +110,7 @@ def evaluate_model(model, train_loader, val_loader, device, eval_iter):


def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
eval_freq, eval_iter, tokenizer, max_steps=None):
eval_freq, eval_iter, max_steps=None):
# Initialize lists to track losses and tokens seen
train_losses, val_losses, train_accs, val_accs = [], [], [], []
examples_seen, global_step = 0, -1
@@ -279,7 +279,7 @@ def train_classifier_simple(model, train_loader, val_loader, optimizer, device,
train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
model, train_loader, val_loader, optimizer, device,
num_epochs=num_epochs, eval_freq=50, eval_iter=20,
tokenizer=tokenizer, max_steps=None
max_steps=None
)

end_time = time.time()
4 changes: 2 additions & 2 deletions ch06/03_bonus_imdb-classification/train-gpt.py
@@ -139,7 +139,7 @@ def evaluate_model(model, train_loader, val_loader, device, eval_iter, trainable


def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
eval_freq, eval_iter, tokenizer, max_steps=None, trainable_token=-1):
eval_freq, eval_iter, max_steps=None, trainable_token=-1):
# Initialize lists to track losses and tokens seen
train_losses, val_losses, train_accs, val_accs = [], [], [], []
examples_seen, global_step = 0, -1
@@ -344,7 +344,7 @@ def train_classifier_simple(model, train_loader, val_loader, optimizer, device,
train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
model, train_loader, val_loader, optimizer, device,
num_epochs=num_epochs, eval_freq=50, eval_iter=20,
tokenizer=tokenizer, max_steps=None, trainable_token=args.trainable_token
max_steps=None, trainable_token=args.trainable_token
)

end_time = time.time()
