diff --git a/.gitignore b/.gitignore
index bfad8c6a..45519021 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,6 +20,7 @@ ch07/01_main-chapter-code/loss-plot.pdf
 
 # Checkpoint files
 appendix-A/01_main-chapter-code/model.pth
+appendix-E/01_main-chapter-code/gpt2
 
 ch05/01_main-chapter-code/gpt2/
@@ -33,6 +34,7 @@ ch06/02_bonus_additional-experiments/gpt2
 ch06/03_bonus_imdb-classification/gpt2
 ch07/01_main-chapter-code/gpt2-medium355M-sft.pth
+ch07/01_main-chapter-code/gpt2/
 
 # Datasets
 appendix-E/01_main-chapter-code/sms_spam_collection.zip
diff --git a/appendix-E/01_main-chapter-code/appendix-E.ipynb b/appendix-E/01_main-chapter-code/appendix-E.ipynb
index 70cb4c9a..c73052c8 100644
--- a/appendix-E/01_main-chapter-code/appendix-E.ipynb
+++ b/appendix-E/01_main-chapter-code/appendix-E.ipynb
@@ -1370,7 +1370,6 @@
     "train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(\n",
     "    model, train_loader, val_loader, optimizer, device,\n",
     "    num_epochs=num_epochs, eval_freq=50, eval_iter=5,\n",
-    "    tokenizer=tokenizer\n",
     ")\n",
     "\n",
     "end_time = time.time()\n",
@@ -1495,7 +1494,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.6"
+   "version": "3.11.4"
   }
  },
  "nbformat": 4,
diff --git a/appendix-E/01_main-chapter-code/previous_chapters.py b/appendix-E/01_main-chapter-code/previous_chapters.py
index 83c55ef3..992e0968 100644
--- a/appendix-E/01_main-chapter-code/previous_chapters.py
+++ b/appendix-E/01_main-chapter-code/previous_chapters.py
@@ -484,7 +484,7 @@ def calc_loss_batch(input_batch, target_batch, model, device):
 
 # Overall the same as `train_model_simple` in chapter 5
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer):
+                            eval_freq, eval_iter):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
     examples_seen, global_step = 0, -1
diff --git a/ch04/01_main-chapter-code/exercise-solutions.ipynb b/ch04/01_main-chapter-code/exercise-solutions.ipynb
index 6ba1b52d..83683e8b 100644
--- a/ch04/01_main-chapter-code/exercise-solutions.ipynb
+++ b/ch04/01_main-chapter-code/exercise-solutions.ipynb
@@ -262,7 +262,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 6,
    "id": "5fee2cf5-61c3-4167-81b5-44ea155bbaf2",
    "metadata": {},
    "outputs": [],
@@ -282,13 +282,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 7,
    "id": "5aa1b0c1-d78a-48fc-ad08-4802458b43f7",
    "metadata": {},
    "outputs": [],
    "source": [
     "import torch.nn as nn\n",
-    "from gpt import MultiHeadAttention, LayerNorm, GELU, FeedForward\n",
+    "from gpt import MultiHeadAttention, LayerNorm, FeedForward\n",
     "\n",
     "\n",
     "class TransformerBlock(nn.Module):\n",
@@ -351,7 +351,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 8,
    "id": "1d013d32-c275-4f42-be21-9010f1537227",
    "metadata": {},
    "outputs": [],
diff --git a/ch04/02_performance-analysis/flops-analysis.ipynb b/ch04/02_performance-analysis/flops-analysis.ipynb
index e45cd7b0..1769e649 100644
--- a/ch04/02_performance-analysis/flops-analysis.ipynb
+++ b/ch04/02_performance-analysis/flops-analysis.ipynb
@@ -62,12 +62,10 @@
     "from importlib.metadata import version\n",
     "\n",
     "import matplotlib\n",
-    "import tiktoken\n",
     "import torch\n",
     "\n",
     "print(\"thop version:\", version(\"thop\"))\n",
-    "print(\"torch version:\", version(\"torch\"))\n",
-    "print(\"tiktoken version:\", version(\"tiktoken\"))"
+    "print(\"torch version:\", version(\"torch\"))"
    ]
   },
   {
diff --git a/ch05/02_alternative_weight_loading/weight-loading-hf-transformers.ipynb b/ch05/02_alternative_weight_loading/weight-loading-hf-transformers.ipynb
index 414de404..7561bf6b 100644
--- a/ch05/02_alternative_weight_loading/weight-loading-hf-transformers.ipynb
+++ b/ch05/02_alternative_weight_loading/weight-loading-hf-transformers.ipynb
@@ -65,9 +65,9 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "numpy version: 1.25.2\n",
-      "torch version: 2.2.1\n",
-      "transformers version: 4.33.2\n"
+      "numpy version: 1.24.3\n",
+      "torch version: 2.3.0\n",
+      "transformers version: 4.41.2\n"
      ]
     }
    ],
@@ -85,16 +85,6 @@
    "id": "ffc17d7d-bcd8-42ee-82a9-04fd55acf15d",
    "metadata": {},
    "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
-      "  torch.utils._pytree._register_pytree_node(\n",
-      "/Users/sebastian/miniforge3/envs/book/lib/python3.11/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n",
-      "  torch.utils._pytree._register_pytree_node(\n"
-     ]
-    },
     {
      "data": {
       "text/plain": [
@@ -162,10 +152,10 @@
     "}\n",
     "\n",
     "model_configs = {\n",
-    "    \"gpt2-small\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
-    "    \"gpt2-medium\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
-    "    \"gpt2-large\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
-    "    \"gpt2-xl\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
+    "    \"gpt2-small (124M)\": {\"emb_dim\": 768, \"n_layers\": 12, \"n_heads\": 12},\n",
+    "    \"gpt2-medium (355M)\": {\"emb_dim\": 1024, \"n_layers\": 24, \"n_heads\": 16},\n",
+    "    \"gpt2-large (774M)\": {\"emb_dim\": 1280, \"n_layers\": 36, \"n_heads\": 20},\n",
+    "    \"gpt2-xl (1558M)\": {\"emb_dim\": 1600, \"n_layers\": 48, \"n_heads\": 25},\n",
     "}\n",
     "\n",
     "\n",
@@ -242,7 +232,7 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "/var/folders/jg/tpqyh1fd5js5wsr1d138k3n40000gn/T/ipykernel_32618/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
+      "/tmp/ipykernel_9385/3877979348.py:4: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
       "  return torch.nn.Parameter(torch.tensor(right))\n"
      ]
     }
    ],
    "source": [
     "import torch\n",
     "from previous_chapters import GPTModel\n",
     "\n",
     "gpt = GPTModel(BASE_CONFIG)\n",
     "\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
-    "load_weights(gpt, gpt_hf)\n",
-    "gpt.to(device);"
+    "load_weights(gpt, gpt_hf)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 9,
    "id": "4ddd0d51-3ade-4890-9bab-d63f141d095f",
    "metadata": {},
    "outputs": [
@@ -285,8 +274,8 @@
     "tokenizer = tiktoken.get_encoding(\"gpt2\")\n",
     "\n",
     "token_ids = generate(\n",
-    "    model=gpt,\n",
-    "    idx=text_to_token_ids(\"Every effort moves\", tokenizer),\n",
+    "    model=gpt.to(device),\n",
+    "    idx=text_to_token_ids(\"Every effort moves\", tokenizer).to(device),\n",
     "    max_new_tokens=30,\n",
     "    context_size=BASE_CONFIG[\"context_length\"],\n",
     "    top_k=1,\n",
diff --git a/ch05/05_bonus_hparam_tuning/hparam_search.py b/ch05/05_bonus_hparam_tuning/hparam_search.py
index 94cfb0d7..9c689f12 100644
--- a/ch05/05_bonus_hparam_tuning/hparam_search.py
+++ b/ch05/05_bonus_hparam_tuning/hparam_search.py
@@ -53,8 +53,8 @@ def calc_loss_batch(input_batch, target_batch, model, device):
 def evaluate_model(model, train_loader, val_loader, device, eval_iter):
     model.eval()
     with torch.no_grad():
-        train_loss = calc_loss_loader(train_loader, model, device, num_iters=eval_iter)
-        val_loss = calc_loss_loader(val_loader, model, device, num_iters=eval_iter)
+        train_loss = calc_loss_loader(train_loader, model, device, num_batches=eval_iter)
+        val_loss = calc_loss_loader(val_loader, model, device, num_batches=eval_iter)
     model.train()
     return train_loss, val_loss
 
diff --git a/ch05/05_bonus_hparam_tuning/previous_chapters.py b/ch05/05_bonus_hparam_tuning/previous_chapters.py
index 67d6a4be..5a2d85d0 100644
--- a/ch05/05_bonus_hparam_tuning/previous_chapters.py
+++ b/ch05/05_bonus_hparam_tuning/previous_chapters.py
@@ -40,12 +40,12 @@ def __getitem__(self, idx):
 
 
 def create_dataloader_v1(txt, batch_size=4, max_length=256,
-                         stride=128, shuffle=True, drop_last=True):
+                         stride=128, shuffle=True, drop_last=True, num_workers=0):
     # Initialize the tokenizer
     tokenizer = tiktoken.get_encoding("gpt2")
 
     # Create dataset
-    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride, num_workers=0)
+    dataset = GPTDatasetV1(txt, tokenizer, max_length, stride)
 
     # Create dataloader
     dataloader = DataLoader(
diff --git a/ch06/01_main-chapter-code/ch06.ipynb b/ch06/01_main-chapter-code/ch06.ipynb
index f7e71ac0..a18a839a 100644
--- a/ch06/01_main-chapter-code/ch06.ipynb
+++ b/ch06/01_main-chapter-code/ch06.ipynb
@@ -1861,7 +1861,7 @@
    "source": [
     "# Overall the same as `train_model_simple` in chapter 5\n",
     "def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,\n",
-    "                            eval_freq, eval_iter, tokenizer):\n",
+    "                            eval_freq, eval_iter):\n",
     "    # Initialize lists to track losses and examples seen\n",
     "    train_losses, val_losses, train_accs, val_accs = [], [], [], []\n",
     "    examples_seen, global_step = 0, -1\n",
@@ -1982,7 +1982,6 @@
     "train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(\n",
     "    model, train_loader, val_loader, optimizer, device,\n",
     "    num_epochs=num_epochs, eval_freq=50, eval_iter=5,\n",
-    "    tokenizer=tokenizer\n",
     ")\n",
     "\n",
     "end_time = time.time()\n",
@@ -2371,7 +2370,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.6"
+   "version": "3.11.4"
   }
  },
  "nbformat": 4,
diff --git a/ch06/02_bonus_additional-experiments/additional-experiments.py b/ch06/02_bonus_additional-experiments/additional-experiments.py
index 66b8c7f7..f3217eda 100644
--- a/ch06/02_bonus_additional-experiments/additional-experiments.py
+++ b/ch06/02_bonus_additional-experiments/additional-experiments.py
@@ -235,7 +235,7 @@ def evaluate_model(model, train_loader, val_loader, device,
 
 
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer, max_steps=None, trainable_token_pos=-1,
+                            eval_freq, eval_iter, max_steps=None, trainable_token_pos=-1,
                             accumulation_steps=1, ignore_index=-100):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
@@ -565,7 +565,7 @@ def replace_linear_with_lora(model, rank, alpha):
     train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
         model, train_loader, val_loader, optimizer, device,
         num_epochs=args.num_epochs, eval_freq=50, eval_iter=5,
-        tokenizer=tokenizer, max_steps=None, trainable_token_pos=args.trainable_token_pos,
+        max_steps=None, trainable_token_pos=args.trainable_token_pos,
         accumulation_steps=args.accumulation_steps
     )
 
diff --git a/ch06/03_bonus_imdb-classification/train-bert-hf.py b/ch06/03_bonus_imdb-classification/train-bert-hf.py
index 8ab5ba5f..c6f86a3d 100644
--- a/ch06/03_bonus_imdb-classification/train-bert-hf.py
+++ b/ch06/03_bonus_imdb-classification/train-bert-hf.py
@@ -110,7 +110,7 @@ def evaluate_model(model, train_loader, val_loader, device, eval_iter):
 
 
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer, max_steps=None):
+                            eval_freq, eval_iter, max_steps=None):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
     examples_seen, global_step = 0, -1
@@ -279,7 +279,7 @@ def train_classifier_simple(model, train_loader, val_loader, optimizer, device,
     train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
         model, train_loader, val_loader, optimizer, device,
         num_epochs=num_epochs, eval_freq=50, eval_iter=20,
-        tokenizer=tokenizer, max_steps=None
+        max_steps=None
     )
 
     end_time = time.time()
diff --git a/ch06/03_bonus_imdb-classification/train-gpt.py b/ch06/03_bonus_imdb-classification/train-gpt.py
index cd9687c6..945988dd 100644
--- a/ch06/03_bonus_imdb-classification/train-gpt.py
+++ b/ch06/03_bonus_imdb-classification/train-gpt.py
@@ -139,7 +139,7 @@ def evaluate_model(model, train_loader, val_loader, device, eval_iter, trainable
 
 
 def train_classifier_simple(model, train_loader, val_loader, optimizer, device, num_epochs,
-                            eval_freq, eval_iter, tokenizer, max_steps=None, trainable_token=-1):
+                            eval_freq, eval_iter, max_steps=None, trainable_token=-1):
     # Initialize lists to track losses and tokens seen
     train_losses, val_losses, train_accs, val_accs = [], [], [], []
     examples_seen, global_step = 0, -1
@@ -344,7 +344,7 @@ def train_classifier_simple(model, train_loader, val_loader, optimizer, device,
     train_losses, val_losses, train_accs, val_accs, examples_seen = train_classifier_simple(
         model, train_loader, val_loader, optimizer, device,
         num_epochs=num_epochs, eval_freq=50, eval_iter=20,
-        tokenizer=tokenizer, max_steps=None, trainable_token=args.trainable_token
+        max_steps=None, trainable_token=args.trainable_token
     )
 
     end_time = time.time()