From 1afa4e62f513e1e9ffc5b4717ad8781ede81f1d2 Mon Sep 17 00:00:00 2001
From: Ayyuce Demirbas
Date: Wed, 19 Jun 2024 04:50:31 +0300
Subject: [PATCH] Setting Kaggle Credentials

The existing code in the notebook for setting Kaggle credentials uses the
Colab-specific userdata.get API, which may not work outside Colab. The added
method works in different environments: it simply creates a .kaggle directory
and writes the API token to a kaggle.json file.
---
 .../paligemma/fine-tuning-paligemma.ipynb     | 75 ++++++++++---------
 1 file changed, 39 insertions(+), 36 deletions(-)

diff --git a/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb b/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb
index 98f6a00dd..7e33fdf3f 100644
--- a/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb
+++ b/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb
@@ -38,17 +38,17 @@
       "source": [
         "# Fine-tune PaliGemma with JAX\n",
         "\n",
-        "\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\n",
-        "\u003ctd\u003e\n",
-        "\u003ca target=\"_blank\" href=\"https://ai.google.dev/gemma/docs/paligemma/fine-tuning-paligemma\"\u003e\u003cimg src=\"https://ai.google.dev/static/site-assets/images/docs/notebook-site-button.png\" height=\"32\" width=\"32\" /\u003eView on ai.google.dev\u003c/a\u003e\n",
-        "\u003c/td\u003e\n",
-        "\u003ctd\u003e\n",
-        "\u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/google/generative-ai-docs/blob/main/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e\n",
-        "\u003c/td\u003e\n",
-        "\u003ctd\u003e\n",
-        "\u003ca target=\"_blank\" href=\"https://github.com/google/generative-ai-docs/blob/main/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb\"\u003e\u003cimg src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\n",
-        "\u003c/td\u003e\n",
-        "\u003c/table\u003e\n"
+        "<table class=\"tfo-notebook-buttons\" align=\"left\">\n",
+        "<td>\n",
+        "<a target=\"_blank\" href=\"https://ai.google.dev/gemma/docs/paligemma/fine-tuning-paligemma\"><img src=\"https://ai.google.dev/static/site-assets/images/docs/notebook-site-button.png\" height=\"32\" width=\"32\" />View on ai.google.dev</a>\n",
+        "</td>\n",
+        "<td>\n",
+        "<a target=\"_blank\" href=\"https://colab.research.google.com/github/google/generative-ai-docs/blob/main/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
+        "</td>\n",
+        "<td>\n",
+        "<a target=\"_blank\" href=\"https://github.com/google/generative-ai-docs/blob/main/site/en/gemma/docs/paligemma/fine-tuning-paligemma.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n",
+        "</td>\n",
+        "</table>\n"
\n" ] }, { @@ -165,6 +165,26 @@ "os.environ[\"KAGGLE_KEY\"] = userdata.get('KAGGLE_KEY')" ] }, + { + "cell_type": "code", + "source": [ + "#If you encounter issues with the cell above, please try using this one\n", + "!mkdir ~/.kaggle\n", + "!touch ~/.kaggle/kaggle.json\n", + "\n", + "api_token = {\"username\":\"KAGGLE_USERNAME\",\"key\":\"KAGGLE_KEY\"}\n", + "\n", + "import json\n", + "\n", + "with open('/root/.kaggle/kaggle.json', 'w') as file:\n", + " json.dump(api_token, file)" + ], + "metadata": { + "id": "or4o3oaOD4xW" + }, + "execution_count": null, + "outputs": [] + }, { "cell_type": "markdown", "metadata": { @@ -482,7 +502,7 @@ "\n", " image = tf.constant(image)\n", " image = tf.image.resize(image, (size, size), method='bilinear', antialias=True)\n", - " return image.numpy() / 127.5 - 1.0 # [0, 255]-\u003e[-1,1]\n", + " return image.numpy() / 127.5 - 1.0 # [0, 255]->[-1,1]\n", "\n", "def preprocess_tokens(prefix, suffix=None, seqlen=None):\n", " # Model has been trained to handle tokenized text composed of a prefix with\n", @@ -622,12 +642,12 @@ " return f\"data:image/jpeg;base64,{image_b64}\"\n", "\n", "def render_example(image, caption):\n", - " image = ((image + 1)/2 * 255).astype(np.uint8) # [-1,1] -\u003e [0, 255]\n", + " image = ((image + 1)/2 * 255).astype(np.uint8) # [-1,1] -> [0, 255]\n", " return f\"\"\"\n", - " \u003cdiv style=\"display: inline-flex; align-items: center; justify-content: center;\"\u003e\n", - " \u003cimg style=\"width:128px; height:128px;\" src=\"{render_inline(image, resize=(64,64))}\" /\u003e\n", - " \u003cp style=\"width:256px; margin:10px; font-size:small;\"\u003e{html.escape(caption)}\u003c/p\u003e\n", - " \u003c/div\u003e\n", + "
\n", + " \n", + "

{html.escape(caption)}

\n", + "
\n", " \"\"\"\n", "\n", "html_out = \"\"\n", @@ -744,7 +764,7 @@ " # Append to html output.\n", " for example, response in zip(examples, responses):\n", " outputs.append((example[\"image\"], response))\n", - " if num_examples and len(outputs) \u003e= num_examples:\n", + " if num_examples and len(outputs) >= num_examples:\n", " return outputs" ] }, @@ -853,25 +873,8 @@ "metadata": { "colab": { "gpuType": "T4", - "last_runtime": { - "build_target": "//learning/grp/tools/ml_python:ml_notebook", - "kind": "private" - }, "private_outputs": true, - "provenance": [ - { - "file_id": "17AiK8gRY7oiquQGkBH0d08PFQo3Kyx1I", - "timestamp": 1715287187925 - }, - { - "file_id": "1qZlJfPyfKRrNcz2shxQ93HnnE5Ge1LLn", - "timestamp": 1715019972450 - }, - { - "file_id": "1JFnlD2kSiTNexdPw_NYRtuW6uuSTI0kD", - "timestamp": 1714585741026 - } - ], + "provenance": [], "toc_visible": true }, "kernelspec": {