diff --git a/colab.ipynb b/colab.ipynb
deleted file mode 100644
index cf5f045..0000000
--- a/colab.ipynb
+++ /dev/null
@@ -1,182 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "TC3XkMetGWtK"
-   },
-   "source": [
-    "# Neural Amp Modeler (\"Easy Mode\" Trainer)\n",
-    "**Note**:\n",
-    "This notebook is meant to be used on [Google Colab](https://colab.research.google.com/github/sdatkinson/neural-amp-modeler/blob/main/bin/train/easy_colab.ipynb).\n",
-    "\n",
-    "🔶**Before you run**🔶\n",
-    "\n",
-    "Make sure to get a GPU! (From the upper-left menu, click Runtime->Change runtime type->Select \"GPU\" from the \"Hardware accelerator dropdown menu)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "5CQleTk7GJV8"
-   },
-   "source": [
-    "## Step 1: Get data\n",
-    "* **Download the reamp signal.** Here: [v3_0_0.wav](https://drive.google.com/file/d/1Pgf8PdE0rKB1TD4TRPKbpNo1ByR3IOm9/view?usp=drive_link).\n",
-    "* **Reamp your gear.** Then reamp the gear you want to model using it. Save that reamp as \"output.wav\". *Note: Use 48kHz, 24-bit, mono.* For other sample rates, use [the CLI trainer](https://github.com/sdatkinson/neural-amp-modeler).\n",
-    "* **Upload your files.** Upload the input (DI) and output (amped) files you want to use by clicking the Folder icon on the left ⬅ and then clicking the upload icon or by dragging the files into the panel."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "7tRCyI_YjZjj"
-   },
-   "source": [
-    "## Step 2: Train!\n",
-    "Configure your training run below, then hit the Play button to start training!\n",
-    "\n",
-    "🕙NOTE: At default settings, training will take about 10 minutes.🕙"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "cellView": "form",
-    "id": "zrXbQY7vjZjk"
-   },
-   "outputs": [],
-   "source": [
-    "try:\n",
-    "    import nam\n",
-    "except ImportError as e:\n",
-    "    print(\"Installing NAM into Colab. This should take under 2 minutes.\")\n",
-    "    # Check what we're starting with (Issue 399)\n",
-    "    !if [ ! -d logs ]; then mkdir logs; fi\n",
-    "    !pip list > logs/packages.log\n",
-    "    !pip install neural-amp-modeler > logs/install.log\n",
-    "    # Hint: use the next line instead for the very latest!\n",
-    "    # !pip install git+https://github.com/sdatkinson/neural-amp-modeler.git@main\n",
-    "\n",
-    "from nam.train.colab import run\n",
-    "from nam.models.metadata import GearType, ToneType, UserMetadata\n",
-    "\n",
-    "%load_ext tensorboard\n",
-    "\n",
-    "from functools import partial\n",
-    "\n",
-    "import ipywidgets as widgets\n",
-    "\n",
-    "# NOTE: Enums need to be handled carefully since the values need to be supplied literally here!\n",
-    "\n",
-    "#@markdown # Training parameters\n",
-    "epochs = 100 #@param {type: \"number\"}\n",
-    "architecture = \"standard\" #@param [\"standard\", \"lite\", \"feather\", \"nano\"] {type: \"string\"}\n",
-    "latency_samples = \"auto\" #@param {type: \"string\"}\n",
-    "fit_cab = False #@param {type: \"boolean\"}\n",
-    "ignore_checks = False #@param {type: \"boolean\"}\n",
-    "\n",
-    "#@markdown # Metadata\n",
-    "use_metadata = False #@param {type: \"boolean\"}\n",
-    "name = \"My model\" #@param {type:\"string\"}\n",
-    "modeled_by = \"Your name\" #@param {type:\"string\"}\n",
-    "gear_make = \"GearCo\" #@param {type:\"string\"}\n",
-    "gear_model = \"GearName\" #@param {type:\"string\"}\n",
-    "gear_type = \"amp\" #@param [\"amp\", \"pedal\", \"pedal_amp\", \"amp_cab\", \"amp_pedal_cab\", \"preamp\", \"studio\"] {type:\"string\"}\n",
-    "tone_type = \"clean\" #@param [\"clean\", \"overdrive\", \"crunch\", \"hi_gain\", \"fuzz\"] {type:\"string\"}\n",
-    "\n",
-    "def _verbose_enum(E, val):\n",
-    "    try:\n",
-    "        return E(val)\n",
-    "    except ValueError as e:\n",
-    "        raise ValueError(\n",
-    "            str(e)\n",
-    "            + \"\\nValid choices are: \"\n",
-    "            + \", \".join(list(x.value for x in E))\n",
-    "        )\n",
-    "\n",
-    "def _parse_latency(ls: str):\n",
-    "    if ls.lower() == \"auto\":\n",
-    "        return None\n",
-    "    try:\n",
-    "        return int(ls)\n",
-    "    except ValueError as e:\n",
-    "        raise ValueError(\n",
-    "            f\"Invalid value for latency {ls} was given. Either use 'auto' or provide \"\n",
-    "            f\"the reamp latency, in samples.\\nOriginal error:\\n\\n{e}\"\n",
-    "        )\n",
-    "\n",
-    "user_metadata = None if not use_metadata else UserMetadata(\n",
-    "    name=name,\n",
-    "    modeled_by=modeled_by,\n",
-    "    gear_make=gear_make,\n",
-    "    gear_model=gear_model,\n",
-    "    gear_type=_verbose_enum(GearType, gear_type.lower()),\n",
-    "    tone_type=_verbose_enum(ToneType, tone_type.lower())\n",
-    ")\n",
-    "run_partial = partial(run, user_metadata=user_metadata)\n",
-    "\n",
-    "%tensorboard --logdir /content/lightning_logs\n",
-    "run(\n",
-    "    epochs=epochs,\n",
-    "    architecture=architecture,\n",
-    "    fit_cab=fit_cab, # Change me to True for full-rig modeling!\n",
-    "    ignore_checks=ignore_checks, # Change to True to train anyways with potentially bad data\n",
-    "    user_metadata=user_metadata,\n",
-    "    delay=_parse_latency(latency_samples)\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "823KJ_L0Rchp"
-   },
-   "source": [
-    "## Step 3: Check the results and download your model\n",
-    "We're done!\n",
-    "\n",
-    "Have a look at the plot above to see how your model compares to the real gear you're modeling.\n",
-    "Hopefully it looks good!\n",
-    "Go to the file browser on the left panel ⬅ and download `model.nam` from the `exported_model` directory (you may need to hit the refresh button).\n",
-    "\n",
-    "Additionally, if you want to continue to train this model later you can download the lightning model artifacts from `lightning_logs`. If not, that's fine too.\n",
-    "\n",
-    "# 🎸 **ENJOY!** 🎸"
-   ]
-  }
- ],
- "metadata": {
-  "accelerator": "GPU",
-  "colab": {
-   "provenance": []
-  },
-  "gpuClass": "standard",
-  "kernelspec": {
-   "display_name": "nam",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.10.6"
-  },
-  "orig_nbformat": 4,
-  "vscode": {
-   "interpreter": {
-    "hash": "920df60c69944ba95f8c12adb41fedfdc8090c370a20d39253c7705973dd37db"
-   }
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
diff --git a/docs/source/tutorials/colab.rst b/docs/source/tutorials/colab.rst
index a53e1db..1dc28aa 100644
--- a/docs/source/tutorials/colab.rst
+++ b/docs/source/tutorials/colab.rst
@@ -3,7 +3,7 @@ Training in the cloud with Google Colab
 
 If you don't have a good computer for training ML models, you use Google Colab
 to train in the cloud using the pre-made Jupyter notebook at
-`colab.ipynb `_,
+`colab.ipynb `_,
 which is designed to be used with
 `Google Colab `_.
 
@@ -11,7 +11,7 @@ Opening the notebook
 --------------------
 
 To open the notebook in Colab, follow
-`this link `_.
+`this link `_.
 
 .. note:: Most browsers work, but Firefox can be a bit temperamental. This
    isn't NAM's fault; Google Colab just prefers Chrome (unsurprisingly).