diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 00000000..2f8f89bc
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,26 @@
+# Ignore Git and GitHub files
+.git
+.github/
+
+# Ignore Husky configuration files
+.husky/
+
+# Ignore documentation and metadata files
+CONTRIBUTING.md
+LICENSE
+README.md
+
+# Ignore environment examples and sensitive info
+.env
+*.local
+*.example
+
+# Ignore node modules, logs and cache files
+**/*.log
+**/node_modules
+**/dist
+**/build
+**/.cache
+logs
+dist-ssr
+.DS_Store
diff --git a/.env.example b/.env.example
new file mode 100644
index 00000000..ec825e8e
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,47 @@
+# Rename this file to .env once you have filled in the environment variables below!
+
+# Get your GROQ API Key here -
+# https://console.groq.com/keys
+# You only need this environment variable set if you want to use Groq models
+GROQ_API_KEY=
+
+# Get your OpenAI API Key by following these instructions -
+# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
+# You only need this environment variable set if you want to use GPT models
+OPENAI_API_KEY=
+
+# Get your Anthropic API Key in your account settings -
+# https://console.anthropic.com/settings/keys
+# You only need this environment variable set if you want to use Claude models
+ANTHROPIC_API_KEY=
+
+# Get your OpenRouter API Key in your account settings -
+# https://openrouter.ai/settings/keys
+# You only need this environment variable set if you want to use OpenRouter models
+OPEN_ROUTER_API_KEY=
+
+# Get your Google Generative AI API Key by following these instructions -
+# https://console.cloud.google.com/apis/credentials
+# You only need this environment variable set if you want to use Google Generative AI models
+GOOGLE_GENERATIVE_AI_API_KEY=
+
+# You only need this environment variable set if you want to use Ollama models
+# EXAMPLE http://localhost:11434
+OLLAMA_API_BASE_URL=
+
+# You only need this environment variable set if you want to use OpenAI Like models
+OPENAI_LIKE_API_BASE_URL=
+
+# You only need this environment variable set if you want to use DeepSeek models through their API
+DEEPSEEK_API_KEY=
+
+# Get your OpenAI Like API Key
+OPENAI_LIKE_API_KEY=
+
+# Get your Mistral API Key by following these instructions -
+# https://console.mistral.ai/api-keys/
+# You only need this environment variable set if you want to use Mistral models
+MISTRAL_API_KEY=
+
+# Include this environment variable if you want more logging for debugging locally
+VITE_LOG_LEVEL=debug
diff --git a/.github/workflows/github-build-push.yml b/.github/workflows/github-build-push.yml
new file mode 100644
index 00000000..4d4db05d
--- /dev/null
+++ b/.github/workflows/github-build-push.yml
@@ -0,0 +1,39 @@
+name: Build and Push Container
+
+on:
+ push:
+ branches:
+ - main
+ # paths:
+ # - 'Dockerfile'
+ workflow_dispatch:
+jobs:
+ build-and-push:
+ runs-on: [ubuntu-latest]
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v1
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v1
+
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v1
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Build and Push Containers
+ uses: docker/build-push-action@v2
+ with:
+ context: .
+ file: Dockerfile
+ platforms: linux/amd64,linux/arm64
+ push: true
+ tags: |
+ ghcr.io/${{ github.repository }}:latest
+ ghcr.io/${{ github.repository }}:${{ github.sha }}
diff --git a/.gitignore b/.gitignore
index 965ef504..69d27903 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,7 +12,7 @@ dist-ssr
*.local
.vscode/*
-!.vscode/launch.json
+.vscode/launch.json
!.vscode/extensions.json
.idea
.DS_Store
@@ -24,7 +24,10 @@ dist-ssr
/.cache
/build
-.env*
+.env.local
+.env
*.vars
.wrangler
_worker.bundle
+
+Modelfile
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ef4141cd..1bf3bfb7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,95 +1,93 @@
-[![Bolt Open Source Codebase](./public/social_preview_index.jpg)](https://bolt.new)
+# Contributing to Bolt.new Fork
-> Welcome to the **Bolt** open-source codebase! This repo contains a simple example app using the core components from bolt.new to help you get started building **AI-powered software development tools** powered by StackBlitz’s **WebContainer API**.
+First off, thank you for considering contributing to Bolt.new! This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make Bolt.new a better tool for developers worldwide.
-### Why Build with Bolt + WebContainer API
+## 📋 Table of Contents
+- [Code of Conduct](#code-of-conduct)
+- [How Can I Contribute?](#how-can-i-contribute)
+- [Pull Request Guidelines](#pull-request-guidelines)
+- [Coding Standards](#coding-standards)
+- [Development Setup](#development-setup)
+- [Deployment with Docker](#docker-deployment-documentation)
+- [Project Structure](#project-structure)
-By building with the Bolt + WebContainer API you can create browser-based applications that let users **prompt, run, edit, and deploy** full-stack web apps directly in the browser, without the need for virtual machines. With WebContainer API, you can build apps that give AI direct access and full control over a **Node.js server**, **filesystem**, **package manager** and **dev terminal** inside your users browser tab. This powerful combination allows you to create a new class of development tools that support all major JavaScript libraries and Node packages right out of the box, all without remote environments or local installs.
+## Code of Conduct
-### What’s the Difference Between Bolt (This Repo) and [Bolt.new](https://bolt.new)?
+This project and everyone participating in it is governed by our Code of Conduct. By participating, you are expected to uphold this code. Please report unacceptable behavior to the project maintainers.
-- **Bolt.new**: This is the **commercial product** from StackBlitz—a hosted, browser-based AI development tool that enables users to prompt, run, edit, and deploy full-stack web applications directly in the browser. Built on top of the [Bolt open-source repo](https://github.com/stackblitz/bolt.new) and powered by the StackBlitz **WebContainer API**.
+## How Can I Contribute?
-- **Bolt (This Repo)**: This open-source repository provides the core components used to make **Bolt.new**. This repo contains the UI interface for Bolt as well as the server components, built using [Remix Run](https://remix.run/). By leveraging this repo and StackBlitz’s **WebContainer API**, you can create your own AI-powered development tools and full-stack applications that run entirely in the browser.
+### 🐞 Reporting Bugs and Feature Requests
+- Check the issue tracker to avoid duplicates
+- Use the issue templates when available
+- Include as much relevant information as possible
+- For bugs, add steps to reproduce the issue
-# Get Started Building with Bolt
+### 🔧 Code Contributions
+1. Fork the repository
+2. Create a new branch for your feature/fix
+3. Write your code
+4. Submit a pull request
-Bolt combines the capabilities of AI with sandboxed development environments to create a collaborative experience where code can be developed by the assistant and the programmer together. Bolt combines [WebContainer API](https://webcontainers.io/api) with [Claude Sonnet 3.5](https://www.anthropic.com/news/claude-3-5-sonnet) using [Remix](https://remix.run/) and the [AI SDK](https://sdk.vercel.ai/).
+### ✨ Becoming a Core Contributor
+We're looking for dedicated contributors to help maintain and grow this project. If you're interested in becoming a core contributor, please fill out our [Contributor Application Form](https://forms.gle/TBSteXSDCtBDwr5m7).
-### WebContainer API
+## Pull Request Guidelines
-Bolt uses [WebContainers](https://webcontainers.io/) to run generated code in the browser. WebContainers provide Bolt with a full-stack sandbox environment using [WebContainer API](https://webcontainers.io/api). WebContainers run full-stack applications directly in the browser without the cost and security concerns of cloud hosted AI agents. WebContainers are interactive and editable, and enables Bolt's AI to run code and understand any changes from the user.
+### 📝 PR Checklist
+- [ ] Branch from the main branch
+- [ ] Update documentation if needed
+- [ ] Manually verify all new functionality works as expected
+- [ ] Keep PRs focused and atomic
-The [WebContainer API](https://webcontainers.io) is free for personal and open source usage. If you're building an application for commercial usage, you can learn more about our [WebContainer API commercial usage pricing here](https://stackblitz.com/pricing#webcontainer-api).
+### 👀 Review Process
+1. Manually test the changes
+2. At least one maintainer review required
+3. Address all review comments
+4. Maintain clean commit history
-### Remix App
+## Coding Standards
-Bolt is built with [Remix](https://remix.run/) and
-deployed using [CloudFlare Pages](https://pages.cloudflare.com/) and
-[CloudFlare Workers](https://workers.cloudflare.com/).
+### 💻 General Guidelines
+- Follow existing code style
+- Comment complex logic
+- Keep functions focused and small
+- Use meaningful variable names
-### AI SDK Integration
-
-Bolt uses the [AI SDK](https://github.com/vercel/ai) to integrate with AI
-models. At this time, Bolt supports using Anthropic's Claude Sonnet 3.5.
-You can get an API key from the [Anthropic API Console](https://console.anthropic.com/) to use with Bolt.
-Take a look at how [Bolt uses the AI SDK](https://github.com/stackblitz/bolt.new/tree/main/app/lib/.server/llm)
-
-## Prerequisites
-
-Before you begin, ensure you have the following installed:
-
-- Node.js (v20.15.1)
-- pnpm (v9.4.0)
-
-## Setup
-
-1. Clone the repository (if you haven't already):
+## Development Setup
+### 🔄 Initial Setup
+1. Clone the repository:
```bash
-git clone https://github.com/stackblitz/bolt.new.git
+git clone https://github.com/coleam00/bolt.new-any-llm.git
```
2. Install dependencies:
-
```bash
pnpm install
```
-3. Create a `.env.local` file in the root directory and add your Anthropic API key:
-
-```
+3. Set up environment variables:
+ - Rename `.env.example` to `.env.local`
+ - Add your LLM API keys (only set the ones you plan to use):
+```bash
+GROQ_API_KEY=XXX
+OPENAI_API_KEY=XXX
ANTHROPIC_API_KEY=XXX
+...
```
-
-Optionally, you can set the debug level:
-
-```
+ - Optionally set debug level:
+```bash
VITE_LOG_LEVEL=debug
```
-
**Important**: Never commit your `.env.local` file to version control. It's already included in .gitignore.
-## Available Scripts
-
-- `pnpm run dev`: Starts the development server.
-- `pnpm run build`: Builds the project.
-- `pnpm run start`: Runs the built application locally using Wrangler Pages. This script uses `bindings.sh` to set up necessary bindings so you don't have to duplicate environment variables.
-- `pnpm run preview`: Builds the project and then starts it locally, useful for testing the production build. Note, HTTP streaming currently doesn't work as expected with `wrangler pages dev`.
-- `pnpm test`: Runs the test suite using Vitest.
-- `pnpm run typecheck`: Runs TypeScript type checking.
-- `pnpm run typegen`: Generates TypeScript types using Wrangler.
-- `pnpm run deploy`: Builds the project and deploys it to Cloudflare Pages.
-
-## Development
-
-To start the development server:
-
+### 🚀 Running the Development Server
```bash
pnpm run dev
```
-This will start the Remix Vite development server.
+**Note**: You will need Google Chrome Canary to run this locally if you use Chrome! It's an easy install and a good browser for web development anyway.
## Testing
@@ -108,3 +106,96 @@ pnpm run deploy
```
Make sure you have the necessary permissions and Wrangler is correctly configured for your Cloudflare account.
+
+# Docker Deployment Documentation
+
+This guide outlines various methods for building and deploying the application using Docker.
+
+## Build Methods
+
+### 1. Using Helper Scripts
+
+NPM scripts are provided for convenient building:
+
+```bash
+# Development build
+npm run dockerbuild
+
+# Production build
+npm run dockerbuild:prod
+```
+
+### 2. Direct Docker Build Commands
+
+You can use Docker's target feature to specify the build environment:
+
+```bash
+# Development build
+docker build . --target bolt-ai-development
+
+# Production build
+docker build . --target bolt-ai-production
+```
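+
+Note that the direct build commands above leave the image untagged, while the `docker run` commands in the next section expect the `bolt-ai:development` / `bolt-ai:production` tags. You may want to add `-t`, for example:
+
+```bash
+# Tag the image so `docker run bolt-ai:development` can find it
+docker build . --target bolt-ai-development -t bolt-ai:development
+```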
+
+### 3. Docker Compose with Profiles
+
+Use Docker Compose profiles to manage different environments:
+
+```bash
+# Development environment
+docker-compose --profile development up
+
+# Production environment
+docker-compose --profile production up
+```
+
+## Running the Application
+
+After building using any of the methods above, run the container with:
+
+```bash
+# Development
+docker run -p 5173:5173 --env-file .env.local bolt-ai:development
+
+# Production
+docker run -p 5173:5173 --env-file .env.local bolt-ai:production
+```
+
+## Deployment with Coolify
+
+[Coolify](https://github.com/coollabsio/coolify) provides a straightforward deployment process:
+
+1. Import your Git repository as a new project
+2. Select your target environment (development/production)
+3. Choose "Docker Compose" as the Build Pack
+4. Configure deployment domains
+5. Set the custom start command:
+ ```bash
+ docker compose --profile production up
+ ```
+6. Configure environment variables
+ - Add necessary AI API keys
+ - Adjust other environment variables as needed
+7. Deploy the application
+
+## VS Code Integration
+
+The `docker-compose.yaml` configuration is compatible with VS Code dev containers:
+
+1. Open the command palette in VS Code
+2. Select the dev container configuration
+3. Choose the "development" profile from the context menu
+
+## Environment Files
+
+Ensure you have the appropriate `.env.local` file configured before running the containers. This file should contain:
+- API keys
+- Environment-specific configurations
+- Other required environment variables
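+
+For example, a minimal `.env.local` that only uses Anthropic might look like this (the key value is a placeholder):
+
+```bash
+ANTHROPIC_API_KEY=sk-ant-XXX
+VITE_LOG_LEVEL=debug
+```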
+
+## Notes
+
+- Port 5173 is exposed and mapped for both development and production environments
+- Environment variables are loaded from `.env.local`
+- Different profiles (development/production) can be used for different deployment scenarios
+- The configuration supports both local development and production deployment
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..3b5a74cd
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,67 @@
+ARG BASE=node:20.18.0
+FROM ${BASE} AS base
+
+WORKDIR /app
+
+# Install dependencies (this step is cached as long as the dependencies don't change)
+COPY package.json pnpm-lock.yaml ./
+
+RUN corepack enable pnpm && pnpm install
+
+# Copy the rest of your app's source code
+COPY . .
+
+# Expose the port the app runs on
+EXPOSE 5173
+
+# Production image
+FROM base AS bolt-ai-production
+
+# Define environment variables with default values or let them be overridden
+ARG GROQ_API_KEY
+ARG OPENAI_API_KEY
+ARG ANTHROPIC_API_KEY
+ARG OPEN_ROUTER_API_KEY
+ARG GOOGLE_GENERATIVE_AI_API_KEY
+ARG OLLAMA_API_BASE_URL
+ARG VITE_LOG_LEVEL=debug
+
+ENV WRANGLER_SEND_METRICS=false \
+ GROQ_API_KEY=${GROQ_API_KEY} \
+ OPENAI_API_KEY=${OPENAI_API_KEY} \
+ ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \
+ OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
+ GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
+ OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
+ VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
+
+# Pre-configure wrangler to disable metrics
+RUN mkdir -p /root/.config/.wrangler && \
+ echo '{"enabled":false}' > /root/.config/.wrangler/metrics.json
+
+RUN pnpm run build
+
+CMD [ "pnpm", "run", "dockerstart"]
+
+# Development image
+FROM base AS bolt-ai-development
+
+# Define the same environment variables for development
+ARG GROQ_API_KEY
+ARG OPENAI_API_KEY
+ARG ANTHROPIC_API_KEY
+ARG OPEN_ROUTER_API_KEY
+ARG GOOGLE_GENERATIVE_AI_API_KEY
+ARG OLLAMA_API_BASE_URL
+ARG VITE_LOG_LEVEL=debug
+
+ENV GROQ_API_KEY=${GROQ_API_KEY} \
+ OPENAI_API_KEY=${OPENAI_API_KEY} \
+ ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \
+ OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
+ GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
+ OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
+ VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
+
+# WORKDIR is a Dockerfile instruction, not an environment variable, so spell out the path
+RUN mkdir -p /app/run
+CMD pnpm run dev --host
diff --git a/README.md b/README.md
index d3745298..fb70e756 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,52 @@
[![Bolt.new: AI-Powered Full-Stack Web Development in the Browser](./public/social_preview_index.jpg)](https://bolt.new)
+# Bolt.new Fork by Cole Medin
+
+This fork of Bolt.new allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, Mistral, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
+
+# Requested Additions to this Fork - Feel Free to Contribute!!
+
+- ✅ OpenRouter Integration (@coleam00)
+- ✅ Gemini Integration (@jonathands)
+- ✅ Autogenerate Ollama models from what is downloaded (@yunatamos)
+- ✅ Filter models by provider (@jasonm23)
+- ✅ Download project as ZIP (@fabwaseem)
+- ✅ Improvements to the main Bolt.new prompt in `app/lib/.server/llm/prompts.ts` (@kofi-bhr)
+- ✅ DeepSeek API Integration (@zenith110)
+- ✅ Mistral API Integration (@ArulGandhi)
+- ✅ "Open AI Like" API Integration (@ZerxZ)
+- ✅ Ability to sync files (one way sync) to local folder (@muzafferkadir)
+- ✅ Containerize the application with Docker for easy installation (@aaronbolton)
+- ✅ Publish projects directly to GitHub (@goncaloalves)
+- ⬜ Prevent Bolt from rewriting files as often (done, but the PR still needs review)
+- ⬜ **HIGH PRIORITY** - Better prompting for smaller LLMs (code window sometimes doesn't start)
+- ⬜ **HIGH PRIORITY** - Load local projects into the app
+- ⬜ **HIGH PRIORITY** - Attach images to prompts
+- ⬜ **HIGH PRIORITY** - Run agents in the backend as opposed to a single model call
+- ⬜ LM Studio Integration
+- ⬜ Together Integration
+- ⬜ Azure Open AI API Integration
+- ⬜ HuggingFace Integration
+- ⬜ Perplexity Integration
+- ⬜ Vertex AI Integration
+- ⬜ Cohere Integration
+- ⬜ Deploy directly to Vercel/Netlify/other similar platforms
+- ⬜ Ability to revert code to earlier version
+- ⬜ Prompt caching
+- ⬜ Better prompt enhancing
+- ⬜ Ability to enter API keys in the UI
+- ⬜ Have LLM plan the project in a MD file for better results/transparency
+- ⬜ VSCode Integration with git-like confirmations
+- ⬜ Upload documents for knowledge - UI design templates, a code base to reference coding style, etc.
+- ⬜ Voice prompting
+
# Bolt.new: AI-Powered Full-Stack Web Development in the Browser
Bolt.new is an AI-powered web development agent that allows you to prompt, run, edit, and deploy full-stack applications directly from your browser—no local setup required. If you're here to build your own AI-powered web dev agent using the Bolt open source codebase, [click here to get started!](./CONTRIBUTING.md)
## What Makes Bolt.new Different
-Claude, v0, etc are incredible- but you can't install packages, run backends or edit code. That’s where Bolt.new stands out:
+Claude, v0, etc are incredible- but you can't install packages, run backends, or edit code. That’s where Bolt.new stands out:
- **Full-Stack in the Browser**: Bolt.new integrates cutting-edge AI models with an in-browser development environment powered by **StackBlitz’s WebContainers**. This allows you to:
- Install and run npm tools and libraries (like Vite, Next.js, and more)
@@ -15,40 +55,196 @@ Claude, v0, etc are incredible- but you can't install packages, run backends or
- Deploy to production from chat
- Share your work via a URL
-- **AI with Environment Control**: Unlike traditional dev environments where the AI can only assist in code generation, Bolt.new gives AI models **complete control** over the entire environment including the filesystem, node server, package manager, terminal, and browser console. This empowers AI agents to handle the entire app lifecycle—from creation to deployment.
+- **AI with Environment Control**: Unlike traditional dev environments where the AI can only assist in code generation, Bolt.new gives AI models **complete control** over the entire environment including the filesystem, node server, package manager, terminal, and browser console. This empowers AI agents to handle the whole app lifecycle—from creation to deployment.
-Whether you’re an experienced developer, a PM or designer, Bolt.new allows you to build production-grade full-stack applications with ease.
+Whether you’re an experienced developer, a PM, or a designer, Bolt.new allows you to easily build production-grade full-stack applications.
For developers interested in building their own AI-powered development tools with WebContainers, check out the open-source Bolt codebase in this repo!
-## Tips and Tricks
+## Setup
-Here are some tips to get the most out of Bolt.new:
+Many of you may be installing software from GitHub for the first time. If you run into any installation trouble, reach out and submit an "issue" using the links above, or feel free to enhance this documentation by forking the repo, editing the instructions, and submitting a pull request.
-- **Be specific about your stack**: If you want to use specific frameworks or libraries (like Astro, Tailwind, ShadCN, or any other popular JavaScript framework), mention them in your initial prompt to ensure Bolt scaffolds the project accordingly.
+1. Install Git from https://git-scm.com/downloads
-- **Use the enhance prompt icon**: Before sending your prompt, try clicking the 'enhance' icon to have the AI model help you refine your prompt, then edit the results before submitting.
+2. Install Node.js from https://nodejs.org/en/download/
-- **Scaffold the basics first, then add features**: Make sure the basic structure of your application is in place before diving into more advanced functionality. This helps Bolt understand the foundation of your project and ensure everything is wired up right before building out more advanced functionality.
+Pay attention to the installer notes after completion.
-- **Batch simple instructions**: Save time by combining simple instructions into one message. For example, you can ask Bolt to change the color scheme, add mobile responsiveness, and restart the dev server, all in one go saving you time and reducing API credit consumption significantly.
+On all operating systems, the path to Node.js should be added to your system path automatically, but you can check it if you want to be sure. On Windows, search for "edit the system environment variables", select "Environment Variables..." in the System Properties dialog, and check for a path to Node in your "Path" system variable. On Mac or Linux, the installer will tell you to check whether /usr/local/bin is in your $PATH. To determine whether /usr/local/bin is included in $PATH, open your Terminal and run:
+
+```
+echo $PATH
+```
+
+If you see /usr/local/bin in the output, you're good to go.
+
+3. Clone the repository (if you haven't already) by opening a Terminal window (or CMD with admin permissions) and typing:
+
+```
+git clone https://github.com/coleam00/bolt.new-any-llm.git
+```
+
+4. Rename .env.example to .env and add your LLM API keys. On a Mac, you will find this file at "[your name]/bolt.new-any-llm/.env.example". On Windows and Linux the path will be similar.
+
+![image](https://github.com/user-attachments/assets/7e6a532c-2268-401f-8310-e8d20c731328)
+
+If you can't see the file indicated above, it's likely that you can't view hidden files. On a Mac, open a Terminal window and enter the command below. On Windows, you will find the hidden files option in File Explorer Settings. A quick Google search will help if you are stuck here.
+
+```
+defaults write com.apple.finder AppleShowAllFiles YES
+```
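+
+Then relaunch Finder so the change takes effect:
+
+```bash
+killall Finder
+```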
+
+**NOTE**: You only need to set the keys for the providers you plan to use; Ollama doesn't need an API key because it runs locally on your computer:
+
+Get your GROQ API Key here: https://console.groq.com/keys
+
+Get your OpenAI API Key by following these instructions: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
+
+Get your Anthropic API Key in your account settings: https://console.anthropic.com/settings/keys
+
+```
+GROQ_API_KEY=XXX
+OPENAI_API_KEY=XXX
+ANTHROPIC_API_KEY=XXX
+```
+
+Optionally, you can set the debug level:
+
+```
+VITE_LOG_LEVEL=debug
+```
+
+**Important**: Never commit your `.env` file to version control. It's already included in .gitignore.
+
+## Run with Docker
+
+Prerequisites:
+
+Git and Node.js as mentioned above, as well as Docker: https://www.docker.com/
+
+### 1a. Using Helper Scripts
+
+NPM scripts are provided for convenient building:
+
+```bash
+# Development build
+npm run dockerbuild
+
+# Production build
+npm run dockerbuild:prod
+```
+
+### 1b. Direct Docker Build Commands (alternative to using NPM scripts)
+
+You can use Docker's target feature to specify the build environment instead of using NPM scripts if you wish:
+
+```bash
+# Development build
+docker build . --target bolt-ai-development
+
+# Production build
+docker build . --target bolt-ai-production
+```
+
+### 2. Docker Compose with Profiles to Run the Container
-## FAQs
+Use Docker Compose profiles to manage different environments:
-**Where do I sign up for a paid plan?**
-Bolt.new is free to get started. If you need more AI tokens or want private projects, you can purchase a paid subscription in your [Bolt.new](https://bolt.new) settings, in the lower-left hand corner of the application.
+```bash
+# Development environment
+docker-compose --profile development up
-**What happens if I hit the free usage limit?**
-Once your free daily token limit is reached, AI interactions are paused until the next day or until you upgrade your plan.
+# Production environment
+docker-compose --profile production up
+```
-**Is Bolt in beta?**
-Yes, Bolt.new is in beta, and we are actively improving it based on feedback.
+When you run the Docker Compose command with the development profile, any changes you
+make to the code on your machine will automatically be reflected in the app running
+in the container (i.e. hot reloading still applies!).
-**How can I report Bolt.new issues?**
-Check out the [Issues section](https://github.com/stackblitz/bolt.new/issues) to report an issue or request a new feature. Please use the search feature to check if someone else has already submitted the same issue/request.
+## Run Without Docker
-**What frameworks/libraries currently work on Bolt?**
-Bolt.new supports most popular JavaScript frameworks and libraries. If it runs on StackBlitz, it will run on Bolt.new as well.
+1. Install dependencies using Terminal (or CMD in Windows with admin permissions):
-**How can I add make sure my framework/project works well in bolt?**
-We are excited to work with the JavaScript ecosystem to improve functionality in Bolt. Reach out to us via [hello@stackblitz.com](mailto:hello@stackblitz.com) to discuss how we can partner!
+```
+pnpm install
+```
+
+If you get an error like "command not found: pnpm", pnpm isn't installed yet. You can install it globally with:
+
+```
+sudo npm install -g pnpm
+```
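+
+Alternatively, recent versions of Node ship with Corepack (the same mechanism the Dockerfile in this repo uses), which can provision pnpm without a global npm install:
+
+```bash
+corepack enable pnpm
+```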
+
+2. Start the application with the command:
+
+```bash
+pnpm run dev
+```
+
+## Super Important Note on Running Ollama Models
+
+By default, Ollama models have a context window of only 2048 tokens, even for large models that can easily handle far more.
+That is not a large enough window to handle the Bolt.new/oTToDev prompt! You have to create a version of any model you want
+to use in which you specify a larger context window. Luckily it's super easy to do that.
+
+All you have to do is:
+
+- Create a file called "Modelfile" (no file extension) anywhere on your computer
+- Put in the two lines:
+
+```
+FROM [Ollama model ID such as qwen2.5-coder:7b]
+PARAMETER num_ctx 32768
+```
+
+- Run the command:
+
+```
+ollama create -f Modelfile [your new model ID, can be whatever you want (example: qwen2.5-coder-extra-ctx:7b)]
+```
+
+Now you have a new Ollama model that isn't limited to the default context length.
+You'll see this new model in the list of Ollama models along with all the others you pulled!
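+
+You can confirm the new model was created (assuming the example model ID above) with:
+
+```bash
+ollama list | grep qwen2.5-coder-extra-ctx
+```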
+
+## Adding New LLMs
+
+To make new LLMs available in this version of Bolt.new, head over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object with the model ID as its name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider.
+
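+For illustration, a new entry might look like the sketch below (field names follow the description above; verify the exact shape against the existing entries in `app/utils/constants.ts`):
+
+```typescript
+// Hypothetical MODEL_LIST entry using the larger-context Ollama model created earlier
+{
+  name: 'qwen2.5-coder-extra-ctx:7b', // model ID the provider's API expects
+  label: 'Qwen 2.5 Coder 7B (32k ctx)', // text shown in the frontend model dropdown
+  provider: 'Ollama', // provider that serves this model
+},
+```
+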
+By default, Anthropic, OpenAI, Groq, and Ollama are implemented as providers, but the YouTube video for this repo covers how to extend this to work with more providers if you wish!
+
+When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it. For Ollama models, make sure you have the model installed already before trying to use it here!
+
+## Available Scripts
+
+- `pnpm run dev`: Starts the development server.
+- `pnpm run build`: Builds the project.
+- `pnpm run start`: Runs the built application locally using Wrangler Pages. This script uses `bindings.sh` to set up necessary bindings so you don't have to duplicate environment variables.
+- `pnpm run preview`: Builds the project and then starts it locally, useful for testing the production build. Note, HTTP streaming currently doesn't work as expected with `wrangler pages dev`.
+- `pnpm test`: Runs the test suite using Vitest.
+- `pnpm run typecheck`: Runs TypeScript type checking.
+- `pnpm run typegen`: Generates TypeScript types using Wrangler.
+- `pnpm run deploy`: Builds the project and deploys it to Cloudflare Pages.
+
+## Development
+
+To start the development server:
+
+```bash
+pnpm run dev
+```
+
+This will start the Remix Vite development server. You will need Google Chrome Canary to run this locally if you use Chrome! It's an easy install and a good browser for web development anyway.
+
+## Tips and Tricks
+
+Here are some tips to get the most out of Bolt.new:
+
+- **Be specific about your stack**: If you want to use specific frameworks or libraries (like Astro, Tailwind, ShadCN, or any other popular JavaScript framework), mention them in your initial prompt to ensure Bolt scaffolds the project accordingly.
+
+- **Use the enhance prompt icon**: Before sending your prompt, try clicking the 'enhance' icon to have the AI model help you refine your prompt, then edit the results before submitting.
+
+- **Scaffold the basics first, then add features**: Make sure the basic structure of your application is in place before diving into more advanced functionality. This helps Bolt understand the foundation of your project and ensure everything is wired up right before building out more advanced functionality.
+
+- **Batch simple instructions**: Save time by combining simple instructions into one message. For example, you can ask Bolt to change the color scheme, add mobile responsiveness, and restart the dev server, all in one go saving you time and reducing API credit consumption significantly.
diff --git a/app/components/chat/BaseChat.tsx b/app/components/chat/BaseChat.tsx
index c4f90f43..c1175f70 100644
--- a/app/components/chat/BaseChat.tsx
+++ b/app/components/chat/BaseChat.tsx
@@ -1,3 +1,5 @@
+// @ts-nocheck
+// Preventing TS checks on files presented in the video for a better presentation.
import type { Message } from 'ai';
import React, { type RefCallback } from 'react';
import { ClientOnly } from 'remix-utils/client-only';
@@ -5,11 +7,65 @@ import { Menu } from '~/components/sidebar/Menu.client';
import { IconButton } from '~/components/ui/IconButton';
import { Workbench } from '~/components/workbench/Workbench.client';
import { classNames } from '~/utils/classNames';
+import { MODEL_LIST, DEFAULT_PROVIDER } from '~/utils/constants';
import { Messages } from './Messages.client';
import { SendButton } from './SendButton.client';
+import { useState } from 'react';
import styles from './BaseChat.module.scss';
+const EXAMPLE_PROMPTS = [
+ { text: 'Build a todo app in React using Tailwind' },
+ { text: 'Build a simple blog using Astro' },
+ { text: 'Create a cookie consent form using Material UI' },
+ { text: 'Make a space invaders game' },
+ { text: 'How do I center a div?' },
+];
+
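+// Unique set of providers derived from MODEL_LIST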
+const providerList = [...new Set(MODEL_LIST.map((model) => model.provider))];
+
+const ModelSelector = ({ model, setModel, modelList, providerList }) => {
+ const [provider, setProvider] = useState(DEFAULT_PROVIDER);
+ return (
+