diff --git a/README.md b/README.md
index fd7a355..80f4a46 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ You will need to provide the following arguments to the build command:
 docker build -t telegram-chatgpt-bot . \
   --build-arg OPENAI_TOKEN= \
   --build-arg BOT_TOKEN= \
-  --build-arg CHATGPT_VERSION=
+  --build-arg CHAT_MODEL=
 ```
 
 You can also run the bot locally
diff --git a/internal/config/config.go b/internal/config/config.go
index e53bc78..21c7068 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -12,7 +12,7 @@ type Config struct {
 	TelegramAPIToken string `env:"BOT_TOKEN,required"`
 	OpenAIToken      string `env:"OPENAI_TOKEN,required"`
 	DefaultLanguage  string `env:"LANG,default=en"`
-	ChatGPTVersion   string `env:"CHATGPT_VERSION,default=3.5"`
+	ChatModel        string `env:"CHAT_MODEL,default=gpt-3.5-turbo-1106"`
 }
 
 var (
diff --git a/internal/handlers/private.go b/internal/handlers/private.go
index 74aab8f..d62a2f2 100644
--- a/internal/handlers/private.go
+++ b/internal/handlers/private.go
@@ -105,13 +105,6 @@ func apiRequestRoutine(
 	const (
 		chatGPT4 = "4"
 	)
-	var modelVersion string
-	switch config.Get().ChatGPTVersion {
-	case chatGPT4:
-		modelVersion = openai.GPT4
-	default:
-		modelVersion = openai.GPT3Dot5Turbo
-	}
 
 	instruction := openai.ChatCompletionMessage{
 		Role: "system",
@@ -160,7 +153,7 @@ func apiRequestRoutine(
 	resp, err := openaiClient.CreateChatCompletion(
 		context.Background(),
 		openai.ChatCompletionRequest{
-			Model:       modelVersion,
+			Model:       config.Get().ChatModel,
 			Messages:    append([]openai.ChatCompletionMessage{instruction}, chatHistory...),
 			MaxTokens:   openAIMaxTokens,
 			Temperature: openAITemperature,
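
For reviewers, a minimal standalone sketch of what the change amounts to: the model name taken from `CHAT_MODEL` is passed straight through to the Chat Completions request instead of being mapped from a version number via the removed switch. This assumes the `github.com/sashabaranov/go-openai` client that the handler code appears to use; the file below is illustrative, not part of the diff.

```go
// Sketch (not part of this diff): CHAT_MODEL is forwarded verbatim to the
// OpenAI Chat Completions API, so any supported model name works without
// code changes.
package main

import (
	"context"
	"fmt"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// internal/config/config.go defaults CHAT_MODEL to gpt-3.5-turbo-1106.
	model := os.Getenv("CHAT_MODEL")
	if model == "" {
		model = "gpt-3.5-turbo-1106"
	}

	client := openai.NewClient(os.Getenv("OPENAI_TOKEN"))
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			// Passed through as-is, e.g. gpt-4-1106-preview also works.
			Model: model,
			Messages: []openai.ChatCompletionMessage{
				{Role: openai.ChatMessageRoleUser, Content: "ping"},
			},
		},
	)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}
```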