Skip to content

Commit

Permalink
Fixed issue with Gemini API
Browse files Browse the repository at this point in the history
  • Loading branch information
AJaySi committed Apr 22, 2024
1 parent 180f28a commit 357cba3
Show file tree
Hide file tree
Showing 15 changed files with 188 additions and 186 deletions.
21 changes: 19 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,11 +1,28 @@
# AI Content Generation Toolkit - Alwrity
![](https://github.com/AJaySi/AI-Blog-Writer/blob/main/workspace/keyword_blog.gif)
# How to Use AI Content Generation Toolkit - Alwrity

1). Visit alwrity.com, Under tools section, you will find some AI content writing tools, which are Free & No-Signup.
**Note:** Although, this is limited, as is our wallet & Resources.

2). For the complete AI content toolkit, Alwrity offers a command-line app. It's a BYOK model (Bring Your Own Key).
**Note:** (🗯️ commandline, byok and shit) ... Now, before you run away 🏃💨

If you have 💻 Laptop + 🛜 Internet + 20 minutes, you will be generating blogs, articles etc with just few words.

**[Getting Started for Absolute Beginners](https://www.alwrity.com/post/getting-started-with-alwrity-ai-writer)**

Getting started for pretentious Developers : Continue Reading.....

**If you still get stuck, open an issue here & say pretty please** :: https://github.com/AJaySi/AI-Writer/issues


## Introduction

Alwrity automates and enhances the process of blog creation, optimization, and management.
Leveraging AI technologies, it assists content creators and digital marketers in generating, formatting, and uploading blog content efficiently. The toolkit integrates advanced AI models for text generation, image creation, and data analysis, streamlining the content creation pipeline.

# AI Content Generation Toolkit - Alwrity
![](https://github.com/AJaySi/AI-Blog-Writer/blob/main/workspace/keyword_blog.gif)

---

## Getting Started 🚀 🤞🤞🤞
Expand Down
48 changes: 24 additions & 24 deletions alwrity.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@

import typer
from prompt_toolkit.shortcuts import checkboxlist_dialog, message_dialog, input_dialog
from prompt_toolkit.shortcuts import radiolist_dialog
from prompt_toolkit import prompt
from prompt_toolkit.styles import Style
from prompt_toolkit.shortcuts import radiolist_dialog
Expand All @@ -20,7 +21,8 @@

app = typer.Typer()

from lib.utils.alwrity_utils import blog_from_audio, blog_from_keyword, do_web_research, do_web_research, write_story, essay_writer, blog_tools, competitor_analysis
from lib.utils.alwrity_utils import blog_from_audio, blog_from_keyword, do_web_research, do_web_research
from lib.utils.alwrity_utils import write_story, essay_writer, blog_tools, competitor_analysis, image_to_text_writer, image_generator


def prompt_for_time_range():
Expand Down Expand Up @@ -61,10 +63,11 @@ def start_interactive_mode():
("AI Blog Writer", "AI Blog Writer"),
("AI Story Writer", "AI Story Writer"),
("AI Essay Writer", "AI Essay Writer"),
("AI Image to Text Writer", "AI Image to Text Writer"),
("Online Blog Tools/Apps", "Online Blog Tools/Apps"),
("Do keyword Research", "Do keyword Research"),
("Competitor Analysis", "Competitor Analysis"),
("Create Blog Images(TBD)", "Create Blog Images(TBD)"),
("Create Blog Images", "Create Blog Images"),
("AI Social Media(TBD)", "AI Social Media(TBD)"),
("Quit", "Quit")
]
Expand All @@ -76,10 +79,12 @@ def start_interactive_mode():
write_story()
elif mode == 'AI Essay Writer':
essay_writer()
elif mode == 'AI Image to Text Writer':
image_to_text_writer()
elif mode == 'Do keyword Research':
do_web_research()
elif mode == 'Create Blog Images(TBD)':
faq_generator()
elif mode == 'Create Blog Images':
image_generator()
elif mode == 'Competitor Analysis':
competitor_analysis()
elif mode == 'Online Blog Tools/Apps':
Expand All @@ -105,8 +110,7 @@ def check_search_apis():
"""

# Use rich.print for styling and hyperlinking
print("\n\n🙋♂️ 🙋♂️ Before doing web research, ensure the following API keys are available:")
print("Blogen uses Basic, Semantic, Neural web search using above APIs for contextual blog generation.\n")
print("Alwrity uses Basic, Semantic, Neural web search using above APIs for contextual blog generation.\n")

api_keys = {
"METAPHOR_API_KEY": "Metaphor AI Key (Get it here: [link=https://dashboard.exa.ai/login]Metaphor API[/link])",
Expand All @@ -121,7 +125,6 @@ def check_search_apis():
if os.getenv(key) is None:
# Use rich.print for styling and hyperlinking
print(f"[bold red]✖ 🚫 {key} is missing:[/bold red] [blue underline]Get {key} API Key[/blue underline]")
typer.echo(f"[bold red]✖ 🚫 {key} is missing:[/bold red] [link={key}]Get {key} API Key[/link]")
missing_keys.append((key, description))

if missing_keys:
Expand All @@ -142,10 +145,12 @@ def get_api_key(api_key: str, api_description: str):
api_key (str): The name of the API key variable.
api_description (str): The description of the API key.
"""
user_input = typer.prompt(f"\n\n🙆💩💩🙆 - Please enter {api_key} API Key:")
print("\n\n")
print(f"[bold green] 🙋 Attention Here: 🙋 -- {api_description}")
user_input = typer.prompt(f"💩 -**Please Enter(copy/paste) {api_key} API Key** - Here🙋:")
with open(".env", "a") as env_file:
env_file.write(f"{api_key}={user_input}\n")
print(f"✅ {api_description} API Key added to .env file.")
print(f"✅ API Key added to .env file.")


def write_blog():
Expand All @@ -172,22 +177,20 @@ def check_llm_environs():
""" Function to check which LLM api is given. """
# Load .env file
load_dotenv(Path('.env'))
# Check if GPT_PROVIDER is defined in .env file
gpt_provider = os.getenv("GPT_PROVIDER")

# Disable unsupported GPT providers
supported_providers = ['google', 'openai', 'mistralai']
if gpt_provider is None or gpt_provider.lower() not in map(str.lower, supported_providers):
# Prompt user to select a provider
selected_provider = radiolistbox(
msg='Select your preferred GPT provider:',
title='GPT Provider Selection',
choices=["Google", "OpenAI", "MistralAI/WIP", "Ollama (TBD)"],
default_choice="Google"
)
gpt_provider = selected_provider
gpt_provider = radiolist_dialog(
title="Select your GPT Provider(llm) from 'google', 'openai', 'mistralai'",
values=[("google", "Google Gemini Pro"), ("openai", "OpenAI- ChatGPT"), ("mistralai", "MistralAI/WIP")]).run()
# Update .env file
os.environ["GPT_PROVIDER"] = gpt_provider
with open(".env", "a") as env_file:
env_file.write(f"GPT_PROVIDER=gpt_provider\n")
print(f"✅ API Key added to .env file.")

if gpt_provider.lower() == "google":
api_key_var = "GEMINI_API_KEY"
Expand All @@ -198,9 +201,9 @@ def check_llm_environs():
elif gpt_provider.lower() == "mistralai":
api_key_var = "MISTRAL_API_KEY"
missing_api_msg = "To get your MistralAI API key, please visit: https://mistralai.com/api"
else:
print("Unrecognised/Unsupported GPT provider. Check your main_config and environs.")
exit(1)

if api_key_var not in os.environ:
get_api_key(api_key_var, missing_api_msg)


def check_internet():
Expand Down Expand Up @@ -233,13 +236,10 @@ def create_env_file():


if __name__ == "__main__":
print("Checking Internet, lets get the basics right.")
check_internet()
print("Create .env file, if not Present working directory")
create_env_file()
print("Check Metaphor, Tavily, YOU.com Search API keys.")
os.system("clear" if os.name == "posix" else "cls")
check_search_apis()
print("Check LLM details & AI Model to use.")
check_llm_environs()
os.environ["SEARCH_SAVE_FILE"] = os.path.join(os.getcwd(), "workspace",
"web_research_report" + "_" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
Expand Down
79 changes: 0 additions & 79 deletions lib/gpt_providers/image_to_text_gen/gemini_image_details.py

This file was deleted.

2 changes: 1 addition & 1 deletion lib/gpt_providers/text_generation/ai_essay_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ def ai_essay_generator(essay_title, selected_essay_type, selected_education_leve
load_dotenv(Path('../.env'))
genai.configure(api_key=os.getenv('GEMINI_API_KEY'))
# Initialize the generative model
model = genai.GenerativeModel('gemini-1.0-pro')
model = genai.GenerativeModel('gemini-pro')

# Generate prompts
try:
Expand Down
2 changes: 1 addition & 1 deletion lib/gpt_providers/text_generation/gemini_pro_text.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
)


#@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def gemini_text_response(prompt, temperature, top_p, n, max_tokens):
""" Common functiont to get response from gemini pro Text. """
#FIXME: Include : https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/System_instructions_REST.ipynb
Expand Down
41 changes: 41 additions & 0 deletions lib/gpt_providers/text_to_image_generation/gen_stabl_diff_img.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
from PIL import Image
import requests

# Ensure you sign up for an account to obtain an API key:
# https://platform.stability.ai/
# Your API key can be found here after account creation:
# https://platform.stability.ai/account/keys


def generate_stable_diffusion_image(prompt, output_path="./dog-wearing-glasses.jpeg"):
    """
    Generate an image with the Stability AI Stable Diffusion (SD3) REST API.

    Args:
        prompt (str): Text prompt describing the image to generate.
        output_path (str): File path where the returned image bytes are written.
            Defaults to the original hard-coded path for backward compatibility.

    Returns:
        str: The path the image was written to.

    Raises:
        RuntimeError: If the STABILITY_API_KEY environment variable is not set.
        Exception: If the API responds with a non-200 status; the response
            body (JSON error details, including any content-moderation
            trigger) is included in the message.
    """
    # Local import: the original module imported only PIL and requests,
    # so `os.getenv` below raised NameError. Importing here keeps this
    # fix self-contained.
    import os

    api_key = os.getenv('STABILITY_API_KEY')
    if not api_key:
        # Fail fast with a clear message instead of sending "Bearer None".
        raise RuntimeError("STABILITY_API_KEY environment variable is not set. "
                           "Get a key at https://platform.stability.ai/account/keys")

    response = requests.post(
        "https://api.stability.ai/v2beta/stable-image/generate/sd3",
        headers={
            "authorization": f"Bearer {api_key}",
            "accept": "image/*"
        },
        # The multipart form requires at least one file part even when
        # only form fields are sent; "none" is the documented placeholder.
        files={"none": ''},
        data={
            "prompt": prompt,
            # NOTE(review): output_format is webp but the default
            # output_path has a .jpeg extension — kept as-is for backward
            # compatibility; confirm the intended format/extension.
            "output_format": "webp",
        },
    )

    if response.status_code == 200:
        with open(output_path, 'wb') as image_file:
            image_file.write(response.content)
        # Return the saved location so callers (e.g. generate_image) can
        # use it; the original implicitly returned None.
        return output_path
    else:
        raise Exception(str(response.json()))
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,12 @@
format="<level>{level}</level>|<green>{file}:{line}:{function}</green>| {message}"
)

from .gpt_providers.openai_gpt_provider import generate_dalle2_images, generate_dalle3_images, openai_chatgpt
from .stabl_diff_img2html import generate_stable_diffusion_image
#from .gen_dali2_images
from .gen_dali3_images import generate_dalle3_images
from .gen_stabl_diff_img import generate_stable_diffusion_image


def generate_image(user_prompt, image_dir, image_engine="dalle3"):
def generate_image(user_prompt, image_engine="dalle3"):
"""
The generation API endpoint creates an image based on a text prompt.
Expand All @@ -40,18 +41,14 @@ def generate_image(user_prompt, image_dir, image_engine="dalle3"):
Must be one of "url" or "b64_json". Defaults to "url".
--> user (str): A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse.
"""
logger.info(f"Generated blog images will be stored at: {image_dir=}")

img_prompt = generate_img_prompt(user_prompt)
# call the OpenAI API to generate image from prompt.
logger.info(f"Calling openai.image.generate with prompt: {img_prompt}")
logger.info(f"Calling image.generate with prompt: {img_prompt}")

if 'dalle2' in image_engine:
image_stored_at = generate_dalle2_images(img_prompt, image_dir)
elif 'dalle3' in image_engine:
image_stored_at = generate_dalle3_images(img_prompt, image_dir)
elif 'stable_diffusion' in image_engine:
image_stored_at = generate_stable_diffusion_image(img_prompt, image_dir)
if 'Dalle3' in image_engine:
image_stored_at = generate_dalle3_images(img_prompt)
elif 'Stable Diffusion' in image_engine:
image_stored_at = generate_stable_diffusion_image(img_prompt)

return image_stored_at

Expand All @@ -72,5 +69,5 @@ def generate_img_prompt(user_prompt):
Advice for creating prompt for image from the given text(no more than 150 words).
Reply with only one answer and no descrition. Generate image prompt for the below text.
Text: {user_prompt}"""
response = openai_chatgpt(prompt)
response = (prompt)
return response
File renamed without changes.
Loading

0 comments on commit 357cba3

Please sign in to comment.