(question) switching from openai and NPU usage on the opi 5? #39

Open
develperbayman opened this issue Aug 16, 2023 · 2 comments

@develperbayman

Hello, this is a very cool project. Is there a way to leverage Petals from a Flask app I already have? I want to move away from OpenAI. I'm using an Orange Pi 5, but I've never been able to utilize its NPU, and when I looked at the documentation I thought my head was going to explode. Basically, I want to take my code, swap OpenAI out for Petals, use my NPU/GPU to help the cluster (and myself), and then access the chatbot from my HTML.
Here's my code; any advice would be awesome, as I'm getting tired of it and just want it to work, lol. Here's hoping the code markdown sticks (I always have problems with it). A sketch of the OpenAI-to-Petals swap follows the script below.

```python
import openai
import threading
import time
import sys
import chat_commands
from gtts import gTTS
import os
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import *
from flask import Flask, request, render_template
from PIL import Image, ImageTk
import torch
import torchvision.models as models
import speech_recognition as sr
import pygame
import webbrowser
import re
import subprocess
#import guifunc
#import pythonide
import pipgui
#from tkinter import *
#from tkinter.filedialog import asksaveasfilename, askopenfilename

# OPENAI API KEY
openai.api_key = "api_key"

doListenToCommand = True
listening = False

# List of common goodbyes used to end the conversation
despedida = ["Goodbye", "goodbye", "bye", "Bye", "See you later", "see you later"]

# Create the GUI window
window = tk.Tk()
window.title("Computer:AI")
window.geometry("400x400")

# Create the text entry box
text_entry = tk.Entry(window, width=50)
text_entry.pack(side=tk.BOTTOM)

# Create the submit button
submit_button = tk.Button(window, text="Submit", command=lambda: submit())
submit_button.pack(side=tk.BOTTOM)

# Create the text output box
text_output = tk.Text(window, height=300, width=300)
text_output.pack(side=tk.BOTTOM)

def submit(event=None, text_input=None):
    global doListenToCommand
    global listening

    # Get the user input: use the argument if one was passed, otherwise read the entry box
    if text_input:
        usuario = text_input
    else:
        usuario = text_entry.get()

    # If the input matches the list of goodbyes, close the app and stop here
    if usuario in despedida:
        on_closing()
        return

    prompt = f"You are ChatGPT and answer my following message: {usuario}"

    # Getting responses using the OpenAI API
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        max_tokens=2049
    )

    respuesta = response["choices"][0]["text"]

    # Converting text to audio
    texto = str(respuesta)
    tts = gTTS(texto, lang='en', tld='ie')
    tts.save("audio.mp3")

    # Displaying the answer on the screen
    text_output.insert(tk.END, "ChatGPT: " + respuesta + "\n")

    # Clear the input text
    text_entry.delete(0, tk.END)

    # Playing the audio
    doListenToCommand = False
    time.sleep(1)
    os.system("play audio.mp3")
    doListenToCommand = True

    # Call function to listen to the user
    # if listening == False:
    #     listen_to_command()



# Bind the Enter key to the submit function
window.bind("<Return>", submit)
#pygame.mixer.music.load("audio.mp3")
#pygame.mixer.music.play()
#termux audio
#os.system("mpg123 audio.mp3")
# Flask app
app = Flask(__name__, template_folder='templates')

@app.route("/", methods=["GET", "POST"])
def index():
    if request.method == "POST":
        file = request.files["file"]
        file.save(file.filename)
        openai.api_key = request.form["apikey"]
        return "Model file and API key saved."
    return render_template("index.html")

def run_as_normal_app():
    window.update()

def run_on_flask():
    app.run()

def listen_to_command():
    global doListenToCommand
    global listening

    # Only listen while allowed (listening is paused during audio playback)
    if doListenToCommand:
        # Initialize the recognizer
        r = sr.Recognizer()

        # Use the default microphone as the audio source
        with sr.Microphone() as source:
            print("Listening...")
            listening = True
            audio = r.listen(source)
            listening = False

        try:
            # Use speech recognition to convert speech to text
            command = r.recognize_google(audio)
            print("You said:", command)
            text_output.insert(tk.END, "You: " + command + "\n")
            text_entry.delete(0, tk.END)
            
            # Check if the command is a "generate image" instruction
            # if "generate image" in command.lower():
            #   # Call the function to generate the image
            #   generate_image()

            # Process the commands
            # Prepare object to be passed.
            class passed_commands:
                tk = tk
                text_output = text_output
                submit = submit

            chat_commands.process_commands(passed_commands,command)

        except sr.UnknownValueError:
            print("Speech recognition could not understand audio.")
        except sr.RequestError as e:
            print("Could not request results from Google Speech Recognition service:", str(e))


        listen_to_command()
        listening = False

def on_closing():
    if tk.messagebox.askokcancel("Quit", "Do you want to quit?"):
        window.destroy()

def pythonide():
    # Launch the external Python IDE script in a separate process
    subprocess.run(["python3", "pythonide.py"])

window.protocol("WM_DELETE_WINDOW", on_closing)

if __name__ == "__main__":
    # Create the menu bar
    menu_bar = tk.Menu(window)

    # Create the "File" menu
    file_menu = tk.Menu(menu_bar, tearoff=0)
    file_menu.add_command(label="Open LLM", command=lambda: filedialog.askopenfilename())
    file_menu.add_command(label="Save LLM", command=lambda: filedialog.asksaveasfilename())
    file_menu.add_separator()
    #file_menu.add_command(label="Exit", command=window.quit)
    file_menu.add_command(label="Exit", command=on_closing)
    menu_bar.add_cascade(label="File", menu=file_menu)

    # Create the "Run" menu
    run_menu = tk.Menu(menu_bar, tearoff=0)
    run_menu.add_command(label="Run as normal app", command=run_as_normal_app)
    run_menu.add_command(label="Run on Flask", command=run_on_flask)
    run_menu.add_command(label='Python Ide', command=pythonide)
    menu_bar.add_cascade(label="Run", menu=run_menu)

    # Set the menu bar
    window.config(menu=menu_bar)


    # Start the main program loop
    start_listening_thread = threading.Thread(target=listen_to_command)
    start_listening_thread.daemon = True
    start_listening_thread.start()
    window.mainloop()
```
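
A minimal sketch of the swap asked about above: replace the `openai.Completion.create(...)` block inside `submit()` with a Petals generation call. This assumes the `bigscience/bloom-7b1-petals` model used later in this thread; the `ask_petals()` helper is an illustrative name, not part of the original script. Note that the Orange Pi 5's RK3588 NPU is not a PyTorch backend, so the Petals client's local layers would run on CPU on that board.

```python
import torch
from transformers import BloomTokenizerFast
from petals import DistributedBloomForCausalLM

MODEL_NAME = "bigscience/bloom-7b1-petals"
# No CUDA on an Orange Pi 5, and the RK3588 NPU is not usable from PyTorch, so fall back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = BloomTokenizerFast.from_pretrained(MODEL_NAME)
model = DistributedBloomForCausalLM.from_pretrained(MODEL_NAME).to(DEVICE)

def ask_petals(prompt, max_new_tokens=200):
    """Drop-in replacement for the openai.Completion.create(...) call in submit()."""
    inputs = tokenizer(prompt, return_tensors="pt")["input_ids"].to(DEVICE)
    outputs = model.generate(
        inputs,
        do_sample=True,
        temperature=0.6,
        top_k=100,
        max_new_tokens=max_new_tokens,
    )
    # Return only the generated continuation, not the prompt tokens
    return tokenizer.decode(outputs[0, inputs.shape[1]:], skip_special_tokens=True)

# Inside submit(), `respuesta = ask_petals(prompt)` would replace the OpenAI block.
```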
@develperbayman
Author

The above script works great with OpenAI; it would be sweet if I could make it use Petals.

@develperbayman
Author

Here's a code update if anyone is interested; I may open a repo.
```python
import os
import torch
import tkinter as tk
from gtts import gTTS
import speech_recognition as sr
from transformers import BloomTokenizerFast
from petals import DistributedBloomForCausalLM

# Initialize the Tkinter window
root = tk.Tk()
root.title("Petals Chatbot")

# Create a Text widget for displaying messages
text_widget = tk.Text(root)
text_widget.pack()

# Initialize the chatbot components
MODEL_NAME = "bigscience/bloom-7b1-petals"
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'  # falls back to CPU (no CUDA on the Orange Pi 5)
tokenizer = BloomTokenizerFast.from_pretrained(MODEL_NAME)
model = DistributedBloomForCausalLM.from_pretrained(MODEL_NAME).to(DEVICE)

# Function to generate a response from user input
def generate_response(user_input):
    inputs = tokenizer([f"{user_input}\n-----\n"], return_tensors='pt')['input_ids'].to(DEVICE)
    with model.inference_session(max_length=512) as sess:
        outputs = model.generate(
            inputs,
            temperature=0.6,
            do_sample=True,
            top_k=100,
            max_new_tokens=50,
            session=sess,
        )
        # Decode the newly generated tokens (not just the final token) as the answer
        bloom_answer = tokenizer.decode(outputs[0, inputs.shape[1]:], skip_special_tokens=True)
        return bloom_answer

# Function to speak the response
def speak_response(response):
    tts = gTTS(text=response, lang='en')
    tts.save("response.mp3")
    os.system("play response.mp3")

# Function to handle sending user message
def send_user_message():
    user_input = user_input_entry.get()
    add_message("You: " + user_input, True)
    response = generate_response(user_input)
    add_message("Bot: " + response, False)
    speak_response(response)

# Function to add a message to the display
def add_message(message, is_user):
    tag = "user" if is_user else "bot"
    text_widget.insert(tk.END, "\n" + message, tag)
    text_widget.see(tk.END)

# Entry widget for user input
user_input_entry = tk.Entry(root)
user_input_entry.pack()

# Button to send user message
send_button = tk.Button(root, text="Send", command=send_user_message)
send_button.pack()

# Styling for user and bot messages
text_widget.tag_configure("user", foreground="blue")
text_widget.tag_configure("bot", foreground="green")

root.mainloop()
```
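
To reach the chatbot from HTML, as asked in the original post, the Petals script above could also be exposed through a small Flask route instead of (or alongside) the Tkinter window. A minimal sketch, assuming the `generate_response()` function defined above; the `/chat` route and the `message` form field are illustrative names, not part of either posted script.

```python
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route("/chat", methods=["POST"])
def chat():
    # Expects a form field named "message" (e.g. from an HTML <form> or fetch() call)
    user_input = request.form.get("message", "")
    if not user_input:
        return jsonify({"error": "empty message"}), 400
    # Reuses generate_response() from the Petals script above
    return jsonify({"response": generate_response(user_input)})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
```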
