Skip to content

Commit

Permalink
Merge pull request #279 from amosproj/function-chat-fix
Browse files Browse the repository at this point in the history
Firebase Functions bug fix to get answers from multiple LLMs
  • Loading branch information
preetvadaliya authored Jul 17, 2024
2 parents c7eb55e + 35f0fb8 commit a78bafa
Show file tree
Hide file tree
Showing 13 changed files with 171 additions and 258 deletions.
21 changes: 16 additions & 5 deletions functions/handlers.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

from chain import create_health_ai_chain
from config import initialize_firebase
from langchain_anthropic import ChatAnthropic
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI
from store import get_vector_store

Expand All @@ -16,10 +18,19 @@ def get_health_ai_response(question, llm):


def get_response_from_llm(query, llm):
    """Route a user query to the requested LLM backend and return its answer.

    Args:
        query: The user's question text.
        llm: Model identifier — one of 'gpt-4', 'gpt-3.5-turbo-instruct',
            'google' (Gemini), or 'claude' (Anthropic).

    Returns:
        The model's answer produced via get_health_ai_response, or the
        string 'Model Not Found' for an unknown identifier.
    """
    if llm in ('gpt-4', 'gpt-3.5-turbo-instruct'):
        # Both OpenAI models use the same client; pass the id straight through.
        llm_model = ChatOpenAI(api_key=environ.get('OPEN_AI_API_KEY'), temperature=0, model=llm)
        response = get_health_ai_response(query, llm_model)
    elif llm == 'google':
        llm_model = ChatGoogleGenerativeAI(
            model='gemini-1.5-pro-latest', google_api_key=environ.get('GOOGLE_API_KEY')
        )
        response = get_health_ai_response(query, llm_model)
    elif llm == 'claude':
        llm_model = ChatAnthropic(
            model='claude-3-5-sonnet-20240620', api_key=environ.get('ANTHROPIC_API_KEY')
        )
        response = get_health_ai_response(query, llm_model)
    else:
        # Unknown identifier: return a sentinel instead of raising so the
        # HTTP handlers can include it in the per-model response list.
        response = 'Model Not Found'
    return response
4 changes: 2 additions & 2 deletions functions/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from handlers import get_response_from_llm


@https_fn.on_request(cors=options.CorsOptions(cors_origins=['*']))
@https_fn.on_request(memory=options.MemoryOption.GB_32, cpu=8, timeout_sec=300)
def get_response_url(req: https_fn.Request) -> https_fn.Response:
query = req.get_json().get('query', '')
llms = req.get_json().get('llms', ['gpt-4'])
Expand All @@ -15,7 +15,7 @@ def get_response_url(req: https_fn.Request) -> https_fn.Response:
return https_fn.Response(dumps(responses), mimetype='application/json')


@https_fn.on_call()
@https_fn.on_call(memory=options.MemoryOption.GB_32, cpu=8, timeout_sec=300)
def get_response(req: https_fn.CallableRequest):
query = req.data.get('query', '')
llms = req.data.get('llms', ['gpt-4'])
Expand Down
7 changes: 7 additions & 0 deletions functions/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,10 @@ langchain-community
langchain-openai
langchain-astradb
lark
langchain_core
langchain_google_genai
langchain_anthropic
google-auth
google-auth-oauthlib
google-api-python-client
python-dotenv
6 changes: 2 additions & 4 deletions src/backend/Scrapers/YouTube/you_tube.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import html
import json
import logging
import os
import re
from typing import List
Expand All @@ -14,9 +15,6 @@
from src.backend.Scrapers.YouTube import INDEX_FILE_PATH, RAW_DIR_PATH
from src.backend.Types.you_tube import TypeYouTubeScrappingData
from src.backend.Utils.splitter import get_text_chunks
import logging




class YouTubeScraper(BaseScraper):
Expand Down Expand Up @@ -100,7 +98,7 @@ def _scrape(self) -> TypeYouTubeScrappingData:
scrap_data['ref'] = f'https://www.youtube.com/watch?v={scrap_data["videoId"]}'
return scrap_data
except Exception as e:
#print(f'Error: {e} No caption found for videoId: {self.element_id}')
# print(f'Error: {e} No caption found for videoId: {self.element_id}')
error_msg = f'Error: {e} No caption found for videoId: {self.element_id}'
write_to_log(self.element_id, self.__class__.__name__, error_msg)
return {}
Expand Down
6 changes: 3 additions & 3 deletions src/frontend/components/DropdownMenu/index.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,11 @@ import { Style } from './style';
*/

export const DropdownMenu = () => {
const { params } = useRoute<RouteProp<MainDrawerParams>>();
const [isVisible, setIsVisible] = useState(false);
const { activeChatId, setActiveChatId } = useActiveChatId();
const { activeLLMs, toggleLLM } = useLLMs(activeChatId || 'default');
const { activeLLMs, toggleLLM } = useLLMs(params?.chatId || 'new');

const { chat, status, error } = useGetChat(activeChatId);
const { chat, status } = useGetChat(params?.chatId || 'new');

const activeLLMsCount = Object.values(activeLLMs).filter((llm) => llm.active).length;
const activeLLMsNames = Object.values(activeLLMs)
Expand Down
20 changes: 8 additions & 12 deletions src/frontend/components/RenderChat/index.tsx
Original file line number Diff line number Diff line change
@@ -1,25 +1,21 @@
import React, { useEffect } from 'react';
import { Text, View } from 'react-native';
import Markdown from 'react-native-markdown-display';
import { ActivityIndicator, Avatar, useTheme } from 'react-native-paper';
import { useUser } from 'reactfire';
import { useActiveChatId, useGetChat } from 'src/frontend/hooks';
import { ActivityIndicator } from 'react-native-paper';
import { useGetChat } from 'src/frontend/hooks';
import type { conversationMessage } from 'src/frontend/types';
import { ChatBubble } from '../ChatBubble';
import { Style } from './style';

/**
* This file handles rendering all the different chat bubbles from a saved chat in firestore
*
* There is case distinction between AI and user messages because the storage format is different
*/

export function RenderChat() {
const { activeChatId } = useActiveChatId();
const { chat, status } = useGetChat(activeChatId);
const { colors } = useTheme();
const { data: user } = useUser();
type RenderChatProps = {
chatId: string;
};

export function RenderChat(props: RenderChatProps) {
const { chatId } = props;
const { chat, status } = useGetChat(chatId);
if (status === 'loading') return <ActivityIndicator />;
let id = 0;
return (
Expand Down
4 changes: 2 additions & 2 deletions src/frontend/hooks/useUpdateChat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,14 +10,14 @@ import type { Chat } from '../types';
* E.g. with updated conversations with an LLM
*/

export function useUpdateChat(chatId: string) {
export function useUpdateChat() {
const [isUpdating, setIsUpdating] = useState(false);
const [error, setError] = useState<string | null>(null);
const [isSuccess, setIsSuccess] = useState(false);
const { data: users } = useUser();
const firestore = useFirestore();

const updateChat = async (data: Partial<Chat>) => {
const updateChat = async (chatId: string, data: Partial<Chat>) => {
setIsUpdating(true);
setIsSuccess(false);
try {
Expand Down
4 changes: 2 additions & 2 deletions src/frontend/routes/MainRoutes.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ import { createDrawerNavigator } from '@react-navigation/drawer';
import React from 'react';
import { Header } from '../components';
import { Screens } from '../helpers';
import { ChatUI, CustomInstructions, DrawerMenu } from '../screens';
import { Chat, ChatUI, CustomInstructions, DrawerMenu } from '../screens';

export type MainDrawerParams = {
[Screens.Chat]: { chatId: string | null };
Expand All @@ -16,7 +16,7 @@ export function MainRoutes() {
<MainRouteDrawer.Navigator drawerContent={(props) => <DrawerMenu />}>
<MainRouteDrawer.Screen
name={Screens.Chat}
component={ChatUI}
component={Chat}
options={{ header: (props) => <Header {...props} /> }}
/>
<MainRouteDrawer.Screen
Expand Down
127 changes: 127 additions & 0 deletions src/frontend/screens/Chat/index.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
import type { DrawerNavigationProp } from '@react-navigation/drawer';
import { type RouteProp, useNavigation, useRoute } from '@react-navigation/native';
import { arrayUnion } from 'firebase/firestore';
import React, { useEffect, useRef, useState } from 'react';
import { ScrollView, View } from 'react-native';
import { ActivityIndicator, useTheme } from 'react-native-paper';
import { ChatKeyboard, RenderChat, VoiceButton } from 'src/frontend/components';
import { useCreateChat, useGetChat, useGetResponse, useUpdateChat } from 'src/frontend/hooks';
import type { MainDrawerParams } from 'src/frontend/routes/MainRoutes';
import { styles } from './style';

/**
 * Chat screen: renders the active conversation, creates a new chat on the
 * first message, and round-trips user prompts through the cloud function.
 *
 * The active chat id comes from the drawer route params; 'new' is the
 * sentinel for a not-yet-persisted chat.
 */
export function Chat() {
  const { params } = useRoute<RouteProp<MainDrawerParams>>();
  const { setParams } = useNavigation<DrawerNavigationProp<MainDrawerParams>>();
  const { colors } = useTheme();
  const [activeChatId, setActiveChatId] = useState('new');
  const { chat } = useGetChat(activeChatId);
  const { updateChat } = useUpdateChat();
  const { createChat } = useCreateChat();
  const scrollViewRef = useRef<ScrollView>(null);
  const [text, setText] = useState('');
  const [isLoading, setIsLoading] = useState(false);
  // NOTE(review): currently unused — kept because hook calls may register
  // listeners; confirm before removing.
  const getResponse = useGetResponse();

  // ------------- Keyboard scrolls down when sending a message -------------
  useEffect(() => {
    scrollViewRef.current?.scrollToEnd({ animated: true });
  }, [chat?.conversation.length]);

  // ------------- Create chat -------------
  // Persists a new chat seeded with the first user message and returns its
  // id ('new' if creation failed).
  const createNewChat = async (firstMessage: string) => {
    let chatId = 'new';
    try {
      const userPrompt = { type: 'USER', message: firstMessage };
      const result = await createChat({
        title: firstMessage,
        model: ['gpt-4'],
        conversation: [userPrompt]
      });
      setActiveChatId(result?.id || 'new');
      setParams({ chatId: result?.id });
      chatId = result?.id || 'new';
    } catch (error) {
      console.error('Error creating chat:', error);
    }
    return chatId;
  };

  // ------------- Update chat id -------------
  // Keep local state in sync with the drawer route param.
  useEffect(() => {
    setActiveChatId(params?.chatId || 'new');
  }, [params?.chatId]);

  /**
   * POST the query (plus selected models and prior conversation) to the
   * cloud function and return the parsed JSON body.
   */
  // biome-ignore lint/suspicious/noExplicitAny: <explanation>
  async function fetchResponse(query: string): Promise<any> {
    const url = 'https://us-central1-amos-agent-framework.cloudfunctions.net/get_response_url_2';
    const data = {
      query: query,
      llms: chat?.model || ['gpt-4'],
      history: JSON.parse(JSON.stringify(chat?.conversation || []))
    };
    try {
      const response = await fetch(url, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json'
        },
        body: JSON.stringify(data)
      });
      if (!response.ok) {
        throw new Error(`Error: ${response.status}`);
      }
      const responseData = await response.json();
      return responseData;
    } catch (error) {
      console.error('Error:', error);
      throw error;
    }
  }

  // ------------- Send Message -------------
  // Sends the trimmed input: creates the chat first if needed, then appends
  // the USER message (existing chats) and the AI response to Firestore.
  const sendMessage = async () => {
    const queryText = text.trim();
    try {
      setIsLoading(true);
      setText('');
      if (!queryText) return;
      if (activeChatId === 'new') {
        // Fix: seed the new chat with the trimmed query, consistent with
        // the payload sent to fetchResponse below.
        const chatId = await createNewChat(queryText);
        const data = await fetchResponse(queryText);
        await updateChat(chatId, { conversation: arrayUnion({ type: 'AI', message: data }) });
      } else {
        await updateChat(activeChatId, {
          conversation: arrayUnion({ type: 'USER', message: queryText })
        });
        const data = await fetchResponse(queryText);
        await updateChat(activeChatId, { conversation: arrayUnion({ type: 'AI', message: data }) });
      }
    } catch (error) {
      console.error(JSON.stringify(error));
    } finally {
      setIsLoading(false);
    }
  };

  return (
    <View style={styles.container}>
      <ScrollView
        style={styles.chatContainer}
        contentContainerStyle={styles.scrollViewContent}
        ref={scrollViewRef}
      >
        <RenderChat chatId={activeChatId} />
        {isLoading && <ActivityIndicator />}
      </ScrollView>
      <View style={[styles.inputContainer, { borderColor: colors.outlineVariant }]}>
        <ChatKeyboard text={text} setText={setText} onSend={sendMessage} />
        <VoiceButton
          text={text}
          setText={setText}
          onPress={sendMessage}
          isSendButtonDisabled={isLoading}
        />
      </View>
    </View>
  );
}
File renamed without changes.
Loading

0 comments on commit a78bafa

Please sign in to comment.