-  return (
+  return (
+    <div role="alert">
+      <p>Something went wrong</p>
+      <Button onClick={() => resetErrorBoundary()} variant='destructive'>
+        Try again
+      </Button>
+    </div>
+  );
 }
\ No newline at end of file
diff --git a/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ChatScreen.tsx b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ChatScreen.tsx
new file mode 100644
index 000000000..50439f158
--- /dev/null
+++ b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ChatScreen.tsx
@@ -0,0 +1,76 @@
+'use client';
+import { Avatar, AvatarFallback } from "@/components/ui/avatar";
+import { useAtomValue } from "jotai";
+import { activeSessionId } from "./atoms";
+import useSWR from "swr";
+import { ChatMessageType, getConversationBySessionId } from "@/data/conversations";
+import Loader from "@/components/ui/Loader";
+import { format } from 'timeago.js';
+import { EmptyBlock } from "@/components/domain/EmptyBlock";
+
+function UserMessage({ message, created_at }: ChatMessageType) {
+  return (
+    <div>
+      <p>{message}</p>
+      <span>{format(created_at)}</span>
+    </div>
+  );
+}
+function CopilotMessage({ message, created_at }: ChatMessageType) {
+  return (
+    <div>
+      <Avatar>
+        <AvatarFallback>C</AvatarFallback>
+      </Avatar>
+      <div>
+        <p>{message}</p>
+        <span>{format(created_at)}</span>
+      </div>
+    </div>
+  );
+}
+function ChatDivider({ content }: { content: string }) {
+  return (
+    <div>
+      <span>{content}</span>
+    </div>
+  );
+}
+
+export function ChatScreen() {
+ const activeid = useAtomValue(activeSessionId);
+ const {
+ data: chat,
+ isLoading
+ } = useSWR(activeid, getConversationBySessionId)
+  return (
+    <div>
+      {isLoading && <Loader />}
+      {chat ? (
+        chat?.data.map((c, i) => {
+          if (c.from_user) {
+            return <UserMessage key={i} {...c} />;
+          } else {
+            return <CopilotMessage key={i} {...c} />;
+          }
+        })
+      ) : (
+        <EmptyBlock>
+          <p>Select a conversation to start chatting</p>
+        </EmptyBlock>
+      )}
+    </div>
+  );
+}
diff --git a/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ConversationAside.tsx b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ConversationAside.tsx
new file mode 100644
index 000000000..d703e9757
--- /dev/null
+++ b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ConversationAside.tsx
@@ -0,0 +1,53 @@
+"use client";
+import { Avatar, AvatarFallback } from "@/components/ui/avatar";
+import { useAtomValue } from "jotai";
+import { Bookmark, CheckCircle, Trash2 } from "lucide-react";
+import { activeSessionId } from "./atoms";
+
+export function ConversationAside() {
+ const activeid = useAtomValue(activeSessionId);
+
+  return (
+    activeid && (
+      <aside>
+        <div>
+          <Avatar>
+            <AvatarFallback>U</AvatarFallback>
+          </Avatar>
+          <p>Conversation between your assistant and Unknown User</p>
+        </div>
+        <div>
+          <h3>actions</h3>
+          <ul>
+            <li>
+              <button>
+                <CheckCircle />
+                Mark as Reviewed
+              </button>
+            </li>
+            <li>
+              <button>
+                <Bookmark />
+                Save for Later
+              </button>
+            </li>
+            <li>
+              <button>
+                <Trash2 />
+                Delete
+              </button>
+            </li>
+          </ul>
+        </div>
+      </aside>
+    )
+  );
+}
diff --git a/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ConversationHeader.tsx b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ConversationHeader.tsx
new file mode 100644
index 000000000..b4b334c74
--- /dev/null
+++ b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ConversationHeader.tsx
@@ -0,0 +1,12 @@
+import React from "react";
+import { HeaderShell } from "@/components/domain/HeaderShell";
+
+export function ConversationHeader() {
+  return (
+    <HeaderShell>
+      <h1>Conversation</h1>
+    </HeaderShell>
+  );
+}
diff --git a/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ListConverations.tsx b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ListConverations.tsx
new file mode 100644
index 000000000..b36bd4df5
--- /dev/null
+++ b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/ListConverations.tsx
@@ -0,0 +1,48 @@
+"use client";
+import { useAtom } from "jotai";
+import { activeSessionId } from "./atoms";
+import { cn } from "@/lib/utils";
+import useSWR from "swr";
+import { useCopilot } from "../../../_context/CopilotProvider";
+import { ConversationType, getSessionsByBotId } from "@/data/conversations";
+import { format } from 'timeago.js';
+function Conversation(props: ConversationType) {
+ const [activeid, setActiveId] = useAtom(activeSessionId);
+ const isActive = activeid === props.session_id;
+  return (
+    <div
+      onClick={() => setActiveId(props.session_id)}
+      role="button"
+      className={cn(
+        "w-full border border-l-[3px] p-4 transition-colors last-of-type:mb-2",
+        isActive
+          ? "sticky bottom-0 left-0 top-0 !border-l-primary bg-accent"
+          : "border-x-transparent bg-white border-border",
+      )}
+    >
+      <p>{props.first_message.message}</p>
+      <span>{format(props.first_message.created_at)}</span>
+    </div>
+  );
+}
+
+export function ListConversations() {
+ const {
+ id: copilotId,
+ } = useCopilot();
+ const {
+ data: conversations
+ } = useSWR(copilotId + "/conversations", async () => (await getSessionsByBotId(copilotId))?.data)
+  return (
+    <div>
+      {conversations?.map((c, i) => (
+        <Conversation key={i} {...c} />
+      ))}
+    </div>
+  );
+}
diff --git a/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/atoms.ts b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/atoms.ts
new file mode 100644
index 000000000..56b12ec80
--- /dev/null
+++ b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/_parts/atoms.ts
@@ -0,0 +1,3 @@
+import { atom } from "jotai";
+
+export const activeSessionId = atom<string | null>(null);
diff --git a/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/layout.tsx b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/layout.tsx
index 78541a753..db7e2e71f 100644
--- a/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/layout.tsx
+++ b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/layout.tsx
@@ -1,7 +1,5 @@
-import { HeaderShell } from "@/components/domain/HeaderShell";
-import { Button } from "@/components/ui/button";
-import { RefreshCcw } from "lucide-react";
import React from "react";
+import { ListConversations } from "./_parts/ListConverations";
type Props = {
children: React.ReactNode;
@@ -13,15 +11,15 @@ type Props = {
export default function SettingsLayout({ children, params }: Props) {
const copilotBase = `/copilot/${params.copilot_id}/conversations`;
return (
-    <div className="flex h-full flex-col overflow-hidden">
-      <HeaderShell>
-        <h1>Conversations</h1>
-        <Button variant="ghost" size="icon">
-          <RefreshCcw />
-        </Button>
-      </HeaderShell>
+    <div className="flex h-full overflow-hidden">
+      <aside className="h-full shrink-0 overflow-auto border-r">
+        <header>
+          <h1>Conversations</h1>
+        </header>
+        <ListConversations />
+      </aside>
{children}
);
diff --git a/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/page.tsx b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/page.tsx
index 5be4efb64..2a1b42451 100644
--- a/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/page.tsx
+++ b/dashboard/app/(copilot)/copilot/[copilot_id]/conversations/page.tsx
@@ -1,10 +1,18 @@
import React from "react";
+import { ConversationAside } from "./_parts/ConversationAside";
+import { ChatScreen } from "./_parts/ChatScreen";
+
+import { ConversationHeader } from "./_parts/ConversationHeader";
+
export default function Conversations() {
return (
-    <div className="flex h-full flex-col items-center justify-center">
-      <h2>Nothing here Yet</h2>
-      <p>Work in Progress</p>
-    </div>
+    <div className="flex h-full flex-col overflow-hidden">
+      <ConversationHeader />
+      <div className="flex flex-1 overflow-hidden">
+        <ChatScreen />
+        <ConversationAside />
+      </div>
+    </div>
);
diff --git a/dashboard/app/(copilot)/copilot/[copilot_id]/knowledge/_parts/AddDataSource.tsx b/dashboard/app/(copilot)/copilot/[copilot_id]/knowledge/_parts/AddDataSource.tsx
index e3fba4340..e58845cef 100644
--- a/dashboard/app/(copilot)/copilot/[copilot_id]/knowledge/_parts/AddDataSource.tsx
+++ b/dashboard/app/(copilot)/copilot/[copilot_id]/knowledge/_parts/AddDataSource.tsx
@@ -81,7 +81,7 @@ function AddUrlDataSource() {
title: "Data source(s) added successfully",
variant: "success"
});
- _.delay(() => setDialog(null), 2000)
+ _.delay(() => setDialog(null), 1000)
} else {
toast({
title: "Error adding data source(s)",
diff --git a/dashboard/app/(copilot)/copilot/[copilot_id]/knowledge/_parts/KnowledgeTable.tsx b/dashboard/app/(copilot)/copilot/[copilot_id]/knowledge/_parts/KnowledgeTable.tsx
index a0bbdf06c..f2fbfa608 100644
--- a/dashboard/app/(copilot)/copilot/[copilot_id]/knowledge/_parts/KnowledgeTable.tsx
+++ b/dashboard/app/(copilot)/copilot/[copilot_id]/knowledge/_parts/KnowledgeTable.tsx
@@ -14,7 +14,7 @@ import {
HoverCardTrigger,
} from "@/components/ui/hover-card";
import { Checkbox } from "@/components/ui/checkbox";
-import { Minus, Trash2, ChevronsUpDown } from "lucide-react";
+import { Minus, Trash2, ChevronsUpDown, RotateCcw, CheckCircle, XCircle, RotateCw } from "lucide-react";
import {
ColumnDef,
ColumnFiltersState,
@@ -29,20 +29,20 @@ import {
useReactTable,
} from "@tanstack/react-table";
import _ from "lodash";
-import { timeSince } from "@/lib/timesince";
import { Button } from "@/components/ui/button";
import { useAtomValue } from "jotai";
import { searchQueryAtom } from "./searchAtom";
import { EmptyBlock } from "@/components/domain/EmptyBlock";
import useSWR from "swr";
-import { getDataSourcesByBotId } from "@/data/knowledge";
+import { Datasource, getDataSourcesByBotId } from "@/data/knowledge";
import { useCopilot } from "../../../_context/CopilotProvider";
import Link from "next/link";
+import { format } from 'timeago.js'
export type DataSources = {
id: string;
name: string;
type: string;
- status: string;
+ status: Datasource['status'];
date: Date | number | string;
source: string;
};
@@ -91,9 +91,20 @@ const columns: ColumnDef<DataSources>[] = [
{
accessorKey: "status",
header: "Status",
-    cell: ({ row }) => (
-      <span>{row.getValue("status")}</span>
-    ),
+    cell: ({ row }) => {
+      const status = String(row.getValue("status")).toUpperCase() as DataSources['status'];
+      switch (status) {
+        case "PENDING":
+          return <RotateCw />;
+        case "SUCCESS":
+        case "COMPLETED":
+          return <CheckCircle />;
+        case "FAILED":
+          return <XCircle />;
+        default:
+          return status;
+      }
+    }
},
{
accessorKey: "type",
@@ -103,7 +114,7 @@ const columns: ColumnDef<DataSources>[] = [
{
accessorKey: "date",
header: "Date",
-    cell: ({ row }) => <span>{timeSince(row.getValue("date"))} ago</span>,
+    cell: ({ row }) => <span>{format(row.getValue("date"), 'en-us')}</span>,
},
{
accessorKey: "source",
@@ -123,11 +134,9 @@ export function KnowledgeTable() {
const [rowSelection, setRowSelection] = React.useState({});
const {
data: dataSources,
- isLoading
} = useSWR(copilotId + '/data_sources', async () => {
const resp = await getDataSourcesByBotId(copilotId);
const data: DataSources[] = [];
- console.log(resp.data)
if (resp.data.web_sources) {
resp.data.web_sources.forEach((item) => {
data.push({
@@ -154,6 +163,8 @@ export function KnowledgeTable() {
}
return data
+ }, {
+ refreshInterval: 1000 * 10
})
const table = useReactTable({
data: dataSources || [],
diff --git a/dashboard/app/(copilot)/copilot/[copilot_id]/page.tsx b/dashboard/app/(copilot)/copilot/[copilot_id]/page.tsx
index f66ce418b..5d0fb5101 100644
--- a/dashboard/app/(copilot)/copilot/[copilot_id]/page.tsx
+++ b/dashboard/app/(copilot)/copilot/[copilot_id]/page.tsx
@@ -55,6 +55,9 @@ function InstallationSection() {
Authorization: "Bearer your_auth_token_goes_here",
AnyKey: "AnyValue"
},
+ user:{
+ name:"Default User"
+ }
}
window.addEventListener("DOMContentLoaded", ()=>initAiCoPilot(options)); // window.onload
`}
@@ -119,8 +122,8 @@ export default function CopilotPage() {
-
-
+
+
Attention
@@ -138,7 +141,7 @@ export default function CopilotPage() {
-
diff --git a/dashboard/app/globals.css b/dashboard/app/globals.css
index 4afe05aa7..b9aa6a64c 100644
--- a/dashboard/app/globals.css
+++ b/dashboard/app/globals.css
@@ -20,7 +20,7 @@
--primary-foreground: 0 0% 98%;
/* */
--accent-alt: 200 27% 96%;
- --secondary: 189 28% 95%;
+ --secondary: 0 0% 96%;
--secondary-foreground: 0 0% 9%;
--muted: 0 0% 96.1%;
diff --git a/dashboard/components/ui/avatar.tsx b/dashboard/components/ui/avatar.tsx
index da33cd106..3bc53513a 100644
--- a/dashboard/components/ui/avatar.tsx
+++ b/dashboard/components/ui/avatar.tsx
@@ -1,24 +1,39 @@
-"use client"
+"use client";
-import * as React from "react"
-import * as AvatarPrimitive from "@radix-ui/react-avatar"
-
-import { cn } from "@/lib/utils"
+import * as React from "react";
+import * as AvatarPrimitive from "@radix-ui/react-avatar";
+import { cn } from "@/lib/utils";
+import { cva, VariantProps } from "class-variance-authority";
+const avatarVariants = cva(
+ "relative flex h-10 w-10 shrink-0 overflow-hidden rounded-full",
+ {
+ variants: {
+ size: {
+ small: "h-6 w-6",
+ medium: "h-8 w-8",
+ large: "h-10 w-10",
+ xlarge: "h-12 w-12",
+ xxlarge: "h-14 w-14",
+ },
+ },
+ defaultVariants: {
+ size: "medium",
+ },
+ },
+);
const Avatar = React.forwardRef<
   React.ElementRef<typeof AvatarPrimitive.Root>,
-  React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Root>
->(({ className, ...props }, ref) => (
-  <AvatarPrimitive.Root
-    ref={ref}
-    className={cn(
-      "relative flex h-10 w-10 shrink-0 overflow-hidden rounded-full",
-      className
-    )}
-    {...props}
-  />
-))
-Avatar.displayName = AvatarPrimitive.Root.displayName
+  React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Root> &
+    VariantProps<typeof avatarVariants>
+>(({ className, size, ...props }, ref) => (
+  <AvatarPrimitive.Root
+    ref={ref}
+    className={cn(avatarVariants({ size }), className)}
+    {...props}
+  />
+));
+Avatar.displayName = AvatarPrimitive.Root.displayName;
const AvatarImage = React.forwardRef<
  React.ElementRef<typeof AvatarPrimitive.Image>,
@@ -29,8 +44,8 @@ const AvatarImage = React.forwardRef<
className={cn("aspect-square h-full w-full", className)}
{...props}
/>
-))
-AvatarImage.displayName = AvatarPrimitive.Image.displayName
+));
+AvatarImage.displayName = AvatarPrimitive.Image.displayName;
const AvatarFallback = React.forwardRef<
  React.ElementRef<typeof AvatarPrimitive.Fallback>,
@@ -39,12 +54,12 @@ const AvatarFallback = React.forwardRef<
   React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Fallback>
 >(({ className, ...props }, ref) => (
   <AvatarPrimitive.Fallback
     ref={ref}
     className={cn("flex h-full w-full items-center justify-center rounded-full bg-muted", className)}
     {...props}
   />
-))
-AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName
+));
+AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName;
-export { Avatar, AvatarImage, AvatarFallback }
+export { Avatar, AvatarImage, AvatarFallback };
diff --git a/dashboard/components/ui/popover.tsx b/dashboard/components/ui/popover.tsx
index be4633085..fe89c979b 100644
--- a/dashboard/components/ui/popover.tsx
+++ b/dashboard/components/ui/popover.tsx
@@ -11,21 +11,30 @@ const PopoverTrigger = PopoverPrimitive.Trigger;
const PopoverContentPrimitive = PopoverPrimitive.Content;
const PopoverContent = React.forwardRef<
  React.ElementRef<typeof PopoverPrimitive.Content>,
-  React.ComponentPropsWithoutRef<typeof PopoverPrimitive.Content>
->(({ className, align = "center", sideOffset = 4, ...props }, ref) => (
-  <PopoverPrimitive.Portal>
-    <PopoverPrimitive.Content
-      ref={ref}
-      align={align}
-      sideOffset={sideOffset}
-      className={cn("animate-in", className)}
-      {...props}
-    />
-  </PopoverPrimitive.Portal>
-));
+  React.ComponentPropsWithoutRef<typeof PopoverPrimitive.Content> & {
+    animated?: boolean;
+  }
+>(
+  (
+    { className, align = "center", animated = true, sideOffset = 4, ...props },
+    ref,
+  ) => (
+    <PopoverPrimitive.Portal>
+      <PopoverPrimitive.Content
+        ref={ref}
+        align={align}
+        sideOffset={sideOffset}
+        className={cn(animated && "animate-in", className)}
+        {...props}
+      />
+    </PopoverPrimitive.Portal>
+  ),
+);
PopoverContent.displayName = PopoverPrimitive.Content.displayName;
export { Popover, PopoverTrigger, PopoverContent, PopoverContentPrimitive };
diff --git a/dashboard/data/conversations.ts b/dashboard/data/conversations.ts
new file mode 100644
index 000000000..214e2c02a
--- /dev/null
+++ b/dashboard/data/conversations.ts
@@ -0,0 +1,37 @@
+import axios from "axios";
+const instance = axios.create({
+ baseURL: "http://localhost:8888/backend/chat",
+});
+export type ChatMessageType = {
+ id: string;
+ chatbot_id: string;
+ session_id: string;
+ message: string;
+ from_user: boolean;
+ created_at: string;
+};
+
+// http://localhost:8888/backend/chat/sessions/:sessionId/chats
+export async function getConversationBySessionId(sessionId: string) {
+ if (!sessionId) return;
+  return instance.get<ChatMessageType[]>(`/sessions/${sessionId}/chats`);
+}
+
+export type ConversationType = {
+ first_message: {
+ id: number;
+ chatbot_id: string;
+ created_at: string;
+ from_user: boolean;
+ message: string;
+ session_id: string;
+ updated_at: string;
+ };
+ session_id: string;
+}
+
+// http://localhost:8888/backend/chat/b/:bot_id/chat_sessions
+export async function getSessionsByBotId(bot_id: string) {
+ if (!bot_id) return;
+  return instance.get<ConversationType[]>(`/b/${bot_id}/chat_sessions`);
+}
\ No newline at end of file
diff --git a/dashboard/data/knowledge.ts b/dashboard/data/knowledge.ts
index 09942ab93..433054471 100644
--- a/dashboard/data/knowledge.ts
+++ b/dashboard/data/knowledge.ts
@@ -25,11 +25,11 @@ export async function ingestDataSources(filenames: string[], bot_id: string) {
});
}
-type Datasource = {
+export type Datasource = {
chatbot_id: string;
id: string;
source: string;
- status: string;
+ status: "SUCCESS" | "COMPLETED" | "FAILED" | "PENDING";
updated_at: string;
}
export async function getDataSourcesByBotId(bot_id: string) {
diff --git a/dashboard/package.json b/dashboard/package.json
index 87b990161..0ea9a7c28 100644
--- a/dashboard/package.json
+++ b/dashboard/package.json
@@ -12,7 +12,7 @@
"dependencies": {
"@hookform/resolvers": "^3.3.1",
"@openchatai/copilot-flows-editor": "^1.5.2",
- "@openchatai/copilot-widget": "^1.1.1",
+ "@openchatai/copilot-widget": "^1.2.0",
"@radix-ui/react-accordion": "^1.1.2",
"@radix-ui/react-alert-dialog": "^1.0.5",
"@radix-ui/react-avatar": "^1.0.4",
@@ -45,6 +45,7 @@
"react": "^18",
"react-dom": "^18",
"react-dropzone": "^14.2.3",
+ "react-error-boundary": "^4.0.11",
"react-hook-form": "^7.47.0",
"react-hotkeys-hook": "^4.4.1",
"react-syntax-highlighter": "^15.5.0",
@@ -54,6 +55,7 @@
"swr": "^2.2.4",
"tailwind-merge": "^1.14.0",
"tailwindcss-animate": "^1.0.7",
+ "timeago.js": "^4.0.2",
"zod": "^3.22.3"
},
"devDependencies": {
diff --git a/dashboard/pnpm-lock.yaml b/dashboard/pnpm-lock.yaml
index 25c3d0df6..70aaa4688 100644
--- a/dashboard/pnpm-lock.yaml
+++ b/dashboard/pnpm-lock.yaml
@@ -12,8 +12,8 @@ dependencies:
specifier: ^1.5.2
version: 1.6.0(@radix-ui/react-alert-dialog@1.0.5)(@radix-ui/react-dialog@1.0.5)(@radix-ui/react-tooltip@1.0.7)(react-dom@18.2.0)(react@18.2.0)
'@openchatai/copilot-widget':
- specifier: ^1.1.1
- version: 1.1.1(react-dom@18.2.0)(react@18.2.0)
+ specifier: ^1.2.0
+ version: 1.2.0(react-dom@18.2.0)(react@18.2.0)
'@radix-ui/react-accordion':
specifier: ^1.1.2
version: 1.1.2(@types/react-dom@18.2.13)(@types/react@18.2.28)(react-dom@18.2.0)(react@18.2.0)
@@ -110,6 +110,9 @@ dependencies:
react-dropzone:
specifier: ^14.2.3
version: 14.2.3(react@18.2.0)
+ react-error-boundary:
+ specifier: ^4.0.11
+ version: 4.0.11(react@18.2.0)
react-hook-form:
specifier: ^7.47.0
version: 7.47.0(react@18.2.0)
@@ -137,6 +140,9 @@ dependencies:
tailwindcss-animate:
specifier: ^1.0.7
version: 1.0.7(tailwindcss@3.3.3)
+ timeago.js:
+ specifier: ^4.0.2
+ version: 4.0.2
zod:
specifier: ^3.22.3
version: 3.22.4
@@ -445,8 +451,8 @@ packages:
react-dom: 18.2.0(react@18.2.0)
dev: false
- /@openchatai/copilot-widget@1.1.1(react-dom@18.2.0)(react@18.2.0):
- resolution: {integrity: sha512-Khc6k8mhbaNIEwQKI3oU6anAMzQGf7y5GFvJ34uEotoVHpsU6rDO7odq6HBs2Mv23Zmnt1REp1euUaOv30zMFg==}
+ /@openchatai/copilot-widget@1.2.0(react-dom@18.2.0)(react@18.2.0):
+ resolution: {integrity: sha512-OEpRHjp2hx5+Q2gjkZf0DjQWqnQoLkMSyuXP3vins0n418zn1UR/CDP3nSkiRu0Pm9LZKxP9lu4qziVRlXAmig==}
peerDependencies:
react: ^18.x
react-dom: ^18.x
@@ -4150,6 +4156,15 @@ packages:
react: 18.2.0
dev: false
+ /react-error-boundary@4.0.11(react@18.2.0):
+ resolution: {integrity: sha512-U13ul67aP5DOSPNSCWQ/eO0AQEYzEFkVljULQIjMV0KlffTAhxuDoBKdO0pb/JZ8mDhMKFZ9NZi0BmLGUiNphw==}
+ peerDependencies:
+ react: '>=16.13.1'
+ dependencies:
+ '@babel/runtime': 7.23.2
+ react: 18.2.0
+ dev: false
+
/react-hook-form@7.47.0(react@18.2.0):
resolution: {integrity: sha512-F/TroLjTICipmHeFlMrLtNLceO2xr1jU3CyiNla5zdwsGUGu2UOxxR4UyJgLlhMwLW/Wzp4cpJ7CPfgJIeKdSg==}
engines: {node: '>=12.22.0'}
@@ -4653,6 +4668,10 @@ packages:
dependencies:
any-promise: 1.3.0
+ /timeago.js@4.0.2:
+ resolution: {integrity: sha512-a7wPxPdVlQL7lqvitHGGRsofhdwtkoSXPGATFuSOA2i1ZNQEPLrGnj68vOp2sOJTCFAQVXPeNMX/GctBaO9L2w==}
+ dev: false
+
/to-regex-range@5.0.1:
resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
engines: {node: '>=8.0'}
diff --git a/docker-compose.yml b/docker-compose.yml
index 32a48f252..8d91e0019 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -5,9 +5,9 @@ services:
build:
context: ./backend
dockerfile: Dockerfile
- image: codebanesr/backend:arm_edge # Add this line
+ image: codebanesr/backend:latest # Add this line
ports:
- - 5000:5000
+ - 5001:5000
depends_on:
- llm-server
- mysql
@@ -27,7 +27,7 @@ services:
build:
context: ./llm-server
dockerfile: Dockerfile
- image: codebanesr/llm-server:arm_edge # Add this line
+ image: codebanesr/llm-server:latest # Add this line
volumes:
- shared_data:/app/shared_data
networks:
@@ -55,7 +55,7 @@ services:
build:
context: ./workers
dockerfile: Dockerfile
- image: codebanesr/workers:arm_edge # Add this line
+ image: codebanesr/workers:latest # Add this line
networks:
- opencopilot_network
volumes:
@@ -74,7 +74,7 @@ services:
build:
context: ./dashboard
dockerfile: Dockerfile
- image: codebanesr/dashboard:arm_edge # Add this line
+ image: codebanesr/dashboard:latest # Add this line
ports:
- 8000:8000
volumes:
diff --git a/k8s/backend-deployment.yaml b/k8s/backend-deployment.yaml
index 090671e10..49e43dfca 100644
--- a/k8s/backend-deployment.yaml
+++ b/k8s/backend-deployment.yaml
@@ -37,7 +37,7 @@ spec:
value: "3306"
- name: DB_USERNAME
value: dbuser
- image: codebanesr/backend:arm_edge
+ image: codebanesr/backend:latest
name: backend
ports:
- containerPort: 5000
@@ -47,6 +47,18 @@ spec:
volumeMounts:
- mountPath: /app/shared_data
name: shared-data
+
+ initContainers:
+ - name: php-migrations
+ image: codebanesr/backend:latest
+ command: ["/bin/sh", "-c"]
+ args:
+ - |
+ php artisan cache:clear
+ php artisan config:cache
+ php artisan migrate
+ php artisan key:generate
+ php artisan storage:link
restartPolicy: Always
volumes:
- name: shared-data
diff --git a/k8s/dashboard-deployment.yaml b/k8s/dashboard-deployment.yaml
index 788cd6472..4136256d2 100644
--- a/k8s/dashboard-deployment.yaml
+++ b/k8s/dashboard-deployment.yaml
@@ -26,7 +26,7 @@ spec:
io.kompose.service: dashboard
spec:
containers:
- - image: codebanesr/dashboard:arm_edge
+ - image: codebanesr/dashboard:latest
name: dashboard
ports:
- containerPort: 8000
diff --git a/k8s/llm-server-deployment.yaml b/k8s/llm-server-deployment.yaml
index 92f01075f..bec060656 100644
--- a/k8s/llm-server-deployment.yaml
+++ b/k8s/llm-server-deployment.yaml
@@ -122,7 +122,7 @@ spec:
configMapKeyRef:
key: STORE
name: configmap
- image: codebanesr/llm-server:arm_edge
+ image: codebanesr/llm-server:latest
name: llm-server
ports:
- containerPort: 8002
diff --git a/k8s/workers-deployment.yaml b/k8s/workers-deployment.yaml
index b6fce80dc..e075e4b71 100644
--- a/k8s/workers-deployment.yaml
+++ b/k8s/workers-deployment.yaml
@@ -126,7 +126,7 @@ spec:
configMapKeyRef:
key: STORE
name: configmap
- image: codebanesr/workers:arm_edge
+ image: codebanesr/workers:latest
name: workers
resources: {}
volumeMounts:
diff --git a/llm-server/app.py b/llm-server/app.py
index 5ea50bb22..bb06c480c 100644
--- a/llm-server/app.py
+++ b/llm-server/app.py
@@ -1,4 +1,5 @@
from flask import Flask, request, jsonify, Response
+from models.repository.chat_history_repo import create_chat_history
from routes.workflow.workflow_controller import workflow
from routes.uploads.upload_controller import upload_controller
from routes._swagger.controller import _swagger
@@ -10,6 +11,8 @@
from routes.data_source.data_source_controller import datasource_workflow
from dotenv import load_dotenv
+from utils.vector_store_setup import init_qdrant_collections
+
load_dotenv()
from opencopilot_db import create_database_schema
@@ -23,7 +26,9 @@
app.register_blueprint(datasource_workflow, url_prefix="/data_sources")
app.config.from_object(Config)
-from routes.root_service import handle_request
+from routes.root_service import extract_data, handle_request
+
+init_qdrant_collections()
## TODO: Implement caching for the swagger file content (no need to load it everytime)
@@ -32,6 +37,13 @@ def handle() -> Response:
data = request.get_json()
try:
response = handle_request(data)
+ create_chat_history(data["bot_id"], data["session_id"], True, data["text"])
+ create_chat_history(
+ data["bot_id"],
+ data["session_id"],
+ False,
+ response["response"] or response["error"],
+ )
return jsonify(response)
except Exception as e:
struct_log.exception(
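Note: with this change, chat persistence happens once in `handle()` instead of inside each `handle_*` helper (see the removals in `root_service.py` below). A minimal smoke test for the endpoint; the `/handle` path and port 8002 are assumptions, since neither appears in this hunk:

```python
# Sketch: exercise the llm-server chat handler directly. The payload keys
# mirror what handle() reads: bot_id, session_id, and text.
import requests

payload = {
    "bot_id": "my-bot-id",        # hypothetical ids
    "session_id": "session-123",
    "text": "What can you do?",
}
resp = requests.post("http://localhost:8002/handle", json=payload)
# On success, both sides of the exchange are persisted via create_chat_history.
print(resp.json())
```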
diff --git a/llm-server/models/repository/chat_history_repo.py b/llm-server/models/repository/chat_history_repo.py
index 47d3d57c3..1a986693b 100644
--- a/llm-server/models/repository/chat_history_repo.py
+++ b/llm-server/models/repository/chat_history_repo.py
@@ -1,9 +1,12 @@
from datetime import datetime
-from typing import Optional, cast, List
+from typing import Optional, cast, List, Dict, Union
from opencopilot_db import ChatHistory, engine, pdf_data_source_model
from sqlalchemy.orm import sessionmaker
from datetime import datetime
from typing import Optional, Tuple
+from sqlalchemy import distinct
+from sqlalchemy.orm import class_mapper
+from langchain.schema import BaseMessage, AIMessage, HumanMessage
Session = sessionmaker(bind=engine)
@@ -69,6 +72,18 @@ def get_all_chat_history_by_session_id(
return chats
+def get_chat_message_as_llm_conversation(session_id: str) -> List[BaseMessage]:
+ chats = get_all_chat_history_by_session_id(session_id, 100)
+ conversations: List[BaseMessage] = []
+    for chat in chats:
+        if chat.from_user:
+            conversations.append(HumanMessage(content=chat.message))
+        else:
+            conversations.append(AIMessage(content=chat.message))
+
+ return conversations
+
+
def get_all_chat_history(limit: int = 10, offset: int = 0) -> List[ChatHistory]:
"""Retrieves all chat history records.
@@ -169,3 +184,60 @@ def get_chat_history_for_retrieval_chain(
user_query = None
return chat_history
+
+
+def get_unique_sessions_with_first_message_by_bot_id(
+ bot_id: str, limit: int = 20, offset: int = 0
+) -> List[Dict[str, Union[str, Optional[ChatHistory]]]]:
+ """
+ Retrieve unique session_ids for a given bot_id with pagination,
+ along with the first message in each session.
+
+ Args:
+ bot_id (str): The bot_id for which to retrieve session_ids.
+ limit (int, optional): The maximum number of results to return. Defaults to 20.
+ offset (int, optional): The number of results to skip from the beginning. Defaults to 0.
+
+ Returns:
+ List[Dict[str, Union[str, Optional[ChatHistory]]]]: A list of dictionaries containing
+ unique session_ids and their first messages.
+ """
+    # Open a new SQLAlchemy session for this query
+ session = Session()
+
+ # Use distinct to get unique session_ids
+ unique_session_ids = (
+ session.query(distinct(ChatHistory.session_id))
+ .filter_by(chatbot_id=bot_id)
+ .limit(limit)
+ .offset(offset)
+ .all()
+ )
+
+ result_list = []
+
+ for session_id in unique_session_ids:
+ # Get the first message in each session
+ first_message = (
+ session.query(ChatHistory)
+ .filter_by(chatbot_id=bot_id, session_id=session_id[0])
+ .order_by(ChatHistory.created_at.asc())
+ .first()
+ )
+
+ # Convert ChatHistory object to a dictionary
+ if first_message:
+ first_message_dict = {
+ column.key: getattr(first_message, column.key)
+ for column in class_mapper(ChatHistory).mapped_table.columns
+ }
+ else:
+ first_message_dict = None
+
+ # Create a dictionary with session_id and first_message
+ result_dict = {"session_id": session_id[0], "first_message": first_message_dict}
+
+ result_list.append(result_dict)
+
+ return result_list
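Note: `get_unique_sessions_with_first_message_by_bot_id` opens a `Session()` that is never closed, and issues one query per session. A sketch of a context-managed variant of the inner lookup, assuming SQLAlchemy 1.4+ session context managers:

```python
# Sketch only: same per-session query as above, but the session is scoped
# so the connection is released even if the query raises.
from opencopilot_db import ChatHistory, engine
from sqlalchemy.orm import sessionmaker

Session = sessionmaker(bind=engine)

def first_message_for_session(bot_id: str, session_id: str):
    with Session() as session:  # closed automatically on exit
        return (
            session.query(ChatHistory)
            .filter_by(chatbot_id=bot_id, session_id=session_id)
            .order_by(ChatHistory.created_at.asc())
            .first()
        )
```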
diff --git a/llm-server/requirements.txt b/llm-server/requirements.txt
index eafce4622..3c6e4dbdd 100644
--- a/llm-server/requirements.txt
+++ b/llm-server/requirements.txt
@@ -77,7 +77,7 @@ openai==0.28.1
openapi-schema-validator==0.6.2
openapi-spec-validator==0.7.1
opencopilot-db==2.0.0
-opencopilot-utils==2.0.0
+opencopilot-utils==2.1.0
opentelemetry-api==1.21.0
opentelemetry-exporter-otlp-proto-common==1.21.0
opentelemetry-exporter-otlp-proto-grpc==1.21.0
diff --git a/llm-server/routes/_swagger/controller.py b/llm-server/routes/_swagger/controller.py
index 576ba336f..442708356 100644
--- a/llm-server/routes/_swagger/controller.py
+++ b/llm-server/routes/_swagger/controller.py
@@ -1,16 +1,9 @@
-from flask import Flask, request, jsonify, Blueprint, request, Response
+from flask import request, jsonify, Blueprint, request, Response
-import json, yaml, re
from bson import ObjectId
import routes._swagger.service as swagger_service
from utils.db import Database
-from qdrant_client import QdrantClient
-from qdrant_client.http import models
-
-import os
-
-client = QdrantClient(url=os.getenv("QDRANT_URL", "http://qdrant:6333"))
db_instance = Database()
mongo = db_instance.get_db()
@@ -72,10 +65,6 @@ def add_swagger_file(id) -> Response:
def add_init_swagger_file(bot_id: str) -> Response:
body = request.get_json()
swagger_url = body["swagger_url"]
- client.create_collection(
- collection_name=bot_id,
- vectors_config=models.VectorParams(size=1536, distance=models.Distance.COSINE),
- )
result = swagger_service.save_swaggerfile_to_mongo(swagger_url, bot_id)
return jsonify(result)
diff --git a/llm-server/routes/chat/chat_controller.py b/llm-server/routes/chat/chat_controller.py
index c5bc981f6..97e275f6f 100644
--- a/llm-server/routes/chat/chat_controller.py
+++ b/llm-server/routes/chat/chat_controller.py
@@ -1,4 +1,7 @@
-from models.repository.chat_history_repo import get_all_chat_history_by_session_id
+from models.repository.chat_history_repo import (
+ get_all_chat_history_by_session_id,
+ get_unique_sessions_with_first_message_by_bot_id,
+)
from flask import Blueprint, request, jsonify
from utils.db import Database
from flask import Flask, request, jsonify, Blueprint, request, Response
@@ -32,3 +35,17 @@ def get_session_chats(session_id: str) -> Response:
)
return jsonify(chats_filtered)
+
+
+# unique_session_ids = get_unique_session_ids(session)
+
+
+@chat_workflow.route("/b//chat_sessions", methods=["GET"])
+def get_chat_sessions(bot_id: str) -> Response:
+    limit = int(request.args.get("limit", 20))
+    offset = int(request.args.get("offset", 0))
+ chat_history_sessions = get_unique_sessions_with_first_message_by_bot_id(
+ bot_id, limit, offset
+ )
+
+    return jsonify(chat_history_sessions)
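Note: together with the nginx `location /backend/chat/` block added below, this endpoint becomes reachable from the dashboard. A hedged example of consuming it, with host/port taken from the dashboard's axios baseURL and a placeholder bot id:

```python
# Sketch: list chat sessions for a bot through the nginx proxy.
import requests

bot_id = "my-bot-id"  # placeholder
resp = requests.get(
    f"http://localhost:8888/backend/chat/b/{bot_id}/chat_sessions",
    params={"limit": 20, "offset": 0},
)
for session in resp.json():
    first = session["first_message"] or {}  # first_message may be None
    print(session["session_id"], first.get("message"))
```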
diff --git a/llm-server/routes/root_service.py b/llm-server/routes/root_service.py
index 1ca8ab96d..83f6bd952 100644
--- a/llm-server/routes/root_service.py
+++ b/llm-server/routes/root_service.py
@@ -16,12 +16,10 @@
import os
from typing import Dict, Any, cast
from routes.workflow.utils.router import get_action_type
+from utils.chat_models import CHAT_MODELS
from utils.db import Database
import json
-from models.repository.chat_history_repo import (
- create_chat_history,
- get_chat_history_for_retrieval_chain,
-)
+from models.repository.chat_history_repo import get_chat_history_for_retrieval_chain
from utils.get_chat_model import get_chat_model
from utils.process_app_state import process_state
from prance import ResolvingParser
@@ -49,6 +47,8 @@
FILE_NOT_FOUND = "File not found"
FAILED_TO_CALL_API_ENDPOINT = "Failed to call or map API endpoint"
+chat = get_chat_model(CHAT_MODELS.gpt_3_5_turbo)
+
def handle_request(data: Dict[str, Any]) -> Any:
(
@@ -66,7 +66,7 @@ def handle_request(data: Dict[str, Any]) -> Any:
check_required_fields(base_prompt, text, swagger_url)
swagger_doc = None
try:
- action = get_action_type(text, bot_id)
+ action = get_action_type(text, bot_id, session_id)
logging.info(f"Triggered action: {action}")
if action == ActionType.ASSISTANT_ACTION:
current_state = process_state(app, headers)
@@ -84,6 +84,7 @@ def handle_request(data: Dict[str, Any]) -> Any:
app,
swagger_doc,
session_id,
+ bot_id,
)
bot_response = hasSingleIntent(
@@ -100,21 +101,19 @@ def handle_request(data: Dict[str, Any]) -> Any:
swagger_url,
app,
session_id,
+ bot_id,
)
elif len(bot_response.ids) == 0:
- return handle_no_api_call(
- swagger_url, session_id, text, bot_response.bot_message
- )
+ return handle_no_api_call(bot_response.bot_message)
- elif (
- action == ActionType.KNOWLEDGE_BASE_QUERY
- or action == ActionType.GENERAL_QUERY
- ):
+ elif action == ActionType.KNOWLEDGE_BASE_QUERY:
sanitized_question = text.strip().replace("\n", " ")
- vector_store = get_vector_store(StoreOptions(namespace=bot_id))
+ vector_store = get_vector_store(StoreOptions(namespace="knowledgebase"))
mode = "assistant"
- chain = getConversationRetrievalChain(vector_store, mode, base_prompt)
+ chain = getConversationRetrievalChain(
+ vector_store, mode, base_prompt, bot_id
+ )
chat_history = get_chat_history_for_retrieval_chain(session_id, limit=40)
response = chain(
{"question": sanitized_question, "chat_history": chat_history},
@@ -122,18 +121,16 @@ def handle_request(data: Dict[str, Any]) -> Any:
)
return {"response": response["answer"]}
- # elif action == ActionType.GENERAL_QUERY:
- # chat = get_chat_model("gpt-3.5-turbo")
+ elif action == ActionType.GENERAL_QUERY:
+ messages = [
+ SystemMessage(
+ content="You are an ai assistant, that answers general queries in <= 3 sentences"
+ ),
+ HumanMessage(content=f"Answer the following: {text}"),
+ ]
- # messages = [
- # SystemMessage(
- # content="You are an ai assistant, that answers general queries in <= 3 sentences"
- # ),
- # HumanMessage(content=f"Answer the following: {text}"),
- # ]
-
- # content = chat(messages).content
- # return {"response": content}
+ content = chat(messages).content
+ return {"response": content}
raise action
except Exception as e:
@@ -188,7 +185,9 @@ def get_qa_prompt_by_mode(mode: str, initial_prompt: Optional[str]) -> str:
return initial_prompt if initial_prompt else ""
-def getConversationRetrievalChain(vector_store: VectorStore, mode, initial_prompt: str):
+def getConversationRetrievalChain(
+ vector_store: VectorStore, mode, initial_prompt: str, bot_id: str
+):
llm = get_llm()
# template = get_qa_prompt_by_mode(mode, initial_prompt=initial_prompt)
@@ -209,7 +208,9 @@ def getConversationRetrievalChain(vector_store: VectorStore, mode, initial_promp
chain = ConversationalRetrievalChain.from_llm(
llm,
chain_type="stuff",
- retriever=vector_store.as_retriever(),
+ retriever=vector_store.as_retriever(
+ search_kwargs={"filter": {"bot_id": bot_id}}
+ ),
verbose=True,
combine_docs_chain_kwargs={"prompt": prompt},
)
@@ -274,6 +275,7 @@ def handle_existing_workflow(
app: str,
swagger_doc: ResolvingParser,
session_id: str,
+ bot_id: str,
) -> Dict[str, Any]:
# use user defined workflows if exists, if not use auto_gen_workflow
_workflow = mongo.workflows.find_one(
@@ -292,10 +294,6 @@ def handle_existing_workflow(
app,
)
- create_chat_history(swagger_url, session_id, True, text)
- create_chat_history(
- swagger_url, session_id, False, output["response"] or output["error"]
- )
return output
@@ -308,6 +306,7 @@ def handle_api_calls(
swagger_url: str,
app: str,
session_id: str,
+ bot_id: str,
) -> Dict[str, Any]:
_workflow = create_workflow_from_operation_ids(ids, swagger_doc, text)
output = run_workflow(
@@ -321,21 +320,13 @@ def handle_api_calls(
# m_workflow = mongo.auto_gen_workflows.insert_one(_workflow)
# add_workflow_data_to_qdrant(m_workflow.inserted_id, _workflow, swagger_url)
- create_chat_history(swagger_url, session_id, True, text)
- create_chat_history(
- swagger_url, session_id, False, output["response"] or output["error"]
- )
return output
-def handle_no_api_call(
- swagger_url: str, session_id: str, text: str, bot_message: str
-) -> Dict[str, str]:
- create_chat_history(swagger_url, session_id, True, text)
- create_chat_history(swagger_url, session_id, False, bot_message)
+def handle_no_api_call(bot_message: str) -> Dict[str, str]:
return {"response": bot_message}
def handle_exception(e: Exception, event: str) -> Dict[str, Any]:
struct_log.exception(payload={}, error=str(e), event="/handle_request")
- return {"response": None, "error": "An error occured in hand"}
+ return {"response": None, "error": "An error occured in handle request"}
diff --git a/llm-server/routes/workflow/extractors/convert_json_to_text.py b/llm-server/routes/workflow/extractors/convert_json_to_text.py
index 187c18eb8..698cd65a6 100644
--- a/llm-server/routes/workflow/extractors/convert_json_to_text.py
+++ b/llm-server/routes/workflow/extractors/convert_json_to_text.py
@@ -3,16 +3,14 @@
from langchain.schema import HumanMessage, SystemMessage
from typing import Any
from routes.workflow.extractors.extract_json import extract_json_payload
+from utils import get_chat_model
+from utils.chat_models import CHAT_MODELS
openai_api_key = os.getenv("OPENAI_API_KEY")
def convert_json_to_text(user_input: str, api_response: str) -> str:
- chat = ChatOpenAI(
- openai_api_key=os.getenv("OPENAI_API_KEY"),
- model="gpt-3.5-turbo-16k",
- temperature=0,
- )
+ chat = get_chat_model(CHAT_MODELS.gpt_3_5_turbo)
messages = [
SystemMessage(
diff --git a/llm-server/routes/workflow/extractors/extract_body.py b/llm-server/routes/workflow/extractors/extract_body.py
index 7c966b0f8..45cf217d0 100644
--- a/llm-server/routes/workflow/extractors/extract_body.py
+++ b/llm-server/routes/workflow/extractors/extract_body.py
@@ -1,5 +1,6 @@
import os
from langchain.schema import HumanMessage, SystemMessage
+from utils.chat_models import CHAT_MODELS
from utils.get_chat_model import get_chat_model
from opencopilot_utils import get_llm
@@ -20,7 +21,7 @@ async def gen_body_from_schema(
app: Optional[str],
current_state: Optional[str],
) -> Any:
- chat = get_chat_model("gpt-3.5-turbo-16k")
+ chat = get_chat_model(CHAT_MODELS.gpt_3_5_turbo_16k)
api_generation_prompt = None
if app:
module_name = f"integrations.custom_prompts.{app}"
diff --git a/llm-server/routes/workflow/extractors/extract_param.py b/llm-server/routes/workflow/extractors/extract_param.py
index 1a3b94571..3bdad0d11 100644
--- a/llm-server/routes/workflow/extractors/extract_param.py
+++ b/llm-server/routes/workflow/extractors/extract_param.py
@@ -1,6 +1,7 @@
import os
from langchain.chat_models import ChatOpenAI
from routes.workflow.extractors.extract_json import extract_json_payload
+from utils.chat_models import CHAT_MODELS
from utils.get_chat_model import get_chat_model
from opencopilot_utils import get_llm
from custom_types.t_json import JsonData
@@ -15,7 +16,7 @@
async def gen_params_from_schema(
param_schema: str, text: str, prev_resp: str, current_state: Optional[str]
) -> Optional[JsonData]:
- chat = get_chat_model("gpt-3.5-turbo-16k")
+ chat = get_chat_model(CHAT_MODELS.gpt_3_5_turbo_16k)
messages = [
SystemMessage(
content="You are an intelligent machine learning model that can produce REST API's params / query params in json format, given the json schema, user input, data from previous api calls, and current application state."
diff --git a/llm-server/routes/workflow/extractors/transform_api_response.py b/llm-server/routes/workflow/extractors/transform_api_response.py
index a32a0eca4..455ae4934 100644
--- a/llm-server/routes/workflow/extractors/transform_api_response.py
+++ b/llm-server/routes/workflow/extractors/transform_api_response.py
@@ -4,17 +4,15 @@
from typing import Any
from routes.workflow.extractors.extract_json import extract_json_payload
from routes.lossy_compressors.truncate_json import truncate_json
+from utils import get_chat_model
+from utils.chat_models import CHAT_MODELS
openai_api_key = os.getenv("OPENAI_API_KEY")
def transform_api_response_from_schema(server_url: str, responseText: str) -> str:
- chat = ChatOpenAI(
- openai_api_key=os.getenv("OPENAI_API_KEY"),
- model="gpt-3.5-turbo-16k",
- temperature=0,
- )
+ chat = get_chat_model(CHAT_MODELS.gpt_3_5_turbo_16k)
responseText = truncate_json(json.loads(responseText))
messages = [
diff --git a/llm-server/routes/workflow/utils/__init__.py b/llm-server/routes/workflow/utils/__init__.py
index 0fa4d7854..4f64333d4 100644
--- a/llm-server/routes/workflow/utils/__init__.py
+++ b/llm-server/routes/workflow/utils/__init__.py
@@ -6,3 +6,4 @@
from .get_swagger_op_by_id import *
from .run_openapi_ops import *
from .run_workflow import *
+from utils.chat_models import *
diff --git a/llm-server/routes/workflow/utils/detect_multiple_intents.py b/llm-server/routes/workflow/utils/detect_multiple_intents.py
index 352217098..fa9e648ef 100644
--- a/llm-server/routes/workflow/utils/detect_multiple_intents.py
+++ b/llm-server/routes/workflow/utils/detect_multiple_intents.py
@@ -7,12 +7,16 @@
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
+from utils.chat_models import CHAT_MODELS
from utils.get_chat_model import get_chat_model
from routes.workflow.extractors.extract_json import extract_json_payload
import os
import logging
from prance import ResolvingParser
-from models.repository.chat_history_repo import get_all_chat_history_by_session_id
+from models.repository.chat_history_repo import (
+ get_all_chat_history_by_session_id,
+ get_chat_message_as_llm_conversation,
+)
from opencopilot_db import ChatHistory
logging.basicConfig(level=logging.INFO)
@@ -88,42 +92,6 @@ def get_summaries(swagger_doc: ResolvingParser) -> str:
return summaries_str
-def generate_consolidated_requirement(
- user_input: str, session_id: str
-) -> Optional[str]:
- """Generates a consolidated query from chat history and an AI chat.
-
- Args:
- chat_history: A list of Message objects representing the chat history.
- ai_chat: A ChatOpenAI object representing the AI chat.
-
- Returns:
- A consolidated query string.
- """
- chat = get_chat_model("gpt-3.5-turbo")
-
- history = get_all_chat_history_by_session_id(session_id)
- if len(history) == 0:
- return None
-
- conversation_str = join_conversations(history)
- messages = [
- SystemMessage(
- content="You are an AI model designed to generate a standalone prompt. The user message may also contain instructions for you as a bot, like generating some content in this message. You should act accordingly"
- ),
- HumanMessage(
- content="You will receive user input. Based on the conversation and the current user prompt, I want you to convert the user prompt into a standalone prompt if the user prompt references something in conversation history."
- ),
- HumanMessage(
- content="Conversation History: ({}), \n\n Current User input: ({}).".format(
- conversation_str, user_input
- ),
- ),
- ]
- content = chat(messages).content
- return content
-
-
def hasSingleIntent(
swagger_doc: ResolvingParser,
user_requirement: str,
@@ -132,31 +100,40 @@ def hasSingleIntent(
app: str,
) -> BotMessage:
summaries = get_summaries(swagger_doc)
- chat = get_chat_model("gpt-3.5-turbo-16k")
-
- consolidated_user_requirement = (
- generate_consolidated_requirement(user_requirement, session_id)
- or user_requirement
- )
+ chat = get_chat_model(CHAT_MODELS.gpt_3_5_turbo_16k)
messages = [
SystemMessage(
- content="You serve as an AI co-pilot tasked with identifying the correct sequence of API calls necessary to execute a user's action. To accomplish the task, you will be provided with information about the existing state of the application. A user input and list of api summaries. If the user is asking you to perform a `CRUD` operation, provide the list of operation ids of api calls needed in the `ids` field of the json. `bot_message` should consist of a straightforward sentence, free from any special characters. Note that the application uses current state as a cache, if you don't find the required information in the cache, you should try to find an api call to fetch that information. Your response MUST be a valid minified json"
- ),
- current_state
- and HumanMessage(
- content="Here is the current state of the application: {}".format(
- current_state
- )
- ),
- HumanMessage(
- content="Here's a list of api summaries {}.".format(summaries),
- ),
- HumanMessage(
- content="user requirement: {}".format(consolidated_user_requirement)
- ),
- HumanMessage(
- content="""Reply in the following json format ```{
+ content="You serve as an AI co-pilot tasked with identifying the correct sequence of API calls necessary to execute a user's action. To accomplish the task, you will be provided with information about the existing state of the application and list of api summaries. If the user is asking you to perform a `CRUD` operation, provide the list of operation ids of api calls needed in the `ids` field of the json. `bot_message` should consist of a straightforward sentence, free from any special characters. Note that the application uses current state as a cache, if you don't find the required information in the cache, you should try to find an api call to fetch that information. Your response MUST be a valid minified json"
+ )
+ ]
+
+ # old conversations go here
+ prev_conversations = []
+ if session_id:
+ prev_conversations = get_chat_message_as_llm_conversation(session_id)
+
+ if len(prev_conversations) > 0:
+ messages.extend(prev_conversations)
+
+ if current_state:
+ messages.extend(
+ [
+ HumanMessage(
+ content="Here is the current state of the application: {}".format(
+ current_state
+ )
+ )
+ ]
+ )
+
+ messages.extend(
+ [
+ HumanMessage(
+ content="Here's a list of api summaries {}.".format(summaries),
+ ),
+ HumanMessage(
+ content="""Reply in the following json format ```{
"ids": [
"list",
"of",
@@ -165,11 +142,13 @@ def hasSingleIntent(
],
"bot_message": "Bot response here"
}```"""
- ),
- HumanMessage(
- content="If the user's question can be answered directly without making API calls, please respond appropriately in the `bot_message` section of the response and leaving the `ids` field empty ([])."
- ),
- ]
+ ),
+ HumanMessage(
+ content="If the question can be answered directly without making API calls, please respond appropriately in the `bot_message` section of the response and leaving the `ids` field empty ([])."
+ ),
+ HumanMessage(content=user_requirement),
+ ]
+ )
result = chat([x for x in messages if x is not None])
logging.info(
diff --git a/llm-server/routes/workflow/utils/router.py b/llm-server/routes/workflow/utils/router.py
index 6623c45a5..ee5be366c 100644
--- a/llm-server/routes/workflow/utils/router.py
+++ b/llm-server/routes/workflow/utils/router.py
@@ -6,76 +6,79 @@
from opencopilot_utils.get_vector_store import get_vector_store
from opencopilot_utils import StoreOptions
from custom_types.action_type import ActionType
+from models.repository.chat_history_repo import get_chat_message_as_llm_conversation
+from utils.chat_models import CHAT_MODELS
from utils import get_chat_model
from typing import Optional, Tuple, List
from langchain.docstore.document import Document
from langchain.vectorstores.base import VectorStore
-from prance import ResolvingParser
+from utils import struct_log
-chat = get_chat_model("gpt-3.5-turbo")
+
+chat = get_chat_model(CHAT_MODELS.gpt_3_5_turbo)
def get_relevant_docs(text: str, bot_id: str) -> Optional[str]:
try:
- score_threshold = float(os.getenv("SCORE_THRESHOLD_KB", 0.75))
- vector_store: VectorStore = get_vector_store(StoreOptions(bot_id))
- result = vector_store.similarity_search_with_relevance_scores(
- text, score_threshold=score_threshold
+ score_threshold = float(os.getenv("SCORE_THRESHOLD_KB", "0.75"))
+ vector_store: VectorStore = get_vector_store(StoreOptions("knowledgebase"))
+
+ retriever = vector_store.as_retriever(
+ search_kwargs={
+ "k": 5,
+ "score_threshold": score_threshold,
+ "filter": {"bot_id": bot_id},
+ },
)
+ result = retriever.get_relevant_documents(text)
+
if result and len(result) > 0:
- (document, score) = result[0]
- return document.page_content
+ return result[0].page_content
return None
except Exception as e:
- logging.info(f"[Error] {e}")
+ struct_log.exception(payload=text, error=str(e), event="get_relevant_docs")
return None
-def classify_text(user_requirement: str, context: str) -> ActionType:
+def classify_text(user_requirement: str, context: str, session_id: str) -> ActionType:
+ prev_conversations = []
+ if session_id:
+ prev_conversations = get_chat_message_as_llm_conversation(session_id)
+
messages = [
SystemMessage(
- content="You are a multi-label classification model. Your reply should only be one of the allowed keywords"
- ),
- HumanMessage(
- content=f"""
- You must output one of '{ActionType.ASSISTANT_ACTION.value}', '{ActionType.KNOWLEDGE_BASE_QUERY.value}', '{ActionType.GENERAL_QUERY.value}' based on the following conditions
- """
- ),
- HumanMessage(
- content=f"""
- If the user's requirement would require making an API call to a third-party service, return the output as: {ActionType.ASSISTANT_ACTION.value}.
-
- Actions such as performing tasks, listing items, displaying information, and managing additions/removals are all categorized as assistant actions etc, are all assistant actions
- """
- ),
- HumanMessage(
- content=f"""
- If the user's requirement is related to this context ```{context}```, output: {ActionType.KNOWLEDGE_BASE_QUERY.value}
- If you are unsure, output: {ActionType.GENERAL_QUERY.value}
- """
- ),
- HumanMessage(
- content="Here's the user input {}".format(user_requirement),
- ),
+ content=f"""You are a classification model, which when given user input can classify it into one of the three types below. If the user asks you to list something, show or delete something. You should output {ActionType.ASSISTANT_ACTION.value} because these require making api calls. {ActionType.KNOWLEDGE_BASE_QUERY.value}"""
+ )
]
+ if len(prev_conversations) > 0:
+ messages.extend(prev_conversations)
+
+ messages.extend(
+ [
+ HumanMessage(
+ content=f"Here's the user requirement:```{user_requirement}```, and here's the context: ```{context}```. Now classify the user requirement."
+ ),
+ ]
+ )
+
content = chat(messages).content
if ActionType.ASSISTANT_ACTION.value in content:
return ActionType.ASSISTANT_ACTION
- elif ActionType.KNOWLEDGE_BASE_QUERY.value in content:
+ elif context is not None:
return ActionType.KNOWLEDGE_BASE_QUERY
return ActionType.GENERAL_QUERY
-def get_action_type(user_requirement: str, bot_id: str) -> ActionType:
- context = get_relevant_docs(user_requirement, bot_id) or []
+def get_action_type(user_requirement: str, bot_id: str, session_id: str) -> ActionType:
+ context = get_relevant_docs(user_requirement, bot_id) or None
- route = classify_text(user_requirement, context)
+ route = classify_text(user_requirement, context, session_id)
return route
diff --git a/llm-server/routes/workflow/utils/run_openapi_ops.py b/llm-server/routes/workflow/utils/run_openapi_ops.py
index d4bd48a14..85c99304e 100644
--- a/llm-server/routes/workflow/utils/run_openapi_ops.py
+++ b/llm-server/routes/workflow/utils/run_openapi_ops.py
@@ -41,7 +41,18 @@ def run_openapi_operations(
current_state,
)
- api_response = make_api_request(headers=headers, **api_payload.__dict__)
+ api_response = None
+ try:
+ struct_log.info(
+ payload=api_payload.__dict__,
+ event="make_api_call"
+ )
+
+ api_response = make_api_request(headers=headers, **api_payload.__dict__)
+
+ except Exception as e:
+ struct_log.exception(error=str(e), event="make api call failed")
+ return {}
# if a custom transformer function is defined for this operationId use that, otherwise forward it to the llm
# so we don't necessarily have to defined mappers for all api endpoints
@@ -63,16 +74,14 @@ def run_openapi_operations(
except Exception as e:
struct_log.exception(
-                payload={
-                    text,
-                    headers,
-                    server_base_url,
-                    app,
-                },
+                payload=json.dumps({
+                    "text": text,
+                    "headers": headers,
+                    "server_base_url": server_base_url,
+                    "app": app,
+                }),
error=str(e),
event="/check_workflow_in_store",
)
- # At this point we will retry the operation with hierarchical planner
- raise e
return convert_json_to_text(text, prev_api_response)
diff --git a/llm-server/utils/chat_models.py b/llm-server/utils/chat_models.py
new file mode 100644
index 000000000..d1c8777ca
--- /dev/null
+++ b/llm-server/utils/chat_models.py
@@ -0,0 +1,14 @@
+from typing import NamedTuple
+
+
+class ChatModels(NamedTuple):
+ gpt_3_5_turbo: str = "gpt-3.5-turbo"
+ gpt_3_5_turbo_16k: str = "gpt-3.5-turbo-16k"
+ claude_2_0: str = "claude-2.0"
+ mistral_openorca: str = "mistral-openorca"
+ nous_hermes = "nous-hermes"
+ llama2: str = "llama2"
+ xwinlm = "xwinlm"
+
+
+CHAT_MODELS: ChatModels = ChatModels()
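Note: `CHAT_MODELS` replaces the raw model-name strings at the call sites updated throughout this diff; a mistyped attribute now fails at import time instead of silently falling through to `get_chat_model`'s else branch. Usage mirrors the changed call sites:

```python
from utils.chat_models import CHAT_MODELS
from utils.get_chat_model import get_chat_model

# Equivalent to the old get_chat_model("gpt-3.5-turbo-16k"), but typo-safe.
chat = get_chat_model(CHAT_MODELS.gpt_3_5_turbo_16k)
```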
diff --git a/llm-server/utils/get_chat_model.py b/llm-server/utils/get_chat_model.py
index 6afd785e7..071cc4d12 100644
--- a/llm-server/utils/get_chat_model.py
+++ b/llm-server/utils/get_chat_model.py
@@ -1,28 +1,49 @@
from langchain.chat_models import ChatOpenAI
-from langchain.chat_models import ChatOllama
-
+from langchain.chat_models import ChatOllama, ChatAnthropic
+from .chat_models import CHAT_MODELS
from typing import Optional
import os
localip = os.getenv("LOCAL_IP", "localhost")
+
+
def get_chat_model(prop: str) -> ChatOpenAI:
- if prop == "gpt-3.5-turbo":
+ if prop == CHAT_MODELS.gpt_3_5_turbo:
return ChatOpenAI(
openai_api_key=os.getenv("OPENAI_API_KEY"),
- model="gpt-3.5-turbo",
+ model=CHAT_MODELS.gpt_3_5_turbo,
temperature=0,
)
- elif prop == "gpt-3.5-turbo-16k":
+ elif prop == CHAT_MODELS.gpt_3_5_turbo_16k:
return ChatOpenAI(
openai_api_key=os.getenv("OPENAI_API_KEY"),
- model="gpt-3.5-turbo-16k",
+ model=CHAT_MODELS.gpt_3_5_turbo_16k,
+ temperature=0,
+ )
+ elif prop == CHAT_MODELS.claude_2_0:
+ return ChatAnthropic(
+ anthropic_api_key=os.getenv("CLAUDE_API_KEY", "CLAUDE_API_KEY")
+ )
+ elif prop == CHAT_MODELS.mistral_openorca:
+ return ChatOllama(
+ base_url=f"{localip}:11434",
+ model=CHAT_MODELS.mistral_openorca,
+ temperature=0,
+ )
+ elif prop == CHAT_MODELS.nous_hermes:
+ return ChatOllama(
+ base_url=f"{localip}:11434",
+ model=CHAT_MODELS.nous_hermes,
+ temperature=0,
+ )
+ elif prop == CHAT_MODELS.xwinlm:
+ return ChatOllama(
+ base_url=f"{localip}:11434",
+ model=CHAT_MODELS.xwinlm,
temperature=0,
)
-
- elif prop == "mistral-openorca":
- return ChatOllama(base_url=f"{localip}:11434", model="mistral-openorca", temperature=0)
elif prop == "llama2":
return ChatOpenAI(model="llama2", temperature=0)
else:
diff --git a/llm-server/utils/vector_db/add_workflow.py b/llm-server/utils/vector_db/add_workflow.py
index 68368836c..12e22f48a 100644
--- a/llm-server/utils/vector_db/add_workflow.py
+++ b/llm-server/utils/vector_db/add_workflow.py
@@ -19,4 +19,14 @@ def add_workflow_data_to_qdrant(
)
]
embeddings = get_embeddings()
- init_vector_store(docs, embeddings, StoreOptions(bot_id))
+ init_vector_store(
+ docs,
+ embeddings,
+ StoreOptions(
+ namespace="swagger",
+ metadata={
+ "bot_id": bot_id
+ # "swagger_id": workflow_data.get("swagger_id"),
+ },
+ ),
+ )
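Note: this is the storage half of the new multi-tenancy scheme: documents land in a shared collection and carry a `bot_id` metadata tag, and readers filter on that tag. A condensed round-trip sketch with a placeholder bot id, using only calls that appear elsewhere in this diff:

```python
from opencopilot_utils import StoreOptions, get_vector_store

# Write side (workers): every document chunk is tagged with its owning bot.
options = StoreOptions(namespace="knowledgebase", metadata={"bot_id": "my-bot-id"})
# init_vector_store(docs, embeddings, options)

# Read side (llm-server): query the shared collection filtered by bot_id,
# mirroring get_relevant_docs and getConversationRetrievalChain above.
retriever = get_vector_store(StoreOptions("knowledgebase")).as_retriever(
    search_kwargs={"k": 5, "filter": {"bot_id": "my-bot-id"}}
)
docs = retriever.get_relevant_documents("How do I add a data source?")
```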
diff --git a/llm-server/utils/vector_store_setup.py b/llm-server/utils/vector_store_setup.py
new file mode 100644
index 000000000..e14234984
--- /dev/null
+++ b/llm-server/utils/vector_store_setup.py
@@ -0,0 +1,26 @@
+from qdrant_client import QdrantClient, models
+import os
+from opencopilot_utils import get_vector_store, StoreOptions
+
+vector_size = int(os.getenv("VECTOR_SIZE", "1536"))
+
+
+def init_qdrant_collections():
+ # refer: from opencopilot_utils import StoreOptions, for list of namespaces to be created on startup
+ client = QdrantClient(url=os.getenv("QDRANT_URL", "http://qdrant:6333"))
+ try:
+ client.create_collection(
+ "knowledgebase",
+ vectors_config=models.VectorParams(
+ size=vector_size, distance=models.Distance.COSINE
+ ),
+ )
+
+ client.create_collection(
+ "swagger",
+ vectors_config=models.VectorParams(
+ size=vector_size, distance=models.Distance.COSINE
+ ),
+ )
+ except Exception as e:
+ print(f"Collection already exists, ignoring new collection creation")
diff --git a/nginx/nginx.conf b/nginx/nginx.conf
index 5fb63ac36..7b3005aa6 100644
--- a/nginx/nginx.conf
+++ b/nginx/nginx.conf
@@ -43,6 +43,16 @@ http {
proxy_pass http://llm-server:8002/;
}
+ location /backend/chat/ {
+ add_header 'Access-Control-Allow-Origin' '*';
+ add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
+ add_header 'Access-Control-Allow-Headers' 'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range';
+ add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range';
+ rewrite /backend/chat/(.*) /chat/$1 break;
+ proxy_pass http://llm-server:8002/;
+ }
+
+
location /backend/ {
proxy_pass http://backend:5000/;
}
diff --git a/workers/shared/utils/opencopilot_utils/get_vector_store.py b/workers/shared/utils/opencopilot_utils/get_vector_store.py
index d0ff0c204..0e263dd37 100644
--- a/workers/shared/utils/opencopilot_utils/get_vector_store.py
+++ b/workers/shared/utils/opencopilot_utils/get_vector_store.py
@@ -27,9 +27,10 @@ def get_vector_store(options: StoreOptions) -> VectorStore:
vector_store = Qdrant(
client, collection_name=options.namespace, embeddings=embedding
)
+
# vector_store = Qdrant.from_documents([], embedding, url='http://localhost:6333', collection=options.namespace)
else:
raise ValueError("Invalid STORE environment variable value")
- return vector_store
+ return vector_store
\ No newline at end of file
diff --git a/workers/shared/utils/opencopilot_utils/init_vector_store.py b/workers/shared/utils/opencopilot_utils/init_vector_store.py
index 4c8778380..52fa10b6d 100644
--- a/workers/shared/utils/opencopilot_utils/init_vector_store.py
+++ b/workers/shared/utils/opencopilot_utils/init_vector_store.py
@@ -7,8 +7,11 @@
def init_vector_store(docs: list[Document], embeddings: OpenAIEmbeddings, options: StoreOptions) -> None:
store_type = StoreType[os.environ['STORE']]
+
+ for doc in docs:
+ doc.metadata.update(options.metadata)
+
if store_type == StoreType.QDRANT:
- print("called qdrant.from_documents")
Qdrant.from_documents(docs, embeddings, collection_name=options.namespace, url=os.environ['QDRANT_URL'])
else:
valid_stores = ", ".join(StoreType._member_names())
diff --git a/workers/shared/utils/opencopilot_utils/interfaces.py b/workers/shared/utils/opencopilot_utils/interfaces.py
index 68f39e145..46dc973cf 100644
--- a/workers/shared/utils/opencopilot_utils/interfaces.py
+++ b/workers/shared/utils/opencopilot_utils/interfaces.py
@@ -1,5 +1,8 @@
-from typing import Optional
+from typing import Optional, Literal, Dict
class StoreOptions:
- def __init__(self, namespace: Optional[str] = None):
- self.namespace = namespace
\ No newline at end of file
+ ALLOWED_NAMESPACES = Literal["swagger", "knowledgebase"]
+
+    def __init__(self, namespace: ALLOWED_NAMESPACES, metadata: Optional[Dict[str, str]] = None):
+        self.namespace = namespace
+        self.metadata = metadata or {}
\ No newline at end of file
diff --git a/workers/shared/utils/setup.py b/workers/shared/utils/setup.py
index ab2696ddd..3cf04ac9b 100644
--- a/workers/shared/utils/setup.py
+++ b/workers/shared/utils/setup.py
@@ -5,7 +5,7 @@
setup(
name="opencopilot_utils",
- version="2.0.0",
+ version="2.1.0",
packages=find_packages(),
install_requires=[
"langchain"
diff --git a/workers/tasks/process_markdown.py b/workers/tasks/process_markdown.py
index 3ab46ea3d..40e938f4f 100644
--- a/workers/tasks/process_markdown.py
+++ b/workers/tasks/process_markdown.py
@@ -16,7 +16,7 @@ def process_markdown(file_name: str, bot_id: str):
)
docs = text_splitter.split_documents(raw_docs)
embeddings = get_embeddings()
- init_vector_store(docs, embeddings, StoreOptions(namespace=bot_id))
+ init_vector_store(docs, embeddings, StoreOptions(namespace="knowledgebase", metadata={"bot_id": bot_id}))
update_pdf_data_source_status(chatbot_id=bot_id, file_name=file_name, status="COMPLETED")
except Exception as e:
diff --git a/workers/tasks/process_pdfs.py b/workers/tasks/process_pdfs.py
index 5faa58551..d3f2974ee 100644
--- a/workers/tasks/process_pdfs.py
+++ b/workers/tasks/process_pdfs.py
@@ -16,7 +16,7 @@ def process_pdf(file_name: str, bot_id: str):
)
docs = text_splitter.split_documents(raw_docs)
embeddings = get_embeddings()
- init_vector_store(docs, embeddings, StoreOptions(namespace=bot_id))
+ init_vector_store(docs, embeddings, StoreOptions(namespace="knowledgebase", metadata={"bot_id": bot_id}))
update_pdf_data_source_status(chatbot_id=bot_id, file_name=file_name, status="COMPLETED")
except Exception as e:
diff --git a/workers/tasks/web_crawl.py b/workers/tasks/web_crawl.py
index 97b78c21b..07d63350a 100644
--- a/workers/tasks/web_crawl.py
+++ b/workers/tasks/web_crawl.py
@@ -83,7 +83,7 @@ def scrape_website_in_bfs(url: str, bot_id: str, unique_urls: Set[str], max_page
docs = text_splitter.create_documents([text])
embeddings = get_embeddings()
- init_vector_store(docs, embeddings, StoreOptions(namespace=bot_id))
+ init_vector_store(docs, embeddings, StoreOptions(namespace="knowledgebase", metadata={"bot_id": bot_id}))
update_website_data_source_status_by_url(url=url, status="SUCCESS")
if driver is not None:
@@ -110,7 +110,7 @@ def web_crawl(url, bot_id: str):
print(f"Received: {url}, {bot_id}")
create_website_data_source(chatbot_id=bot_id, status="PENDING", url=url)
unique_urls: set = set()
- scrape_website_in_bfs(url, bot_id, unique_urls, 2)
+ scrape_website_in_bfs(url, bot_id, unique_urls, 5)
except Exception as e:
traceback.print_exc()