Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Upgrade semantic-kernel and other dependencies to the latest version #86

Merged
merged 4 commits into from
Jan 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion ai-service.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ spec:
memory: 50Mi
limits:
cpu: 30m
memory: 65Mi
memory: 85Mi
startupProbe:
httpGet:
path: /health
Expand Down
2 changes: 1 addition & 1 deletion charts/aks-store-demo/templates/ai-service.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ spec:
memory: 50Mi
limits:
cpu: 30m
memory: 65Mi
memory: 85Mi
startupProbe:
httpGet:
path: /health
Expand Down
6 changes: 3 additions & 3 deletions src/ai-service/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
fastapi==0.95.2
fastapi==0.105.0
uvicorn==0.22.0
pydantic==1.10.8
pydantic==2.5.0
pytest==7.3.1
httpx
pyyaml
semantic-kernel==0.3.1.dev0
semantic-kernel==0.4.2.dev0
azure.identity==1.14.0
requests==2.31.0
70 changes: 70 additions & 0 deletions src/ai-service/routers/LLM.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
from azure.identity import DefaultAzureCredential
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, OpenAIChatCompletion
from dotenv import load_dotenv
import os


def get_llm():
    """Initialize the LLM backend used by the app.

    Configuration is read from environment variables:
      - USE_LOCAL_LLM:  "true" (case-insensitive) to use a local LLM endpoint.
      - USE_AZURE_OPENAI: "true" to use Azure OpenAI (mutually exclusive with the above).
      - AI_ENDPOINT / AZURE_OPENAI_ENDPOINT: endpoint URL (required when either flag is set).
      - OPENAI_API_KEY, USE_AZURE_AD, OPENAI_ORG_ID, AZURE_OPENAI_DEPLOYMENT_NAME:
        credentials/deployment settings for the OpenAI / Azure OpenAI paths.

    Returns:
        tuple: (kernel, useLocalLLM, endpoint) where ``kernel`` is a configured
        ``semantic_kernel.Kernel`` when OpenAI/Azure OpenAI is used (``False``
        otherwise — kept as the historical no-kernel sentinel), ``useLocalLLM``
        is a bool, and ``endpoint`` is the endpoint URL or ``None``.

    Raises:
        Exception: on conflicting flags or missing required configuration.
    """
    # Flags default to False; only the exact (case-insensitive) value "true" enables them.
    useLocalLLM: bool = os.environ.get("USE_LOCAL_LLM", "").lower() == "true"
    useAzureOpenAI: bool = os.environ.get("USE_AZURE_OPENAI", "").lower() == "true"

    # `False` (not None) is kept as the no-kernel sentinel for backward compatibility.
    kernel = False
    # Bug fix: previously `endpoint` was only bound inside the flag branch below,
    # so the return statement raised NameError on the plain-OpenAI path.
    endpoint = None

    # The two backends are mutually exclusive.
    if useLocalLLM and useAzureOpenAI:
        raise Exception("USE_LOCAL_LLM and USE_AZURE_OPENAI environment variables cannot both be set to true")

    # Either backend needs an endpoint; AI_ENDPOINT takes precedence.
    if useLocalLLM or useAzureOpenAI:
        endpoint = os.environ.get("AI_ENDPOINT") or os.environ.get("AZURE_OPENAI_ENDPOINT")
        if not isinstance(endpoint, str) or endpoint == "":
            raise Exception("AI_ENDPOINT or AZURE_OPENAI_ENDPOINT environment variable must be set when USE_LOCAL_LLM or USE_AZURE_OPENAI is set to true")

    if useLocalLLM:
        print("Using Local LLM")
    else:
        print("Using OpenAI and setting up Semantic Kernel")
        # Load environment variables from a .env file, if present.
        load_dotenv()

        # Bug fix: the kernel was previously instantiated twice; one instance suffices.
        kernel = sk.Kernel()

        api_key = os.environ.get("OPENAI_API_KEY")
        useAzureAD = os.environ.get("USE_AZURE_AD")

        # At least one auth mechanism (API key or Azure AD) must be configured.
        # Env values are str or None, so plain truthiness covers both missing and "".
        if not api_key and not useAzureAD:
            raise Exception("OPENAI_API_KEY environment variable must be set")

        if not useAzureOpenAI:
            # Public OpenAI additionally requires an organization id.
            org_id = os.environ.get("OPENAI_ORG_ID")
            if not org_id:
                raise Exception("OPENAI_ORG_ID environment variable must be set when USE_AZURE_OPENAI is set to False")
            # Register the OpenAI chat completion service with the kernel.
            kernel.add_chat_service("dv", OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id))
        else:
            deployment = os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME")
            # Register the Azure OpenAI chat completion service with the kernel.
            if isinstance(useAzureAD, str) and useAzureAD.lower() == "true":
                print("Authenticating to Azure OpenAI with Azure AD Workload Identity")
                credential = DefaultAzureCredential()
                # Short-lived bearer token for the Cognitive Services scope.
                access_token = credential.get_token("https://cognitiveservices.azure.com/.default")
                kernel.add_chat_service("dv", AzureChatCompletion(deployment_name=deployment, endpoint=endpoint, ad_token=access_token.token))
            else:
                print("Authenticating to Azure OpenAI with OpenAI API key")
                kernel.add_chat_service("dv", AzureChatCompletion(deployment_name=deployment, endpoint=endpoint, api_key=api_key))

    return kernel, useLocalLLM, endpoint
71 changes: 5 additions & 66 deletions src/ai-service/routers/description_generator.py
Original file line number Diff line number Diff line change
@@ -1,74 +1,13 @@
from azure.identity import DefaultAzureCredential
from typing import Any, List, Dict
from fastapi import APIRouter, Request, status
from fastapi.responses import Response, JSONResponse
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, OpenAIChatCompletion
from dotenv import load_dotenv
from typing import Any, List, Dict
import os
import requests
import json
from routers.LLM import get_llm

# --- Legacy module-level LLM setup (this diff removes it in favor of routers.LLM.get_llm) ---
# Set the useLocalLLM and useAzureOpenAI variables based on environment variables
useLocalLLM: bool = False
useAzureOpenAI: bool = False

if os.environ.get("USE_LOCAL_LLM"):
    # Only the exact (case-insensitive) value "true" enables the flag.
    useLocalLLM = os.environ.get("USE_LOCAL_LLM").lower() == "true"

if os.environ.get("USE_AZURE_OPENAI"):
    useAzureOpenAI = os.environ.get("USE_AZURE_OPENAI").lower() == "true"

# if useLocalLLM and useAzureOpenAI are both set to true, raise an exception
if useLocalLLM and useAzureOpenAI:
    raise Exception("USE_LOCAL_LLM and USE_AZURE_OPENAI environment variables cannot both be set to true")

# if useLocalLLM or useAzureOpenAI are set to true, get the endpoint from the environment variables
if useLocalLLM or useAzureOpenAI:
    # AI_ENDPOINT takes precedence over AZURE_OPENAI_ENDPOINT.
    endpoint: str = os.environ.get("AI_ENDPOINT") or os.environ.get("AZURE_OPENAI_ENDPOINT")

    if isinstance(endpoint, str) == False or endpoint == "":
        raise Exception("AI_ENDPOINT or AZURE_OPENAI_ENDPOINT environment variable must be set when USE_LOCAL_LLM or USE_AZURE_OPENAI is set to true")

# if not using local LLM, set up the semantic kernel
if useLocalLLM:
    print("Using Local LLM")
else:
    print("Using OpenAI and setting up Semantic Kernel")
    # Load environment variables from .env file
    load_dotenv()

    # Initialize the semantic kernel
    kernel: sk.Kernel = sk.Kernel()

    # NOTE(review): the kernel is created twice; this second assignment
    # discards the instance above (redundant but harmless).
    kernel = sk.Kernel()

    # Get the Azure OpenAI deployment name, API key, and endpoint or OpenAI org id from environment variables
    api_key: str = os.environ.get("OPENAI_API_KEY")
    useAzureAD: str = os.environ.get("USE_AZURE_AD")

    # Either an API key or Azure AD auth must be configured.
    if (isinstance(api_key, str) == False or api_key == "") and (isinstance(useAzureAD, str) == False or useAzureAD == ""):
        raise Exception("OPENAI_API_KEY environment variable must be set")

    if not useAzureOpenAI:
        org_id = os.environ.get("OPENAI_ORG_ID")
        if isinstance(org_id, str) == False or org_id == "":
            raise Exception("OPENAI_ORG_ID environment variable must be set when USE_AZURE_OPENAI is set to False")
        # Add the OpenAI text completion service to the kernel
        kernel.add_chat_service("dv", OpenAIChatCompletion("gpt-3.5-turbo", api_key, org_id))

    else:
        deployment: str = os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME")
        # Add the Azure OpenAI text completion service to the kernel
        if isinstance(useAzureAD, str) == True and useAzureAD.lower() == "true":
            print("Authenticating to Azure OpenAI with Azure AD Workload Identity")
            credential = DefaultAzureCredential()
            # Short-lived bearer token for the Cognitive Services scope.
            access_token = credential.get_token("https://cognitiveservices.azure.com/.default")
            # NOTE(review): the pre-upgrade semantic-kernel API passed the AD token
            # via api_key=... with ad_auth=True; the replacement code uses ad_token=.
            kernel.add_chat_service("dv", AzureChatCompletion(deployment_name=deployment, endpoint=endpoint, api_key=access_token.token, ad_auth=True))
        else:
            print("Authenticating to Azure OpenAI with OpenAI API key")
            kernel.add_chat_service("dv", AzureChatCompletion(deployment, endpoint, api_key))

# initialize the model that would be used for the app
kernel, useLocalLLM, endpoint = get_llm()
if not useLocalLLM:
# Import semantic skills from the "skills" directory
skills_directory: str = "skills"
productFunctions: dict = kernel.import_semantic_skill_from_directory(skills_directory, "ProductSkill")
Expand Down
Loading