diff --git a/integrations/llm/openai/notebooks/Comet_and_OpenAI.ipynb b/integrations/llm/openai/notebooks/Comet_and_OpenAI.ipynb
index d4a7def4..bafcb350 100644
--- a/integrations/llm/openai/notebooks/Comet_and_OpenAI.ipynb
+++ b/integrations/llm/openai/notebooks/Comet_and_OpenAI.ipynb
@@ -139,30 +139,26 @@
     "    \"\"\"\n",
     "    Answer a question\n",
     "    \"\"\"\n",
-    "    try:\n",
-    "        # Create a chat completion using the question and system instructions\n",
-    "        messages = [\n",
-    "            {\n",
-    "                \"role\": \"system\",\n",
-    "                \"content\": \"Answer the question and if the question can't be answered, say \\\"I don't know\\\"\",\n",
-    "            },\n",
-    "            {\"role\": \"user\", \"content\": question},\n",
-    "        ]\n",
+    "    # Create a chat completion using the question and system instructions\n",
+    "    messages = [\n",
+    "        {\n",
+    "            \"role\": \"system\",\n",
+    "            \"content\": \"Answer the question and if the question can't be answered, say \\\"I don't know\\\"\",\n",
+    "        },\n",
+    "        {\"role\": \"user\", \"content\": question},\n",
+    "    ]\n",
     "\n",
-    "        response = openai.ChatCompletion.create(\n",
-    "            messages=messages,\n",
-    "            temperature=0,\n",
-    "            max_tokens=max_tokens,\n",
-    "            top_p=1,\n",
-    "            frequency_penalty=0,\n",
-    "            presence_penalty=0,\n",
-    "            stop=stop_sequence,\n",
-    "            model=model,\n",
-    "        )\n",
-    "        return response[\"choices\"][0][\"message\"][\"content\"].strip()\n",
-    "    except Exception as e:\n",
-    "        print(e)\n",
-    "        return \"\""
+    "    response = openai.ChatCompletion.create(\n",
+    "        messages=messages,\n",
+    "        temperature=0,\n",
+    "        max_tokens=max_tokens,\n",
+    "        top_p=1,\n",
+    "        frequency_penalty=0,\n",
+    "        presence_penalty=0,\n",
+    "        stop=stop_sequence,\n",
+    "        model=model,\n",
+    "    )\n",
+    "    return response[\"choices\"][0][\"message\"][\"content\"].strip()"
    ]
   },
   {
@@ -211,13 +207,6 @@
    "source": [
     "answer_question(\"What is the airspeed velocity of an unladen swallow?\")"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {