Format Python code with psf/black push
github-actions authored and github-actions committed Jun 21, 2023
1 parent 3a460d2 commit 47ef9d1
Showing 2 changed files with 48 additions and 28 deletions.
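
Both files change for the same two reasons, visible in every hunk below (assuming black's default 88-character line length, which all of the rewrapped lines are consistent with): a call whose arguments overflow the limit is split across lines, and a call that black splits gets a trailing comma after its last argument, which is why each bare verbose=False becomes verbose=False,. The one-line service-context construction is the recurring case:

Before black:

    service_context = ServiceContext.from_defaults(embed_model=embedding_model, callback_manager=callback_manager)

After black:

    service_context = ServiceContext.from_defaults(
        embed_model=embedding_model, callback_manager=callback_manager
    )

Where the arguments do not fit on a single continuation line, as in the three-argument from_defaults calls below, black puts one argument per line instead.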
models/index_model.py (65 changes: 43 additions & 22 deletions)
@@ -355,7 +355,9 @@ async def paginate_embed(self, response_text):
 
         return pages
 
-    def index_file(self, file_path, service_context, suffix=None) -> GPTVectorStoreIndex:
+    def index_file(
+        self, file_path, service_context, suffix=None
+    ) -> GPTVectorStoreIndex:
         if suffix and suffix == ".md":
             loader = MarkdownReader()
             document = loader.load_data(file_path)
@@ -559,11 +561,15 @@ async def set_file_index(
                     await file.save(temp_file.name)
                     embedding_model = OpenAIEmbedding()
                     token_counter = TokenCountingHandler(
-                        tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
-                        verbose=False
+                        tokenizer=tiktoken.encoding_for_model(
+                            "text-davinci-003"
+                        ).encode,
+                        verbose=False,
                     )
                     callback_manager = CallbackManager([token_counter])
-                    service_context = ServiceContext.from_defaults(embed_model=embedding_model, callback_manager=callback_manager)
+                    service_context = ServiceContext.from_defaults(
+                        embed_model=embedding_model, callback_manager=callback_manager
+                    )
                     index = await self.loop.run_in_executor(
                         None,
                         partial(
@@ -610,10 +616,12 @@ async def set_link_index_recurse(
         embedding_model = OpenAIEmbedding()
         token_counter = TokenCountingHandler(
             tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
-            verbose=False
+            verbose=False,
         )
         callback_manager = CallbackManager([token_counter])
-        service_context = ServiceContext.from_defaults(embed_model=embedding_model, callback_manager=callback_manager)
+        service_context = ServiceContext.from_defaults(
+            embed_model=embedding_model, callback_manager=callback_manager
+        )
 
         # Pre-emptively connect and get the content-type of the response
         try:
@@ -707,10 +715,12 @@ async def set_link_index(
         embedding_model = OpenAIEmbedding()
         token_counter = TokenCountingHandler(
             tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
-            verbose=False
+            verbose=False,
         )
         callback_manager = CallbackManager([token_counter])
-        service_context = ServiceContext.from_defaults(embed_model=embedding_model, callback_manager=callback_manager)
+        service_context = ServiceContext.from_defaults(
+            embed_model=embedding_model, callback_manager=callback_manager
+        )
 
         # Pre-emptively connect and get the content-type of the response
         try:
@@ -806,10 +816,12 @@ async def set_discord_index(
         embedding_model = OpenAIEmbedding()
         token_counter = TokenCountingHandler(
             tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
-            verbose=False
+            verbose=False,
         )
         callback_manager = CallbackManager([token_counter])
-        service_context = ServiceContext.from_defaults(embed_model=embedding_model, callback_manager=callback_manager)
+        service_context = ServiceContext.from_defaults(
+            embed_model=embedding_model, callback_manager=callback_manager
+        )
         index = await self.loop.run_in_executor(
             None, partial(self.index_discord, document, service_context)
         )
@@ -922,16 +934,18 @@ async def compose_indexes(self, user_id, indexes, name, deep_compose):
 
             llm_predictor_mock = MockLLMPredictor(4096)
             embedding_model_mock = MockEmbedding(1536)
 
             token_counter_mock = TokenCountingHandler(
                 tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
-                verbose=False
+                verbose=False,
             )
 
             callback_manager_mock = CallbackManager([token_counter_mock])
 
             service_context_mock = ServiceContext.from_defaults(
-                llm_predictor=llm_predictor_mock, embed_model=embedding_model_mock, callback_manager=callback_manager_mock
+                llm_predictor=llm_predictor_mock,
+                embed_model=embedding_model_mock,
+                callback_manager=callback_manager_mock,
             )
 
             # Run the mock call first
@@ -957,13 +971,15 @@ async def compose_indexes(self, user_id, indexes, name, deep_compose):
 
             token_counter = TokenCountingHandler(
                 tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode,
-                verbose=False
+                verbose=False,
             )
 
             callback_manager = CallbackManager([token_counter])
 
             service_context = ServiceContext.from_defaults(
-                llm_predictor=llm_predictor, embed_model=embedding_model, callback_manager=callback_manager
+                llm_predictor=llm_predictor,
+                embed_model=embedding_model,
+                callback_manager=callback_manager,
             )
 
             tree_index = await self.loop.run_in_executor(
@@ -1004,12 +1020,14 @@ async def compose_indexes(self, user_id, indexes, name, deep_compose):
 
             token_counter = TokenCountingHandler(
                 tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode,
-                verbose=False
+                verbose=False,
             )
 
             callback_manager = CallbackManager([token_counter])
 
-            service_context = ServiceContext.from_defaults(embed_model=embedding_model, callback_manager=callback_manager)
+            service_context = ServiceContext.from_defaults(
+                embed_model=embedding_model, callback_manager=callback_manager
+            )
 
             simple_index = await self.loop.run_in_executor(
                 None,
@@ -1062,10 +1080,12 @@ async def backup_discord(
         embedding_model = OpenAIEmbedding()
         token_counter = TokenCountingHandler(
             tokenizer=tiktoken.encoding_for_model("text-davinci-003").encode,
-            verbose=False
+            verbose=False,
         )
         callback_manager = CallbackManager([token_counter])
-        service_context = ServiceContext.from_defaults(embed_model=embedding_model, callback_manager=callback_manager)
+        service_context = ServiceContext.from_defaults(
+            embed_model=embedding_model, callback_manager=callback_manager
+        )
         index = await self.loop.run_in_executor(
             None, partial(self.index_discord, document, service_context)
         )
@@ -1120,13 +1140,14 @@ async def query(
         try:
             embedding_model = OpenAIEmbedding()
             token_counter = TokenCountingHandler(
-                tokenizer=tiktoken.encoding_for_model(model).encode,
-                verbose=False
+                tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
             )
 
             callback_manager = CallbackManager([token_counter])
             service_context = ServiceContext.from_defaults(
-                llm_predictor=llm_predictor, embed_model=embedding_model, callback_manager=callback_manager
+                llm_predictor=llm_predictor,
+                embed_model=embedding_model,
+                callback_manager=callback_manager,
             )
 
             token_counter.reset_counts()
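
Nearly every hunk above rewraps the same construction: a TokenCountingHandler built on the model's tiktoken encoding, wrapped in a CallbackManager, and handed to ServiceContext.from_defaults. This is not part of the commit, but a small helper would leave black a single call site to format; a hypothetical sketch, assuming the llama_index 0.6-era imports that models/index_model.py already relies on:

    import tiktoken
    from llama_index import ServiceContext
    from llama_index.callbacks import CallbackManager, TokenCountingHandler
    from llama_index.embeddings.openai import OpenAIEmbedding

    def build_service_context(model="text-davinci-003", llm_predictor=None):
        # Count tokens for every LLM and embedding call routed through this context.
        token_counter = TokenCountingHandler(
            tokenizer=tiktoken.encoding_for_model(model).encode,
            verbose=False,
        )
        service_context = ServiceContext.from_defaults(
            llm_predictor=llm_predictor,  # None falls back to the library default
            embed_model=OpenAIEmbedding(),
            callback_manager=CallbackManager([token_counter]),
        )
        return service_context, token_counter

Callers read token_counter.total_llm_token_count and token_counter.total_embedding_token_count after a query and clear them with token_counter.reset_counts(), as query() above already does.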
models/search_model.py (11 changes: 5 additions & 6 deletions)
@@ -349,21 +349,20 @@ async def search(
         llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=model))
 
         token_counter = TokenCountingHandler(
-            tokenizer=tiktoken.encoding_for_model(model).encode,
-            verbose=False
+            tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
         )
 
         callback_manager = CallbackManager([token_counter])
 
         service_context = ServiceContext.from_defaults(
-            llm_predictor=llm_predictor, embed_model=embedding_model, callback_manager=callback_manager
+            llm_predictor=llm_predictor,
+            embed_model=embedding_model,
+            callback_manager=callback_manager,
         )
 
-
         # Check price
         token_counter_mock = TokenCountingHandler(
-            tokenizer=tiktoken.encoding_for_model(model).encode,
-            verbose=False
+            tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
        )
         callback_manager_mock = CallbackManager([token_counter_mock])
         embed_model_mock = MockEmbedding(embed_dim=1536)
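
models/search_model.py shows the companion pattern: next to the real service context, the code builds a mocked one (the "# Check price" block) so the token cost of a search can be estimated before any billable API call. A minimal sketch of that pattern, not from this commit, assuming the llama_index 0.6-era MockLLMPredictor and MockEmbedding already used in these files:

    import tiktoken
    from llama_index import MockEmbedding, MockLLMPredictor, ServiceContext
    from llama_index.callbacks import CallbackManager, TokenCountingHandler

    model = "gpt-3.5-turbo"  # hypothetical; search() receives the model as a parameter

    token_counter_mock = TokenCountingHandler(
        tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False
    )
    service_context_mock = ServiceContext.from_defaults(
        # The mocks tally what a real call would consume without touching the network.
        llm_predictor=MockLLMPredictor(max_tokens=4096),
        embed_model=MockEmbedding(embed_dim=1536),
        callback_manager=CallbackManager([token_counter_mock]),
    )
    # After a dry run against service_context_mock, the estimate is
    # token_counter_mock.total_llm_token_count + token_counter_mock.total_embedding_token_count.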
