splitter.py
# Split documents into chunks
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
import tiktoken

# Note: chunk sizes are measured by the splitter's length_function. With
# length_function=len (used below) these limits are character counts; a token
# counter such as get_num_tokens_from_string can be passed instead for
# token-based sizing.
CHUNK_SIZE = 2000      # Maximum length of each chunk
CHUNK_OVERLAP = 200    # Overlap between consecutive chunks


def get_num_tokens_from_string(string: str, encoding_name: str) -> int:
    """Return the number of tokens in a text string for the given tiktoken encoding."""
    encoding = tiktoken.get_encoding(encoding_name)
    return len(encoding.encode(string))


def split_documents(docs):
    """Split a list of raw strings or LangChain Documents into overlapping chunks."""
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=CHUNK_SIZE,
        chunk_overlap=CHUNK_OVERLAP,
        length_function=len,
        separators=["\n\n", "\n", " ", ""],
    )
    # Accept either plain strings or Document objects.
    contents = docs
    if docs and isinstance(docs[0], Document):
        contents = [doc.page_content for doc in docs]
    texts = text_splitter.create_documents(contents)
    print(f"Split into {len(texts)} chunks")
    return texts
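

# Illustrative usage sketch: a minimal example of calling split_documents and
# the token counter. The sample text and the "cl100k_base" encoding choice are
# assumptions for demonstration only, not part of any particular pipeline.
if __name__ == "__main__":
    sample_texts = [
        "LangChain's RecursiveCharacterTextSplitter breaks long documents into "
        "overlapping chunks so each piece fits within a model's context window.",
    ]
    chunks = split_documents(sample_texts)
    for i, chunk in enumerate(chunks):
        # Report both character length and token count for each chunk.
        n_tokens = get_num_tokens_from_string(chunk.page_content, "cl100k_base")
        print(f"Chunk {i}: {len(chunk.page_content)} chars, {n_tokens} tokens")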