You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Hi there, thanks a lot for the tutorial on YouTube. After running the code locally I seem to be facing an issue, and was wondering if you could help. I have already installed Ollama and have it running in the background, in the same directory.
when running:
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vectorstore.as_retriever(),
chain_type_kwargs={"prompt": prompt}
)
error:
ValidationError Traceback (most recent call last)
Cell In[7], line 1
----> 1 qa_chain = RetrievalQA.from_chain_type(
2 llm,
3 retriever=vectorstore.as_retriever(),
4 chain_type_kwargs={"prompt": prompt}
5 )
File /usr/local/lib/python3.10/dist-packages/langchain/chains/retrieval_qa/base.py:100, in BaseRetrievalQA.from_chain_type(cls, llm, chain_type, chain_type_kwargs, **kwargs)
98 """Load chain from chain type."""
99 _chain_type_kwargs = chain_type_kwargs or {}
--> 100 combine_documents_chain = load_qa_chain(
101 llm, chain_type=chain_type, **_chain_type_kwargs
102 )
103 return cls(combine_documents_chain=combine_documents_chain, **kwargs)
File /usr/local/lib/python3.10/dist-packages/langchain/chains/question_answering/__init__.py:249, in load_qa_chain(llm, chain_type, verbose, callback_manager, **kwargs)
244 if chain_type not in loader_mapping:
245 raise ValueError(
246 f"Got unsupported chain type: {chain_type}. "
247 f"Should be one of {loader_mapping.keys()}"
248 )
--> 249 return loader_mapping[chain_type](
250 llm, verbose=verbose, callback_manager=callback_manager, **kwargs
251 )
File /usr/local/lib/python3.10/dist-packages/langchain/chains/question_answering/__init__.py:73, in _load_stuff_chain(llm, prompt, document_variable_name, verbose, callback_manager, callbacks, **kwargs)
63 def _load_stuff_chain(
64 llm: BaseLanguageModel,
65 prompt: Optional[BasePromptTemplate] = None,
(...)
70 **kwargs: Any,
71 ) -> StuffDocumentsChain:
72 _prompt = prompt or stuff_prompt.PROMPT_SELECTOR.get_prompt(llm)
---> 73 llm_chain = LLMChain(
74 llm=llm,
75 prompt=_prompt,
76 verbose=verbose,
77 callback_manager=callback_manager,
78 callbacks=callbacks,
79 )
80 # TODO: document prompt
81 return StuffDocumentsChain(
82 llm_chain=llm_chain,
83 document_variable_name=document_variable_name,
(...)
87 **kwargs,
88 )
File /usr/local/lib/python3.10/dist-packages/langchain/load/serializable.py:75, in Serializable.__init__(self, **kwargs)
74 def __init__(self, **kwargs: Any) -> None:
---> 75 super().__init__(**kwargs)
76 self._lc_kwargs = kwargs
File /usr/local/lib/python3.10/dist-packages/pydantic/main.py:341, in pydantic.main.BaseModel.__init__()
ValidationError: 1 validation error for LLMChain
llm
Can't instantiate abstract class BaseLanguageModel with abstract methods agenerate_prompt, apredict, apredict_messages, generate_prompt, invoke, predict, predict_messages (type=type_error)
The text was updated successfully, but these errors were encountered: