LLM Notes (2): RAG Code Practice with LangChain
LangChain is a framework for developing applications powered by large language models (LLMs). Specifically, the framework consists of the following open-source libraries:
- langchain-core: base abstractions and the LangChain Expression Language (LCEL).
- langchain-community: third-party integrations.
- Partner packages (e.g. langchain-openai, langchain-anthropic): some integrations have been split out further into lightweight packages that depend only on langchain-core.
- langchain: the chains, agents, and retrieval strategies that make up an application's cognitive architecture.
- langgraph: build robust, stateful, multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
- langserve: deploy LangChain chains as REST APIs.
- LangSmith: a developer platform that lets you debug, test, evaluate, and monitor LLM applications, and integrates seamlessly with LangChain.
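Before running the examples below, the relevant packages need to be installed. A minimal sketch (the exact set depends on which integrations you use; dashscope is the underlying SDK for the Tongyi and DashScopeEmbeddings integrations used later in this post):

pip install langchain langchain-core langchain-community langchain-chroma langchain-text-splitters dashscope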
Practice 1
Using langchain_community, combine a question template with the Tongyi model to get a result: chain = prompt | llm.
# Chaining Tongyi with LangChain
# Use the composition built into langchain_community: connect its PromptTemplate and Tongyi into one pipeline
import os
from langchain_community.llms import Tongyi

DASHSCOPE_API_KEY = 'your-dashscope-api-key'
os.environ["DASHSCOPE_API_KEY"] = DASHSCOPE_API_KEY

# Approach 1: try invoke directly. Tongyi() is already a fully wrapped LLM and comes with a default model.
text = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
res = Tongyi().invoke(text)
print(res)

# Approach 2: specify a particular model
from langchain_core.prompts import PromptTemplate

llm = Tongyi(model_name="qwen-plus", temperature=0.1)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)  # write a template string containing {} placeholders, then convert it into a PromptTemplate
chain = prompt | llm  # PromptTemplate | Tongyi = a new runnable. Note the order: prompt first, llm second!
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
# result = chain.invoke(question)  # invoking with a bare string; the template expects a dict, so prefer the form below
result = chain.invoke({"question": question})  # the template is in place, so pass the {question} variable explicitly
print(result)
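Since chain is a standard Runnable, the same composition also supports batching; a small sketch with a second, made-up question:

questions = [
    {"question": "What NFL team won the Super Bowl in the year Justin Bieber was born?"},
    {"question": "Which team won the first Super Bowl?"},
]
for answer in chain.batch(questions):  # run the chain over both inputs
    print(answer)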
Practice 2
(1) Import the required libraries and set the API key
from langchain_text_splitters import RecursiveCharacterTextSplitter  # text splitter
# from langchain import hub  # hub of shared prompts/components; can be used to pull predefined prompts or chains
from langchain_chroma import Chroma  # vector store for document storage and retrieval
from langchain_core.output_parsers import StrOutputParser  # parse model output into a plain string
from langchain_core.runnables import RunnablePassthrough  # placeholder that passes the raw input through to the next step
# from langchain_openai import OpenAIEmbeddings  # text embedding module
import os
from langchain_community.llms import Tongyi
# from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
DASHSCOPE_API_KEY = ''
os.environ["DASHSCOPE_API_KEY"] = DASHSCOPE_API_KEY
(2) Choose an embedding model; here the DashScopeEmbeddings interface is used
from langchain_community.embeddings import DashScopeEmbeddings
embeddings = DashScopeEmbeddings(
    model="text-embedding-v1", dashscope_api_key='')
Or use the HuggingFaceEmbeddings interface:
from langchain_community.embeddings import HuggingFaceEmbeddings
embeddings = HuggingFaceEmbeddings(model_name="/root/data/model/sentence-transformer")
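Whichever backend is chosen, both implement the same Embeddings interface, so a quick sanity check of the output dimension looks the same:

vec = embeddings.embed_query("hello world")  # embed a single query string
print(len(vec))  # dimensionality of the embedding vector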
(3) Parse unstructured files: Word
from langchain_community.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader("报告.docx")
docs = loader.load()
print(docs[0].page_content[:100])
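Note that UnstructuredWordDocumentLoader depends on the unstructured library for .docx parsing, which may need to be installed separately:

pip install "unstructured[docx]"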
Or parse a web page:
# Load, chunk and index the contents of the blog.
from langchain_community.document_loaders import WebBaseLoader

loader = WebBaseLoader(
    web_paths=("https://blog.csdn.net/m0_47738450/article/details/141243694",),
    # bs_kwargs=dict(  # customize parsing behavior
    #     parse_only=bs4.SoupStrainer(  # keep only the parts of the page we care about
    #         class_=("post-content", "post-title", "post-header")
    #     )
    # ),
)
docs = loader.load()  # the collection of documents
print(docs)
(4) Chunking / retrieval
# Pipeline: chunk first, then embed, then store the vectors, then retrieve
# Split long text into smaller chunks: at most 1000 characters per chunk, with a 200-character overlap between chunks to keep the context coherent
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)  # text chunker
splits = text_splitter.split_documents(docs)  # chunk the documents
# Ideally the vector store could ingest the file directly, piece by piece, but the file is too large, so it has to be chunked first
embeddings = DashScopeEmbeddings(model="text-embedding-v1")
vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
# Retrieve and generate using the relevant snippets of the blog.
retriever = vectorstore.as_retriever()  # retriever
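as_retriever also accepts search parameters; for example, a sketch (with an illustrative query) that retrieves the top 4 most similar chunks and queries the retriever directly:

topk_retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 4})
hits = topk_retriever.invoke("What is the main idea of the report?")  # returns a list of Documents
print(len(hits), hits[0].page_content[:100])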
(5) Pull a prompt template from the LangChain Hub
prompt = hub.pull("rlm/rag-prompt")  # pull a shared prompt template (requires uncommenting the `from langchain import hub` import above)
Or build your own template:
template = '''
[Task]
Answer the question based on the context provided by the user, and follow the answer requirements.
[Background knowledge]
{context}
[Answer requirements]
- You must answer strictly based on the background knowledge; answering from common sense or prior knowledge is forbidden.
- For anything you do not know, reply exactly "No relevant answer found".
-----------
{question}
'''
prompt = PromptTemplate.from_template(template)  # write a template string containing {} placeholders, then convert it into a PromptTemplate
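Single braces mark template variables here; doubled braces such as {{context}} would be treated as a literal brace, and the retrieved context would never be filled in. A quick check that both variables were parsed:

print(prompt.input_variables)  # expected: ['context', 'question']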
(6) Use Tongyi as the LLM and wire up the RAG pipeline
def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

llm = Tongyi(model_name="qwen-plus", temperature=0.1)
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
res = rag_chain.invoke("What is the main idea of the report?")
print(res)
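The dict at the head of the chain runs both branches on the same input: the question flows through retriever and format_docs to fill {context}, while RunnablePassthrough() forwards it unchanged into {question}. The retrieval branch can also be inspected on its own:

context_chain = retriever | format_docs  # retrieve documents, then merge them into one string
print(context_chain.invoke("What is the main idea of the report?")[:200])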
Full code
from langchain_text_splitters import RecursiveCharacterTextSplitter  # text splitter
# from langchain import hub  # hub of shared prompts/components
from langchain_chroma import Chroma  # vector store for document storage and retrieval
from langchain_core.output_parsers import StrOutputParser  # parse model output into a plain string
from langchain_core.runnables import RunnablePassthrough  # placeholder that passes the raw input through to the next step
# from langchain_openai import OpenAIEmbeddings  # text embedding module
import os
from langchain_community.llms import Tongyi
# from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate

DASHSCOPE_API_KEY = ''
os.environ["DASHSCOPE_API_KEY"] = DASHSCOPE_API_KEY

from langchain_community.embeddings import DashScopeEmbeddings
embeddings = DashScopeEmbeddings(model="text-embedding-v1", dashscope_api_key='')

from langchain_community.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader("报告.docx")
docs = loader.load()
print(docs[0].page_content[:100])

# Pipeline: chunk first, then embed, then store the vectors, then retrieve
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)  # at most 1000 chars per chunk, 200-char overlap to keep context coherent
splits = text_splitter.split_documents(docs)  # chunk the documents
vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings)
# Retrieve and generate using the relevant snippets of the document.
retriever = vectorstore.as_retriever()  # retriever

template = '''
[Task]
Answer the question based on the context provided by the user, and follow the answer requirements.
[Background knowledge]
{context}
[Answer requirements]
- You must answer strictly based on the background knowledge; answering from common sense or prior knowledge is forbidden.
- For anything you do not know, reply exactly "No relevant answer found".
-----------
{question}
'''
prompt = PromptTemplate.from_template(template)

def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

llm = Tongyi(model_name="qwen-plus", temperature=0.1)
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
res = rag_chain.invoke("What is the main idea of the report?")
print(res)
Practice 3
A multi-PDF RAG chatbot built on Streamlit.
Compared with Practice 2:
- Document reading uses PdfReader; RecursiveCharacterTextSplitter is the same as above.
- The prompt template uses ChatPromptTemplate.
- The embedding model is SpacyEmbeddings (above it was DashScopeEmbeddings).
- The vector store is FAISS (above it was Chroma).
- The retriever is wrapped into a tool with create_retriever_tool.
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.embeddings.spacy_embeddings import SpacyEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.tools.retriever import create_retriever_tool
from dotenv import load_dotenv
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain.agents import AgentExecutor, create_tool_calling_agent
import os

load_dotenv()  # load API keys from a .env file, if present
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # work around duplicate OpenMP runtimes (FAISS on some platforms)

embeddings = SpacyEmbeddings(model_name="en_core_web_sm")

def pdf_read(pdf_doc):
    # Extract raw text from every page of every uploaded PDF
    text = ""
    for pdf in pdf_doc:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            text += page.extract_text()
    return text

def get_chunks(text):
    # Split the raw text into overlapping chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = text_splitter.split_text(text)
    return chunks

def vector_store(text_chunks):
    # Embed the chunks into a FAISS index and persist it to disk
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    vector_store.save_local("faiss_db")

def get_conversational_chain(tools, ques):
    # os.environ["ANTHROPIC_API_KEY"] = os.getenv("ANTHROPIC_API_KEY")
    # llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0, api_key=os.getenv("ANTHROPIC_API_KEY"), verbose=True)
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, api_key="")
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                """You are a helpful assistant. Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in provided context just say, "answer is not available in the context", don't provide the wrong answer""",
            ),
            ("placeholder", "{chat_history}"),
            ("human", "{input}"),
            ("placeholder", "{agent_scratchpad}"),
        ]
    )
    tool = [tools]  # the agent expects a list of tools; here it holds the single retriever tool
    agent = create_tool_calling_agent(llm, tool, prompt)
    agent_executor = AgentExecutor(agent=agent, tools=tool, verbose=True)
    response = agent_executor.invoke({"input": ques})
    print(response)
    st.write("Reply: ", response['output'])

def user_input(user_question):
    # Reload the persisted FAISS index and expose it to the agent as a retriever tool
    new_db = FAISS.load_local("faiss_db", embeddings, allow_dangerous_deserialization=True)
    retriever = new_db.as_retriever()
    retrieval_chain = create_retriever_tool(retriever, "pdf_extractor", "This tool is to give answer to queries from the pdf")
    get_conversational_chain(retrieval_chain, user_question)

def main():
    st.set_page_config("Chat PDF")
    st.header("RAG based Chat with PDF")

    user_question = st.text_input("Ask a Question from the PDF Files")
    if user_question:
        user_input(user_question)

    with st.sidebar:
        st.title("Menu:")
        pdf_doc = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
        if st.button("Submit & Process"):
            with st.spinner("Processing..."):
                raw_text = pdf_read(pdf_doc)
                text_chunks = get_chunks(raw_text)
                vector_store(text_chunks)
                st.success("Done")

if __name__ == "__main__":
    main()
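Before launching, the spaCy model behind SpacyEmbeddings must be downloaded, and the app is started through Streamlit (pdf_chat.py is an assumed filename for the script above):

python -m spacy download en_core_web_sm
streamlit run pdf_chat.py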