logo
LangChain 框架指南
AI Engineer

LangChain 框架指南

LangChain 是构建 LLM 应用的流行框架,提供了链式调用、Agent、RAG 等丰富的功能模块。

LangChain 框架指南 › 快速开始

LangChain 快速开始

本指南帮助你快速上手 LangChain,构建你的第一个 LLM 应用。

#安装

#Python

bash
pip install langchain langchain-openai langchain-community

#按需安装

bash
# OpenAI 支持
pip install langchain-openai

# Anthropic (Claude) 支持
pip install langchain-anthropic

# 向量存储
pip install langchain-chroma    # ChromaDB
pip install langchain-pinecone  # Pinecone

# 文档加载
pip install pypdf         # PDF
pip install unstructured  # 多格式

#环境配置

bash
# .env 文件
OPENAI_API_KEY=sk-...
ANTHROPIC_API_KEY=sk-ant-...
python
from dotenv import load_dotenv

load_dotenv()

#第一个应用

#简单对话

python
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage

# 创建模型
llm = ChatOpenAI(model="gpt-4o")

# 发送消息
messages = [
    SystemMessage(content="你是一个专业的程序员"),
    HumanMessage(content="用 Python 写一个快速排序")
]
response = llm.invoke(messages)
print(response.content)

#使用 Claude

python
from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-sonnet-4-20250514")
response = llm.invoke("介绍一下 LangChain")
print(response.content)

#Prompt Templates

#基础模板

python
from langchain_core.prompts import ChatPromptTemplate

# 创建模板
prompt = ChatPromptTemplate.from_messages([
    ("system", "你是一个{role}专家"),
    ("human", "{question}")
])

# 使用模板
messages = prompt.invoke({
    "role": "Python",
    "question": "如何读取 JSON 文件?"
})
response = llm.invoke(messages)
print(response.content)

#字符串模板

python
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template(
    "将以下文本翻译成{language}:\n\n{text}"
)
formatted = prompt.format(
    language="英文",
    text="今天天气很好"
)

#输出解析

#字符串解析

python
from langchain_core.output_parsers import StrOutputParser

parser = StrOutputParser()

# 链式调用
chain = prompt | llm | parser
result = chain.invoke({"role": "Python", "question": "什么是装饰器?"})
print(result)  # 直接是字符串

#JSON 解析

python
from langchain_core.output_parsers import JsonOutputParser
from pydantic import BaseModel

class Recipe(BaseModel):
    name: str
    ingredients: list[str]
    steps: list[str]

parser = JsonOutputParser(pydantic_object=Recipe)

prompt = ChatPromptTemplate.from_messages([
    ("system", "返回 JSON 格式的食谱。{format_instructions}"),
    ("human", "给我一个{dish}的食谱")
])

chain = prompt | llm | parser
result = chain.invoke({
    "dish": "番茄炒蛋",
    "format_instructions": parser.get_format_instructions()
})
print(result)  # {"name": "番茄炒蛋", "ingredients": [...], "steps": [...]}

#LCEL (LangChain Expression Language)

#基础链

python
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# 组件
prompt = ChatPromptTemplate.from_template("讲一个关于{topic}的笑话")
llm = ChatOpenAI()
parser = StrOutputParser()

# 链式组合
chain = prompt | llm | parser

# 调用
result = chain.invoke({"topic": "程序员"})
print(result)

#流式输出

python
for chunk in chain.stream({"topic": "程序员"}):
    print(chunk, end="", flush=True)

#批量处理

python
results = chain.batch([
    {"topic": "程序员"},
    {"topic": "产品经理"},
    {"topic": "设计师"}
])

#并行执行

python
from langchain_core.runnables import RunnableParallel

# 并行执行多个任务
parallel = RunnableParallel(
    joke=ChatPromptTemplate.from_template("讲一个关于{topic}的笑话") | llm | parser,
    poem=ChatPromptTemplate.from_template("写一首关于{topic}的短诗") | llm | parser
)

result = parallel.invoke({"topic": "春天"})
print(result["joke"])
print(result["poem"])

#记忆 (Memory)

#对话记忆

python
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory

# 存储
store = {}

def get_session_history(session_id: str):
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]

# 创建带记忆的链
chain_with_history = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="question",
    history_messages_key="history"
)

# 对话
config = {"configurable": {"session_id": "user-123"}}
response1 = chain_with_history.invoke(
    {"question": "我叫小明"},
    config=config
)
response2 = chain_with_history.invoke(
    {"question": "我叫什么名字?"},
    config=config
)  # 会记住你叫小明

#文档加载

#加载文本文件

python
from langchain_community.document_loaders import TextLoader

loader = TextLoader("./document.txt")
documents = loader.load()

#加载 PDF

python
from langchain_community.document_loaders import PyPDFLoader

loader = PyPDFLoader("./document.pdf")
pages = loader.load_and_split()

#加载网页

python
from langchain_community.document_loaders import WebBaseLoader

loader = WebBaseLoader("https://example.com/article")
documents = loader.load()

#目录加载

python
from langchain_community.document_loaders import DirectoryLoader

loader = DirectoryLoader(
    "./docs/",
    glob="**/*.md",  # 匹配模式
    show_progress=True
)
documents = loader.load()

#文本分割

python
from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,    # 块大小
    chunk_overlap=200,  # 重叠大小
    separators=["\n\n", "\n", " ", ""]  # 分割符
)
chunks = splitter.split_documents(documents)
print(f"分割为 {len(chunks)} 个块")

#完整示例:问答应用

python
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

# 1. 加载文档
loader = TextLoader("./knowledge.txt")
documents = loader.load()

# 2. 分割文档
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
chunks = splitter.split_documents(documents)

# 3. 创建向量存储
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(chunks, embeddings)

# 4. 创建检索器
retriever = vectorstore.as_retriever(search_kwargs={"k": 3})

# 5. 创建 Prompt
prompt = ChatPromptTemplate.from_template("""
根据以下上下文回答问题。如果上下文中没有相关信息,请说"我不知道"。

上下文:
{context}

问题:{question}

回答:
""")

# 6. 创建 Chain
def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | ChatOpenAI()
    | StrOutputParser()
)

# 7. 使用
answer = chain.invoke("这个项目是做什么的?")
print(answer)

#下一步


提示:LangChain 发展迅速,建议查看 官方文档 获取最新 API。

1v1免费职业咨询