结构化输出结果

master
msb_31955 3 weeks ago
parent ec0a13a56e
commit 1a9d6dd3e4

@ -0,0 +1,26 @@
from langchain_core.output_parsers import StrOutputParser, JsonOutputParser, SimpleJsonOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from env_util import DASHSCOPE_API_KEY, DASHSCOPE_BASE_URL
# llm is the chat-model client: Qwen served through DashScope's
# OpenAI-compatible endpoint. Credentials come from env_util.
llm = ChatOpenAI(
    model="qwen-plus",            # DashScope Qwen model name
    base_url=DASHSCOPE_BASE_URL,
    api_key=DASHSCOPE_API_KEY,
    temperature=0.8,              # moderately creative output
)
# Prompt template; {topic} is substituted at invoke time.
prompt_template = PromptTemplate.from_template("帮我生成一个简短的,关于{topic}的报幕词。")
# Pipe the prompt into the model, then parse the response to a plain string.
chain = prompt_template | llm | StrOutputParser()
# BUG FIX: the original passed {"topic","迎新生晚会"} — a *set* literal
# (comma instead of colon), not a dict — so the template variable was
# never bound. invoke() expects a mapping of variable name -> value.
resp = chain.invoke({"topic": "迎新生晚会"})
# Print the generated announcement text.
print(resp)

@ -0,0 +1,44 @@
import json
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
from env_util import DASHSCOPE_API_KEY, DASHSCOPE_BASE_URL
# llm is the chat-model client: Qwen served through DashScope's
# OpenAI-compatible endpoint. Credentials come from env_util.
llm = ChatOpenAI(
    model="qwen-plus",            # DashScope Qwen model name
    base_url=DASHSCOPE_BASE_URL,
    api_key=DASHSCOPE_API_KEY,
    temperature=0.8,              # moderately creative output
)
# Pydantic model describing the structured shape we ask the LLM to fill in.
# NOTE: the Chinese `description` strings are sent to the model as part of
# the schema — they are runtime behavior, not comments. Do not translate.
class Result(BaseModel):
    """Structured announcement script: opening, main body, and closing."""
    first: str = Field(description = "内容开头")    # opening line
    content: str = Field(description = "主体内容")  # main body
    last: str = Field(description = "内容结尾")     # closing line
# Bind the structured-output schema to the model: responses are parsed
# into a Result instance instead of raw text.
runnable = llm.with_structured_output(Result)
# Prompt template; {topic} is substituted at invoke time.
prompt_template = PromptTemplate.from_template("帮我生成一个简短的,关于{topic}的报幕词。")
# The pipe order is not arbitrary: a chain runs left-to-right, so the
# prompt must come first — it consumes the input dict and produces the
# messages the model consumes. `runnable | prompt_template` fed the raw
# input dict straight into the model, which is why it "didn't work".
chain = prompt_template | runnable
# BUG FIX: the original passed {"topic","迎新生晚会"} — a *set* literal
# (comma instead of colon), not a dict; invoke() expects a mapping of
# template variable -> value.
resp = chain.invoke({"topic": "迎新生晚会"})
print(resp)
# Dump the pydantic model as a plain dict via the public v2 API
# (model_dump) rather than poking at __dict__.
print(resp.model_dump())
# Serialize to JSON; ensure_ascii=False keeps the Chinese text readable
# instead of emitting \uXXXX escapes. (stdlib `json` — easy to import the
# wrong `json` name, as the original comment warned.)
print(json.dumps(resp.model_dump(), ensure_ascii=False))
Loading…
Cancel
Save