from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

# High temperature -> more creative / less deterministic name suggestions.
llm = OpenAI(temperature=0.9)

# Prompt template with a single input variable, "product".
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
创建一个非常简单的链,它将接受用户输入,用它格式化提示,然后将其发送给LLMs。
1 2 3 4 5
from langchain.chains import LLMChain

# Chain that formats the prompt with the user input and sends it to the LLM.
chain = LLMChain(llm=llm, prompt=prompt)

# Run the chain only specifying the input variable.
print(chain.run("colorful socks"))
# Prompt with two input variables; run() then takes a dict mapping
# variable names to values instead of a single string.
prompt = PromptTemplate(
    input_variables=["company", "product"],
    template="What is a good name for {company} that makes {product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run({
    'company': "ABC Startup",
    'product': "colorful socks",
}))
Socktopia Colourful Creations.
我们读入一个csv文件,用于之后的示例
1 2 3
import pandas as pd

# Load the example data used by the chains below.
df = pd.read_csv('Data.csv')
df.head()
LLMChain
这是一个简单但是非常强大的Chain,为后面介绍的许多Chain打下了基础
1 2 3
# One import per line (PEP 8); these were collapsed onto a single line.
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain
初始化要使用的LLM
1
# Chat model with temperature=0.9 for more varied / creative responses.
llm = ChatOpenAI(temperature=0.9)
定义Prompt
1 2 3 4
# Prompt asking for the best company name for a given product.
# (Reconstructed: the original's in-string line continuation was mangled
# into a literal "\ " during extraction.)
prompt = ChatPromptTemplate.from_template(
    "What is the best name to describe "
    "a company that makes {product}?"
)
# prompt template 2: write a short description for a company.
second_prompt = ChatPromptTemplate.from_template(
    "Write a 20 words description for the following "
    "company:{company_name}"
)
# chain 2: input = company_name, output = description
chain_two = LLMChain(llm=llm, prompt=second_prompt)
# prompt template 1: translate the review to English.
first_prompt = ChatPromptTemplate.from_template(
    "Translate the following review to english:"
    "\n\n{Review}"
)
# chain 1: input = Review, output = English_Review
chain_one = LLMChain(llm=llm, prompt=first_prompt,
                     output_key="English_Review")

# prompt template 2: one-sentence summary of the English review.
second_prompt = ChatPromptTemplate.from_template(
    "Can you summarize the following review in 1 sentence:"
    "\n\n{English_Review}"
)
# chain 2: input = English_Review, output = summary
chain_two = LLMChain(llm=llm, prompt=second_prompt,
                     output_key="summary")
# prompt template 3: detect the review's language.
# (Original comment said "translate to english", which was wrong —
# this prompt identifies the language, it does not translate.)
third_prompt = ChatPromptTemplate.from_template(
    "What language is the following review:\n\n{Review}"
)
# chain 3: input = Review, output = language
chain_three = LLMChain(llm=llm, prompt=third_prompt,
                       output_key="language")
# prompt template 4: follow-up response written in the detected language.
fourth_prompt = ChatPromptTemplate.from_template(
    "Write a follow up response to the following "
    "summary in the specified language:"
    "\n\nSummary: {summary}\n\nLanguage: {language}"
)
# chain 4: inputs = summary, language; output = followup_message
chain_four = LLMChain(llm=llm, prompt=fourth_prompt,
                      output_key="followup_message")
# Expert prompt for physics questions; {input} is filled in by the router.
# Reconstructed with proper spacing: the mangled continuations had fused
# words ("conciseand", "admitthat").
physics_template = """You are a very smart physics professor. \
You are great at answering questions about physics in a concise \
and easy to understand manner. \
When you don't know the answer to a question you admit \
that you don't know.

Here is a question:
{input}"""
# Expert prompt for math questions; {input} is filled in by the router.
math_template = """You are a very good mathematician. \
You are great at answering math questions. \
You are so good because you are able to break down \
hard problems into their component parts, \
answer the component parts, and then put them together \
to answer the broader question.

Here is a question:
{input}"""
# Expert prompt for history questions; {input} is filled in by the router.
history_template = """You are a very good historian. \
You have an excellent knowledge of and understanding of people, \
events and contexts from a range of historical periods. \
You have the ability to think, reflect, debate, discuss and \
evaluate the past. You have a respect for historical evidence \
and the ability to make use of it to support your explanations \
and judgements.

Here is a question:
{input}"""
# Expert prompt for computer-science / coding questions; {input} is filled
# in by the router.
computerscience_template = """You are a successful computer scientist. \
You have a passion for creativity, collaboration, \
forward-thinking, confidence, strong problem-solving capabilities, \
understanding of theories and algorithms, and excellent communication \
skills. \
You are great at answering coding questions. \
You are so good because you know how to solve a problem by \
describing the solution in imperative steps \
that a machine can easily interpret and you know how to \
choose a solution that has a good balance between \
time complexity and space complexity.

Here is a question:
{input}"""
# Router metadata: "name" and "description" are what the router LLM sees
# when choosing a destination; "prompt_template" is the expert prompt used
# once a destination is chosen.
# NOTE(review): "History" is capitalized while the other names are
# lowercase — kept as-is because destination names are matched at runtime,
# but worth normalizing together with any code that references it.
prompt_infos = [
    {
        "name": "physics",
        "description": "Good for answering questions about physics",
        "prompt_template": physics_template,
    },
    {
        "name": "math",
        "description": "Good for answering math questions",
        "prompt_template": math_template,
    },
    {
        "name": "History",
        "description": "Good for answering history questions",
        "prompt_template": history_template,
    },
    {
        "name": "computer science",
        "description": "Good for answering computer science questions",
        "prompt_template": computerscience_template,
    },
]
导入包
1 2 3
# Router imports: MultiPromptChain routes an input between prompt templates;
# LLMRouterChain + RouterOutputParser implement the LLM-driven routing step.
from langchain.chains.router import MultiPromptChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.prompts import PromptTemplate
# Router prompt. It is formatted TWICE: first .format(destinations=...) fills
# {destinations}, which turns the doubled braces into single ones — so
# {{input}} becomes {input} and {{{{ ... }}}} becomes literal {{ ... }} in
# the JSON example shown to the model.
MULTI_PROMPT_ROUTER_TEMPLATE = """Given a raw text input to a \
language model select the model prompt best suited for the input. \
You will be given the names of the available prompts and a \
description of what the prompt is best suited for. \
You may also revise the original input if you think that revising \
it will ultimately lead to a better response from the language model.

<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
```json
{{{{
    "destination": string \ name of the prompt to use or "DEFAULT"
    "next_inputs": string \ a potentially modified version of the original input
}}}}
```

REMEMBER: "destination" MUST be one of the candidate prompt \
names specified below OR it can be "DEFAULT" if the input is not \
well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input \
if you don't think any modifications are needed.

<< CANDIDATE PROMPTS >>
{destinations}

<< INPUT >>
{{input}}

<< OUTPUT (remember to include the ```json)>>"""