! pip install --upgrade google-cloud-aiplatform
! pip install "shapely<2.0.0"
! pip install langchain
! pip install pypdf
! pip install pydantic==1.10.8
! pip install chromadb==0.3.26
! pip install "langchain[docarray]"
! pip install typing-inspect==0.8.0 typing_extensions==4.5.0
6 Chains
Complex applications often require chaining LLM calls together, or combining them with other components.
We will cover the following types of chains:
Sequential chains
Router chains
# Automatically restart kernel after installs so that your environment can access the new packages
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
If you’re on Colab, authenticate via the following cell:
from google.colab import auth
auth.authenticate_user()
7 Initialize the SDK and LLM
# Add your project id and the region
= "<..>"
PROJECT_ID = "<..>" REGION
# Utils
import time
from typing import List
# Vertex AI
import vertexai
# Langchain
import langchain
from pydantic import BaseModel
print(f"LangChain version: {langchain.__version__}")
from langchain.chat_models import ChatVertexAI
from langchain.prompts import ChatPromptTemplate
from langchain.llms import VertexAI
from langchain.chains import LLMChain
vertexai.init(project=PROJECT_ID, location=REGION)
# LLM model
llm = VertexAI(
    model_name="text-bison@001",
    max_output_tokens=256,
    # Increasing the temperature
    # for more creative output
    temperature=0.9,
    top_p=0.8,
    top_k=40,
    verbose=True,
)
7.0.1 LLMChain
An LLMChain is the simplest chain: it formats a prompt template with the input variables and passes the result to the LLM.
prompt = ChatPromptTemplate.from_template(
    "What is the best name to describe \
a company that makes {product}?"
)
chain = LLMChain(llm=llm, prompt=prompt)
product = "A saw for laminate wood"
chain.run(product)
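The same chain also works with the chat model imported earlier. A minimal sketch, assuming the chat-bison@001 model is available in your project (the model name and temperature here are illustrative):

# Illustrative: drive the same prompt with ChatVertexAI instead of the text model
chat_llm = ChatVertexAI(model_name="chat-bison@001", temperature=0.9)
chat_chain = LLMChain(llm=chat_llm, prompt=prompt)
chat_chain.run(product)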
7.0.2 Sequential chain
A sequential chain makes a series of calls to an LLM. It enables a pipeline-style workflow in which the output from one call becomes the input to the next.
The two types include:
SimpleSequentialChain, where each step has a single input and a single output, which becomes the input to the next step.
SequentialChain, which allows for multiple inputs and outputs (a sketch follows the SimpleSequentialChain example below).
from langchain.chains import SimpleSequentialChain
from langchain.prompts import PromptTemplate
# This is an LLMChain to write a pitch for a new product
llm = VertexAI(temperature=0.7)
template = """You are an entrepreneur. Think of a groundbreaking new product and write a short pitch.

Title: {title}
Entrepreneur: This is a pitch for the above product:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
pitch_chain = LLMChain(llm=llm, prompt=prompt_template)
= """You are a panelist on Dragon's Den. Given a \
template description of the product, you are to explain why you think it will \
succeed or fail in the market.
Product pitch: {pitch}
Review by Dragon's Den panelist:"""
= PromptTemplate(input_variables=["pitch"], template=template)
prompt_template = LLMChain(llm=llm, prompt=prompt_template) review_chain
# This is the overall chain where we run these two chains in sequence.
from langchain.chains import SimpleSequentialChain
overall_chain = SimpleSequentialChain(chains=[pitch_chain, review_chain], verbose=True)
review = overall_chain.run("Portable iced coffee maker")
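The example above uses SimpleSequentialChain, so each step passes a single string forward. As a minimal sketch of the more general SequentialChain, each sub-chain labels its output with output_key, and the chain can accept and return several named variables (the audience variable and prompt wording are illustrative):

from langchain.chains import SequentialChain

pitch_prompt = PromptTemplate(
    input_variables=["title", "audience"],
    template="Write a short pitch for a product called {title}, aimed at {audience}:",
)
pitch_io_chain = LLMChain(llm=llm, prompt=pitch_prompt, output_key="pitch")

review_prompt = PromptTemplate(
    input_variables=["pitch"],
    template="Explain whether this pitch will succeed in the market:\n{pitch}\nReview:",
)
review_io_chain = LLMChain(llm=llm, prompt=review_prompt, output_key="review")

multi_io_chain = SequentialChain(
    chains=[pitch_io_chain, review_io_chain],
    input_variables=["title", "audience"],
    # Return the intermediate pitch alongside the final review
    output_variables=["pitch", "review"],
    verbose=True,
)
multi_io_chain({"title": "Portable iced coffee maker", "audience": "commuters"})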
7.0.3 Router chain
A RouterChain dynamically selects the next chain to use for a given input. Here we use a MultiPromptChain to pick the prompt best suited to the question and then answer with it.
from langchain.chains.router import MultiPromptChain
= """
korean_template You are an expert in korean history and culture.
Here is a question:
{input}
"""
= """
spanish_template You are an expert in spanish history and culture.
Here is a question:
{input}
"""
= """
chinese_template You are an expert in Chinese history and culture.
Here is a question:
{input}
"""
prompt_infos = [
    {
        "name": "korean",
        "description": "Good for answering questions about Korean history and culture",
        "prompt_template": korean_template,
    },
    {
        "name": "spanish",
        "description": "Good for answering questions about Spanish history and culture",
        "prompt_template": spanish_template,
    },
    {
        "name": "chinese",
        "description": "Good for answering questions about Chinese history and culture",
        "prompt_template": chinese_template,
    },
]
from langchain.chains.router import MultiPromptChain
from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser
from langchain.prompts import PromptTemplate
llm = VertexAI(temperature=0)
destination_chains = {}
for p_info in prompt_infos:
    name = p_info["name"]
    prompt_template = p_info["prompt_template"]
    prompt = ChatPromptTemplate.from_template(template=prompt_template)
    chain = LLMChain(llm=llm, prompt=prompt)
    destination_chains[name] = chain
= [f"{p['name']}: {p['description']}" for p in prompt_infos]
destinations = "\n".join(destinations) destinations_str
= ChatPromptTemplate.from_template("{input}")
default_prompt = LLMChain(llm=llm, prompt=default_prompt) default_chain
# Thanks to Deeplearning.ai for this template and for the
# Langchain short course at deeplearning.ai/short-courses/.
= """Given a raw text input to a \
MULTI_PROMPT_ROUTER_TEMPLATE language model select the model prompt best suited for the input. \
You will be given the names of the available prompts and a \
description of what the prompt is best suited for. \
You may also revise the original input if you think that revising\
it will ultimately lead to a better response from the language model.
<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like:
```json
{{{{
"destination": string \ name of the prompt to use or "DEFAULT"
"next_inputs": string \ a potentially modified version of the original input
}}}}
```
REMEMBER: "destination" MUST be one of the candidate prompt \
names specified below OR it can be "DEFAULT" if the input is not \
well suited for any of the candidate prompts.
REMEMBER: "next_inputs" can just be the original input \
if you don't think any modifications are needed.
<< CANDIDATE PROMPTS >>
{destinations}
<< INPUT >>
{{input}}
<< OUTPUT (remember to include the ```json)>>"""
router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(
    destinations=destinations_str
)
router_prompt = PromptTemplate(
    template=router_template,
    input_variables=["input"],
    output_parser=RouterOutputParser(),
)
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
chain = MultiPromptChain(
    router_chain=router_chain,
    destination_chains=destination_chains,
    default_chain=default_chain,
    verbose=True,
)
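Under the hood, the router LLM returns the JSON snippet described in the template above, and RouterOutputParser turns it into the dictionary that MultiPromptChain expects. A quick sketch with a hand-written sample response (the sample text is illustrative, not real model output):

# Illustrative: parse a hand-written router response
sample_response = """```json
{
    "destination": "chinese",
    "next_inputs": "What was the Han Dynasty?"
}
```"""
RouterOutputParser().parse(sample_response)
# Expected shape: {'destination': 'chinese', 'next_inputs': {'input': 'What was the Han Dynasty?'}}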
Notice in the outputs that the country of speciality is prefixed, e.g. chinese: {'input': ...}, denoting that the question was routed to the correct expert.
"What was the Han Dynasty?") chain.run(
"What are some of typical dishes in Catalonia?") chain.run(
"How would I greet a friend's parents in Korean?") chain.run(
"Summarize Don Quixote in a short paragraph") chain.run(
If we provide a question outside our experts’ fields, the default chain handles it.
"How can I fix a carburetor?") chain.run(