! pip install --upgrade google-cloud-aiplatform
! pip install "shapely<2.0.0"
! pip install langchain
! pip install pypdf
! pip install pydantic==1.10.8
! pip install chromadb==0.3.26
! pip install "langchain[docarray]"
! pip install typing-inspect==0.8.0 typing_extensions==4.5.0
4 LangChain Intro
Models, prompt templates and parsers
# Automatically restart kernel after installs so that your environment can access the new packages
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
If you’re on Colab, authenticate via the following cell
from google.colab import auth
auth.authenticate_user()
Add your project id and the region
= "<..>"
PROJECT_ID = "<..>" REGION
import vertexai
vertexai.init(project=PROJECT_ID, location=REGION)
# Utils
import time
from typing import List
# Langchain
import langchain
from pydantic import BaseModel
print(f"LangChain version: {langchain.__version__}")
# Vertex AI
from google.cloud import aiplatform
from langchain.chat_models import ChatVertexAI
from langchain.embeddings import VertexAIEmbeddings
from langchain.llms import VertexAI
from langchain.schema import HumanMessage, SystemMessage
print(f"Vertex AI SDK version: {aiplatform.__version__}")
# Chat
chat = ChatVertexAI(
    max_output_tokens=1024,
    temperature=0.2,
    top_p=0.8,
    top_k=40,
    verbose=True)
="Hello")]) chat([HumanMessage(content
res = chat(
    [
        SystemMessage(
            content="You are an expert chef that thinks of imaginative recipes when people give you ingredients."
        ),
        HumanMessage(content="I have some kidney beans and tomatoes, what would be an easy lunch?"),
    ]
)
print(res.content)
4.0.1 Prompt templates
LangChain's abstractions such as prompt templates help keep prompts modular and reusable, especially in large applications that may require long and varied prompts.
= """Translate the text \
template_string that is delimited by triple backticks \
into a style that is {style}. \
text: ```{text}```
"""
from langchain.prompts import ChatPromptTemplate
prompt_template = ChatPromptTemplate.from_template(template_string)
prompt_template.messages[0].prompt
prompt_template.messages[0].prompt.input_variables
= """British English, \
customer_style respectful tone of a customer service agent.
"""
= """
customer_email I'm writing this review to express my complete dismay \
and utter horror at the downright disastrous \
coffee maker I purchased from your store. \
It is not at all what I expected. It's a total insult \
to the divine elixir that is coffee!
"""
customer_messages = prompt_template.format_messages(
    style=customer_style,
    text=customer_email)
print(type(customer_messages))
print(type(customer_messages[0]))
# Call the LLM to translate to the style of the customer message
customer_response = chat(customer_messages)
print(customer_response.content)
= """
service_style_glaswegian A polite assistant that writes in ponetic Glaswegian
"""
= """
service_reply We're very sorry to read the coffee maker isn't suitable. \
Please come back to the shop, where you can sample some \
brews from the other machines. We offer a refund or exchange \
should you find a better match.
"""
service_messages = prompt_template.format_messages(
    style=service_style_glaswegian,
    text=service_reply)
Notice that when we call the chat model here, we increase the temperature
parameter to allow for more imaginative responses.
service_response = chat(service_messages, temperature=0.5)
print(service_response.content)
4.0.2 Why use prompt templates?
Prompts can become long and hard to read when embedded directly in application code. The level of abstraction that templates provide helps you reuse prompt material and keeps code modular and easier to understand.
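To make the reuse concrete, here is a minimal sketch that feeds the same template_string into the plain (non-chat) VertexAI text model through a PromptTemplate. The "cheerful pirate" style and the model parameters are illustrative assumptions, not part of the original notebook.
from langchain.llms import VertexAI
from langchain.prompts import PromptTemplate

# Reuse the exact same template with a different style and a different model.
llm = VertexAI(temperature=0.2, max_output_tokens=256)  # parameters are assumptions
plain_template = PromptTemplate.from_template(template_string)

pirate_prompt = plain_template.format(
    style="a cheerful pirate",  # hypothetical style, purely for illustration
    text=customer_email)
print(llm(pirate_prompt))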
4.0.3 Parsing outputs
= """\
customer_review The excellent barbecue cauliflower starter left \
a lasting impression -- gorgeous presentation and flavors, really geared the tastebuds into action. \
Moving on to the main course, pretty great also. \
Delicious and flavorful chickpea and vegetable curry. They really nailed the buttery consistency, \
depth and balance of the spices. \
The dessert was a bit bland. I opted for a vegan chocolate mousse, \
hoping for a decadent and indulgent finale to my meal. \
It was very visually appealing but was missing the smooth, velvety \
texture of a great mousse.
"""
= """\
review_template For the input text, extract the following details: \
starter: How did the reviewer find the first course? \
Rate either Poor, Good, or Excellent. \
Do the same for the main course and dessert
Format the output as JSON with the following keys:
starter
main_course
dessert
text: {text}
"""
from langchain.prompts import ChatPromptTemplate
prompt_template = ChatPromptTemplate.from_template(review_template)
print(prompt_template)
messages = prompt_template.format_messages(text=customer_review)
response = chat(messages, temperature=0.1)
print(response.content)
Though it looks like a Python dictionary, our output is actually a string type.
type(response.content)
This means we are unable to access values in this fashion:
"main_course") response.content.get(
This is where LangChain's output parsers come in.
from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
starter_schema = ResponseSchema(name="starter", description="Review of the starter")
main_course_schema = ResponseSchema(name="main_course", description="Review of the main course")
dessert_schema = ResponseSchema(name="dessert", description="Review of the dessert")

response_schemas = [starter_schema, main_course_schema, dessert_schema]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
print(format_instructions)
Now we can update our prior review template to include the format instructions
= """\
review_template_2 For the input text, extract the following details: \
starter: How did the reviewer find the first course? \
Rate either Poor, Good, or Excellent. \
Do the same for the main course and dessert
starter
main_course
dessert
text: {text}
{format_instructions}
"""
prompt = ChatPromptTemplate.from_template(template=review_template_2)

messages = prompt.format_messages(text=customer_review,
                                  format_instructions=format_instructions)
print(messages[0].content)
response = chat(messages)
Having run it on the same review, let's check the type of the response
type(response)
output_dict = output_parser.parse(response.content)
output_dict
type(output_dict)
"main_course") output_dict.get(