! pip install --upgrade google-cloud-aiplatform
! pip install "shapely<2.0.0"
! pip install langchain
! pip install pypdf
! pip install pydantic==1.10.8
! pip install chromadb==0.3.26
! pip install langchain[docarray]
! pip install typing-inspect==0.8.0 typing_extensions==4.5.0
4 LangChain Intro
Models, prompt templates and parsers
# Automatically restart kernel after installs so that your environment can access the new packages
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
If you’re on Colab, authenticate via the following cell
from google.colab import auth
auth.authenticate_user()
Add your project id and the region
PROJECT_ID = "<..>"
REGION = "<..>"
import vertexai
vertexai.init(project=PROJECT_ID, location=REGION)
# Utils
import time
from typing import List
# Langchain
import langchain
from pydantic import BaseModel
print(f"LangChain version: {langchain.__version__}")
# Vertex AI
from google.cloud import aiplatform
from langchain.chat_models import ChatVertexAI
from langchain.embeddings import VertexAIEmbeddings
from langchain.llms import VertexAI
from langchain.schema import HumanMessage, SystemMessage
print(f"Vertex AI SDK version: {aiplatform.__version__}")# Chat
chat = ChatVertexAI(
    max_output_tokens=1024,
    temperature=0.2,
    top_p=0.8,
    top_k=40,
    verbose=True,
)
chat([HumanMessage(content="Hello")])
res = chat(
    [
        SystemMessage(
            content="You are an expert chef that thinks of imaginative recipes when people give you ingredients."
        ),
        HumanMessage(content="I have some kidney beans and tomatoes, what would be an easy lunch?"),
    ]
)
print(res.content)
4.0.1 Prompt templates
LangChain’s abstractions, such as prompt templates, can help keep prompts modular and reusable, especially in large applications that may require long and varied prompts.
template_string = """Translate the text \
that is delimited by triple backticks \
into a style that is {style}. \
text: ```{text}```
"""from langchain.prompts import ChatPromptTemplate
prompt_template = ChatPromptTemplate.from_template(template_string)
prompt_template.messages[0].prompt
prompt_template.messages[0].prompt.input_variables
customer_style = """British English, \
respectful tone of a customer service agent.
"""customer_email = """
I'm writing this review to express my complete dismay \
and utter horror at the downright disastrous \
coffee maker I purchased from your store. \
It is not at all what I expected. It's a total insult \
to the divine elixir that is coffee!
"""customer_messages = prompt_template.format_messages(
style=customer_style,
text=customer_email)print(type(customer_messages))
print(type(customer_messages[0]))# Call the LLM to translate to the style of the customer message
customer_response = chat(customer_messages)
print(customer_response.content)
service_style_glaswegian = """
A polite assistant that writes in phonetic Glaswegian
"""
service_reply = """
We're very sorry to read the coffee maker isn't suitable. \
Please come back to the shop, where you can sample some \
brews from the other machines. We offer a refund or exchange \
should you find a better match.
"""service_messages = prompt_template.format_messages(
style=service_style_glaswegian,
text=service_reply)Notice when we call the chat model we add an increase to the temperature parameter, to allow for more imaginative responses.
service_response = chat(service_messages, temperature=0.5)
print(service_response.content)
4.0.2 Why use prompt templates?
Prompts can become long and hard to read when embedded directly in application code. The level of abstraction templates offer makes it easier to reuse prompt material and keeps the code modular and understandable.
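For instance, a template can be defined once and then formatted with different values at every call site. The following sketch is illustrative rather than part of the notebook; summary_template is a hypothetical name, and it reuses the customer_email and service_reply strings defined above.
from langchain.prompts import ChatPromptTemplate
# One reusable template: the prompt wording lives in a single place.
summary_template = ChatPromptTemplate.from_template(
    "Summarise the text delimited by triple backticks in {word_count} words. text: ```{text}```"
)
# The same template serves different call sites with different inputs.
short_messages = summary_template.format_messages(word_count="20", text=customer_email)
long_messages = summary_template.format_messages(word_count="100", text=service_reply)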
4.0.3 Parsing outputs
customer_review = """\
The excellent barbecue cauliflower starter left \
a lasting impression -- gorgeous presentation and flavors, really geared the tastebuds into action. \
Moving on to the main course, pretty great also. \
Delicious and flavorful chickpea and vegetable curry. They really nailed the buttery consistency, \
depth and balance of the spices. \
The dessert was a bit bland. I opted for a vegan chocolate mousse, \
hoping for a decadent and indulgent finale to my meal. \
It was very visually appealing but was missing the smooth, velvety \
texture of a great mousse.
"""
review_template = """\
For the input text, extract the following details: \
starter: How did the reviewer find the first course? \
Rate either Poor, Good, or Excellent. \
Do the same for the main course and dessert.
Format the output as JSON with the following keys:
starter
main_course
dessert
text: {text}
"""
from langchain.prompts import ChatPromptTemplate
prompt_template = ChatPromptTemplate.from_template(review_template)
print(prompt_template)
messages = prompt_template.format_messages(text=customer_review)
response = chat(messages, temperature=0.1)
print(response.content)
Though it looks like a Python dictionary, our output is actually a string type.
type(response.content)
This means we are unable to access values in this fashion:
response.content.get("main_course")
This is where LangChain’s parser comes in.
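For comparison, the raw string could be parsed with the standard library (a manual workaround, not what this notebook uses), but that only works while the model returns strict JSON with nothing wrapped around it:
import json
# Raises a ValueError (json.JSONDecodeError) if the output is not pure JSON,
# e.g. if the model wraps it in a markdown code fence.
manual_dict = json.loads(response.content)
manual_dict.get("main_course")
LangChain’s output parsers, shown next, also generate format instructions to include in the prompt, so the expected schema and the parsing logic stay in one place.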
from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
starter_schema = ResponseSchema(name="starter", description="Review of the starter")
main_course_schema = ResponseSchema(name="main_course", description="Review of the main course")
dessert_schema = ResponseSchema(name="dessert", description="Review of the dessert")
response_schemas = [starter_schema, main_course_schema, dessert_schema]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
print(format_instructions)
Now we can update our prior review template to include the format instructions
review_template_2 = """\
For the input text, extract the following details: \
starter: How did the reviewer find the first course? \
Rate either Poor, Good, or Excellent. \
Do the same for the main course and dessert
starter
main_course
dessert
text: {text}
{format_instructions}
"""
prompt = ChatPromptTemplate.from_template(template=review_template_2)
messages = prompt.format_messages(text=customer_review,
    format_instructions=format_instructions)
print(messages[0].content)
response = chat(messages)
Let’s try it on the same review
type(response)
output_dict = output_parser.parse(response.content)
output_dict
type(output_dict)
output_dict.get("main_course")