This tutorial will walk you through the process of creating a simple chatbot that interfaces with Google’s cutting-edge Gemini Pro model. The application uses the Vertex AI APIs for model interaction, Streamlit for a user-friendly UI, and LangChain to manage context, so the chatbot is capable of remembering previous conversation segments. The best part? You won’t need specialized computing hardware to use this application.
Exploring Gemini Pro:
When Google unveiled the Gemini models on December 6, 2023, they announced that Bard would be powered by the revolutionary Gemini Pro model. However, as of now, this implementation has not yet reached the UK or EU, likely due to regulatory constraints. Fortunately, the Vertex AI APIs provide a way to explore the capabilities of the Gemini Pro model, even in regions where it has not been officially deployed. In this tutorial, we’ll guide you through the process using less than 100 lines of code. Let’s dive in!
How to use the Gemini models is described in Google’s Gemini API documentation.
import os
from typing import Any, List, Mapping, Optional

from dotenv import load_dotenv
import streamlit as st
import vertexai
from vertexai.preview.generative_models import GenerativeModel

from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_core.language_models.llms import LLM
class GeminiProLLM(LLM):
    """LangChain-compatible LLM wrapper around the Vertex AI Gemini Pro model.

    Each call creates a ``GenerativeModel("gemini-pro")`` client and sends the
    prompt with a fixed sampling temperature. ``stop`` sequences are not
    supported and raise ``ValueError``.
    """

    # Single source of truth for the sampling temperature: used by _call and
    # reported by _identifying_params, so the two can never drift apart
    # (originally the literal 0.1 was duplicated in both places).
    temperature: float = 0.1

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for logging/serialization."""
        return "gemini-pro"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Send ``prompt`` to Gemini Pro and return the first candidate's text.

        Args:
            prompt: The text prompt to send to the model.
            stop: Unsupported; must be ``None``.
            run_manager: LangChain callback manager (unused).
            **kwargs: Ignored extra arguments from the LangChain runtime.

        Returns:
            The first text part of the first candidate, or a sentinel string
            when the model returns no usable answer (e.g. blocked by safety
            filters).

        Raises:
            ValueError: If ``stop`` sequences are supplied.
        """
        if stop is not None:
            raise ValueError("stop kwargs are not permitted.")
        gemini_pro_model = GenerativeModel("gemini-pro")
        model_response = gemini_pro_model.generate_content(
            prompt,
            generation_config={"temperature": self.temperature},
        )
        # The response may contain no candidates, or a candidate with no
        # parts (e.g. the answer was filtered); guard both before indexing.
        candidates = model_response.candidates
        if candidates and candidates[0].content.parts:
            return candidates[0].content.parts[0].text
        return "<No answer given by Gemini Pro>"

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Parameters that uniquely identify this LLM configuration."""
        return {"model_id": "gemini-pro", "temperature": self.temperature}
# --- Vertex AI initialisation ---------------------------------------------
# Pull VERTEXAI_PROJECT (and any other settings) from a local .env file.
load_dotenv()
vertexai.init(project=os.getenv("VERTEXAI_PROJECT"))

# --- Page chrome -----------------------------------------------------------
st.set_page_config(page_title="Gemini Pro Chatbot", page_icon=":robot_face:")
st.markdown("<h1 style='text-align: center;'>Gemini Pro Chatbot</h1>", unsafe_allow_html=True)
# Build the conversation chain once per server process: Streamlit's resource
# cache returns the same chain (and thus the same memory) across reruns.
@st.cache_resource
def load_chain():
    """Return a cached ConversationChain backed by GeminiProLLM."""
    return ConversationChain(
        llm=GeminiProLLM(),
        memory=ConversationBufferMemory(),
    )

chatchain = load_chain()
# Conversation history lives in Streamlit's session state so it survives
# the script reruns triggered by every user interaction.
st.session_state.setdefault('messages', [])

st.sidebar.title("Sidebar")
# Wipe the visible history when the user asks for a fresh conversation.
if st.sidebar.button("Clear Conversation", key="clear"):
    st.session_state['messages'] = []
# Replay the stored transcript so the whole conversation stays visible
# after each rerun.
for entry in st.session_state['messages']:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

# Accept new input, echo it, run it through the chain, and render the reply.
if user_text := st.chat_input("You:"):
    st.session_state['messages'].append({"role": "user", "content": user_text})
    with st.chat_message("user"):
        st.markdown(user_text)

    reply = chatchain(user_text)["response"]
    st.session_state['messages'].append({"role": "assistant", "content": reply})
    with st.chat_message("assistant"):
        st.markdown(reply)