diff --git a/.env.example b/.env.example index 6e7676f..1681f46 100644 --- a/.env.example +++ b/.env.example @@ -10,9 +10,19 @@ AZURE_OPENAI_COMPLETION_DEPLOYMENT_NAME = " + 133577438666544464 + \ No newline at end of file diff --git a/labs/00-setup/README.md b/labs/00-setup/README.md index 3133620..c179791 100644 --- a/labs/00-setup/README.md +++ b/labs/00-setup/README.md @@ -107,6 +107,7 @@ With all of the above updates to the `.env` file made, make sure you save the fi **NOTE**: The `.gitignore` file in this repo is configured to ignore the `.env` file, so the secrets such as the API key will not be uploaded to a public repo. +You can update the rest of the properties later in the labs. ___ ## Next Section diff --git a/labs/02-integrating-ai/00-PythonModules/pythonmodules.ipynb b/labs/02-integrating-ai/00-PythonModules/pythonmodules.ipynb index 51c76db..1ad9791 100644 --- a/labs/02-integrating-ai/00-PythonModules/pythonmodules.ipynb +++ b/labs/02-integrating-ai/00-PythonModules/pythonmodules.ipynb @@ -55,7 +55,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.9" }, "orig_nbformat": 4 }, diff --git a/labs/02-integrating-ai/01-AzureOpenAIAPI/azureopenaiapi.ipynb b/labs/02-integrating-ai/01-AzureOpenAIAPI/azureopenaiapi.ipynb index 1fa5042..b972852 100644 --- a/labs/02-integrating-ai/01-AzureOpenAIAPI/azureopenaiapi.ipynb +++ b/labs/02-integrating-ai/01-AzureOpenAIAPI/azureopenaiapi.ipynb @@ -194,7 +194,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" }, "orig_nbformat": 4 }, diff --git a/labs/02-integrating-ai/02-OpenAIPackages/openai.ipynb b/labs/02-integrating-ai/02-OpenAIPackages/openai.ipynb index 69a2100..05b21a8 100644 --- a/labs/02-integrating-ai/02-OpenAIPackages/openai.ipynb +++ b/labs/02-integrating-ai/02-OpenAIPackages/openai.ipynb @@ -22,7 +22,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -168,7 +168,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.12.3" }, "orig_nbformat": 4 }, diff --git a/labs/02-integrating-ai/03-Langchain/langchain.ipynb b/labs/02-integrating-ai/03-Langchain/langchain.ipynb index 7906d25..d9db6ee 100644 --- a/labs/02-integrating-ai/03-Langchain/langchain.ipynb +++ b/labs/02-integrating-ai/03-Langchain/langchain.ipynb @@ -231,7 +231,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.11.8" }, "orig_nbformat": 4 }, diff --git a/labs/03-orchestration/02-Embeddings/embeddings.ipynb b/labs/03-orchestration/02-Embeddings/embeddings.ipynb index e61de2f..f417d2a 100644 --- a/labs/03-orchestration/02-Embeddings/embeddings.ipynb +++ b/labs/03-orchestration/02-Embeddings/embeddings.ipynb @@ -34,8 +34,7 @@ "\n", "# Create an instance of Azure OpenAI\n", "llm = AzureChatOpenAI(\n", - " azure_deployment = os.getenv(\"AZURE_OPENAI_COMPLETION_DEPLOYMENT_NAME\"),\n", - " temperature = 0\n", + " azure_deployment = os.getenv(\"AZURE_OPENAI_COMPLETION_DEPLOYMENT_NAME\")\n", ")" ] }, @@ -470,7 +469,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.12.3" }, "orig_nbformat": 4 }, diff --git a/labs/03-orchestration/03-VectorStore/mongo.ipynb b/labs/03-orchestration/03-VectorStore/mongo.ipynb new file mode 100644 index 
0000000..128edb6 --- /dev/null +++ b/labs/03-orchestration/03-VectorStore/mongo.ipynb @@ -0,0 +1,474 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Langchain with Azure Cosmos DB for MongoDB vCore\n", + "\n", + "## Setup, Vectorize and Load Data\n", + "\n", + "In this lab, we'll see how to leverage a sample dataset stored in Azure Cosmos DB for MongoDB to ground OpenAI models. We'll do this by taking advantage of Azure Cosmos DB for MongoDB vCore's [vector similarity search](https://learn.microsoft.com/azure/cosmos-db/mongodb/vcore/vector-search) functionality. \n", + "\n", + "In the Azure Portal, you will need to create an M40 cluster using Azure Cosmos DB for MongoDB vCore. Vector indexes can be created on M40 cluster tiers and higher. After the cluster is created, add the connection string to the `.env` file. \n", + "\n", + "Let's start by importing the modules we will use. \n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import ijson\n", + "from openai import AzureOpenAI\n", + "\n", + "from tenacity import retry, stop_after_attempt, wait_random_exponential\n", + "from time import sleep\n", + "\n", + "from langchain.chains import ConversationalRetrievalChain\n", + "from langchain.globals import set_llm_cache\n", + "from langchain.memory import ConversationBufferMemory\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.cache import AzureCosmosDBSemanticCache\n", + "from langchain_community.chat_message_histories import MongoDBChatMessageHistory\n", + "from langchain_community.vectorstores.azure_cosmos_db import (\n", + " AzureCosmosDBVectorSearch,\n", + " CosmosDBSimilarityType,\n", + " CosmosDBVectorSearchType)\n", + "from langchain_openai import AzureChatOpenAI, AzureOpenAIEmbeddings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will now load the values from the `.env` file in the root of the repository and instantiate the MongoDB and OpenAI clients." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import pymongo\n", + "from dotenv import load_dotenv\n", + "\n", + "# Load environment variables\n", + "if load_dotenv():\n", + " print(\"Found Azure OpenAI Endpoint: \" + os.getenv(\"AZURE_OPENAI_ENDPOINT\"))\n", + "else: \n", + " print(\"No file .env found\")\n", + "\n", + "cosmos_conn = os.getenv(\"MONGO_DB_CONNECTION_STRING\")\n", + "cosmos_database = os.getenv(\"MONGO_DB_database_name\")\n", + "cosmos_collection = os.getenv(\"MONGO_DB_collection_name\")\n", + "cosmos_vector_property = os.getenv(\"MONGO_DB_vector_property_name\")\n", + "cosmos_cache = os.getenv(\"MONGO_DB_cache_collection_name\")\n", + "\n", + "storage_file_url = os.getenv(\"storage_file_url\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_openai import AzureChatOpenAI\n", + "from langchain_openai import AzureOpenAIEmbeddings\n", + "\n", + "# Create the MongoDB client\n", + "cosmos_client = pymongo.MongoClient(cosmos_conn)\n", + "\n", + "# Create the OpenAI client\n", + "openai_client = AzureOpenAI(\n", + "\tazure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n", + "\tapi_key=os.getenv(\"OPENAI_API_KEY\"),\n", + "\tapi_version=os.getenv(\"OPENAI_API_VERSION\")\n", + ")\n", + "\n", + "# Create an Embeddings Instance of Azure OpenAI\n", + "embeddings = AzureOpenAIEmbeddings(\n",
+ " azure_deployment = os.getenv(\"AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME\"),\n", + " openai_api_version = os.getenv(\"OPENAI_EMBEDDING_API_VERSION\"),\n", + " model= os.getenv(\"AZURE_OPENAI_EMBEDDING_MODEL\")\n", + ")\n", + "\n", + "# Create a Chat Completion Instance of Azure OpenAI\n", + "llm = AzureChatOpenAI(\n", + " azure_deployment = os.getenv(\"AZURE_OPENAI_COMPLETION_DEPLOYMENT_NAME\")\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create a collection with a vector index\n", + "\n", + "This function takes a database object, a collection name, the name of the document property that will store vectors, and the number of vector dimensions used for the embeddings." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_collection_and_vector_index(database, cosmos_collection, vector_property, embeddings_dimensions):\n", + "\n", + " collection = database[cosmos_collection]\n", + "\n", + " database.command(\n", + " {\n", + " \"createIndexes\": cosmos_collection,\n", + " \"indexes\": [\n", + " {\n", + " \"name\": \"VectorSearchIndex\",\n", + " \"key\": {\n", + " vector_property: \"cosmosSearch\"\n", + " },\n", + " \"cosmosSearchOptions\": { \n", + " \"kind\": \"vector-hnsw\", \n", + " \"m\": 16, # default value \n", + " \"efConstruction\": 64, # default value \n", + " \"similarity\": \"COS\", \n", + " \"dimensions\": embeddings_dimensions # Number of dimensions for vector similarity. The maximum number of supported dimensions is 2000\n", + " } \n", + " } \n", + " ] \n", + " }\n", + " ) \n", + "\n", + " return collection\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create the Database and Collections with Vector Index\n", + "\n", + "In this lab, we will create two collections: one that will store the movie data with their embeddings, and another to store the prompts and the answers with their embeddings to implement a semantic cache.\n", + "\n", + "❓ What is semantic caching?\n", + "\n", + "Caching systems typically store commonly retrieved data for subsequent serving in an optimal manner. In the context of LLMs, a semantic cache maintains a cache of previously asked questions and responses, uses similarity measures to retrieve semantically similar queries from the cache, and responds with the cached responses if a match is found within the similarity threshold. If the cache cannot return a response, the answer comes from a fresh LLM call.\n", + "\n", + "👌 Benefits of semantic caching:\n", + "\n", + "- Cost optimization: Since the responses are served without invoking LLMs, there can be significant cost benefits for caching responses. We have come across use cases where customers have reported that 20-30% of the total queries from users can be served by the caching layer.\n", + "- Improvement in latency: LLMs are known to exhibit higher latencies to generate responses. This can be reduced by response caching, to the extent that queries are answered from the caching layer and not by invoking LLMs every time.\n", + "- Scaling: Since questions answered by a cache hit do not invoke LLMs, provisioned resources/endpoints are free to answer unseen/newer questions from users. This can be helpful when applications are scaled to handle more users.\n", + "- Consistency in responses: Since the caching layer answers from cached responses, there is no actual generation involved and the same response is provided to queries deemed semantically similar.\n",
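+ "\n", + "To make the idea concrete, below is a minimal, illustrative sketch of the hit/miss flow. The names `embed`, `call_llm` and the in-memory `cache` list are placeholders only; in this lab the actual implementation is `AzureCosmosDBSemanticCache`, backed by the cache collection created here.\n", + "\n", + "```python\n", + "def cosine_similarity(a, b):\n", + "    dot = sum(x * y for x, y in zip(a, b))\n", + "    return dot / ((sum(x * x for x in a) ** 0.5) * (sum(y * y for y in b) ** 0.5))\n", + "\n", + "def answer_with_cache(question, cache, embed, call_llm, threshold=0.99):\n", + "    # cache is a list of (embedding, answer) pairs; embed and call_llm stand in\n", + "    # for the embedding model and chat model used later in this lab\n", + "    q_vec = embed(question)\n", + "    for cached_vec, cached_answer in cache:\n", + "        if cosine_similarity(q_vec, cached_vec) >= threshold:\n", + "            return cached_answer   # cache hit: no LLM call needed\n", + "    answer = call_llm(question)    # cache miss: ask the LLM\n", + "    cache.append((q_vec, answer))  # store it for future similar questions\n", + "    return answer\n", + "```"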
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Check if the database already exists and drop it if it does\n", + "if cosmos_database in cosmos_client.list_database_names():\n", + " cosmos_client.drop_database(cosmos_database)\n", + "\n", + "# Create the database \n", + "database = cosmos_client[cosmos_database]\n", + "\n", + "# Create the data collection with vector index\n", + "collection = create_collection_and_vector_index(database, cosmos_collection, cosmos_vector_property, 1536)\n", + "\n", + "# Create the cache collection with vector index\n", + "cache = create_collection_and_vector_index(database, cosmos_cache, cosmos_vector_property, 1536)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Generate embeddings from Azure OpenAI\n", + "\n", + "The following function will generate embeddings for a given text. We add retry logic to handle any throttling due to quota limits." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "@retry(wait=wait_random_exponential(min=1, max=200), stop=stop_after_attempt(20))\n", + "def generate_embeddings(text):\n", + " \n", + " response = openai_client.embeddings.create(\n", + " input=text,\n", + " model=os.getenv(\"AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME\"),\n", + " dimensions=1536\n", + " )\n", + " \n", + " embeddings = response.model_dump()\n", + " return embeddings['data'][0]['embedding']" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Stream, vectorize & store\n", + "\n", + "In this lab we'll use a subset of the MovieLens dataset containing 5,000 movies. \n", + "We will stream the data out of blob storage, generate vectors for the overview field of each JSON document using the function above, and then store the documents in the Azure Cosmos DB for MongoDB collection."
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import urllib\n", + "# open the file and stream the data to ingest\n", + "stream = urllib.request.urlopen(storage_file_url)\n", + "\n", + "counter = 0\n", + "\n", + "# iterate through the stream, generate vectors and insert into collection\n", + "for object in ijson.items(stream, 'item', use_float=True):\n", + "\n", + " #generate embeddings\n", + " vectorArray = generate_embeddings(object['overview'])\n", + "\n", + " #add the document to the collection\n", + " object[cosmos_vector_property] = vectorArray\n", + "\n", + " #insert the document into the collection\n", + " collection.insert_one(object)\n", + "\n", + " counter += 1\n", + "\n", + " if counter % 100 == 0:\n", + " print(\"Inserted {} documents into collection: '{}'.\".format(counter, collection.name))\n", + " sleep(.5) # sleep for 0.5 seconds to help avoid rate limiting\n", + "\n", + "\n", + "print(\"Data inserted into collection: '{}'.\\n\".format(collection.name))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Configure Vector Search w/ LangChain" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cdb = AzureCosmosDBVectorSearch(\n", + " collection= cosmos_collection,\n", + " embedding=embeddings)\n", + "\n", + "vectorstore = cdb.from_connection_string(\n", + " connection_string=cosmos_conn,\n", + " namespace = cosmos_database + \".\" + cosmos_collection,\n", + " embedding = embeddings,\n", + " embedding_key = cosmos_vector_property,\n", + " text_key = \"overview\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup RAG and Semantic Caching with your LLM" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First let's write the prompt template to use for the LLM. We are setting up an AI assistant to help answer questions about our movies dataset. We ask to use the context of the retrieved documents from the vector store. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prompt_template = \"\"\"\n", + "You are an upbeat AI assistant who is excited to help answer questions about movies. \n", + "Use only the context which is the overview of the movies:\n", + "\n", + "{context},\n", + "\n", + "or this chat history\n", + "\n", + "{chat_history},\n", + "\n", + "to answer this question. \n", + "\n", + "Question: {question}\n", + "If you don't know the answer, just say that you don't know. Don't try to make up an answer.\n", + "\"\"\"\n", + "chatbot_prompt = PromptTemplate(\n", + " template = prompt_template, input_variables = [\"context\", \"question\", \"chat_history\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then let's prepare the chain. The `prepare_chain` function is responsible for setting up a retrieval chain. The function starts by creating a `retriever` object using the vectorstore.as_retriever method. This retriever is configured to use a similarity search with a score threshold of 0.2 and to return the top 5 most similar results (k=5).\n", + "\n", + "Next we have the `ConversationalRetrievalChain` object that is responsible for managing the retrieval of responsed in a conversational context. It is configured with the previously created `retriever` and is set to return the source documents of the retrieved responses. 
The `combine_docs_chain_kwargs` parameter is set to the final prompt of the `ConversationalRetrievalChain`. We set the verbose flag so that the final prompt and the documents retrieved for the LLM are printed. \n", + "\n", + "The next section sets up the semantic cache. There we use a similarity threshold of 0.99 to match the question asked. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def prepare_chain():\n", + " \n", + " retriever = vectorstore.as_retriever(\n", + " search_type = \"similarity\",\n", + " search_kwargs = {\"k\": 5, 'score_threshold': 0.2})\n", + "\n", + " sem_qa = ConversationalRetrievalChain.from_llm(\n", + " llm = llm,\n", + " chain_type = \"stuff\",\n", + " retriever = retriever,\n", + " return_source_documents = True,\n", + " combine_docs_chain_kwargs = {\"prompt\": chatbot_prompt},\n", + " verbose=True)\n", + "\n", + " similarity_algorithm = CosmosDBSimilarityType.COS\n", + " kind = CosmosDBVectorSearchType.VECTOR_IVF\n", + " num_lists = 1\n", + " score_threshold = 0.99\n", + "\n", + " sem_cache = AzureCosmosDBSemanticCache(\n", + " cosmosdb_connection_string = cosmos_conn,\n", + " cosmosdb_client = None,\n", + " embedding = embeddings,\n", + " database_name = cosmos_database, \n", + " collection_name = cosmos_cache,\n", + " similarity = similarity_algorithm,\n", + " num_lists = num_lists,\n", + " kind = kind,\n", + " dimensions = 1536,\n", + " score_threshold = score_threshold)\n", + "\n", + " set_llm_cache(sem_cache)\n", + "\n", + " return retriever, llm, sem_qa, sem_cache\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "retriever, llm, chain, sem_cache = prepare_chain()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's test the chatbot with a question:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "query = \"Tell me about films with Buzz Lightyear\"\n", + "response = chain.invoke({\"question\": query, 'chat_history': [] })\n", + "print (response['answer'])\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you ask a very similar question, you will notice how much faster the response is, as it comes from the semantic cache instead of a fresh LLM call.\n",
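+ "\n", + "If you want to confirm that answers are being cached, one quick (purely illustrative) check, using the `database` and `cosmos_cache` names defined earlier in this notebook, is to count the documents in the cache collection after running a few queries:\n", + "\n", + "```python\n", + "# Each cached prompt/response pair is stored as a document in the cache collection\n", + "print(database[cosmos_cache].count_documents({}))\n", + "```"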
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "query = \"Tell me something about films with Buzz Lightyear\"\n", + "response = chain.invoke({\"question\": query, 'chat_history': [] })\n", + "print (response['answer'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "query = \"Whose spaceship crashed on a desert planet\"\n", + "response = chain.invoke({\"question\": query, 'chat_history': [] })\n", + "print (response['answer'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Section\n", + "\n", + "📣 [Azure AI Search with Semantic Kernel and C#](../04-ACS/acs-sk-csharp.ipynb)\n", + "\n", + "📣 [Azure AI Search with Semantic Kernel and Python](../04-ACS/acs-sk-python.ipynb)\n", + "\n", + "📣 [Azure AI Search with Langchain and Python](../04-ACS/acs-lc-python.ipynb)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/labs/03-orchestration/03-Qdrant/movies.csv b/labs/03-orchestration/03-VectorStore/movies.csv similarity index 100% rename from labs/03-orchestration/03-Qdrant/movies.csv rename to labs/03-orchestration/03-VectorStore/movies.csv diff --git a/labs/03-orchestration/03-Qdrant/qdrant.ipynb b/labs/03-orchestration/03-VectorStore/qdrant.ipynb similarity index 99% rename from labs/03-orchestration/03-Qdrant/qdrant.ipynb rename to labs/03-orchestration/03-VectorStore/qdrant.ipynb index 5c8da42..27f1da6 100644 --- a/labs/03-orchestration/03-Qdrant/qdrant.ipynb +++ b/labs/03-orchestration/03-VectorStore/qdrant.ipynb @@ -5,7 +5,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# 03 - Langchain with Qdrant\n", + "# Langchain with Qdrant\n", "\n", "In this lab, we will do a deeper dive around the Qdrant vector store and different ways to interact with it. 
We'll look at how we can use different search methods to vary the results and how we can use the results with a large language model.\n", "\n", @@ -338,7 +338,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.11.8" }, "orig_nbformat": 4 }, diff --git a/labs/03-orchestration/06-Conversations/chat-conversation.ipynb b/labs/03-orchestration/06-Conversations/chat-conversation.ipynb index c4f8d90..84e6650 100644 --- a/labs/03-orchestration/06-Conversations/chat-conversation.ipynb +++ b/labs/03-orchestration/06-Conversations/chat-conversation.ipynb @@ -48,15 +48,41 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_message_histories import MongoDBChatMessageHistory\n", + "%pip install -U langchain-mongodb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "connection_string=os.getenv(\"MONGO_DB_CONNECTION_STRING\")\n", + "database_name=os.getenv(\"MONGO_DB_database_name\")\n", + "collection_name=os.getenv(\"MONGO_DB_chathistory_collection_name\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_mongodb import MongoDBChatMessageHistory\n", + "from langchain.memory import ConversationBufferMemory\n", "\n", "chat_message_history = MongoDBChatMessageHistory(\n", " session_id=session_id,\n", - " connection_string=os.getenv(\"MONGO_DB_CONNECTION_STRING\"),\n", - " database_name=\"my_db\",\n", - " collection_name=\"chat_histories\",\n", + " connection_string=connection_string,\n", + " database_name=database_name,\n", + " collection_name=collection_name,\n", ")\n", "\n", + "conversational_memory = ConversationBufferMemory(\n", + " chat_memory=chat_message_history,\n", + " memory_key='chat_history',\n", + " return_messages=True)\n", + "\n", "# Prompt: Create a conversation flow of 10 questions and answers between Luke and Yoda about the power of the force for creating cloud born applications on Azure.\n", "chat_message_history.add_user_message(\"Master, how can Azure help me in creating cloud-born applications?\")\n", "chat_message_history.add_ai_message(\"Azure, a powerful platform it is. Build, deploy, and manage applications across a global network of Microsoft-managed data centers, you can. With your preferred tools and frameworks, it works.\")\n", @@ -287,14 +313,14 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain_community.chat_message_histories import MongoDBChatMessageHistory\n", "from langchain_core.chat_history import BaseChatMessageHistory\n", + "from langchain_mongodb import MongoDBChatMessageHistory\n", "\n", "chat_message_history = MongoDBChatMessageHistory(\n", " session_id=session_id,\n", - " connection_string=os.getenv(\"MONGO_DB_CONNECTION_STRING\"),\n", - " database_name=\"my_db\",\n", - " collection_name=\"chat_histories\",\n", + " connection_string=connection_string,\n", + " database_name=database_name,\n", + " collection_name=collection_name,\n", ")\n", "\n", "# Prompt: Create a conversation flow of 10 questions and answers between Luke and Yoda about the power of the force for creating cloud born applications on Azure.\n", @@ -409,7 +435,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.7" + "version": "3.12.3" } }, "nbformat": 4,