diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index 1189ed8098af..1b1a44fa1bb6 100644 --- a/.github/workflows/python-integration-tests.yml +++ b/.github/workflows/python-integration-tests.yml @@ -73,14 +73,28 @@ jobs: - name: Install Ollama if: matrix.os == 'ubuntu-latest' run: | - curl -fsSL https://ollama.com/install.sh | sh - ollama serve & - sleep 5 + if ${{ vars.OLLAMA_MODEL != '' }}; then + curl -fsSL https://ollama.com/install.sh | sh + ollama serve & + sleep 5 + fi - name: Pull model in Ollama if: matrix.os == 'ubuntu-latest' run: | - ollama pull ${{ vars.OLLAMA_MODEL }} - ollama list + if ${{ vars.OLLAMA_MODEL != '' }}; then + ollama pull ${{ vars.OLLAMA_MODEL }} + ollama list + fi + - name: Google auth + uses: google-github-actions/auth@v2 + with: + project_id: ${{ vars.VERTEX_AI_PROJECT_ID }} + credentials_json: ${{ secrets.VERTEX_AI_SERVICE_ACCOUNT_KEY }} + - name: Set up gcloud + uses: google-github-actions/setup-gcloud@v2 + - name: Setup Redis Stack Server + if: matrix.os == 'ubuntu-latest' + run: docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest - name: Run Integration Tests id: run_tests shell: bash @@ -97,6 +111,7 @@ jobs: OPENAI_CHAT_MODEL_ID: ${{ vars.OPENAI_CHAT_MODEL_ID }} OPENAI_TEXT_MODEL_ID: ${{ vars.OPENAI_TEXT_MODEL_ID }} OPENAI_EMBEDDING_MODEL_ID: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }} + OPENAI_TEXT_TO_IMAGE_MODEL_ID: ${{ vars.OPENAI_TEXT_TO_IMAGE_MODEL_ID }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} PINECONE_API_KEY: ${{ secrets.PINECONE__APIKEY }} POSTGRES_CONNECTION_STRING: ${{secrets.POSTGRES__CONNECTIONSTR}} @@ -109,18 +124,37 @@ jobs: ACA_POOL_MANAGEMENT_ENDPOINT: ${{secrets.ACA_POOL_MANAGEMENT_ENDPOINT}} MISTRALAI_API_KEY: ${{secrets.MISTRALAI_API_KEY}} MISTRALAI_CHAT_MODEL_ID: ${{ vars.MISTRALAI_CHAT_MODEL_ID }} + MISTRALAI_EMBEDDING_MODEL_ID: ${{ vars.MISTRALAI_EMBEDDING_MODEL_ID }} + ANTHROPIC_API_KEY: ${{secrets.ANTHROPIC_API_KEY}} + ANTHROPIC_CHAT_MODEL_ID: ${{ vars.ANTHROPIC_CHAT_MODEL_ID }} OLLAMA_MODEL: "${{ matrix.os == 'ubuntu-latest' && vars.OLLAMA_MODEL || '' }}" # phi3 GOOGLE_AI_GEMINI_MODEL_ID: ${{ vars.GOOGLE_AI_GEMINI_MODEL_ID }} GOOGLE_AI_EMBEDDING_MODEL_ID: ${{ vars.GOOGLE_AI_EMBEDDING_MODEL_ID }} GOOGLE_AI_API_KEY: ${{ secrets.GOOGLE_AI_API_KEY }} + VERTEX_AI_PROJECT_ID: ${{ vars.VERTEX_AI_PROJECT_ID }} + VERTEX_AI_GEMINI_MODEL_ID: ${{ vars.VERTEX_AI_GEMINI_MODEL_ID }} + VERTEX_AI_EMBEDDING_MODEL_ID: ${{ vars.VERTEX_AI_EMBEDDING_MODEL_ID }} + REDIS_CONNECTION_STRING: ${{ vars.REDIS_CONNECTION_STRING }} run: | - if ${{ matrix.os == 'ubuntu-latest' }}; then - docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest - fi - cd python - poetry run pytest ./tests/integration -v - poetry run pytest ./tests/samples -v + poetry run pytest -n logical --dist loadfile --dist worksteal ./tests/integration ./tests/samples -v --junitxml=pytest.xml + - name: Surface failing tests + if: always() + uses: pmeier/pytest-results-action@main + with: + # A list of JUnit XML files, directories containing the former, and wildcard + # patterns to process. + # See @actions/glob for supported patterns. + path: python/pytest.xml + # (Optional) Add a summary of the results at the top of the report + summary: true + # (Optional) Select which results should be included in the report. + # Follows the same syntax as `pytest -r` + display-options: fEX + # (Optional) Fail the workflow if no JUnit XML was found. 
+ fail-on-empty: true + # (Optional) Title of the test results section in the workflow summary + title: Test results python-integration-tests: needs: paths-filter @@ -167,6 +201,15 @@ jobs: ollama pull ${{ vars.OLLAMA_MODEL }} ollama list + - name: Google auth + uses: google-github-actions/auth@v2 + with: + project_id: ${{ vars.VERTEX_AI_PROJECT_ID }} + credentials_json: ${{ secrets.VERTEX_AI_SERVICE_ACCOUNT_KEY }} + + - name: Set up gcloud + uses: google-github-actions/setup-gcloud@v2 + - name: Run Integration Tests id: run_tests shell: bash @@ -183,6 +226,7 @@ jobs: OPENAI_CHAT_MODEL_ID: ${{ vars.OPENAI_CHAT_MODEL_ID }} OPENAI_TEXT_MODEL_ID: ${{ vars.OPENAI_TEXT_MODEL_ID }} OPENAI_EMBEDDING_MODEL_ID: ${{ vars.OPENAI_EMBEDDING_MODEL_ID }} + OPENAI_TEXT_TO_IMAGE_MODEL_ID: ${{ vars.OPENAI_TEXT_TO_IMAGE_MODEL_ID }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} PINECONE_API_KEY: ${{ secrets.PINECONE__APIKEY }} POSTGRES_CONNECTION_STRING: ${{secrets.POSTGRES__CONNECTIONSTR}} @@ -195,18 +239,25 @@ jobs: ACA_POOL_MANAGEMENT_ENDPOINT: ${{secrets.ACA_POOL_MANAGEMENT_ENDPOINT}} MISTRALAI_API_KEY: ${{secrets.MISTRALAI_API_KEY}} MISTRALAI_CHAT_MODEL_ID: ${{ vars.MISTRALAI_CHAT_MODEL_ID }} + MISTRALAI_EMBEDDING_MODEL_ID: ${{ vars.MISTRALAI_EMBEDDING_MODEL_ID }} OLLAMA_MODEL: "${{ matrix.os == 'ubuntu-latest' && vars.OLLAMA_MODEL || '' }}" # phi3 GOOGLE_AI_GEMINI_MODEL_ID: ${{ vars.GOOGLE_AI_GEMINI_MODEL_ID }} GOOGLE_AI_EMBEDDING_MODEL_ID: ${{ vars.GOOGLE_AI_EMBEDDING_MODEL_ID }} GOOGLE_AI_API_KEY: ${{ secrets.GOOGLE_AI_API_KEY }} + VERTEX_AI_PROJECT_ID: ${{ vars.VERTEX_AI_PROJECT_ID }} + VERTEX_AI_GEMINI_MODEL_ID: ${{ vars.VERTEX_AI_GEMINI_MODEL_ID }} + VERTEX_AI_EMBEDDING_MODEL_ID: ${{ vars.VERTEX_AI_EMBEDDING_MODEL_ID }} + REDIS_CONNECTION_STRING: ${{ vars.REDIS_CONNECTION_STRING }} + ANTHROPIC_API_KEY: ${{secrets.ANTHROPIC_API_KEY}} + ANTHROPIC_CHAT_MODEL_ID: ${{ vars.ANTHROPIC_CHAT_MODEL_ID }} run: | if ${{ matrix.os == 'ubuntu-latest' }}; then docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest fi cd python - poetry run pytest ./tests/integration -v - poetry run pytest ./tests/samples -v + poetry run pytest -n logical --dist loadfile --dist worksteal ./tests/integration -v + poetry run pytest -n logical --dist loadfile --dist worksteal ./tests/samples -v # This final job is required to satisfy the merge queue. 
It must only run (or succeed) if no tests failed python-integration-tests-check: diff --git a/.github/workflows/python-test-coverage.yml b/.github/workflows/python-test-coverage.yml index d61da4f022a2..7d3c14ce783b 100644 --- a/.github/workflows/python-test-coverage.yml +++ b/.github/workflows/python-test-coverage.yml @@ -10,10 +10,6 @@ on: types: - in_progress -env: - PYTHON_VERSION: "3.10" - RUN_OS: ubuntu-latest - jobs: python-tests-coverage: runs-on: ubuntu-latest @@ -27,13 +23,13 @@ jobs: uses: lewagon/wait-on-check-action@v1.3.4 with: ref: ${{ github.event.pull_request.head.sha }} - check-name: 'Python Unit Tests (${{ env.PYTHON_VERSION }}, ${{ env.RUN_OS }}, false)' + check-name: 'Python Test Coverage' repo-token: ${{ secrets.GH_ACTIONS_PR_WRITE }} wait-interval: 90 allowed-conclusions: success - uses: actions/checkout@v4 - name: Setup filename variables - run: echo "FILE_ID=${{ github.event.number }}-${{ env.RUN_OS }}-${{ env.PYTHON_VERSION }}" >> $GITHUB_ENV + run: echo "FILE_ID=${{ github.event.number }}" >> $GITHUB_ENV - name: Download coverage uses: dawidd6/action-download-artifact@v3 with: @@ -57,9 +53,9 @@ jobs: github-token: ${{ secrets.GH_ACTIONS_PR_WRITE }} pytest-coverage-path: python-coverage.txt coverage-path-prefix: "python/" - title: "Python ${{ env.PYTHON_VERSION }} Test Coverage Report" - badge-title: "Py${{ env.PYTHON_VERSION }} Test Coverage" - junitxml-title: "Python ${{ env.PYTHON_VERSION }} Unit Test Overview" + title: "Python Test Coverage Report" + badge-title: "Python Test Coverage" + junitxml-title: "Python Unit Test Overview" junitxml-path: pytest.xml default-branch: "main" - unique-id-for-comment: python-${{ env.PYTHON_VERSION }} + unique-id-for-comment: python-test-coverage diff --git a/.github/workflows/python-unit-tests.yml b/.github/workflows/python-unit-tests.yml index 8e34ad0e9b5f..4137270c3796 100644 --- a/.github/workflows/python-unit-tests.yml +++ b/.github/workflows/python-unit-tests.yml @@ -18,7 +18,7 @@ jobs: os: [ubuntu-latest, windows-latest, macos-latest] experimental: [false] include: - - python-version: "3.13.0-beta.3" + - python-version: "3.13.0-beta.4" os: "ubuntu-latest" experimental: true permissions: @@ -28,8 +28,6 @@ jobs: working-directory: python steps: - uses: actions/checkout@v4 - - name: Setup filename variables - run: echo "FILE_ID=${{ github.event.number }}-${{ matrix.os }}-${{ matrix.python-version }}" >> $GITHUB_ENV - name: Install poetry run: pipx install poetry - name: Set up Python ${{ matrix.python-version }} @@ -40,8 +38,50 @@ jobs: - name: Install dependencies run: poetry install --with unit-tests - name: Test with pytest - run: poetry run pytest -q --junitxml=pytest.xml --cov=semantic_kernel --cov-report=term-missing:skip-covered ./tests/unit | tee python-coverage.txt + run: poetry run pytest --junitxml=pytest.xml ./tests/unit + - name: Surface failing tests + if: always() + uses: pmeier/pytest-results-action@main + with: + # A list of JUnit XML files, directories containing the former, and wildcard + # patterns to process. + # See @actions/glob for supported patterns. + path: python/pytest.xml + # (Optional) Add a summary of the results at the top of the report + summary: true + # (Optional) Select which results should be included in the report. + # Follows the same syntax as `pytest -r` + display-options: fEX + # (Optional) Fail the workflow if no JUnit XML was found. 
+ fail-on-empty: true + # (Optional) Title of the test results section in the workflow summary + title: Test results + python-test-coverage: + name: Python Test Coverage + runs-on: [ubuntu-latest] + continue-on-error: true + permissions: + contents: write + defaults: + run: + working-directory: python + steps: + - uses: actions/checkout@v4 + - name: Setup filename variables + run: echo "FILE_ID=${{ github.event.number }}" >> $GITHUB_ENV + - name: Install poetry + run: pipx install poetry + - name: Set up Python 3.10 + uses: actions/setup-python@v5 + with: + python-version: "3.10" + cache: "poetry" + - name: Install dependencies + run: poetry install --with unit-tests + - name: Test with pytest + run: poetry run pytest -q --junitxml=pytest.xml --cov=semantic_kernel --cov-report=term-missing:skip-covered ./tests/unit | tee python-coverage.txt - name: Upload coverage + if: always() uses: actions/upload-artifact@v4 with: name: python-coverage-${{ env.FILE_ID }}.txt @@ -49,6 +89,7 @@ jobs: overwrite: true retention-days: 1 - name: Upload pytest.xml + if: always() uses: actions/upload-artifact@v4 with: name: pytest-${{ env.FILE_ID }}.xml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5b983bf90ade..5fd6aa7d9377 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,7 +37,7 @@ repos: - id: pyupgrade args: [--py310-plus] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.5.2 + rev: v0.5.7 hooks: - id: ruff args: [ --fix, --exit-non-zero-on-fix ] diff --git a/README.md b/README.md index 215e13a43cce..2cc88b643a4a 100644 --- a/README.md +++ b/README.md @@ -2,12 +2,13 @@ ## Status - - Python
-[![Python package](https://img.shields.io/pypi/v/semantic-kernel)](https://pypi.org/project/semantic-kernel/) - - .NET
-[![Nuget package](https://img.shields.io/nuget/vpre/Microsoft.SemanticKernel)](https://www.nuget.org/packages/Microsoft.SemanticKernel/)[![dotnet Docker](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-docker.yml/badge.svg?branch=main)](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-docker.yml)[![dotnet Windows](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-windows.yml/badge.svg?branch=main)](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-windows.yml) +- Python
+ [![Python package](https://img.shields.io/pypi/v/semantic-kernel)](https://pypi.org/project/semantic-kernel/) +- .NET
[![Nuget package](https://img.shields.io/nuget/vpre/Microsoft.SemanticKernel)](https://www.nuget.org/packages/Microsoft.SemanticKernel/)[![dotnet Docker](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-docker.yml/badge.svg?branch=main)](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-docker.yml)[![dotnet Windows](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-windows.yml/badge.svg?branch=main)](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-windows.yml)

## Overview
+
[![License: MIT](https://img.shields.io/github/license/microsoft/semantic-kernel)](https://github.com/microsoft/semantic-kernel/blob/main/LICENSE)
[![Discord](https://img.shields.io/discord/1063152441819942922?label=Discord&logo=discord&logoColor=white&color=d82679)](https://aka.ms/SKDiscord)
@@ -27,8 +28,8 @@ plugins with AI. With Semantic Kernel, you can ask an LLM to generate a plan that achieves a user's
unique goal. Afterwards, Semantic Kernel will execute the plan for the user.
-
It provides:
+
- abstractions for AI services (such as chat, text to images, audio to text, etc.) and memory stores
- implementations of those abstractions for services from [OpenAI](https://platform.openai.com/docs/introduction), [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service), [Hugging Face](https://huggingface.co/), local models, and more, and for a multitude of vector databases, such as those from [Chroma](https://docs.trychroma.com/getting-started), [Qdrant](https://qdrant.tech/), [Milvus](https://milvus.io/), and [Azure](https://learn.microsoft.com/en-us/azure/search/search-what-is-azure-search)
- a common representation for [plugins](https://learn.microsoft.com/en-us/semantic-kernel/ai-orchestration/plugins), which can then be orchestrated automatically by AI
@@ -45,7 +46,7 @@ Semantic Kernel was designed to be future proof, easily connecting your code to

## Getting started with Semantic Kernel

-The Semantic Kernel SDK is available in C#, Python, and Java. To get started, choose your preferred language below. See the [Feature Matrix](https://learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages) to see a breakdown of
+The Semantic Kernel SDK is available in C#, Python, and Java. To get started, choose your preferred language below. See the [Feature Matrix](https://learn.microsoft.com/en-us/semantic-kernel/get-started/supported-languages) for a breakdown of
feature parity between our currently supported languages.
@@ -84,9 +85,9 @@ from either OpenAI or Azure OpenAI and to run one of the C#, Python, and Java co

### For Python:

-1. Go to the Quick start page [here](https://learn.microsoft.com/en-us/semantic-kernel/get-started/quick-start-guide?pivots=programming-language-csharp) and follow the steps to dive in.
-2. You'll need to ensure that you toggle to C# in the the Choose a programming language table at the top of the page.
-   ![csharpmap](https://learn.microsoft.com/en-us/semantic-kernel/media/pythonmap.png)
+1. Go to the Quick start page [here](https://learn.microsoft.com/en-us/semantic-kernel/get-started/quick-start-guide?pivots=programming-language-python) and follow the steps to dive in.
+2. You'll need to ensure that you toggle to Python in the Choose a programming language table at the top of the page.
+   ![pythonmap](https://learn.microsoft.com/en-us/semantic-kernel/media/pythonmap.png)

### For Java:

@@ -115,10 +116,11 @@ on our Learn site.
Each sample comes with a completed C# and Python project that
Finally, refer to our API references for more details on the C# and Python APIs:

- [C# API reference](https://learn.microsoft.com/en-us/dotnet/api/microsoft.semantickernel?view=semantic-kernel-dotnet)
-- Python API reference (coming soon)
+- [Python API reference](https://learn.microsoft.com/en-us/python/api/semantic-kernel/semantic_kernel?view=semantic-kernel-python)
- Java API reference (coming soon)

## Visual Studio Code extension: design semantic functions with ease
+
The Semantic Kernel extension for Visual Studio Code makes it easy to design and test semantic functions. The extension provides an interface for designing semantic functions and allows you to test them with the push of a button with your existing models and data.

## Join the community
diff --git a/TRANSPARENCY_FAQS.md b/TRANSPARENCY_FAQS.md
new file mode 100644
index 000000000000..a891ec68ec28
--- /dev/null
+++ b/TRANSPARENCY_FAQS.md
@@ -0,0 +1,70 @@
+# Semantic Kernel Responsible AI FAQs
+
+## What is Microsoft Semantic Kernel?
+Microsoft Semantic Kernel is a lightweight, open-source development kit designed to facilitate the integration of AI models into applications written in languages such as C#, Python, or Java.
+
+It serves as efficient middleware that supports developers in building AI agents, automating business processes, and connecting their code with the latest AI technologies. Input to this system can range from text data to structured commands, and it produces various outputs, including natural language responses, function calls, and other actionable data.
+
+## What can Microsoft Semantic Kernel do?
+Building upon its foundational capabilities, Microsoft Semantic Kernel facilitates several functionalities:
+- AI Agent Development: Users can create agents capable of performing specific tasks or interactions based on user input.
+- Function Invocation: It can automate code execution by calling functions based on AI model outputs.
+- Modular and Extensible: Developers can enhance functionality through plugins and a variety of pre-built connectors, providing flexibility in integrating additional AI services.
+- Multi-Modal Support: The kernel easily expands existing applications to support modalities like voice and video through its architecture.
+- Filtering: Developers can use filters to monitor the application, control function invocation or implement Responsible AI.
+- Prompt Templates: Developers can define their prompts using various template languages, including Handlebars and Liquid, or the built-in Semantic Kernel format.
+
+## What is/are Microsoft Semantic Kernel’s intended use(s)?
+The intended uses of Microsoft Semantic Kernel include:
+- Production Ready Applications: Building small to large enterprise-scale solutions that can leverage advanced AI model capabilities.
+- Automation of Business Processes: Facilitating quick and efficient automation of workflows and tasks within organizations.
+- Integration of AI Services: Connecting client code with a variety of pre-built AI services and capabilities for rapid development.
+
+## How was Microsoft Semantic Kernel evaluated? What metrics are used to measure performance?
+Microsoft Semantic Kernel was reviewed for reliability and performance metrics that include:
+- Accuracy: Evaluated based on the correctness of the outputs generated against known facts.
+- Integration Speed: Assessed by the time taken to integrate AI models and initiate functional outputs based on telemetry.
+- Performance Consistency: Measurements taken to verify the system's reliability based on telemetry.
+
+## What are the limitations of Microsoft Semantic Kernel?
+Semantic Kernel integrates with Large Language Models (LLMs) to allow AI capabilities to be added to existing applications.
+LLMs have some inherent limitations, such as:
+- Contextual Misunderstanding: The system may struggle with nuanced requests, particularly those involving complex context.
+- Bias in LLM Outputs: Historical biases in the training data can inadvertently influence model outputs.
+  - Users can mitigate these issues by:
+    - Formulating clear and explicit queries.
+    - Regularly reviewing AI-generated outputs to identify and rectify biases or inaccuracies.
+    - Providing relevant information when prompting the LLM so that it can base its responses on this data.
+- Not all LLMs support all features uniformly, e.g., function calling.
+Semantic Kernel is constantly evolving and adding new features, so:
+- There are some components still being developed, e.g., support for some modalities such as Video and Classification, memory connectors for certain Vector databases, and AI connectors for certain AI services.
+- There are some components that are still experimental; these are clearly flagged and are subject to change.
+
+## What operational factors and settings allow for effective and responsible use of Microsoft Semantic Kernel?
+Operational factors and settings for optimal use include:
+- Custom Configuration Options: Users can tailor system parameters to match specific application needs, such as output style or verbosity.
+- Safe Operating Parameters: The system operates best within defined ranges of input complexity and length, ensuring reliability and safety.
+- Real-Time Monitoring: System behavior should be regularly monitored to detect unexpected patterns or malfunctions promptly.
+- Incorporate RAI and safety tools like Prompt Shield with filters to ensure responsible use.
+
+### Plugins and Extensibility
+
+#### What are plugins and how does Microsoft Semantic Kernel use them?
+Plugins are API calls that enhance and extend the capabilities of Microsoft Semantic Kernel by integrating with other services. They can be developed internally or by third-party developers, offering functionalities that users can toggle on or off based on their requirements. The kernel supports OpenAPI specifications, allowing for easy integration and sharing of plugins within developer teams.
+
+#### What data can Microsoft Semantic Kernel provide to plugins? What permissions do Microsoft Semantic Kernel plugins have?
+Plugins can access essential user information necessary for their operation, such as:
+- Input Context: Information directly related to the queries and commands issued to the system.
+- Execution Data: Results and performance metrics from previous operations, provided they adhere to user privacy standards. Developers retain control over plugin permissions, choosing what information plugins can access or transmit, ensuring compliance with data protection protocols.
+- Semantic Kernel supports filters, which allow developers to integrate with RAI solutions.
+
+#### What kinds of issues may arise when using Microsoft Semantic Kernel enabled with plugins?
+Potential issues that may arise include:
+- Invocation Failures: Incorrectly triggered plugins can result in unexpected outputs.
+- Output Misinformation: Errors in plugin handling can lead to the generation of inaccurate or misleading results.
+- Dependency Compatibility: Changes in external dependencies may affect plugin functionality. To prevent these issues, users are advised to keep plugins updated and to rigorously test their implementations for stability and accuracy.
diff --git a/docs/decisions/0050-updated-vector-store-design.md b/docs/decisions/0050-updated-vector-store-design.md
new file mode 100644
index 000000000000..c008068b1e95
--- /dev/null
+++ b/docs/decisions/0050-updated-vector-store-design.md
@@ -0,0 +1,995 @@
+---
+# These are optional elements. Feel free to remove any of them.
+status: proposed
+contact: westey-m
+date: 2024-06-05
+deciders: sergeymenshykh, markwallace, rbarreto, dmytrostruk, westey-m, matthewbolanos, eavanvalkenburg
+consulted: stephentoub, dluc, ajcvickers, roji
+informed:
+---
+
+# Updated Memory Connector Design
+
+## Context and Problem Statement
+
+Semantic Kernel has a collection of connectors to popular Vector databases, e.g. Azure AI Search, Chroma, Milvus, ...
+Each Memory connector implements a memory abstraction defined by Semantic Kernel and allows developers to easily integrate Vector databases into their applications.
+The current abstractions are experimental and the purpose of this ADR is to progress the design of the abstractions so that they can graduate to non-experimental status.
+
+### Problems with current design
+
+1. The `IMemoryStore` interface has four responsibilities with different cardinalities. Some are schema aware and others schema agnostic.
+2. The `IMemoryStore` interface only supports a fixed schema for data storage, retrieval and search (see the sketch after this list), which limits its usability by customers with existing data sets.
+3. The `IMemoryStore` implementations are opinionated around key encoding / decoding and collection name sanitization, which limits its usability by customers with existing data sets.
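+
+To make the fixed-schema limitation concrete, the sketch below is a simplified reduction of the fixed record shape that `IMemoryStore` works with today (the real types are `MemoryRecord` and `MemoryRecordMetadata`; members are abbreviated here for illustration):
+
+```cs
+// Simplified sketch of the fixed schema used by IMemoryStore today.
+// Every record must fit this shape, regardless of how the customer's
+// existing index is actually structured.
+class MemoryRecord
+{
+    public string Key { get; set; }                      // store key
+    public MemoryRecordMetadata Metadata { get; set; }   // fixed metadata fields
+    public ReadOnlyMemory<float> Embedding { get; set; } // single vector only
+    public DateTimeOffset? Timestamp { get; set; }
+}
+
+class MemoryRecordMetadata
+{
+    public string Id { get; set; }
+    public string Text { get; set; }
+    public string Description { get; set; }
+    public string AdditionalMetadata { get; set; }
+    public string ExternalSourceName { get; set; }
+    public bool IsReference { get; set; }
+}
+```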
+
+Responsibilities:
+
+|Functional Area|Cardinality|Significance to Semantic Kernel|
+|-|-|-|
+|Collection/Index create|An implementation per store type and model|Valuable when building a store and adding data|
+|Collection/Index list names, exists and delete|An implementation per store type|Valuable when building a store and adding data|
+|Data Storage and Retrieval|An implementation per store type|Valuable when building a store and adding data|
+|Vector Search|An implementation per store type, model and search type|Valuable for many scenarios including RAG, finding contradictory facts based on user input, finding similar memories to merge, etc.|
+
+### Memory Store Today
+```cs
+interface IMemoryStore
+{
+    // Collection / Index Management
+    Task CreateCollectionAsync(string collectionName, CancellationToken cancellationToken = default);
+    IAsyncEnumerable<string> GetCollectionsAsync(CancellationToken cancellationToken = default);
+    Task<bool> DoesCollectionExistAsync(string collectionName, CancellationToken cancellationToken = default);
+    Task DeleteCollectionAsync(string collectionName, CancellationToken cancellationToken = default);
+
+    // Data Storage and Retrieval
+    Task<string> UpsertAsync(string collectionName, MemoryRecord record, CancellationToken cancellationToken = default);
+    IAsyncEnumerable<string> UpsertBatchAsync(string collectionName, IEnumerable<MemoryRecord> records, CancellationToken cancellationToken = default);
+    Task<MemoryRecord?> GetAsync(string collectionName, string key, bool withEmbedding = false, CancellationToken cancellationToken = default);
+    IAsyncEnumerable<MemoryRecord> GetBatchAsync(string collectionName, IEnumerable<string> keys, bool withVectors = false, CancellationToken cancellationToken = default);
+    Task RemoveAsync(string collectionName, string key, CancellationToken cancellationToken = default);
+    Task RemoveBatchAsync(string collectionName, IEnumerable<string> keys, CancellationToken cancellationToken = default);
+
+    // Vector Search
+    IAsyncEnumerable<(MemoryRecord, double)> GetNearestMatchesAsync(
+        string collectionName,
+        ReadOnlyMemory<float> embedding,
+        int limit,
+        double minRelevanceScore = 0.0,
+        bool withVectors = false,
+        CancellationToken cancellationToken = default);
+
+    Task<(MemoryRecord, double)?> GetNearestMatchAsync(
+        string collectionName,
+        ReadOnlyMemory<float> embedding,
+        double minRelevanceScore = 0.0,
+        bool withEmbedding = false,
+        CancellationToken cancellationToken = default);
+}
+```
+
+### Actions
+
+1. The `IMemoryStore` should be split into different interfaces, so that schema aware and schema agnostic operations are separated.
+2. The **Data Storage and Retrieval** and **Vector Search** areas should allow typed access to data and support any schema that is currently available in the customer's data store.
+3. The collection / index create functionality should allow developers to use a common definition that is part of the abstraction to create collections.
+4. The collection / index list/exists/delete functionality should allow management of any collection regardless of schema.
+5. Remove opinionated behaviors from connectors. The opinionated behavior limits the ability of these connectors to be used with pre-existing vector databases. As far as possible these behaviors should be moved into decorators or be injectable. Examples of opinionated behaviors:
+    1. The AzureAISearch connector encodes keys before storing and decodes them after retrieval, since keys in Azure AI Search support a limited set of characters.
+    2. The AzureAISearch connector sanitizes collection names before using them, since Azure AI Search supports a limited set of characters.
+    3. The Redis connector prepends the collection name onto the front of keys before storing records and also registers the collection name as a prefix for records to be indexed by the index.
+
+### Non-functional requirements for new connectors
+
+1. Ensure all connectors are throwing the same exceptions consistently, with data about the request made provided in a consistent manner.
+2. Add consistent telemetry for all connectors.
+3. As far as possible, integration tests should be runnable on the build server.
+
+### New Designs
+
+The separation between collection/index management and record management:
+
+```mermaid
+---
+title: SK Collection/Index and record management
+---
+classDiagram
+    note for IVectorRecordStore "Can manage records for any scenario"
+    note for IVectorCollectionCreate "Can create collections and\nindexes"
+    note for IVectorCollectionNonSchema "Can retrieve/delete any collections and\nindexes"
+
+    namespace SKAbstractions{
+        class IVectorCollectionCreate{
+            <<interface>>
+            +CreateCollection
+        }
+
+        class IVectorCollectionNonSchema{
+            <<interface>>
+            +GetCollectionNames
+            +CollectionExists
+            +DeleteCollection
+        }
+
+        class IVectorRecordStore~TModel~{
+            <<interface>>
+            +Upsert(TModel record) string
+            +UpsertBatch(TModel[] records) string[]
+            +Get(string key) TModel
+            +GetBatch(string[] keys) TModel[]
+            +Delete(string key)
+            +DeleteBatch(string[] keys)
+        }
+    }
+
+    namespace AzureAIMemory{
+        class AzureAISearchVectorCollectionCreate{
+        }
+
+        class AzureAISearchVectorCollectionNonSchema{
+        }
+
+        class AzureAISearchVectorRecordStore{
+        }
+    }
+
+    namespace RedisMemory{
+        class RedisVectorCollectionCreate{
+        }
+
+        class RedisVectorCollectionNonSchema{
+        }
+
+        class RedisVectorRecordStore{
+        }
+    }
+
+    IVectorCollectionCreate <|-- AzureAISearchVectorCollectionCreate
+    IVectorCollectionNonSchema <|-- AzureAISearchVectorCollectionNonSchema
+    IVectorRecordStore <|-- AzureAISearchVectorRecordStore
+
+    IVectorCollectionCreate <|-- RedisVectorCollectionCreate
+    IVectorCollectionNonSchema <|-- RedisVectorCollectionNonSchema
+    IVectorRecordStore <|-- RedisVectorRecordStore
+```
+
+How to use your own schema with core SK functionality:
+
+```mermaid
+---
+title: Chat History Break Glass
+---
+classDiagram
+    note for IVectorRecordStore "Can manage records\nfor any scenario"
+    note for IVectorCollectionCreate "Can create collections\nand indexes"
+    note for IVectorCollectionNonSchema "Can retrieve/delete any\ncollections and indexes"
+    note for CustomerHistoryVectorCollectionCreate "Creates history collections and indices\nusing Customer requirements"
+    note for CustomerHistoryVectorRecordStore "Decorator class for IVectorRecordStore that maps\nbetween the customer model and our model"
+
+    namespace SKAbstractions{
+        class IVectorCollectionCreate{
+            <<interface>>
+            +CreateCollection
+        }
+
+        class IVectorCollectionNonSchema{
+            <<interface>>
+            +GetCollectionNames
+            +CollectionExists
+            +DeleteCollection
+        }
+
+        class IVectorRecordStore~TModel~{
+            <<interface>>
+            +Upsert(TModel record) string
+            +Get(string key) TModel
+            +Delete(string key) string
+        }
+
+        class ISemanticTextMemory{
+            <<interface>>
+            +SaveInformationAsync()
+            +SaveReferenceAsync()
+            +GetAsync()
+            +DeleteAsync()
+            +SearchAsync()
+            +GetCollectionsAsync()
+        }
+    }
+
+    namespace CustomerProject{
+        class CustomerHistoryModel{
+            +string text
+            +float[] vector
+            +Dictionary~string, string~ properties
+        }
+
+        class CustomerHistoryVectorCollectionCreate{
+            +CreateCollection
+        }
+
+        class CustomerHistoryVectorRecordStore{
+            -IVectorRecordStore~CustomerHistoryModel~ _store
+            +Upsert(ChatHistoryModel record) string
+            +Get(string key) ChatHistoryModel
+            +Delete(string key) string
+        }
+    }
+
+    namespace SKCore{
+        class SemanticTextMemory{
+            -IVectorRecordStore~ChatHistoryModel~ _VectorRecordStore
+            -IMemoryCollectionService _collectionsService
+            -ITextEmbeddingGenerationService _embeddingGenerationService
+        }
+
+        class ChatHistoryPlugin{
+            -ISemanticTextMemory memory
+        }
+
+        class ChatHistoryModel{
+            +string message
+            +float[] embedding
+            +Dictionary~string, string~ metadata
+        }
+    }
+
+    IVectorCollectionCreate <|-- CustomerHistoryVectorCollectionCreate
+
+    IVectorRecordStore <|-- CustomerHistoryVectorRecordStore
+    IVectorRecordStore <.. CustomerHistoryVectorRecordStore
+    CustomerHistoryModel <.. CustomerHistoryVectorRecordStore
+    ChatHistoryModel <.. CustomerHistoryVectorRecordStore
+
+    ChatHistoryModel <.. SemanticTextMemory
+    IVectorRecordStore <.. SemanticTextMemory
+    IVectorCollectionCreate <.. SemanticTextMemory
+
+    ISemanticTextMemory <.. ChatHistoryPlugin
+```
+
+### Vector Store Cross Store support - General Features
+
+A comparison of the different ways in which stores implement storage capabilities, to help drive decisions:
+
+|Feature|Azure AI Search|Weaviate|Redis|Chroma|FAISS|Pinecone|LLamaIndex|PostgreSql|Qdrant|Milvus|
+|-|-|-|-|-|-|-|-|-|-|-|
+|Get Item Support|Y|Y|Y|Y||Y||Y|Y|Y|
+|Batch Operation Support|Y|Y|Y|Y||Y||||Y|
+|Per Item Results for Batch Operations|Y|Y|Y|N||N|||||
+|Keys of upserted records|Y|Y|N3|N3||N3||||Y|
+|Keys of removed records|Y||N3|N||N||||N3|
+|Retrieval field selection for gets|Y||Y4|P2||N||Y|Y|Y|
+|Include/Exclude Embeddings for gets|P1|Y|Y4,1|Y||N||P1|Y|N|
+|Failure reasons when batch partially fails|Y|Y|Y|N||N|||||
+|Is Key separate from data|N|Y|Y|Y||Y||N|Y|N|
+|Can Generate Ids|N|Y|N|N||Y||Y|N|Y|
+|Can Generate Embedding|Not Available Via API yet|Y|N|Client Side Abstraction|||||N||
+
+Footnotes:
+- P = Partial Support
+- 1 Only if you have the schema, to select the appropriate fields.
+- 2 Supports broad categories of fields only.
+- 3 Id is required in request, so can be returned if needed.
+- 4 No strongly typed support when specifying field list.
+
+### Vector Store Cross Store support - Fields, types and indexing
+
+|Feature|Azure AI Search|Weaviate|Redis|Chroma|FAISS|Pinecone|LLamaIndex|PostgreSql|Qdrant|Milvus|
+|-|-|-|-|-|-|-|-|-|-|-|
+|Field Differentiation|Fields|Key, Props, Vectors|Key, Fields|Key, Document, Metadata, Vector||Key, Metadata, SparseValues, Vector||Fields|Key, Props(Payload), Vectors|Fields|
+|Multiple Vector per record support|Y|Y|Y|N||[N](https://docs.pinecone.io/guides/data/upsert-data#upsert-records-with-metadata)||Y|Y|Y|
+|Index to Collection|1 to 1|1 to 1|1 to many|1 to 1|-|1 to 1|-|1 to 1|1 to 1|1 to 1|
+|Id Type|String|UUID|string with collection name prefix|string||string|UUID|64Bit Int / UUID / ULID|64Bit Unsigned Int / UUID|Int64 / varchar|
+|Supported Vector Types|[Collection(Edm.Byte) / Collection(Edm.Single) / Collection(Edm.Half) / Collection(Edm.Int16) / Collection(Edm.SByte)](https://learn.microsoft.com/en-us/rest/api/searchservice/supported-data-types)|float32|FLOAT32 and FLOAT64|||[Rust f32](https://docs.pinecone.io/troubleshooting/embedding-values-changed-when-upserted)||[single-precision (4 byte float) / half-precision (2 byte float) / binary (1bit) / sparse vectors (4 bytes)](https://github.com/pgvector/pgvector?tab=readme-ov-file#pgvector)|UInt8 / Float32|Binary / Float32 / Float16 / BFloat16 / SparseFloat|
+|Supported Distance Functions|[Cosine / dot prod / euclidean dist (l2 norm)](https://learn.microsoft.com/en-us/azure/search/vector-search-ranking#similarity-metrics-used-to-measure-nearness)|[Cosine dist / dot prod / Squared L2 dist / hamming (num of diffs) / manhattan dist](https://weaviate.io/developers/weaviate/config-refs/distances#available-distance-metrics)|[Euclidean dist (L2) / Inner prod (IP) / Cosine dist](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/vectors/)|[Squared L2 / Inner prod / Cosine similarity](https://docs.trychroma.com/guides#changing-the-distance-function)||[cosine sim / euclidean dist / dot prod](https://docs.pinecone.io/reference/api/control-plane/create_index)||[L2 dist / inner prod / cosine dist / L1 dist / Hamming dist / Jaccard dist (NB: Specified at query time, not index creation time)](https://github.com/pgvector/pgvector?tab=readme-ov-file#pgvector)|[Dot prod / Cosine sim / Euclidean dist (L2) / Manhattan dist](https://qdrant.tech/documentation/concepts/search/)|[Cosine sim / Euclidean dist / Inner Prod](https://milvus.io/docs/index-vector-fields.md)|
+|Supported index types|[Exhaustive KNN (FLAT) / HNSW](https://learn.microsoft.com/en-us/azure/search/vector-search-ranking#algorithms-used-in-vector-search)|[HNSW / Flat / Dynamic](https://weaviate.io/developers/weaviate/config-refs/schema/vector-index)|[HNSW / FLAT](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/vectors/#create-a-vector-field)|[HNSW not configurable](https://cookbook.chromadb.dev/core/concepts/#vector-index-hnsw-index)||[PGA](https://www.pinecone.io/blog/hnsw-not-enough/)||[HNSW / IVFFlat](https://github.com/pgvector/pgvector?tab=readme-ov-file#indexing)|[HNSW for dense](https://qdrant.tech/documentation/concepts/indexing/#vector-index)|[In Memory: FLAT / IVF_FLAT / IVF_SQ8 / IVF_PQ / HNSW / SCANN](https://milvus.io/docs/index.md)<br/>[On Disk: DiskANN](https://milvus.io/docs/disk_index.md)<br/>[GPU: GPU_CAGRA / GPU_IVF_FLAT / GPU_IVF_PQ / GPU_BRUTE_FORCE](https://milvus.io/docs/gpu_index.md)|
+
+Footnotes:
+- HNSW = Hierarchical Navigable Small World (HNSW performs an [approximate nearest neighbor (ANN)](https://learn.microsoft.com/en-us/azure/search/vector-search-overview#approximate-nearest-neighbors) search)
+- KNN = k-nearest neighbors (performs a brute-force search that scans the entire vector space)
+- IVFFlat = Inverted File with Flat Compression (This index type uses approximate nearest neighbor search (ANNS) to provide fast searches)
+- Weaviate Dynamic = Starts as flat and switches to HNSW if the number of objects exceeds a limit
+- PGA = [Pinecone Graph Algorithm](https://www.pinecone.io/blog/hnsw-not-enough/)
+
+### Vector Store Cross Store support - Search and filtering
+
+|Feature|Azure AI Search|Weaviate|Redis|Chroma|FAISS|Pinecone|LLamaIndex|PostgreSql|Qdrant|Milvus|
+|-|-|-|-|-|-|-|-|-|-|-|
+|Index allows text search|Y|Y|Y|Y (On Metadata by default)||[Only in combination with Vector](https://docs.pinecone.io/guides/data/understanding-hybrid-search)||Y (with TSVECTOR field)|Y|Y|
+|Text search query format|[Simple or Full Lucene](https://learn.microsoft.com/en-us/azure/search/search-query-create?tabs=portal-text-query#choose-a-query-type-simple--full)|[wildcard](https://weaviate.io/developers/weaviate/search/filters#filter-text-on-partial-matches)|wildcard & fuzzy|[contains & not contains](https://docs.trychroma.com/guides#filtering-by-document-contents)||Text only||[wildcard & binary operators](https://www.postgresql.org/docs/16/textsearch-controls.html#TEXTSEARCH-PARSING-QUERIES)|[Text only](https://qdrant.tech/documentation/concepts/filtering/#full-text-match)|[wildcard](https://milvus.io/docs/single-vector-search.md#Filtered-search)|
+|Multi Field Vector Search Support|Y|[N](https://weaviate.io/developers/weaviate/search/similarity)||N (no multi vector support)||N||[Unclear due to order by syntax](https://github.com/pgvector/pgvector?tab=readme-ov-file#querying)|[N](https://qdrant.tech/documentation/concepts/search/)|[Y](https://milvus.io/api-reference/restful/v2.4.x/v2/Vector%20(v2)/Hybrid%20Search.md)|
+|Targeted Multi Field Text Search Support|Y|[Y](https://weaviate.io/developers/weaviate/search/hybrid#set-weights-on-property-values)|[Y](https://redis.io/docs/latest/develop/interact/search-and-query/advanced-concepts/query_syntax/#field-modifiers)|N (only on document)||N||Y|Y|Y|
+|Vector per Vector Field for Search|Y|N/A||N/A|||N/A||N/A|N/A|[Y](https://milvus.io/docs/multi-vector-search.md#Step-1-Create-Multiple-AnnSearchRequest-Instances)|
+|Separate text search query from vectors|Y|[Y](https://weaviate.io/developers/weaviate/search/hybrid#specify-a-search-vector)|Y|Y||Y||Y|Y|[Y](https://milvus.io/api-reference/restful/v2.4.x/v2/Vector%20(v2)/Hybrid%20Search.md)|
+|Allows filtering|Y|Y|Y (on TAG)|Y (On Metadata by default)||[Y](https://docs.pinecone.io/guides/indexes/configure-pod-based-indexes#selective-metadata-indexing)||Y|Y|Y|
+|Allows filter grouping|Y (Odata)|[Y](https://weaviate.io/developers/weaviate/search/filters#nested-filters)||[Y](https://docs.trychroma.com/guides#using-logical-operators)||Y||Y|[Y](https://qdrant.tech/documentation/concepts/filtering/#clauses-combination)|[Y](https://milvus.io/docs/get-and-scalar-query.md#Use-Basic-Operators)|
+|Allows scalar index field setup|Y|Y|Y|N||Y||Y|Y|Y|
+|Requires scalar index field setup to filter|Y|Y|Y|N||N (on by default for all)||N|N|N (can filter without index)|
+
+### Support for different mappers
+
+Mapping between data models and the storage models can also require custom logic, depending on
the type of data model and storage model involved.
+
+I'm therefore proposing that we allow mappers to be injectable for each `VectorStoreCollection` instance. The interfaces for these would vary depending
+on the storage models used by each vector store and any unique capabilities that each vector store may have, e.g. Qdrant can operate in `single` or
+`multiple named vector` modes, which means the mapper needs to know whether to set a single vector or fill a vector map.
+
+In addition to this, we should build first-party mappers for each of the vector stores, which will cater for built-in, generic models or use metadata to perform the mapping.
+
+### Support for different storage schemas
+
+The different stores vary in many ways around how data is organized.
+- Some just store a record with fields on it, where fields can be a key or a data field or a vector, and their type is determined at collection creation time.
+- Others separate fields by type when interacting with the API, e.g. you have to specify a key explicitly, put metadata into a metadata dictionary and put vectors into a vector array.
+
+I'm proposing that we allow two ways in which to provide the information required to map data between the consumer data model and the storage data model.
+The first is a set of configuration objects that capture the types of each field. The second would be a set of attributes that can be used to decorate the model itself
+and can be converted to the configuration objects, allowing a single execution path.
+Additional configuration properties can easily be added for each type of field as required, e.g. IsFilterable or IsFullTextSearchable, allowing us to also create an index from the provided configuration.
+
+I'm also proposing that even though similar attributes already exist in other systems, e.g. System.ComponentModel.DataAnnotations.KeyAttribute, we create our own.
+We will likely require additional properties on all these attributes that are not currently supported on the existing attributes, e.g. whether a field is or
+should be filterable. Requiring users to switch to new attributes later will be disruptive.
+
+Here is what the attributes would look like, plus a sample use case.
+
+```cs
+sealed class VectorStoreRecordKeyAttribute : Attribute
+{
+}
+sealed class VectorStoreRecordDataAttribute : Attribute
+{
+    public bool HasEmbedding { get; set; }
+    public string EmbeddingPropertyName { get; set; }
+}
+sealed class VectorStoreRecordVectorAttribute : Attribute
+{
+}
+
+public record HotelInfo(
+    [property: VectorStoreRecordKey, JsonPropertyName("hotel-id")] string HotelId,
+    [property: VectorStoreRecordData, JsonPropertyName("hotel-name")] string HotelName,
+    [property: VectorStoreRecordData(HasEmbedding = true, EmbeddingPropertyName = "DescriptionEmbeddings"), JsonPropertyName("description")] string Description,
+    [property: VectorStoreRecordVector, JsonPropertyName("description-embeddings")] ReadOnlyMemory<float>? DescriptionEmbeddings);
+```
+
+Here is what the configuration objects would look like.
+
+```cs
+abstract class VectorStoreRecordProperty(string propertyName);
+
+sealed class VectorStoreRecordKeyProperty(string propertyName): VectorStoreRecordProperty(propertyName)
+{
+}
+sealed class VectorStoreRecordDataProperty(string propertyName): VectorStoreRecordProperty(propertyName)
+{
+    bool HasEmbedding;
+    string EmbeddingPropertyName;
+}
+sealed class VectorStoreRecordVectorProperty(string propertyName): VectorStoreRecordProperty(propertyName)
+{
+}
+
+sealed class VectorStoreRecordDefinition
+{
+    IReadOnlyList<VectorStoreRecordProperty> Properties;
+}
+```
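+
+For illustration, here is a minimal sketch (not part of the proposal text) of how the same `HotelInfo` schema from the attribute sample above could be expressed with these configuration objects, assuming the `HasEmbedding`/`EmbeddingPropertyName` members and the `Properties` list are exposed as settable properties:
+
+```cs
+// Equivalent of the attribute-decorated HotelInfo model, built explicitly.
+// Useful when the consumer cannot (or does not want to) modify the model type.
+var hotelDefinition = new VectorStoreRecordDefinition
+{
+    Properties = new List<VectorStoreRecordProperty>
+    {
+        new VectorStoreRecordKeyProperty("HotelId"),
+        new VectorStoreRecordDataProperty("HotelName"),
+        new VectorStoreRecordDataProperty("Description")
+        {
+            HasEmbedding = true,
+            EmbeddingPropertyName = "DescriptionEmbeddings",
+        },
+        new VectorStoreRecordVectorProperty("DescriptionEmbeddings"),
+    },
+};
+```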
+
+### Notable method signature changes from existing interface
+
+All methods currently existing on IMemoryStore will be ported to new interfaces, but in places I am proposing that we make changes to improve
+consistency and scalability.
+
+1. `RemoveAsync` and `RemoveBatchAsync` renamed to `DeleteAsync` and `DeleteBatchAsync`, since records are actually deleted, and this also matches the verb used for collections.
+2. `GetCollectionsAsync` renamed to `GetCollectionNamesAsync`, since we are only retrieving names and no other information about collections.
+3. `DoesCollectionExistAsync` renamed to `CollectionExistsAsync`, since this is shorter and is more commonly used in other APIs.
+
+### Comparison with other AI frameworks
+
+|Criteria|Current SK Implementation|Proposed SK Implementation|Spring AI|LlamaIndex|Langchain|
+|-|-|-|-|-|-|
+|Support for Custom Schemas|N|Y|N|N|N|
+|Naming of store|MemoryStore|VectorStore, VectorStoreCollection|VectorStore|VectorStore|VectorStore|
+|MultiVector support|N|Y|N|N|N|
+|Support Multiple Collections via SDK params|Y|Y|N (via app config)|Y|Y|
+
+## Decision Drivers
+
+From GitHub Issue:
+- API surface must be easy to use and intuitive
+- Alignment with other patterns in the SK
+- Design must allow Memory Plugins to be easily instantiated with any connector
+- Design must support all Kernel content types
+- Design must allow for database-specific configuration
+- All NFRs to be production ready are implemented (see Roadmap for more detail)
+- Basic CRUD operations must be supported so that connectors can be used in a polymorphic manner
+- Official Database Clients must be used where available
+- Dynamic database schema must be supported
+- Dependency injection must be supported
+- Azure-ML YAML format must be supported
+- Breaking glass scenarios must be supported
+
+## Considered Questions
+
+1. Combined collection and record management vs separated.
+2. Collection name and key value normalization in decorator or main class.
+3. Collection name as method param or constructor param.
+4. How to normalize ids across different vector stores where different types are supported.
+5. Store Interface/Class Naming.
+
+### Question 1: Combined collection and record management vs separated
+
+#### Option 1 - Combined collection and record management
+
+```cs
+interface IVectorRecordStore<TRecord>
+{
+    Task CreateCollectionAsync(CollectionCreateConfig collectionConfig, CancellationToken cancellationToken = default);
+    IAsyncEnumerable<string> ListCollectionNamesAsync(CancellationToken cancellationToken = default);
+    Task<bool> CollectionExistsAsync(string name, CancellationToken cancellationToken = default);
+    Task DeleteCollectionAsync(string name, CancellationToken cancellationToken = default);
+
+    Task<string> UpsertAsync(TRecord data, CancellationToken cancellationToken = default);
+    IAsyncEnumerable<string> UpsertBatchAsync(IEnumerable<TRecord> dataSet, CancellationToken cancellationToken = default);
+    Task<TRecord?> GetAsync(string key, bool withEmbedding = false, CancellationToken cancellationToken = default);
+    IAsyncEnumerable<TRecord> GetBatchAsync(IEnumerable<string> keys, bool withVectors = false, CancellationToken cancellationToken = default);
+    Task DeleteAsync(string key, CancellationToken cancellationToken = default);
+    Task DeleteBatchAsync(IEnumerable<string> keys, CancellationToken cancellationToken = default);
+}
+
+class AzureAISearchVectorRecordStore<TRecord>(
+    Azure.Search.Documents.Indexes.SearchIndexClient client,
+    Schema schema): IVectorRecordStore<TRecord>;
+
+class WeaviateVectorRecordStore<TRecord>(
+    WeaviateClient client,
+    Schema schema): IVectorRecordStore<TRecord>;
+
+class RedisVectorRecordStore<TRecord>(
+    StackExchange.Redis.IDatabase database,
+    Schema schema): IVectorRecordStore<TRecord>;
+```
+
+#### Option 2 - Separated collection and record management with opinionated create implementations
+
+```cs
+interface IVectorCollectionStore
+{
+    virtual Task CreateChatHistoryCollectionAsync(string name, CancellationToken cancellationToken = default);
+    virtual Task CreateSemanticCacheCollectionAsync(string name, CancellationToken cancellationToken = default);
+
+    IAsyncEnumerable<string> ListCollectionNamesAsync(CancellationToken cancellationToken = default);
+    Task<bool> CollectionExistsAsync(string name, CancellationToken cancellationToken = default);
+    Task DeleteCollectionAsync(string name, CancellationToken cancellationToken = default);
+}
+
+class AzureAISearchVectorCollectionStore: IVectorCollectionStore;
+class RedisVectorCollectionStore: IVectorCollectionStore;
+class WeaviateVectorCollectionStore: IVectorCollectionStore;
+
+// Customers can inherit from our implementations and replace just the creation scenarios to match their schemas.
+class CustomerCollectionStore: AzureAISearchVectorCollectionStore, IVectorCollectionStore;
+
+// We can also create implementations that create indices based on an MLIndex specification.
+class MLIndexAzureAISearchVectorCollectionStore(MLIndex mlIndexSpec): AzureAISearchVectorCollectionStore, IVectorCollectionStore;
+
+interface IVectorRecordStore<TRecord>
+{
+    Task<TRecord?> GetAsync(string key, GetRecordOptions? options = default, CancellationToken cancellationToken = default);
+    Task DeleteAsync(string key, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default);
+    Task<string> UpsertAsync(TRecord record, UpsertRecordOptions? options = default, CancellationToken cancellationToken = default);
+}
+
+class AzureAISearchVectorRecordStore<TRecord>(): IVectorRecordStore<TRecord>;
+```
+
+#### Option 3 - Separated collection and record management with collection create separate from other operations
+
+The vector record store is the same as in option 2, so it is not repeated for brevity.
+
+```cs
+interface IVectorCollectionCreate
+{
+    virtual Task CreateCollectionAsync(string name, CancellationToken cancellationToken = default);
+}
+
+// Implement a generic version of create that takes a configuration that should work for 80% of cases.
+class AzureAISearchConfiguredVectorCollectionCreate(CollectionCreateConfig collectionConfig): IVectorCollectionCreate;
+
+// Allow custom implementations of create for break glass scenarios outside of the 80% case.
+class AzureAISearchChatHistoryVectorCollectionCreate: IVectorCollectionCreate;
+class AzureAISearchSemanticCacheVectorCollectionCreate: IVectorCollectionCreate;
+
+// Customers can create their own creation scenarios to match their schemas, but can continue to use our get, does exist and delete class.
+class CustomerChatHistoryVectorCollectionCreate: IVectorCollectionCreate;
+
+interface IVectorCollectionNonSchema
+{
+    IAsyncEnumerable<string> ListCollectionNamesAsync(CancellationToken cancellationToken = default);
+    Task<bool> CollectionExistsAsync(string name, CancellationToken cancellationToken = default);
+    Task DeleteCollectionAsync(string name, CancellationToken cancellationToken = default);
+}
+
+class AzureAISearchVectorCollectionNonSchema: IVectorCollectionNonSchema;
+class RedisVectorCollectionNonSchema: IVectorCollectionNonSchema;
+class WeaviateVectorCollectionNonSchema: IVectorCollectionNonSchema;
+```
+
+#### Option 4 - Separated collection and record management with collection create separate from other operations, with collection management aggregation class on top
+
+Variation on option 3.
+
+```cs
+interface IVectorCollectionCreate
+{
+    virtual Task CreateCollectionAsync(string name, CancellationToken cancellationToken = default);
+}
+
+interface IVectorCollectionNonSchema
+{
+    IAsyncEnumerable<string> ListCollectionNamesAsync(CancellationToken cancellationToken = default);
+    Task<bool> CollectionExistsAsync(string name, CancellationToken cancellationToken = default);
+    Task DeleteCollectionAsync(string name, CancellationToken cancellationToken = default);
+}
+
+// DB Specific NonSchema implementations
+class AzureAISearchVectorCollectionNonSchema: IVectorCollectionNonSchema;
+class RedisVectorCollectionNonSchema: IVectorCollectionNonSchema;
+
+// Combined Create + NonSchema Interface
+interface IVectorCollectionStore: IVectorCollectionCreate, IVectorCollectionNonSchema {}
+
+// Base abstract class that forwards non-create operations to the provided implementation.
+abstract class VectorCollectionStore(IVectorCollectionNonSchema collectionNonSchema): IVectorCollectionStore
+{
+    public abstract Task CreateCollectionAsync(string name, CancellationToken cancellationToken = default);
+    public IAsyncEnumerable<string> ListCollectionNamesAsync(CancellationToken cancellationToken = default) { return collectionNonSchema.ListCollectionNamesAsync(cancellationToken); }
+    public Task<bool> CollectionExistsAsync(string name, CancellationToken cancellationToken = default) { return collectionNonSchema.CollectionExistsAsync(name, cancellationToken); }
+    public Task DeleteCollectionAsync(string name, CancellationToken cancellationToken = default) { return collectionNonSchema.DeleteCollectionAsync(name, cancellationToken); }
+}
+
+// Collection store implementations that inherit from the base class and just add the different creation implementations.
+class AzureAISearchChatHistoryVectorCollectionStore(AzureAISearchVectorCollectionNonSchema nonSchema): VectorCollectionStore(nonSchema);
+class AzureAISearchSemanticCacheVectorCollectionStore(AzureAISearchVectorCollectionNonSchema nonSchema): VectorCollectionStore(nonSchema);
+class AzureAISearchMLIndexVectorCollectionStore(AzureAISearchVectorCollectionNonSchema nonSchema): VectorCollectionStore(nonSchema);
+
+// Customer collection store implementation that uses the base Azure AI Search implementation for get, does exist and delete, but adds its own creation.
+class ContosoProductsVectorCollectionStore(AzureAISearchVectorCollectionNonSchema nonSchema): VectorCollectionStore(nonSchema);
+```
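+
+As an illustration of the forwarding pattern above, a hypothetical customer subclass would only need to supply the create logic, with everything else delegated to the injected non-schema implementation (`CreateProductsIndexAsync` below is an assumed helper, not part of the proposal):
+
+```cs
+// Sketch only: a custom collection store that inherits list/exists/delete
+// behavior from VectorCollectionStore and overrides just creation.
+class ContosoProductsVectorCollectionStore(AzureAISearchVectorCollectionNonSchema nonSchema)
+    : VectorCollectionStore(nonSchema)
+{
+    public override Task CreateCollectionAsync(string name, CancellationToken cancellationToken = default)
+    {
+        // Create an index matching Contoso's pre-existing product schema.
+        // CreateProductsIndexAsync is a hypothetical customer-defined helper.
+        return this.CreateProductsIndexAsync(name, cancellationToken);
+    }
+}
+```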
+
+#### Option 5 - Separated collection and record management with collection create separate from other operations, with overall aggregation class on top
+
+Same as option 3 / 4, plus:
+
+```cs
+interface IVectorStore : IVectorCollectionStore, IVectorRecordStore
+{
+}
+
+// Create a static factory that produces one of these, so only the interface is public, not the class.
+internal class VectorStore(IVectorCollectionCreate create, IVectorCollectionNonSchema nonSchema, IVectorRecordStore records): IVectorStore
+{
+}
+```
+
+#### Option 6 - Collection store acts as factory for record store
+
+`IVectorStore` acts as a factory for `IVectorStoreCollection`, and any schema agnostic multi-collection operations are kept on `IVectorStore`.
+
+```cs
+public interface IVectorStore
+{
+    IVectorStoreCollection<TKey, TRecord> GetCollection<TKey, TRecord>(string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition = null);
+    IAsyncEnumerable<string> ListCollectionNamesAsync(CancellationToken cancellationToken = default);
+}
+
+public interface IVectorStoreCollection<TKey, TRecord>
+{
+    public string Name { get; }
+
+    // Collection Operations
+    Task CreateCollectionAsync();
+    Task CreateCollectionIfNotExistsAsync();
+    Task<bool> CollectionExistsAsync();
+    Task DeleteCollectionAsync();
+
+    // Data manipulation
+    Task<TRecord?> GetAsync(TKey key, GetRecordOptions? options = default, CancellationToken cancellationToken = default);
+    IAsyncEnumerable<TRecord> GetBatchAsync(IEnumerable<TKey> keys, GetRecordOptions? options = default, CancellationToken cancellationToken = default);
+    Task DeleteAsync(TKey key, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default);
+    Task DeleteBatchAsync(IEnumerable<TKey> keys, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default);
+    Task<TKey> UpsertAsync(TRecord record, UpsertRecordOptions? options = default, CancellationToken cancellationToken = default);
+    IAsyncEnumerable<TKey> UpsertBatchAsync(IEnumerable<TRecord> records, UpsertRecordOptions? options = default, CancellationToken cancellationToken = default);
+}
+```
+
+#### Decision Outcome
+
+Option 1 is problematic on its own, since we have to allow consumers to create custom implementations of collection create for break glass scenarios. With
+a single interface like this, it will require them to implement many methods that they do not want to change. Options 4 & 5 give us more flexibility while
+still preserving the ease of use of an aggregated interface as described in Option 1.
+
+Option 2 doesn't give us the flexibility we need for break glass scenarios, since it only allows certain types of collections to be created. It also means
+that each time a new collection type is required it introduces a breaking change, so it is not a viable option.
+
+Since collection create and configuration and the possible options vary considerably across different database types, we will need to support an easy
+to use break glass scenario for collection creation. While we would be able to develop a basic configurable create option, for complex create scenarios
+users will need to implement their own. We will also need to support multiple create implementations out of the box, e.g. a configuration based option using
+our own configuration, create implementations that re-create the current model for backward compatibility, and create implementations that use other configuration
+as input, e.g. Azure-ML YAML. Therefore, separating create, which may have many implementations, from exists, list and delete, which require only a single implementation per database type, is useful.
+Option 3 provides us this separation, but Options 4 + 5 build on top of this and allow us to combine different implementations together for simpler
+consumption.
+
+Chosen option: 6
+
+- Easy to use, and similar to many SDK implementations.
+- Can pass a single object around for both collection and record access.
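+
+For illustration, a minimal consumption sketch of the chosen option 6 shape (the `AzureAISearchVectorStore` implementation, the `searchIndexClient` instance and the `embedding` value are assumptions for the example; `HotelInfo` is the model defined earlier in this ADR):
+
+```cs
+// Sketch only: obtain a typed collection from the store, then use that one
+// object for both collection and record operations.
+IVectorStore vectorStore = new AzureAISearchVectorStore(searchIndexClient); // assumed implementation
+var hotels = vectorStore.GetCollection<string, HotelInfo>("hotels");
+
+await hotels.CreateCollectionIfNotExistsAsync();
+
+string key = await hotels.UpsertAsync(new HotelInfo("h1", "Grand Hotel", "A hotel by the sea", embedding));
+HotelInfo? hotel = await hotels.GetAsync(key);
+
+await hotels.DeleteAsync(key);
+```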
+
+### Question 2: Collection name and key value normalization in store, decorator or via injection
+
+#### Option 1 - Normalization in main record store
+
+- Pros: Simple.
+- Cons: The normalization needs to vary separately from the record store, so this will not work.
+
+```cs
+public class AzureAISearchVectorStoreCollection : IVectorStoreCollection
+{
+    ...
+
+    // On input.
+    var normalizedCollectionName = this.NormalizeCollectionName(collectionName);
+    var encodedId = AzureAISearchMemoryRecord.EncodeId(key);
+
+    ...
+
+    // On output.
+    DecodeId(this.Id)
+
+    ...
+}
+```
+
+#### Option 2 - Normalization in decorator
+
+- Pros: Allows normalization to vary separately from the record store.
+- Pros: No code executed when no normalization required.
+- Pros: Easy to package matching encoders/decoders together.
+- Pros: Easier to obsolete encoding/normalization as a concept.
+- Cons: Not a major con, but need to implement the full VectorStoreCollection interface, instead of e.g. just providing the two translation functions, if we go with option 3.
+- Cons: Hard to have a generic implementation that can work with any model, without either changing the data in the provided object on upsert or doing cloning in an expensive way.
+
+```cs
+new KeyNormalizingAISearchVectorStoreCollection(
+    "keyField",
+    new AzureAISearchVectorStoreCollection(...));
+```
+
+#### Option 3 - Normalization via optional function parameters to record store constructor
+
+- Pros: Allows normalization to vary separately from the record store.
+- Pros: No need to implement the full VectorStoreCollection interface.
+- Pros: Can modify values on serialization without changing the incoming record, if supported by the DB SDK.
+- Cons: Harder to package matching encoders/decoders together.
+
+```cs
+public class AzureAISearchVectorStoreCollection(StoreOptions options);
+
+public class StoreOptions
+{
+    public Func<string, string>? EncodeKey { get; init; }
+    public Func<string, string>? DecodeKey { get; init; }
+    public Func<string, string>? SanitizeCollectionName { get; init; }
+}
+```
+
+#### Option 4 - Normalization via custom mapper
+
+If a developer wants to change any values they can do so by creating a custom mapper.
+
+- Cons: Developer needs to implement a mapper if they want to do normalization.
+- Cons: Developer cannot change the collection name as part of the mapping.
+- Pros: No new extension points required to support normalization.
+- Pros: Developer can change any field in the record.
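+
+For illustration, a hypothetical consumer of option 3's constructor-injected functions might wire up normalization like this (the encoding scheme shown is an assumption for the example, not part of the proposal):
+
+```cs
+// Sketch only: inject encode/decode and sanitization behavior per collection,
+// so pre-existing data sets can opt out or supply their own scheme.
+var collection = new AzureAISearchVectorStoreCollection(new StoreOptions
+{
+    // Example only: encode keys before storage and decode them on read.
+    EncodeKey = key => Convert.ToBase64String(System.Text.Encoding.UTF8.GetBytes(key)),
+    DecodeKey = encoded => System.Text.Encoding.UTF8.GetString(Convert.FromBase64String(encoded)),
+    SanitizeCollectionName = name => name.Replace('_', '-'),
+});
+```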
+### Question 3: Collection name as method param or via constructor or either
+
+#### Option 1 - Collection name as method param
+
+```cs
+public class MyVectorStoreCollection()
+{
+    public async Task GetAsync(string collectionName, string key, GetRecordOptions? options = default, CancellationToken cancellationToken = default);
+}
+```
+
+#### Option 2 - Collection name via constructor
+
+```cs
+public class MyVectorStoreCollection(string defaultCollectionName)
+{
+    public async Task GetAsync(string key, GetRecordOptions? options = default, CancellationToken cancellationToken = default);
+}
+```
+
+#### Option 3 - Collection name via either
+
+```cs
+public class MyVectorStoreCollection(string defaultCollectionName)
+{
+    public async Task GetAsync(string key, GetRecordOptions? options = default, CancellationToken cancellationToken = default);
+}
+
+public class GetRecordOptions
+{
+    public string CollectionName { get; init; }
+}
+```
+
+#### Decision Outcome
+
+Chosen option 2. None of the other options work with the decision outcome of Question 1, since that design requires the `VectorStoreCollection` to be tied to a single collection instance.
+
+### Question 4: How to normalize IDs across different vector stores where different types are supported.
+
+#### Option 1 - Take a string and convert to a type that was specified on the constructor
+
+```cs
+public async Task GetAsync(string key, GetRecordOptions? options = default, CancellationToken cancellationToken = default)
+{
+    object convertedKey = this.keyType switch
+    {
+        KeyType.Int => int.Parse(key),
+        KeyType.GUID => Guid.Parse(key)
+    };
+
+    ...
+}
+```
+
+- No additional overloads are required over time, so no breaking changes.
+- Most data types can easily be represented in string form and converted to/from it.
+
+#### Option 2 - Take an object and cast to a type that was specified on the constructor.
+
+```cs
+public async Task GetAsync(object key, GetRecordOptions? options = default, CancellationToken cancellationToken = default)
+{
+    object? convertedKey = this.keyType switch
+    {
+        KeyType.Int => key as int?,
+        KeyType.GUID => key as Guid?
+    };
+
+    if (convertedKey is null)
+    {
+        throw new InvalidOperationException($"The provided key must be of type {this.keyType}");
+    }
+
+    ...
+}
+
+```
+
+- No additional overloads are required over time, so no breaking changes.
+- Any data type can be represented as object.
+
+#### Option 3 - Multiple overloads where we convert where possible, throw when not possible.
+
+```cs
+public async Task GetAsync(string key, GetRecordOptions? options = default, CancellationToken cancellationToken = default)
+{
+    var convertedKey = this.keyType switch
+    {
+        KeyType.Int => (object)int.Parse(key),
+        KeyType.String => key,
+        KeyType.GUID => Guid.Parse(key)
+    };
+}
+public async Task GetAsync(int key, GetRecordOptions? options = default, CancellationToken cancellationToken = default)
+{
+    var convertedKey = this.keyType switch
+    {
+        KeyType.Int => (object)key,
+        KeyType.String => key.ToString(),
+        KeyType.GUID => throw new InvalidOperationException($"The provided key must be convertible to a GUID.")
+    };
+}
+public async Task GetAsync(Guid key, GetRecordOptions? options = default, CancellationToken cancellationToken = default)
+{
+    var convertedKey = this.keyType switch
+    {
+        KeyType.Int => throw new InvalidOperationException($"The provided key must be convertible to an int."),
+        KeyType.String => (object)key.ToString(),
+        KeyType.GUID => key
+    };
+}
+```
+
+- Additional overloads are required over time if new key types are found on new connectors, causing breaking changes.
+- You can still call a method that causes a runtime error when the type isn't supported.
+
+#### Option 4 - Add key type as generic to interface
+
+```cs
+interface IVectorRecordStore<TKey, TRecord>
+{
+    Task<TRecord?> GetAsync(TKey key, GetRecordOptions? options = default, CancellationToken cancellationToken = default);
+}
+
+class AzureAISearchVectorRecordStore<TKey, TRecord> : IVectorRecordStore<TKey, TRecord>
+{
+    public AzureAISearchVectorRecordStore()
+    {
+        // Check if TKey matches the type of the field marked as a key on TRecord and throw if they don't match.
+        // Also check if the key type is one of the allowed types for Azure AI Search and throw if it isn't.
+    }
+}
+
+```
+
+- No runtime issues after construction.
+- More cumbersome interface.
+
+#### Decision Outcome
+
+Chosen option 4, since it is forwards compatible with any complex key types we may need to support, but still allows
+each implementation to hardcode allowed key types if the vector db only supports certain key types.
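+
+As a brief illustration of the chosen option, an invalid key/model pairing surfaces at construction time rather than on the first data operation (`Hotel` is a hypothetical record type whose key field is a string):
+
+```cs
+// Succeeds: TKey matches the string key field declared on Hotel.
+var hotels = new AzureAISearchVectorRecordStore<string, Hotel>();
+
+// Throws at construction: TKey is int, but Hotel's key field is a string,
+// so the mismatch is reported before any data is read or written.
+var broken = new AzureAISearchVectorRecordStore<int, Hotel>();
+```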
+
+### Question 5: Store Interface/Class Naming.
+
+#### Option 1 - VectorDB
+
+```cs
+interface IVectorDBRecordService {}
+interface IVectorDBCollectionUpdateService {}
+interface IVectorDBCollectionCreateService {}
+```
+
+#### Option 2 - Memory
+
+```cs
+interface IMemoryRecordService {}
+interface IMemoryCollectionUpdateService {}
+interface IMemoryCollectionCreateService {}
+```
+
+#### Option 3 - VectorStore
+
+```cs
+interface IVectorRecordStore {}
+interface IVectorCollectionNonSchema {}
+interface IVectorCollectionCreate {}
+interface IVectorCollectionStore : IVectorCollectionCreate, IVectorCollectionNonSchema {}
+interface IVectorStore : IVectorCollectionStore, IVectorRecordStore {}
+```
+
+#### Option 4 - VectorStore + VectorStoreCollection
+
+```cs
+interface IVectorStore
+{
+    IVectorStoreCollection GetCollection()
+}
+interface IVectorStoreCollection
+{
+    Get()
+    Delete()
+    Upsert()
+}
+```
+
+#### Decision Outcome
+
+Chosen option 4. The word memory is broad enough to encompass almost any data, so using it for vector storage seems arbitrary. All competitors are using the term vector store, so using something similar is good for recognition.
+Option 4 also matches our design as chosen in question 1.
+
+## Usage Examples
+
+### DI Framework: .NET 8 Keyed Services
+
+```cs
+class CacheEntryModel(string prompt, string result, ReadOnlyMemory<float> promptEmbedding);
+
+class SemanticTextMemory(IVectorStore configuredVectorStore, VectorStoreRecordDefinition? vectorStoreRecordDefinition) : ISemanticTextMemory
+{
+    public async Task SaveInformation<TDataType>(string collectionName, TDataType record)
+    {
+        var collection = configuredVectorStore.GetCollection<string, TDataType>(collectionName, vectorStoreRecordDefinition);
+        if (!await collection.CollectionExistsAsync())
+        {
+            await collection.CreateCollectionAsync();
+        }
+        await collection.UpsertAsync(record);
+    }
+}
+
+class CacheSetFunctionFilter(ISemanticTextMemory memory); // Saves results to cache.
+class CacheGetPromptFilter(ISemanticTextMemory memory);   // Checks cache for entries.
+
+var builder = Kernel.CreateBuilder();
+
+builder
+    // Existing registration:
+    .AddAzureOpenAITextEmbeddingGeneration(textEmbeddingDeploymentName, azureAIEndpoint, apiKey, serviceId: "AzureOpenAI:text-embedding-ada-002")
+
+    // Register an IVectorStore implementation under the given key.
+    .AddAzureAISearch("Cache", azureAISearchEndpoint, apiKey, new Options() { WithEmbeddingGeneration = true });
+
+// Add Semantic Cache Memory for the cache entry model.
+builder.Services.AddTransient<ISemanticTextMemory>(sp => {
+    return new SemanticTextMemory(
+        sp.GetKeyedService<IVectorStore>("Cache"),
+        cacheRecordDefinition);
+});
+
+// Add a filter to retrieve items from the cache and one to add items to the cache.
+// Since these filters depend on ISemanticTextMemory and that is already registered, it should get matched automatically.
+builder.Services.AddTransient<CacheSetFunctionFilter>();
+builder.Services.AddTransient<CacheGetPromptFilter>();
+```
+
+## Roadmap
+
+### Record Management
+
+1. Release the VectorStoreCollection public interface and implementations for Azure AI Search, Qdrant and Redis.
+2. Add support for registering record stores with the SK container to allow automatic dependency injection.
+3. Add VectorStoreCollection implementations for the remaining stores.
+
+### Collection Management
+
+4. Release the Collection Management public interface and implementations for Azure AI Search, Qdrant and Redis.
+5. Add support for registering collection management with the SK container to allow automatic dependency injection.
+6. Add Collection Management implementations for the remaining stores.
+
+### Collection Creation
+
+7. Release the Collection Creation public interface.
+8. Create a cross-db collection creation config that supports common functionality, and a per-database implementation that supports this configuration.
+9. Add support for registering collection creation with the SK container to allow automatic dependency injection.
+
+### First Party Memory Features and well known model support
+
+10. Add a model and mappers for the legacy SK MemoryStore interface, so that consumers using it have an upgrade path to the new memory storage stack.
+11. Add models and mappers for popular loader systems, like Kernel Memory or LlamaIndex.
+12. Explore adding first party implementations for common scenarios, e.g. semantic caching. Specifics TBD.
+
+### Cross Cutting Requirements
+
+Need the following for all features:
+
+- Unit tests
+- Integration tests
+- Logging / Telemetry
+- Common Exception Handling
+- Samples, including:
+  - Usage scenario for collection and record management using a custom model and configured collection creation.
+  - A simple consumption example like semantic caching, specifics TBD.
+  - Adding your own collection creation implementation.
+  - Adding your own custom model mapper.
+- Documentation, including:
+  - How to create models and annotate/describe them to use with the storage system.
+  - How to define configuration for creating collections using the common create implementation.
+  - How to use the record and collection management APIs.
+ - How to implement your own collection create implementation for break glass scenario. + - How to implement your own mapper. + - How to upgrade from the current storage system to the new one. diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props index 645c8a249d2a..b1c7dc58eddc 100644 --- a/dotnet/Directory.Packages.props +++ b/dotnet/Directory.Packages.props @@ -10,7 +10,7 @@ - + @@ -18,7 +18,7 @@ - + @@ -27,9 +27,10 @@ - + + @@ -38,12 +39,12 @@ - + - + @@ -68,23 +69,24 @@ + - + - + - - + + - + @@ -92,6 +94,7 @@ + diff --git a/dotnet/SK-dotnet.sln b/dotnet/SK-dotnet.sln index 6574700e6ce6..b6cd87d2040b 100644 --- a/dotnet/SK-dotnet.sln +++ b/dotnet/SK-dotnet.sln @@ -318,7 +318,9 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Connectors.Redis.UnitTests" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Connectors.Qdrant.UnitTests", "src\Connectors\Connectors.Qdrant.UnitTests\Connectors.Qdrant.UnitTests.csproj", "{E92AE954-8F3A-4A6F-A4F9-DC12017E5AAF}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "StepwisePlannerMigration", "samples\Demos\StepwisePlannerMigration\StepwisePlannerMigration.csproj", "{38374C62-0263-4FE8-A18C-70FC8132912B}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "StepwisePlannerMigration", "samples\Demos\StepwisePlannerMigration\StepwisePlannerMigration.csproj", "{38374C62-0263-4FE8-A18C-70FC8132912B}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AIModelRouter", "samples\Demos\AIModelRouter\AIModelRouter.csproj", "{E06818E3-00A5-41AC-97ED-9491070CDEA1}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -795,6 +797,12 @@ Global {38374C62-0263-4FE8-A18C-70FC8132912B}.Publish|Any CPU.Build.0 = Debug|Any CPU {38374C62-0263-4FE8-A18C-70FC8132912B}.Release|Any CPU.ActiveCfg = Release|Any CPU {38374C62-0263-4FE8-A18C-70FC8132912B}.Release|Any CPU.Build.0 = Release|Any CPU + {E06818E3-00A5-41AC-97ED-9491070CDEA1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {E06818E3-00A5-41AC-97ED-9491070CDEA1}.Debug|Any CPU.Build.0 = Debug|Any CPU + {E06818E3-00A5-41AC-97ED-9491070CDEA1}.Publish|Any CPU.ActiveCfg = Debug|Any CPU + {E06818E3-00A5-41AC-97ED-9491070CDEA1}.Publish|Any CPU.Build.0 = Debug|Any CPU + {E06818E3-00A5-41AC-97ED-9491070CDEA1}.Release|Any CPU.ActiveCfg = Release|Any CPU + {E06818E3-00A5-41AC-97ED-9491070CDEA1}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -904,6 +912,7 @@ Global {1D4667B9-9381-4E32-895F-123B94253EE8} = {0247C2C9-86C3-45BA-8873-28B0948EDC0C} {E92AE954-8F3A-4A6F-A4F9-DC12017E5AAF} = {0247C2C9-86C3-45BA-8873-28B0948EDC0C} {38374C62-0263-4FE8-A18C-70FC8132912B} = {5D4C0700-BBB5-418F-A7B2-F392B9A18263} + {E06818E3-00A5-41AC-97ED-9491070CDEA1} = {5D4C0700-BBB5-418F-A7B2-F392B9A18263} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {FBDC56A3-86AD-4323-AA0F-201E59123B83} diff --git a/dotnet/nuget/nuget-package.props b/dotnet/nuget/nuget-package.props index 3e173111cdd3..00837d71f910 100644 --- a/dotnet/nuget/nuget-package.props +++ b/dotnet/nuget/nuget-package.props @@ -1,7 +1,7 @@ - 1.16.1 + 1.17.1 $(VersionPrefix)-$(VersionSuffix) $(VersionPrefix) @@ -10,7 +10,7 @@ true - 1.16.1 + 1.17.0 $(NoWarn);CP0003 diff --git a/dotnet/samples/Concepts/Agents/ChatCompletion_FunctionTermination.cs b/dotnet/samples/Concepts/Agents/ChatCompletion_FunctionTermination.cs index f344dae432b9..16c019aebbfd 100644 --- 
a/dotnet/samples/Concepts/Agents/ChatCompletion_FunctionTermination.cs +++ b/dotnet/samples/Concepts/Agents/ChatCompletion_FunctionTermination.cs @@ -1,5 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. using System.ComponentModel; +using Microsoft.Extensions.DependencyInjection; using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.Agents; using Microsoft.SemanticKernel.ChatCompletion; @@ -21,8 +22,8 @@ public async Task UseAutoFunctionInvocationFilterWithAgentInvocationAsync() new() { Instructions = "Answer questions about the menu.", - Kernel = CreateKernelWithChatCompletion(), - ExecutionSettings = new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions }, + Kernel = CreateKernelWithFilter(), + Arguments = new KernelArguments(new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions }), }; KernelPlugin plugin = KernelPluginFactory.CreateFromType(); @@ -74,8 +75,8 @@ public async Task UseAutoFunctionInvocationFilterWithAgentChatAsync() new() { Instructions = "Answer questions about the menu.", - Kernel = CreateKernelWithChatCompletion(), - ExecutionSettings = new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions }, + Kernel = CreateKernelWithFilter(), + Arguments = new KernelArguments(new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions }), }; KernelPlugin plugin = KernelPluginFactory.CreateFromType(); @@ -119,17 +120,41 @@ private void WriteContent(ChatMessageContent content) Console.WriteLine($"[{content.Items.LastOrDefault()?.GetType().Name ?? "(empty)"}] {content.Role} : '{content.Content}'"); } + private Kernel CreateKernelWithFilter() + { + IKernelBuilder builder = Kernel.CreateBuilder(); + + if (this.UseOpenAIConfig) + { + builder.AddOpenAIChatCompletion( + TestConfiguration.OpenAI.ChatModelId, + TestConfiguration.OpenAI.ApiKey); + } + else + { + builder.AddAzureOpenAIChatCompletion( + TestConfiguration.AzureOpenAI.ChatDeploymentName, + TestConfiguration.AzureOpenAI.Endpoint, + TestConfiguration.AzureOpenAI.ApiKey); + } + + builder.Services.AddSingleton(new AutoInvocationFilter()); + + return builder.Build(); + } + private sealed class MenuPlugin { [KernelFunction, Description("Provides a list of specials from the menu.")] [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1024:Use properties where appropriate", Justification = "Too smart")] public string GetSpecials() { - return @" -Special Soup: Clam Chowder -Special Salad: Cobb Salad -Special Drink: Chai Tea -"; + return + """ + Special Soup: Clam Chowder + Special Salad: Cobb Salad + Special Drink: Chai Tea + """; } [KernelFunction, Description("Provides the price of the requested menu item.")] diff --git a/dotnet/samples/Concepts/Agents/ChatCompletion_HistoryReducer.cs b/dotnet/samples/Concepts/Agents/ChatCompletion_HistoryReducer.cs new file mode 100644 index 000000000000..6e0816bc8470 --- /dev/null +++ b/dotnet/samples/Concepts/Agents/ChatCompletion_HistoryReducer.cs @@ -0,0 +1,173 @@ +// Copyright (c) Microsoft. All rights reserved. +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Agents; +using Microsoft.SemanticKernel.Agents.History; +using Microsoft.SemanticKernel.ChatCompletion; + +namespace Agents; + +/// +/// Demonstrate creation of and +/// eliciting its response to three explicit user messages. 
+/// +public class ChatCompletion_HistoryReducer(ITestOutputHelper output) : BaseTest(output) +{ + private const string TranslatorName = "NumeroTranslator"; + private const string TranslatorInstructions = "Add one to latest user number and spell it in spanish without explanation."; + + /// + /// Demonstrate the use of when directly + /// invoking a . + /// + [Fact] + public async Task TruncatedAgentReductionAsync() + { + // Define the agent + ChatCompletionAgent agent = CreateTruncatingAgent(10, 10); + + await InvokeAgentAsync(agent, 50); + } + + /// + /// Demonstrate the use of when directly + /// invoking a . + /// + [Fact] + public async Task SummarizedAgentReductionAsync() + { + // Define the agent + ChatCompletionAgent agent = CreateSummarizingAgent(10, 10); + + await InvokeAgentAsync(agent, 50); + } + + /// + /// Demonstrate the use of when using + /// to invoke a . + /// + [Fact] + public async Task TruncatedChatReductionAsync() + { + // Define the agent + ChatCompletionAgent agent = CreateTruncatingAgent(10, 10); + + await InvokeChatAsync(agent, 50); + } + + /// + /// Demonstrate the use of when using + /// to invoke a . + /// + [Fact] + public async Task SummarizedChatReductionAsync() + { + // Define the agent + ChatCompletionAgent agent = CreateSummarizingAgent(10, 10); + + await InvokeChatAsync(agent, 50); + } + + // Proceed with dialog by directly invoking the agent and explicitly managing the history. + private async Task InvokeAgentAsync(ChatCompletionAgent agent, int messageCount) + { + ChatHistory chat = []; + + int index = 1; + while (index <= messageCount) + { + // Provide user input + chat.Add(new ChatMessageContent(AuthorRole.User, $"{index}")); + Console.WriteLine($"# {AuthorRole.User}: '{index}'"); + + // Reduce prior to invoking the agent + bool isReduced = await agent.ReduceAsync(chat); + + // Invoke and display assistant response + await foreach (ChatMessageContent message in agent.InvokeAsync(chat)) + { + chat.Add(message); + Console.WriteLine($"# {message.Role} - {message.AuthorName ?? "*"}: '{message.Content}'"); + } + + index += 2; + + // Display the message count of the chat-history for visibility into reduction + Console.WriteLine($"@ Message Count: {chat.Count}\n"); + + // Display summary messages (if present) if reduction has occurred + if (isReduced) + { + int summaryIndex = 0; + while (chat[summaryIndex].Metadata?.ContainsKey(ChatHistorySummarizationReducer.SummaryMetadataKey) ?? false) + { + Console.WriteLine($"\tSummary: {chat[summaryIndex].Content}"); + ++summaryIndex; + } + } + } + } + + // Proceed with dialog with AgentGroupChat. + private async Task InvokeChatAsync(ChatCompletionAgent agent, int messageCount) + { + AgentGroupChat chat = new(); + + int lastHistoryCount = 0; + + int index = 1; + while (index <= messageCount) + { + // Provide user input + chat.AddChatMessage(new ChatMessageContent(AuthorRole.User, $"{index}")); + Console.WriteLine($"# {AuthorRole.User}: '{index}'"); + + // Invoke and display assistant response + await foreach (ChatMessageContent message in chat.InvokeAsync(agent)) + { + Console.WriteLine($"# {message.Role} - {message.AuthorName ?? 
"*"}: '{message.Content}'"); + } + + index += 2; + + // Display the message count of the chat-history for visibility into reduction + // Note: Messages provided in descending order (newest first) + ChatMessageContent[] history = await chat.GetChatMessagesAsync(agent).ToArrayAsync(); + Console.WriteLine($"@ Message Count: {history.Length}\n"); + + // Display summary messages (if present) if reduction has occurred + if (history.Length < lastHistoryCount) + { + int summaryIndex = history.Length - 1; + while (history[summaryIndex].Metadata?.ContainsKey(ChatHistorySummarizationReducer.SummaryMetadataKey) ?? false) + { + Console.WriteLine($"\tSummary: {history[summaryIndex].Content}"); + --summaryIndex; + } + } + + lastHistoryCount = history.Length; + } + } + + private ChatCompletionAgent CreateSummarizingAgent(int reducerMessageCount, int reducerThresholdCount) + { + Kernel kernel = this.CreateKernelWithChatCompletion(); + return + new() + { + Name = TranslatorName, + Instructions = TranslatorInstructions, + Kernel = kernel, + HistoryReducer = new ChatHistorySummarizationReducer(kernel.GetRequiredService(), reducerMessageCount, reducerThresholdCount), + }; + } + + private ChatCompletionAgent CreateTruncatingAgent(int reducerMessageCount, int reducerThresholdCount) => + new() + { + Name = TranslatorName, + Instructions = TranslatorInstructions, + Kernel = this.CreateKernelWithChatCompletion(), + HistoryReducer = new ChatHistoryTruncationReducer(reducerMessageCount, reducerThresholdCount), + }; +} diff --git a/dotnet/samples/Concepts/Agents/ChatCompletion_ServiceSelection.cs b/dotnet/samples/Concepts/Agents/ChatCompletion_ServiceSelection.cs new file mode 100644 index 000000000000..82b2ca28bce0 --- /dev/null +++ b/dotnet/samples/Concepts/Agents/ChatCompletion_ServiceSelection.cs @@ -0,0 +1,128 @@ +// Copyright (c) Microsoft. All rights reserved. +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Agents; +using Microsoft.SemanticKernel.ChatCompletion; +using Microsoft.SemanticKernel.Connectors.OpenAI; + +namespace Agents; + +/// +/// Demonstrate service selection for through setting service-id +/// on and also providing override +/// when calling +/// +public class ChatCompletion_ServiceSelection(ITestOutputHelper output) : BaseTest(output) +{ + private const string ServiceKeyGood = "chat-good"; + private const string ServiceKeyBad = "chat-bad"; + + [Fact] + public async Task UseServiceSelectionWithChatCompletionAgentAsync() + { + // Create kernel with two instances of IChatCompletionService + // One service is configured with a valid API key and the other with an + // invalid key that will result in a 401 Unauthorized error. 
+ Kernel kernel = CreateKernelWithTwoServices(); + + // Define the agent targeting ServiceId = ServiceKeyGood + ChatCompletionAgent agentGood = + new() + { + Kernel = kernel, + Arguments = new KernelArguments(new OpenAIPromptExecutionSettings() { ServiceId = ServiceKeyGood }), + }; + + // Define the agent targeting ServiceId = ServiceKeyBad + ChatCompletionAgent agentBad = + new() + { + Kernel = kernel, + Arguments = new KernelArguments(new OpenAIPromptExecutionSettings() { ServiceId = ServiceKeyBad }), + }; + + // Define the agent with no explicit ServiceId defined + ChatCompletionAgent agentDefault = new() { Kernel = kernel }; + + // Invoke agent as initialized with ServiceId = ServiceKeyGood: Expect agent response + Console.WriteLine("\n[Agent With Good ServiceId]"); + await InvokeAgentAsync(agentGood); + + // Invoke agent as initialized with ServiceId = ServiceKeyBad: Expect failure due to invalid service key + Console.WriteLine("\n[Agent With Bad ServiceId]"); + await InvokeAgentAsync(agentBad); + + // Invoke agent as initialized with no explicit ServiceId: Expect agent response + Console.WriteLine("\n[Agent With No ServiceId]"); + await InvokeAgentAsync(agentDefault); + + // Invoke agent with override arguments where ServiceId = ServiceKeyGood: Expect agent response + Console.WriteLine("\n[Bad Agent: Good ServiceId Override]"); + await InvokeAgentAsync(agentBad, new(new OpenAIPromptExecutionSettings() { ServiceId = ServiceKeyGood })); + + // Invoke agent with override arguments where ServiceId = ServiceKeyBad: Expect failure due to invalid service key + Console.WriteLine("\n[Good Agent: Bad ServiceId Override]"); + await InvokeAgentAsync(agentGood, new(new OpenAIPromptExecutionSettings() { ServiceId = ServiceKeyBad })); + Console.WriteLine("\n[Default Agent: Bad ServiceId Override]"); + await InvokeAgentAsync(agentDefault, new(new OpenAIPromptExecutionSettings() { ServiceId = ServiceKeyBad })); + + // Invoke agent with override arguments with no explicit ServiceId: Expect agent response + Console.WriteLine("\n[Good Agent: No ServiceId Override]"); + await InvokeAgentAsync(agentGood, new(new OpenAIPromptExecutionSettings())); + Console.WriteLine("\n[Bad Agent: No ServiceId Override]"); + await InvokeAgentAsync(agentBad, new(new OpenAIPromptExecutionSettings())); + Console.WriteLine("\n[Default Agent: No ServiceId Override]"); + await InvokeAgentAsync(agentDefault, new(new OpenAIPromptExecutionSettings())); + + // Local function to invoke agent and display the conversation messages. + async Task InvokeAgentAsync(ChatCompletionAgent agent, KernelArguments? 
arguments = null) + { + ChatHistory chat = [new(AuthorRole.User, "Hello")]; + + try + { + await foreach (ChatMessageContent response in agent.InvokeAsync(chat, arguments)) + { + Console.WriteLine(response.Content); + } + } + catch (HttpOperationException exception) + { + Console.WriteLine($"Status: {exception.StatusCode}"); + } + } + } + + private Kernel CreateKernelWithTwoServices() + { + IKernelBuilder builder = Kernel.CreateBuilder(); + + if (this.UseOpenAIConfig) + { + builder.AddOpenAIChatCompletion( + TestConfiguration.OpenAI.ChatModelId, + "bad-key", + serviceId: ServiceKeyBad); + + builder.AddOpenAIChatCompletion( + TestConfiguration.OpenAI.ChatModelId, + TestConfiguration.OpenAI.ApiKey, + serviceId: ServiceKeyGood); + } + else + { + builder.AddAzureOpenAIChatCompletion( + TestConfiguration.AzureOpenAI.ChatDeploymentName, + TestConfiguration.AzureOpenAI.Endpoint, + "bad-key", + serviceId: ServiceKeyBad); + + builder.AddAzureOpenAIChatCompletion( + TestConfiguration.AzureOpenAI.ChatDeploymentName, + TestConfiguration.AzureOpenAI.Endpoint, + TestConfiguration.AzureOpenAI.ApiKey, + serviceId: ServiceKeyGood); + } + + return builder.Build(); + } +} diff --git a/dotnet/samples/Concepts/Agents/ChatCompletion_Streaming.cs b/dotnet/samples/Concepts/Agents/ChatCompletion_Streaming.cs index 258e12166a6b..d3e94386af96 100644 --- a/dotnet/samples/Concepts/Agents/ChatCompletion_Streaming.cs +++ b/dotnet/samples/Concepts/Agents/ChatCompletion_Streaming.cs @@ -49,7 +49,7 @@ public async Task UseStreamingChatCompletionAgentWithPluginAsync() Name = "Host", Instructions = MenuInstructions, Kernel = this.CreateKernelWithChatCompletion(), - ExecutionSettings = new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions }, + Arguments = new KernelArguments(new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions }), }; // Initialize plugin and add to the agent's Kernel (same as direct Kernel usage). diff --git a/dotnet/samples/Concepts/Agents/MixedChat_Reset.cs b/dotnet/samples/Concepts/Agents/MixedChat_Reset.cs new file mode 100644 index 000000000000..92aa8a9ce9d4 --- /dev/null +++ b/dotnet/samples/Concepts/Agents/MixedChat_Reset.cs @@ -0,0 +1,90 @@ +// Copyright (c) Microsoft. All rights reserved. +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Agents; +using Microsoft.SemanticKernel.Agents.OpenAI; +using Microsoft.SemanticKernel.ChatCompletion; +using Microsoft.SemanticKernel.Connectors.OpenAI; + +namespace Agents; + +/// +/// Demonstrate the use of . +/// +public class MixedChat_Reset(ITestOutputHelper output) : BaseTest(output) +{ + private const string AgentInstructions = + """ + The user may either provide information or query on information previously provided. + If the query does not correspond with information provided, inform the user that their query cannot be answered. 
+ """; + + [Fact] + public async Task ResetChatAsync() + { + OpenAIFileService fileService = new(TestConfiguration.OpenAI.ApiKey); + + // Define the agents + OpenAIAssistantAgent assistantAgent = + await OpenAIAssistantAgent.CreateAsync( + kernel: new(), + config: new(this.ApiKey, this.Endpoint), + new() + { + Name = nameof(OpenAIAssistantAgent), + Instructions = AgentInstructions, + ModelId = this.Model, + }); + + ChatCompletionAgent chatAgent = + new() + { + Name = nameof(ChatCompletionAgent), + Instructions = AgentInstructions, + Kernel = this.CreateKernelWithChatCompletion(), + }; + + // Create a chat for agent interaction. + AgentGroupChat chat = new(); + + // Respond to user input + try + { + await InvokeAgentAsync(assistantAgent, "What is my favorite color?"); + await InvokeAgentAsync(chatAgent); + + await InvokeAgentAsync(assistantAgent, "I like green."); + await InvokeAgentAsync(chatAgent); + + await InvokeAgentAsync(assistantAgent, "What is my favorite color?"); + await InvokeAgentAsync(chatAgent); + + await chat.ResetAsync(); + + await InvokeAgentAsync(assistantAgent, "What is my favorite color?"); + await InvokeAgentAsync(chatAgent); + } + finally + { + await chat.ResetAsync(); + await assistantAgent.DeleteAsync(); + } + + // Local function to invoke agent and display the conversation messages. + async Task InvokeAgentAsync(Agent agent, string? input = null) + { + if (!string.IsNullOrWhiteSpace(input)) + { + chat.AddChatMessage(new(AuthorRole.User, input)); + Console.WriteLine($"\n# {AuthorRole.User}: '{input}'"); + } + + await foreach (ChatMessageContent message in chat.InvokeAsync(agent)) + { + if (!string.IsNullOrWhiteSpace(message.Content)) + { + Console.WriteLine($"\n# {message.Role} - {message.AuthorName ?? "*"}: '{message.Content}'"); + } + } + } + } +} diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs index 22b6eec9baaf..46aadfc243b0 100644 --- a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs +++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletion.cs @@ -1,5 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. +using Azure.Identity; using Microsoft.SemanticKernel.ChatCompletion; using Microsoft.SemanticKernel.Connectors.OpenAI; @@ -11,7 +12,7 @@ public class OpenAI_ChatCompletion(ITestOutputHelper output) : BaseTest(output) [Fact] public async Task OpenAIChatSampleAsync() { - Console.WriteLine("======== Open AI - ChatGPT ========"); + Console.WriteLine("======== Open AI - Chat Completion ========"); OpenAIChatCompletionService chatCompletionService = new(TestConfiguration.OpenAI.ChatModelId, TestConfiguration.OpenAI.ApiKey); @@ -49,7 +50,7 @@ I hope these suggestions are helpful! [Fact] public async Task AzureOpenAIChatSampleAsync() { - Console.WriteLine("======== Azure Open AI - ChatGPT ========"); + Console.WriteLine("======== Azure Open AI - Chat Completion ========"); AzureOpenAIChatCompletionService chatCompletionService = new( deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, @@ -60,6 +61,24 @@ public async Task AzureOpenAIChatSampleAsync() await StartChatAsync(chatCompletionService); } + /// + /// Sample showing how to use Azure Open AI Chat Completion with Azure Default Credential. + /// If local auth is disabled in the Azure Open AI deployment, you can use Azure Default Credential to authenticate. 
+ /// + [Fact] + public async Task AzureOpenAIWithDefaultAzureCredentialSampleAsync() + { + Console.WriteLine("======== Azure Open AI - Chat Completion with Azure Default Credential ========"); + + AzureOpenAIChatCompletionService chatCompletionService = new( + deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, + endpoint: TestConfiguration.AzureOpenAI.Endpoint, + credentials: new DefaultAzureCredential(), + modelId: TestConfiguration.AzureOpenAI.ChatModelId); + + await StartChatAsync(chatCompletionService); + } + private async Task StartChatAsync(IChatCompletionService chatGPT) { Console.WriteLine("Chat content:"); diff --git a/dotnet/samples/Concepts/Concepts.csproj b/dotnet/samples/Concepts/Concepts.csproj index dd43184b6612..89cc2c897d61 100644 --- a/dotnet/samples/Concepts/Concepts.csproj +++ b/dotnet/samples/Concepts/Concepts.csproj @@ -14,6 +14,7 @@ + diff --git a/dotnet/samples/Concepts/Memory/TextChunkerUsage.cs b/dotnet/samples/Concepts/Memory/TextChunkerUsage.cs index a42e769ae916..51d50c619903 100644 --- a/dotnet/samples/Concepts/Memory/TextChunkerUsage.cs +++ b/dotnet/samples/Concepts/Memory/TextChunkerUsage.cs @@ -8,7 +8,7 @@ namespace Memory; public class TextChunkerUsage(ITestOutputHelper output) : BaseTest(output) { - private static readonly Tokenizer s_tokenizer = Tokenizer.CreateTiktokenForModel("gpt-4"); + private static readonly Tokenizer s_tokenizer = TiktokenTokenizer.CreateForModel("gpt-4"); [Fact] public void RunExample() diff --git a/dotnet/samples/Concepts/Memory/TextChunkingAndEmbedding.cs b/dotnet/samples/Concepts/Memory/TextChunkingAndEmbedding.cs index 013bb4961621..04a74656e948 100644 --- a/dotnet/samples/Concepts/Memory/TextChunkingAndEmbedding.cs +++ b/dotnet/samples/Concepts/Memory/TextChunkingAndEmbedding.cs @@ -9,7 +9,7 @@ namespace Memory; public class TextChunkingAndEmbedding(ITestOutputHelper output) : BaseTest(output) { private const string EmbeddingModelName = "text-embedding-ada-002"; - private static readonly Tokenizer s_tokenizer = Tokenizer.CreateTiktokenForModel(EmbeddingModelName); + private static readonly Tokenizer s_tokenizer = TiktokenTokenizer.CreateForModel(EmbeddingModelName); [Fact] public async Task RunAsync() diff --git a/dotnet/samples/Concepts/Memory/VectorStoreFixtures/VectorStoreInfra.cs b/dotnet/samples/Concepts/Memory/VectorStoreFixtures/VectorStoreInfra.cs new file mode 100644 index 000000000000..ea498f20c5ab --- /dev/null +++ b/dotnet/samples/Concepts/Memory/VectorStoreFixtures/VectorStoreInfra.cs @@ -0,0 +1,108 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Docker.DotNet; +using Docker.DotNet.Models; + +namespace Memory.VectorStoreFixtures; + +/// +/// Helper class that creates and deletes containers for the vector store examples. +/// +internal static class VectorStoreInfra +{ + /// + /// Setup the qdrant container by pulling the image and running it. + /// + /// The docker client to create the container with. + /// The id of the container. 
+    public static async Task<string> SetupQdrantContainerAsync(DockerClient client)
+    {
+        await client.Images.CreateImageAsync(
+            new ImagesCreateParameters
+            {
+                FromImage = "qdrant/qdrant",
+                Tag = "latest",
+            },
+            null,
+            new Progress<JSONMessage>());
+
+        var container = await client.Containers.CreateContainerAsync(new CreateContainerParameters()
+        {
+            Image = "qdrant/qdrant",
+            HostConfig = new HostConfig()
+            {
+                PortBindings = new Dictionary<string, IList<PortBinding>>
+                {
+                    {"6333", new List<PortBinding> {new() {HostPort = "6333" } }},
+                    {"6334", new List<PortBinding> {new() {HostPort = "6334" } }}
+                },
+                PublishAllPorts = true
+            },
+            ExposedPorts = new Dictionary<string, EmptyStruct>
+            {
+                { "6333", default },
+                { "6334", default }
+            },
+        });
+
+        await client.Containers.StartContainerAsync(
+            container.ID,
+            new ContainerStartParameters());
+
+        return container.ID;
+    }
+
+    /// <summary>
+    /// Setup the redis container by pulling the image and running it.
+    /// </summary>
+    /// <param name="client">The docker client to create the container with.</param>
+    /// <returns>The id of the container.</returns>
+    public static async Task<string> SetupRedisContainerAsync(DockerClient client)
+    {
+        await client.Images.CreateImageAsync(
+            new ImagesCreateParameters
+            {
+                FromImage = "redis/redis-stack",
+                Tag = "latest",
+            },
+            null,
+            new Progress<JSONMessage>());
+
+        var container = await client.Containers.CreateContainerAsync(new CreateContainerParameters()
+        {
+            Image = "redis/redis-stack",
+            HostConfig = new HostConfig()
+            {
+                PortBindings = new Dictionary<string, IList<PortBinding>>
+                {
+                    {"6379", new List<PortBinding> {new() {HostPort = "6379"}}},
+                    {"8001", new List<PortBinding> {new() {HostPort = "8001"}}}
+                },
+                PublishAllPorts = true
+            },
+            ExposedPorts = new Dictionary<string, EmptyStruct>
+            {
+                { "6379", default },
+                { "8001", default }
+            },
+        });
+
+        await client.Containers.StartContainerAsync(
+            container.ID,
+            new ContainerStartParameters());
+
+        return container.ID;
+    }
+
+    /// <summary>
+    /// Stop and delete the container with the specified id.
+    /// </summary>
+    /// <param name="client">The docker client to delete the container in.</param>
+    /// <param name="containerId">The id of the container to delete.</param>
+    /// <returns>An async task.</returns>
+    public static async Task DeleteContainerAsync(DockerClient client, string containerId)
+    {
+        await client.Containers.StopContainerAsync(containerId, new ContainerStopParameters());
+        await client.Containers.RemoveContainerAsync(containerId, new ContainerRemoveParameters());
+    }
+}
diff --git a/dotnet/samples/Concepts/Memory/VectorStoreFixtures/VectorStoreQdrantContainerFixture.cs b/dotnet/samples/Concepts/Memory/VectorStoreFixtures/VectorStoreQdrantContainerFixture.cs
new file mode 100644
index 000000000000..820b5d3bf172
--- /dev/null
+++ b/dotnet/samples/Concepts/Memory/VectorStoreFixtures/VectorStoreQdrantContainerFixture.cs
@@ -0,0 +1,56 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using Docker.DotNet;
+using Qdrant.Client;
+
+namespace Memory.VectorStoreFixtures;
+
+/// <summary>
+/// Fixture to use for creating a Qdrant container before tests and delete it after tests.
+/// </summary>
+public class VectorStoreQdrantContainerFixture : IAsyncLifetime
+{
+    private DockerClient? _dockerClient;
+    private string? _qdrantContainerId;
+
+    public async Task InitializeAsync()
+    {
+    }
+
+    public async Task ManualInitializeAsync()
+    {
+        if (this._qdrantContainerId == null)
+        {
+            // Connect to docker and start the docker container.
+            using var dockerClientConfiguration = new DockerClientConfiguration();
+            this._dockerClient = dockerClientConfiguration.CreateClient();
+            this._qdrantContainerId = await VectorStoreInfra.SetupQdrantContainerAsync(this._dockerClient);
+
+            // Delay until the Qdrant server is ready.
+ var qdrantClient = new QdrantClient("localhost"); + var succeeded = false; + var attemptCount = 0; + while (!succeeded && attemptCount++ < 10) + { + try + { + await qdrantClient.ListCollectionsAsync(); + succeeded = true; + } + catch (Exception) + { + await Task.Delay(1000); + } + } + } + } + + public async Task DisposeAsync() + { + if (this._dockerClient != null && this._qdrantContainerId != null) + { + // Delete docker container. + await VectorStoreInfra.DeleteContainerAsync(this._dockerClient, this._qdrantContainerId); + } + } +} diff --git a/dotnet/samples/Concepts/Memory/VectorStoreFixtures/VectorStoreRedisContainerFixture.cs b/dotnet/samples/Concepts/Memory/VectorStoreFixtures/VectorStoreRedisContainerFixture.cs new file mode 100644 index 000000000000..eb35b7ff555f --- /dev/null +++ b/dotnet/samples/Concepts/Memory/VectorStoreFixtures/VectorStoreRedisContainerFixture.cs @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Docker.DotNet; + +namespace Memory.VectorStoreFixtures; + +/// +/// Fixture to use for creating a Redis container before tests and delete it after tests. +/// +public class VectorStoreRedisContainerFixture : IAsyncLifetime +{ + private DockerClient? _dockerClient; + private string? _redisContainerId; + + public async Task InitializeAsync() + { + } + + public async Task ManualInitializeAsync() + { + if (this._redisContainerId == null) + { + // Connect to docker and start the docker container. + using var dockerClientConfiguration = new DockerClientConfiguration(); + this._dockerClient = dockerClientConfiguration.CreateClient(); + this._redisContainerId = await VectorStoreInfra.SetupRedisContainerAsync(this._dockerClient); + } + } + + public async Task DisposeAsync() + { + if (this._dockerClient != null && this._redisContainerId != null) + { + // Delete docker container. + await VectorStoreInfra.DeleteContainerAsync(this._dockerClient, this._redisContainerId); + } + } +} diff --git a/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_CustomMapper.cs b/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_CustomMapper.cs new file mode 100644 index 000000000000..db8e259f4e7a --- /dev/null +++ b/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_CustomMapper.cs @@ -0,0 +1,204 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; +using System.Text.Json.Nodes; +using Memory.VectorStoreFixtures; +using Microsoft.SemanticKernel.Connectors.OpenAI; +using Microsoft.SemanticKernel.Connectors.Redis; +using Microsoft.SemanticKernel.Data; +using Microsoft.SemanticKernel.Embeddings; +using StackExchange.Redis; + +namespace Memory; + +/// +/// An example showing how to ingest data into a vector store using with a custom mapper. +/// In this example, the storage model differs significantly from the data model, so a custom mapper is used to map between the two. +/// A is used to define the schema of the storage model, and this means that the connector +/// will not try and infer the schema from the data model. +/// In storage the data is stored as a JSON object that looks similar to this: +/// +/// { +/// "Term": "API", +/// "Definition": "Application Programming Interface. A set of rules and specifications that allow software components to communicate and exchange data.", +/// "DefinitionEmbedding": [ ... ] +/// } +/// +/// However, the data model is a class with a property for key and two dictionaries for the data (Term and Definition) and vector (DefinitionEmbedding). 
+/// +/// The example shows the following steps: +/// 1. Create an embedding generator. +/// 2. Create a Redis Vector Store using a custom factory for creating collections. +/// When constructing a collection, the factory injects a custom mapper that maps between the data model and the storage model if required. +/// 3. Ingest some data into the vector store. +/// 4. Read the data back from the vector store. +/// +/// You need a local instance of Docker running, since the associated fixture will try and start a Redis container in the local docker instance to run against. +/// +public class VectorStore_DataIngestion_CustomMapper(ITestOutputHelper output, VectorStoreRedisContainerFixture redisFixture) : BaseTest(output), IClassFixture +{ + /// + /// A record definition for the glossary entries that defines the storage schema of the record. + /// + private static readonly VectorStoreRecordDefinition s_glossaryDefinition = new() + { + Properties = new List + { + new VectorStoreRecordKeyProperty("Key", typeof(string)), + new VectorStoreRecordDataProperty("Term", typeof(string)), + new VectorStoreRecordDataProperty("Definition", typeof(string)), + new VectorStoreRecordVectorProperty("DefinitionEmbedding", typeof(ReadOnlyMemory)) { Dimensions = 1536, DistanceFunction = DistanceFunction.DotProductSimilarity } + } + }; + + [Fact] + public async Task ExampleAsync() + { + // Create an embedding generation service. + var textEmbeddingGenerationService = new AzureOpenAITextEmbeddingGenerationService( + TestConfiguration.AzureOpenAIEmbeddings.DeploymentName, + TestConfiguration.AzureOpenAIEmbeddings.Endpoint, + TestConfiguration.AzureOpenAIEmbeddings.ApiKey); + + // Initiate the docker container and construct the vector store using the custom factory for creating collections. + await redisFixture.ManualInitializeAsync(); + ConnectionMultiplexer redis = ConnectionMultiplexer.Connect("localhost:6379"); + var vectorStore = new RedisVectorStore(redis.GetDatabase(), new() { VectorStoreCollectionFactory = new Factory() }); + + // Get and create collection if it doesn't exist, using the record definition containing the storage model. + var collection = vectorStore.GetCollection("skglossary", s_glossaryDefinition); + await collection.CreateCollectionIfNotExistsAsync(); + + // Create glossary entries and generate embeddings for them. + var glossaryEntries = CreateGlossaryEntries().ToList(); + var tasks = glossaryEntries.Select(entry => Task.Run(async () => + { + entry.Vectors["DefinitionEmbedding"] = await textEmbeddingGenerationService.GenerateEmbeddingAsync((string)entry.Data["Definition"]); + })); + await Task.WhenAll(tasks); + + // Upsert the glossary entries into the collection and return their keys. + var upsertedKeysTasks = glossaryEntries.Select(x => collection.UpsertAsync(x)); + var upsertedKeys = await Task.WhenAll(upsertedKeysTasks); + + // Retrieve one of the upserted records from the collection. + var upsertedRecord = await collection.GetAsync(upsertedKeys.First(), new() { IncludeVectors = true }); + + // Write upserted keys and one of the upserted records to the console. + Console.WriteLine($"Upserted keys: {string.Join(", ", upsertedKeys)}"); + Console.WriteLine($"Upserted record: {JsonSerializer.Serialize(upsertedRecord)}"); + } + + /// + /// A custom mapper that maps between the data model and the storage model. 
+ /// + private sealed class Mapper : IVectorStoreRecordMapper + { + public (string Key, JsonNode Node) MapFromDataToStorageModel(GenericDataModel dataModel) + { + var jsonObject = new JsonObject(); + + jsonObject.Add("Term", dataModel.Data["Term"].ToString()); + jsonObject.Add("Definition", dataModel.Data["Definition"].ToString()); + + var vector = (ReadOnlyMemory)dataModel.Vectors["DefinitionEmbedding"]; + var jsonArray = new JsonArray(vector.ToArray().Select(x => JsonValue.Create(x)).ToArray()); + jsonObject.Add("DefinitionEmbedding", jsonArray); + + return (dataModel.Key, jsonObject); + } + + public GenericDataModel MapFromStorageToDataModel((string Key, JsonNode Node) storageModel, StorageToDataModelMapperOptions options) + { + var dataModel = new GenericDataModel + { + Key = storageModel.Key, + Data = new Dictionary + { + { "Term", (string)storageModel.Node["Term"]! }, + { "Definition", (string)storageModel.Node["Definition"]! } + }, + Vectors = new Dictionary + { + { "DefinitionEmbedding", new ReadOnlyMemory(storageModel.Node["DefinitionEmbedding"]!.AsArray().Select(x => (float)x!).ToArray()) } + } + }; + + return dataModel; + } + } + + /// + /// A factory for creating collections in the vector store + /// + private sealed class Factory : IRedisVectorStoreRecordCollectionFactory + { + public IVectorStoreRecordCollection CreateVectorStoreRecordCollection(IDatabase database, string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition) + where TKey : notnull + where TRecord : class + { + // If the record definition is the glossary definition and the record type is the generic data model, inject the custom mapper into the collection options. + if (vectorStoreRecordDefinition == s_glossaryDefinition && typeof(TRecord) == typeof(GenericDataModel)) + { + var customCollection = new RedisJsonVectorStoreRecordCollection(database, name, new() { VectorStoreRecordDefinition = vectorStoreRecordDefinition, JsonNodeCustomMapper = new Mapper() }) as IVectorStoreRecordCollection; + return customCollection!; + } + + // Otherwise, just create a standard collection with the default mapper. + var collection = new RedisJsonVectorStoreRecordCollection(database, name, new() { VectorStoreRecordDefinition = vectorStoreRecordDefinition }) as IVectorStoreRecordCollection; + return collection!; + } + } + + /// + /// Sample generic data model class that can store any data. + /// + private sealed class GenericDataModel + { + public string Key { get; set; } + + public Dictionary Data { get; set; } + + public Dictionary Vectors { get; set; } + } + + /// + /// Create some sample glossary entries using the generic data model. + /// + /// A list of sample glossary entries. + private static IEnumerable CreateGlossaryEntries() + { + yield return new GenericDataModel + { + Key = "1", + Data = new() + { + { "Term", "API" }, + { "Definition", "Application Programming Interface. A set of rules and specifications that allow software components to communicate and exchange data." } + }, + Vectors = new() + }; + + yield return new GenericDataModel + { + Key = "2", + Data = new() + { + { "Term", "Connectors" }, + { "Definition", "Connectors allow you to integrate with various services provide AI capabilities, including LLM, AudioToText, TextToAudio, Embedding generation, etc." 
} + }, + Vectors = new() + }; + + yield return new GenericDataModel + { + Key = "3", + Data = new() + { + { "Term", "RAG" }, + { "Definition", "Retrieval Augmented Generation - a term that refers to the process of retrieving additional data to provide as context to an LLM to use when generating a response (completion) to a user’s question (prompt)." } + }, + Vectors = new() + }; + } +} diff --git a/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_MultiStore.cs b/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_MultiStore.cs new file mode 100644 index 000000000000..18f0e5b476ca --- /dev/null +++ b/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_MultiStore.cs @@ -0,0 +1,256 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; +using Memory.VectorStoreFixtures; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.OpenAI; +using Microsoft.SemanticKernel.Connectors.Qdrant; +using Microsoft.SemanticKernel.Connectors.Redis; +using Microsoft.SemanticKernel.Data; +using Microsoft.SemanticKernel.Embeddings; +using Qdrant.Client; +using StackExchange.Redis; + +namespace Memory; + +/// +/// An example showing how to ingest data into a vector store using , or . +/// Since Redis and Volatile supports string keys and Qdrant supports ulong or Guid keys, this example also shows how you can have common code +/// that works with both types of keys by using a generic key generator function. +/// +/// The example shows the following steps: +/// 1. Register a vector store and embedding generator with the DI container. +/// 2. Register a class (DataIngestor) with the DI container that uses the vector store and embedding generator to ingest data. +/// 3. Ingest some data into the vector store. +/// 4. Read the data back from the vector store. +/// +/// For some databases in this sample (Redis & Qdrant), you need a local instance of Docker running, since the associated fixtures will try and start containers in the local docker instance to run against. +/// +[Collection("Sequential")] +public class VectorStore_DataIngestion_MultiStore(ITestOutputHelper output, VectorStoreRedisContainerFixture redisFixture, VectorStoreQdrantContainerFixture qdrantFixture) : BaseTest(output), IClassFixture, IClassFixture +{ + /// + /// Example with dependency injection. + /// + /// The type of database to run the example for. + [Theory] + [InlineData("Redis")] + [InlineData("Qdrant")] + [InlineData("Volatile")] + public async Task ExampleWithDIAsync(string databaseType) + { + // Use the kernel for DI purposes. + var kernelBuilder = Kernel + .CreateBuilder(); + + // Register an embedding generation service with the DI container. + kernelBuilder.AddAzureOpenAITextEmbeddingGeneration( + deploymentName: TestConfiguration.AzureOpenAIEmbeddings.DeploymentName, + endpoint: TestConfiguration.AzureOpenAIEmbeddings.Endpoint, + apiKey: TestConfiguration.AzureOpenAIEmbeddings.ApiKey); + + // Register the chosen vector store with the DI container and initialize docker containers via the fixtures where needed. 
+ if (databaseType == "Redis") + { + await redisFixture.ManualInitializeAsync(); + kernelBuilder.AddRedisVectorStore("localhost:6379"); + } + else if (databaseType == "Qdrant") + { + await qdrantFixture.ManualInitializeAsync(); + kernelBuilder.AddQdrantVectorStore("localhost"); + } + else if (databaseType == "Volatile") + { + kernelBuilder.AddVolatileVectorStore(); + } + + // Register the DataIngestor with the DI container. + kernelBuilder.Services.AddTransient(); + + // Build the kernel. + var kernel = kernelBuilder.Build(); + + // Build a DataIngestor object using the DI container. + var dataIngestor = kernel.GetRequiredService(); + + // Invoke the data ingestor using an appropriate key generator function for each database type. + // Redis and Volatile supports string keys, while Qdrant supports ulong or Guid keys, so we use a different key generator for each key type. + if (databaseType == "Redis" || databaseType == "Volatile") + { + await this.UpsertDataAndReadFromVectorStoreAsync(dataIngestor, () => Guid.NewGuid().ToString()); + } + else if (databaseType == "Qdrant") + { + await this.UpsertDataAndReadFromVectorStoreAsync(dataIngestor, () => Guid.NewGuid()); + } + } + + /// + /// Example without dependency injection. + /// + /// The type of database to run the example for. + [Theory] + [InlineData("Redis")] + [InlineData("Qdrant")] + [InlineData("Volatile")] + public async Task ExampleWithoutDIAsync(string databaseType) + { + // Create an embedding generation service. + var textEmbeddingGenerationService = new AzureOpenAITextEmbeddingGenerationService( + TestConfiguration.AzureOpenAIEmbeddings.DeploymentName, + TestConfiguration.AzureOpenAIEmbeddings.Endpoint, + TestConfiguration.AzureOpenAIEmbeddings.ApiKey); + + // Construct the chosen vector store and initialize docker containers via the fixtures where needed. + IVectorStore vectorStore; + if (databaseType == "Redis") + { + await redisFixture.ManualInitializeAsync(); + var database = ConnectionMultiplexer.Connect("localhost:6379").GetDatabase(); + vectorStore = new RedisVectorStore(database); + } + else if (databaseType == "Qdrant") + { + await qdrantFixture.ManualInitializeAsync(); + var qdrantClient = new QdrantClient("localhost"); + vectorStore = new QdrantVectorStore(qdrantClient); + } + else if (databaseType == "Volatile") + { + vectorStore = new VolatileVectorStore(); + } + else + { + throw new ArgumentException("Invalid database type."); + } + + // Create the DataIngestor. + var dataIngestor = new DataIngestor(vectorStore, textEmbeddingGenerationService); + + // Invoke the data ingestor using an appropriate key generator function for each database type. + // Redis and Volatile supports string keys, while Qdrant supports ulong or Guid keys, so we use a different key generator for each key type. + if (databaseType == "Redis" || databaseType == "Volatile") + { + await this.UpsertDataAndReadFromVectorStoreAsync(dataIngestor, () => Guid.NewGuid().ToString()); + } + else if (databaseType == "Qdrant") + { + await this.UpsertDataAndReadFromVectorStoreAsync(dataIngestor, () => Guid.NewGuid()); + } + } + + private async Task UpsertDataAndReadFromVectorStoreAsync(DataIngestor dataIngestor, Func uniqueKeyGenerator) + where TKey : notnull + { + // Ingest some data into the vector store. + var upsertedKeys = await dataIngestor.ImportDataAsync(uniqueKeyGenerator); + + // Get one of the upserted records. 
+ var upsertedRecord = await dataIngestor.GetGlossaryAsync(upsertedKeys.First()); + + // Write upserted keys and one of the upserted records to the console. + Console.WriteLine($"Upserted keys: {string.Join(", ", upsertedKeys)}"); + Console.WriteLine($"Upserted record: {JsonSerializer.Serialize(upsertedRecord)}"); + } + + /// + /// Sample class that does ingestion of sample data into a vector store and allows retrieval of data from the vector store. + /// + /// The vector store to ingest data into. + /// Used to generate embeddings for the data being ingested. + private sealed class DataIngestor(IVectorStore vectorStore, ITextEmbeddingGenerationService textEmbeddingGenerationService) + { + /// + /// Create some glossary entries and upsert them into the vector store. + /// + /// The keys of the upserted glossary entries. + /// The type of the keys in the vector store. + public async Task> ImportDataAsync(Func uniqueKeyGenerator) + where TKey : notnull + { + // Get and create collection if it doesn't exist. + var collection = vectorStore.GetCollection>("skglossary"); + await collection.CreateCollectionIfNotExistsAsync(); + + // Create glossary entries and generate embeddings for them. + var glossaryEntries = CreateGlossaryEntries(uniqueKeyGenerator).ToList(); + var tasks = glossaryEntries.Select(entry => Task.Run(async () => + { + entry.DefinitionEmbedding = await textEmbeddingGenerationService.GenerateEmbeddingAsync(entry.Definition); + })); + await Task.WhenAll(tasks); + + // Upsert the glossary entries into the collection and return their keys. + var upsertedKeys = glossaryEntries.Select(x => collection.UpsertAsync(x)); + return await Task.WhenAll(upsertedKeys); + } + + /// + /// Get a glossary entry from the vector store. + /// + /// The key of the glossary entry to retrieve. + /// The glossary entry. + /// The type of the keys in the vector store. + public Task?> GetGlossaryAsync(TKey key) + where TKey : notnull + { + var collection = vectorStore.GetCollection>("skglossary"); + return collection.GetAsync(key, new() { IncludeVectors = true }); + } + } + + /// + /// Create some sample glossary entries. + /// + /// The type of the model key. + /// A function that can be used to generate unique keys for the model in the type that the model requires. + /// A list of sample glossary entries. + private static IEnumerable> CreateGlossaryEntries(Func uniqueKeyGenerator) + { + yield return new Glossary + { + Key = uniqueKeyGenerator(), + Term = "API", + Definition = "Application Programming Interface. A set of rules and specifications that allow software components to communicate and exchange data." + }; + + yield return new Glossary + { + Key = uniqueKeyGenerator(), + Term = "Connectors", + Definition = "Connectors allow you to integrate with various services provide AI capabilities, including LLM, AudioToText, TextToAudio, Embedding generation, etc." + }; + + yield return new Glossary + { + Key = uniqueKeyGenerator(), + Term = "RAG", + Definition = "Retrieval Augmented Generation - a term that refers to the process of retrieving additional data to provide as context to an LLM to use when generating a response (completion) to a user’s question (prompt)." + }; + } + + /// + /// Sample model class that represents a glossary entry. + /// + /// + /// Note that each property is decorated with an attribute that specifies how the property should be treated by the vector store. 
+ /// This allows us to create a collection in the vector store and upsert and retrieve instances of this class without any further configuration. + /// + /// The type of the model key. + private sealed class Glossary + { + [VectorStoreRecordKey] + public TKey Key { get; set; } + + [VectorStoreRecordData] + public string Term { get; set; } + + [VectorStoreRecordData] + public string Definition { get; set; } + + [VectorStoreRecordVector(1536)] + public ReadOnlyMemory DefinitionEmbedding { get; set; } + } +} diff --git a/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_Simple.cs b/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_Simple.cs new file mode 100644 index 000000000000..341e5c2bbda2 --- /dev/null +++ b/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_Simple.cs @@ -0,0 +1,113 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; +using Memory.VectorStoreFixtures; +using Microsoft.SemanticKernel.Connectors.OpenAI; +using Microsoft.SemanticKernel.Connectors.Qdrant; +using Microsoft.SemanticKernel.Data; +using Microsoft.SemanticKernel.Embeddings; +using Qdrant.Client; + +namespace Memory; + +/// +/// A simple example showing how to ingest data into a vector store using . +/// +/// The example shows the following steps: +/// 1. Create an embedding generator. +/// 2. Create a Qdrant Vector Store. +/// 3. Ingest some data into the vector store. +/// 4. Read the data back from the vector store. +/// +/// You need a local instance of Docker running, since the associated fixture will try and start a Qdrant container in the local docker instance to run against. +/// +[Collection("Sequential")] +public class VectorStore_DataIngestion_Simple(ITestOutputHelper output, VectorStoreQdrantContainerFixture qdrantFixture) : BaseTest(output), IClassFixture +{ + [Fact] + public async Task ExampleAsync() + { + // Create an embedding generation service. + var textEmbeddingGenerationService = new AzureOpenAITextEmbeddingGenerationService( + TestConfiguration.AzureOpenAIEmbeddings.DeploymentName, + TestConfiguration.AzureOpenAIEmbeddings.Endpoint, + TestConfiguration.AzureOpenAIEmbeddings.ApiKey); + + // Initiate the docker container and construct the vector store. + await qdrantFixture.ManualInitializeAsync(); + var vectorStore = new QdrantVectorStore(new QdrantClient("localhost")); + + // Get and create collection if it doesn't exist. + var collection = vectorStore.GetCollection("skglossary"); + await collection.CreateCollectionIfNotExistsAsync(); + + // Create glossary entries and generate embeddings for them. + var glossaryEntries = CreateGlossaryEntries().ToList(); + var tasks = glossaryEntries.Select(entry => Task.Run(async () => + { + entry.DefinitionEmbedding = await textEmbeddingGenerationService.GenerateEmbeddingAsync(entry.Definition); + })); + await Task.WhenAll(tasks); + + // Upsert the glossary entries into the collection and return their keys. + var upsertedKeysTasks = glossaryEntries.Select(x => collection.UpsertAsync(x)); + var upsertedKeys = await Task.WhenAll(upsertedKeysTasks); + + // Retrieve one of the upserted records from the collection. + var upsertedRecord = await collection.GetAsync(upsertedKeys.First(), new() { IncludeVectors = true }); + + // Write upserted keys and one of the upserted records to the console. 
+ Console.WriteLine($"Upserted keys: {string.Join(", ", upsertedKeys)}"); + Console.WriteLine($"Upserted record: {JsonSerializer.Serialize(upsertedRecord)}"); + } + + /// + /// Sample model class that represents a glossary entry. + /// + /// + /// Note that each property is decorated with an attribute that specifies how the property should be treated by the vector store. + /// This allows us to create a collection in the vector store and upsert and retrieve instances of this class without any further configuration. + /// + private sealed class Glossary + { + [VectorStoreRecordKey] + public ulong Key { get; set; } + + [VectorStoreRecordData] + public string Term { get; set; } + + [VectorStoreRecordData] + public string Definition { get; set; } + + [VectorStoreRecordVector(1536)] + public ReadOnlyMemory DefinitionEmbedding { get; set; } + } + + /// + /// Create some sample glossary entries. + /// + /// A list of sample glossary entries. + private static IEnumerable CreateGlossaryEntries() + { + yield return new Glossary + { + Key = 1, + Term = "API", + Definition = "Application Programming Interface. A set of rules and specifications that allow software components to communicate and exchange data." + }; + + yield return new Glossary + { + Key = 2, + Term = "Connectors", + Definition = "Connectors allow you to integrate with various services provide AI capabilities, including LLM, AudioToText, TextToAudio, Embedding generation, etc." + }; + + yield return new Glossary + { + Key = 3, + Term = "RAG", + Definition = "Retrieval Augmented Generation - a term that refers to the process of retrieving additional data to provide as context to an LLM to use when generating a response (completion) to a user’s question (prompt)." + }; + } +} diff --git a/dotnet/samples/Concepts/Plugins/OpenApiPlugin_CustomHttpContentReader.cs b/dotnet/samples/Concepts/Plugins/OpenApiPlugin_CustomHttpContentReader.cs new file mode 100644 index 000000000000..829e5a42abb3 --- /dev/null +++ b/dotnet/samples/Concepts/Plugins/OpenApiPlugin_CustomHttpContentReader.cs @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; +using System.Text.Json.Serialization; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Plugins.OpenApi; + +namespace Plugins; + +/// +/// Sample shows how to register a custom HTTP content reader for an Open API plugin. +/// +public sealed class CustomHttpContentReaderForOpenApiPlugin(ITestOutputHelper output) : BaseTest(output) +{ + [Fact] + public async Task ShowReadingJsonAsStreamAsync() + { + var kernel = new Kernel(); + + // Register the custom HTTP content reader + var executionParameters = new OpenApiFunctionExecutionParameters() { HttpResponseContentReader = ReadHttpResponseContentAsync }; + + // Create OpenAPI plugin + var plugin = await OpenApiKernelPluginFactory.CreateFromOpenApiAsync("RepairService", "Resources/Plugins/RepairServicePlugin/repair-service.json", executionParameters); + + // Create a repair so it can be read as a stream in the following step + var arguments = new KernelArguments + { + ["title"] = "The Case of the Broken Gizmo", + ["description"] = "It's broken. 
Send help!", + ["assignedTo"] = "Tech Magician" + }; + var createResult = await plugin["createRepair"].InvokeAsync(kernel, arguments); + Console.WriteLine(createResult.ToString()); + + // List relevant repairs + arguments = new KernelArguments + { + ["assignedTo"] = "Tech Magician" + }; + var listResult = await plugin["listRepairs"].InvokeAsync(kernel, arguments); + using var reader = new StreamReader((Stream)listResult.GetValue()!.Content!); + var content = await reader.ReadToEndAsync(); + var repairs = JsonSerializer.Deserialize(content); + Console.WriteLine(content); + + // Delete the repair + arguments = new KernelArguments + { + ["id"] = repairs!.Where(r => r.AssignedTo == "Tech Magician").First().Id.ToString() + }; + var deleteResult = await plugin["deleteRepair"].InvokeAsync(kernel, arguments); + Console.WriteLine(deleteResult.ToString()); + } + + /// + /// A custom HTTP content reader to change the default behavior of reading HTTP content. + /// + /// The HTTP response content reader context. + /// The cancellation token. + /// The HTTP response content. + private static async Task ReadHttpResponseContentAsync(HttpResponseContentReaderContext context, CancellationToken cancellationToken) + { + // Read JSON content as a stream rather than as a string, which is the default behavior + if (context.Response.Content.Headers.ContentType?.MediaType == "application/json") + { + return await context.Response.Content.ReadAsStreamAsync(cancellationToken); + } + + // HTTP request and response properties can be used to decide how to read the content. + // The 'if' operator below is not relevant to the current example and is just for demonstration purposes. + if (context.Request.Headers.Contains("x-stream")) + { + return await context.Response.Content.ReadAsStreamAsync(cancellationToken); + } + + // Return null to indicate that any other HTTP content not handled above should be read by the default reader. + return null; + } + + private sealed class Repair + { + [JsonPropertyName("id")] + public int? Id { get; set; } + + [JsonPropertyName("title")] + public string? Title { get; set; } + + [JsonPropertyName("description")] + public string? Description { get; set; } + + [JsonPropertyName("assignedTo")] + public string? AssignedTo { get; set; } + + [JsonPropertyName("date")] + public string? Date { get; set; } + + [JsonPropertyName("image")] + public string? 
Image { get; set; } + } +} diff --git a/dotnet/samples/Concepts/README.md b/dotnet/samples/Concepts/README.md index 77427c605193..26eef28982a7 100644 --- a/dotnet/samples/Concepts/README.md +++ b/dotnet/samples/Concepts/README.md @@ -104,6 +104,9 @@ Down below you can find the code snippets that demonstrate the usage of many Sem - [TextMemoryPlugin_GeminiEmbeddingGeneration](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Memory/TextMemoryPlugin_GeminiEmbeddingGeneration.cs) - [TextMemoryPlugin_MultipleMemoryStore](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Memory/TextMemoryPlugin_MultipleMemoryStore.cs) - [TextMemoryPlugin_RecallJsonSerializationWithOptions](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Memory/TextMemoryPlugin_RecallJsonSerializationWithOptions.cs) +- [VectorStore_DataIngestion_Simple: A simple example of how to do data ingestion into a vector store when getting started.](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_Simple.cs) +- [VectorStore_DataIngestion_MultiStore: An example of data ingestion that uses the same code to ingest into multiple vector stores types.](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_MultiStore.cs) +- [VectorStore_DataIngestion_CustomMapper: An example that shows how to use a custom mapper for when your data model and storage model doesn't match.](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Memory/VectorStore_DataIngestion_CustomMapper.cs) ## Optimization - Examples of different cost and performance optimization techniques diff --git a/dotnet/samples/Demos/AIModelRouter/AIModelRouter.csproj b/dotnet/samples/Demos/AIModelRouter/AIModelRouter.csproj new file mode 100644 index 000000000000..fb5862e3270a --- /dev/null +++ b/dotnet/samples/Demos/AIModelRouter/AIModelRouter.csproj @@ -0,0 +1,20 @@ + + + + Exe + net8.0;netstandard2.0 + enable + enable + 5ee045b0-aea3-4f08-8d31-32d1a6f8fed0 + + + + + + + + + + + + diff --git a/dotnet/samples/Demos/AIModelRouter/CustomRouter.cs b/dotnet/samples/Demos/AIModelRouter/CustomRouter.cs new file mode 100644 index 000000000000..ff2767a289c8 --- /dev/null +++ b/dotnet/samples/Demos/AIModelRouter/CustomRouter.cs @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft. All rights reserved. + +#pragma warning disable SKEXP0001 +#pragma warning disable SKEXP0010 +#pragma warning disable CA2249 // Consider using 'string.Contains' instead of 'string.IndexOf' + +namespace AIModelRouter; + +/// +/// This class is for demonstration purposes only. +/// In a real-world scenario, you would use a more sophisticated routing mechanism, such as another local model for +/// deciding which service to use based on the user's input or any other criteria. +/// +public class CustomRouter() +{ + /// + /// Returns the best service id to use based on the user's input. + /// This demonstration uses a simple logic where your input is checked for specific keywords as a deciding factor, + /// if no keyword is found it defaults to the first service in the list. + /// + /// User's input prompt + /// List of service ids to choose from in order of importance, defaulting to the first + /// Service id. + public string FindService(string lookupPrompt, IReadOnlyList serviceIds) + { + // The order matters, if the keyword is not found, the first one is used. 
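+        // For example, "Ollama, what is Jupiter?" matches the "ollama" service id via the
+        // case-insensitive substring check below, while a prompt that names no service id
+        // falls through to the serviceIds[0] default.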
+        foreach (var serviceId in serviceIds)
+        {
+            if (Contains(lookupPrompt, serviceId)) { return serviceId; }
+        }
+
+        return serviceIds[0];
+    }
+
+    // Ensure compatibility with both netstandard2.0 and net8.0 by using IndexOf instead of Contains
+    private static bool Contains(string prompt, string pattern)
+        => prompt.IndexOf(pattern, StringComparison.CurrentCultureIgnoreCase) >= 0;
+}
diff --git a/dotnet/samples/Demos/AIModelRouter/Program.cs b/dotnet/samples/Demos/AIModelRouter/Program.cs
new file mode 100644
index 000000000000..5bafa4934883
--- /dev/null
+++ b/dotnet/samples/Demos/AIModelRouter/Program.cs
@@ -0,0 +1,56 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using Microsoft.Extensions.Configuration;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.SemanticKernel;
+
+#pragma warning disable SKEXP0001
+#pragma warning disable SKEXP0010
+#pragma warning disable CA2249 // Consider using 'string.Contains' instead of 'string.IndexOf'
+
+namespace AIModelRouter;
+
+internal sealed partial class Program
+{
+    private static async Task Main(string[] args)
+    {
+        Console.ForegroundColor = ConsoleColor.White;
+
+        var config = new ConfigurationBuilder().AddUserSecrets<Program>().Build();
+
+        ServiceCollection services = new();
+
+        // Adding multiple connectors targeting different providers / models.
+        services.AddKernel() /* LMStudio model is selected on the server side. */
+            .AddOpenAIChatCompletion(serviceId: "lmstudio", modelId: "N/A", endpoint: new Uri("http://localhost:1234"), apiKey: null)
+            .AddOpenAIChatCompletion(serviceId: "ollama", modelId: "phi3", endpoint: new Uri("http://localhost:11434"), apiKey: null)
+            .AddOpenAIChatCompletion(serviceId: "openai", modelId: "gpt-4o", apiKey: config["OpenAI:ApiKey"]!)
+
+            // Adding a custom filter to capture the router-selected service id
+            .Services.AddSingleton<IPromptRenderFilter>(new SelectedServiceFilter());
+
+        var kernel = services.BuildServiceProvider().GetRequiredService<Kernel>();
+        var router = new CustomRouter();
+
+        while (true)
+        {
+            Console.Write("\n\nUser > ");
+            var userMessage = Console.ReadLine();
+
+            // Exit application if the user enters an empty message
+            if (string.IsNullOrWhiteSpace(userMessage)) { return; }
+
+            // Find the best service to use based on the user's input
+            KernelArguments arguments = new(new PromptExecutionSettings()
+            {
+                ServiceId = router.FindService(userMessage, ["lmstudio", "ollama", "openai"])
+            });
+
+            // Invoke the prompt and print the response
+            await foreach (var chatChunk in kernel.InvokePromptStreamingAsync(userMessage, arguments).ConfigureAwait(false))
+            {
+                Console.Write(chatChunk);
+            }
+        }
+    }
+}
diff --git a/dotnet/samples/Demos/AIModelRouter/README.md b/dotnet/samples/Demos/AIModelRouter/README.md
new file mode 100644
index 000000000000..92ac37e7c81e
--- /dev/null
+++ b/dotnet/samples/Demos/AIModelRouter/README.md
@@ -0,0 +1,51 @@
+# AI Model Router
+
+This sample demonstrates how to implement an AI Model Router using Semantic Kernel connectors to direct requests to various AI models based on user input. As part of this example, we integrate LMStudio, Ollama, and OpenAI, utilizing the OpenAI Connector for LMStudio and Ollama due to their compatibility with the OpenAI API.
+
+> [!IMPORTANT]
+> You can modify this sample to use any other combination of connectors or OpenAI-compatible model providers, as sketched below.
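For instance, wiring in one more OpenAI-compatible endpoint is a single additional connector registration. A minimal sketch; the `myprovider` service id, port, and model name are illustrative placeholders, not part of this sample:

```csharp
// Hypothetical extra provider: any endpoint that speaks the OpenAI API can be
// registered the same way. The serviceId, modelId, and port are placeholders.
services.AddKernel()
    .AddOpenAIChatCompletion(
        serviceId: "myprovider",
        modelId: "my-model",
        endpoint: new Uri("http://localhost:8080"),
        apiKey: null);
```

The new service id then also needs to be appended to the list passed to `CustomRouter.FindService` in `Program.cs` so the router can select it.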
+
+## Semantic Kernel Features Used
+
+- [Chat Completion Service](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/IChatCompletionService.cs) - Using the Chat Completion Service [OpenAI Connector implementation](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/Connectors/Connectors.OpenAI/ChatCompletion/OpenAIChatCompletionService.cs) to generate responses from the LLM.
+- [Filters](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/IChatCompletionService.cs), used to capture the selected service and log it to the console.
+
+## Prerequisites
+
+- [.NET 8](https://dotnet.microsoft.com/download/dotnet/8.0).
+
+## Configuring the sample
+
+The sample can be configured by using the command line with .NET [Secret Manager](https://learn.microsoft.com/en-us/aspnet/core/security/app-secrets) to avoid the risk of leaking secrets into the repository, branches and pull requests.
+
+### Using .NET [Secret Manager](https://learn.microsoft.com/en-us/aspnet/core/security/app-secrets)
+
+```powershell
+# OpenAI (Not required if using Azure OpenAI)
+dotnet user-secrets set "OpenAI:ApiKey" "... your api key ... "
+```
+
+## Running the sample
+
+After configuring the sample, to build and run the console application just hit `F5`.
+
+To build and run the console application from the terminal use the following commands:
+
+```powershell
+dotnet build
+dotnet run
+```
+
+### Example of a conversation
+
+> **User** > OpenAI, what is Jupiter? Keep it simple.
+
+> **Assistant** > Sure! Jupiter is the largest planet in our solar system. It's a gas giant, mostly made of hydrogen and helium, and it has a lot of storms, including the famous Great Red Spot. Jupiter also has at least 79 moons.
+
+> **User** > Ollama, what is Jupiter? Keep it simple.
+
+> **Assistant** > Jupiter is a giant planet in our solar system known for being the largest and most massive, famous for its spectacled clouds and dozens of moons including Ganymede which is bigger than Earth!
+
+> **User** > LMStudio, what is Jupiter? Keep it simple.
+
+> **Assistant** > Jupiter is the fifth planet from the Sun in our Solar System and one of its gas giants alongside Saturn, Uranus, and Neptune. It's famous for having a massive storm called the Great Red Spot that has been raging for hundreds of years.
\ No newline at end of file
diff --git a/dotnet/samples/Demos/AIModelRouter/SelectedServiceFilter.cs b/dotnet/samples/Demos/AIModelRouter/SelectedServiceFilter.cs
new file mode 100644
index 000000000000..9824d57ebd55
--- /dev/null
+++ b/dotnet/samples/Demos/AIModelRouter/SelectedServiceFilter.cs
@@ -0,0 +1,26 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using Microsoft.SemanticKernel;
+
+#pragma warning disable SKEXP0001
+#pragma warning disable SKEXP0010
+#pragma warning disable CA2249 // Consider using 'string.Contains' instead of 'string.IndexOf'
+
+namespace AIModelRouter;
+
+///
+/// Using a filter to log the service being used for the prompt.
+/// +public class SelectedServiceFilter : IPromptRenderFilter +{ + /// + public Task OnPromptRenderAsync(PromptRenderContext context, Func next) + { + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine($"Selected service id: '{context.Arguments.ExecutionSettings?.FirstOrDefault().Key}'"); + + Console.ForegroundColor = ConsoleColor.White; + Console.Write("Assistant > "); + return next(context); + } +} diff --git a/dotnet/samples/GettingStartedWithAgents/Step2_Plugins.cs b/dotnet/samples/GettingStartedWithAgents/Step2_Plugins.cs index 38741bbb2e7c..7946adc7f687 100644 --- a/dotnet/samples/GettingStartedWithAgents/Step2_Plugins.cs +++ b/dotnet/samples/GettingStartedWithAgents/Step2_Plugins.cs @@ -26,7 +26,7 @@ public async Task UseChatCompletionWithPluginAgentAsync() Instructions = HostInstructions, Name = HostName, Kernel = this.CreateKernelWithChatCompletion(), - ExecutionSettings = new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions }, + Arguments = new KernelArguments(new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions }), }; // Initialize plugin and add to the agent's Kernel (same as direct Kernel usage). diff --git a/dotnet/samples/GettingStartedWithAgents/Step4_KernelFunctionStrategies.cs b/dotnet/samples/GettingStartedWithAgents/Step4_KernelFunctionStrategies.cs index 9cabe0193d3e..d71b6ae26767 100644 --- a/dotnet/samples/GettingStartedWithAgents/Step4_KernelFunctionStrategies.cs +++ b/dotnet/samples/GettingStartedWithAgents/Step4_KernelFunctionStrategies.cs @@ -72,7 +72,6 @@ State only the name of the participant to take the next turn. - {{{CopyWriterName}}} Always follow these rules when selecting the next participant: - - After user input, it is {{{CopyWriterName}}}'a turn. - After {{{CopyWriterName}}} replies, it is {{{ReviewerName}}}'s turn. - After {{{ReviewerName}}} provides feedback, it is {{{CopyWriterName}}}'s turn. @@ -105,6 +104,8 @@ State only the name of the participant to take the next turn. SelectionStrategy = new KernelFunctionSelectionStrategy(selectionFunction, CreateKernelWithChatCompletion()) { + // Always start with the writer agent. + InitialAgent = agentWriter, // Returns the entire result value as a string. ResultParser = (result) => result.GetValue() ?? CopyWriterName, // The prompt variable name for the agents argument. diff --git a/dotnet/src/Agents/Abstractions/AgentChannel.cs b/dotnet/src/Agents/Abstractions/AgentChannel.cs index 9788464a2adb..34f7a8030896 100644 --- a/dotnet/src/Agents/Abstractions/AgentChannel.cs +++ b/dotnet/src/Agents/Abstractions/AgentChannel.cs @@ -25,6 +25,15 @@ public abstract class AgentChannel /// The to monitor for cancellation requests. The default is . protected internal abstract Task ReceiveAsync(IEnumerable history, CancellationToken cancellationToken = default); + /// + /// Reset any persistent state associated with the channel. + /// + /// The to monitor for cancellation requests. The default is . + /// + /// The channel wont' be reused; rather, it will be discarded and a new one created. + /// + protected internal abstract Task ResetAsync(CancellationToken cancellationToken = default); + /// /// Perform a discrete incremental interaction between a single and . 
/// diff --git a/dotnet/src/Agents/Abstractions/AgentChat.cs b/dotnet/src/Agents/Abstractions/AgentChat.cs index f4654963444e..cdc46024ece7 100644 --- a/dotnet/src/Agents/Abstractions/AgentChat.cs +++ b/dotnet/src/Agents/Abstractions/AgentChat.cs @@ -266,6 +266,29 @@ async Task GetOrCreateChannelAsync() } } + /// + /// Reset the chat, clearing all history and persisted state. + /// All agents will remain present. + /// + /// The to monitor for cancellation requests. The default is . + public async Task ResetAsync(CancellationToken cancellationToken = default) + { + this.SetActivityOrThrow(); // Disallow concurrent access to chat + + try + { + Task[] resetTasks = this._agentChannels.Values.Select(c => c.ResetAsync(cancellationToken)).ToArray(); + await Task.WhenAll(resetTasks).ConfigureAwait(false); + this._agentChannels.Clear(); + this._channelMap.Clear(); + this.History.Clear(); + } + finally + { + this.ClearActivitySignal(); + } + } + /// /// Clear activity signal to indicate that activity has ceased. /// diff --git a/dotnet/src/Agents/Abstractions/AggregatorChannel.cs b/dotnet/src/Agents/Abstractions/AggregatorChannel.cs index 73561a4eba8b..c7123abf9b71 100644 --- a/dotnet/src/Agents/Abstractions/AggregatorChannel.cs +++ b/dotnet/src/Agents/Abstractions/AggregatorChannel.cs @@ -47,6 +47,7 @@ protected internal override IAsyncEnumerable GetHistoryAsync } } + /// protected internal override Task ReceiveAsync(IEnumerable history, CancellationToken cancellationToken = default) { // Always receive the initial history from the owning chat. @@ -54,4 +55,8 @@ protected internal override Task ReceiveAsync(IEnumerable hi return Task.CompletedTask; } + + /// + protected internal override Task ResetAsync(CancellationToken cancellationToken = default) => + this._chat.ResetAsync(cancellationToken); } diff --git a/dotnet/src/Agents/Abstractions/ChatHistoryKernelAgent.cs b/dotnet/src/Agents/Abstractions/ChatHistoryKernelAgent.cs deleted file mode 100644 index 3de87da3de06..000000000000 --- a/dotnet/src/Agents/Abstractions/ChatHistoryKernelAgent.cs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using Microsoft.Extensions.Logging; -using Microsoft.SemanticKernel.ChatCompletion; - -namespace Microsoft.SemanticKernel.Agents; - -/// -/// A specialization bound to a . 
-/// -public abstract class ChatHistoryKernelAgent : KernelAgent, IChatHistoryHandler -{ - /// - protected internal sealed override IEnumerable GetChannelKeys() - { - yield return typeof(ChatHistoryChannel).FullName!; - } - - /// - protected internal sealed override Task CreateChannelAsync(CancellationToken cancellationToken) - { - ChatHistoryChannel channel = - new() - { - Logger = this.LoggerFactory.CreateLogger() - }; - - return Task.FromResult(channel); - } - - /// - public abstract IAsyncEnumerable InvokeAsync( - ChatHistory history, - CancellationToken cancellationToken = default); - - /// - public abstract IAsyncEnumerable InvokeStreamingAsync( - ChatHistory history, - CancellationToken cancellationToken = default); -} diff --git a/dotnet/src/Agents/Abstractions/Extensions/ChatHistoryExtensions.cs b/dotnet/src/Agents/Abstractions/Extensions/ChatHistoryExtensions.cs index a7b2273ece9e..d8ef44a416a1 100644 --- a/dotnet/src/Agents/Abstractions/Extensions/ChatHistoryExtensions.cs +++ b/dotnet/src/Agents/Abstractions/Extensions/ChatHistoryExtensions.cs @@ -8,7 +8,7 @@ namespace Microsoft.SemanticKernel.Agents.Extensions; /// /// Extension methods for /// -internal static class ChatHistoryExtensions +public static class ChatHistoryExtensions { /// /// Enumeration of chat-history in descending order. diff --git a/dotnet/src/Agents/Abstractions/IChatHistoryHandler.cs b/dotnet/src/Agents/Abstractions/IChatHistoryHandler.cs deleted file mode 100644 index 8b7dab748c81..000000000000 --- a/dotnet/src/Agents/Abstractions/IChatHistoryHandler.cs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. -using System.Collections.Generic; -using System.Threading; -using Microsoft.SemanticKernel.ChatCompletion; - -namespace Microsoft.SemanticKernel.Agents; - -/// -/// Contract for an agent that utilizes a . -/// -public interface IChatHistoryHandler -{ - /// - /// Entry point for calling into an agent from a . - /// - /// The chat history at the point the channel is created. - /// The to monitor for cancellation requests. The default is . - /// Asynchronous enumeration of messages. - IAsyncEnumerable InvokeAsync( - ChatHistory history, - CancellationToken cancellationToken = default); - - /// - /// Entry point for calling into an agent from a for streaming content. - /// - /// The chat history at the point the channel is created. - /// The to monitor for cancellation requests. The default is . - /// Asynchronous enumeration of streaming content. - public abstract IAsyncEnumerable InvokeStreamingAsync( - ChatHistory history, - CancellationToken cancellationToken = default); -} diff --git a/dotnet/src/Agents/Abstractions/KernelAgent.cs b/dotnet/src/Agents/Abstractions/KernelAgent.cs index 061705670a2a..1df425972495 100644 --- a/dotnet/src/Agents/Abstractions/KernelAgent.cs +++ b/dotnet/src/Agents/Abstractions/KernelAgent.cs @@ -17,5 +17,5 @@ public abstract class KernelAgent : Agent /// /// Defaults to empty Kernel, but may be overridden. 
/// - public Kernel Kernel { get; init; } = new Kernel(); + public Kernel Kernel { get; init; } = new(); } diff --git a/dotnet/src/Agents/Core/AgentGroupChat.cs b/dotnet/src/Agents/Core/AgentGroupChat.cs index 928326745b97..bff8f90f34b3 100644 --- a/dotnet/src/Agents/Core/AgentGroupChat.cs +++ b/dotnet/src/Agents/Core/AgentGroupChat.cs @@ -8,7 +8,6 @@ using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Abstractions; using Microsoft.SemanticKernel.Agents.Chat; -using Microsoft.SemanticKernel.ChatCompletion; namespace Microsoft.SemanticKernel.Agents; @@ -53,7 +52,7 @@ public void AddAgent(Agent agent) /// The interactions will proceed according to the and the /// defined via . /// In the absence of an , this method will not invoke any agents. - /// Any agent may be explicitly selected by calling . + /// Any agent may be explicitly selected by calling . /// /// The to monitor for cancellation requests. The default is . /// Asynchronous enumeration of messages. @@ -77,30 +76,11 @@ public override async IAsyncEnumerable InvokeAsync([Enumerat for (int index = 0; index < this.ExecutionSettings.TerminationStrategy.MaximumIterations; index++) { // Identify next agent using strategy - this.Logger.LogAgentGroupChatSelectingAgent(nameof(InvokeAsync), this.ExecutionSettings.SelectionStrategy.GetType()); - - Agent agent; - try - { - agent = await this.ExecutionSettings.SelectionStrategy.NextAsync(this.Agents, this.History, cancellationToken).ConfigureAwait(false); - } - catch (Exception exception) - { - this.Logger.LogAgentGroupChatNoAgentSelected(nameof(InvokeAsync), exception); - throw; - } - - this.Logger.LogAgentGroupChatSelectedAgent(nameof(InvokeAsync), agent.GetType(), agent.Id, this.ExecutionSettings.SelectionStrategy.GetType()); + Agent agent = await this.SelectAgentAsync(cancellationToken).ConfigureAwait(false); // Invoke agent and process messages along with termination - await foreach (var message in base.InvokeAgentAsync(agent, cancellationToken).ConfigureAwait(false)) + await foreach (var message in this.InvokeAsync(agent, cancellationToken).ConfigureAwait(false)) { - if (message.Role == AuthorRole.Assistant) - { - var task = this.ExecutionSettings.TerminationStrategy.ShouldTerminateAsync(agent, this.History, cancellationToken); - this.IsComplete = await task.ConfigureAwait(false); - } - yield return message; } @@ -122,45 +102,23 @@ public override async IAsyncEnumerable InvokeAsync([Enumerat /// /// Specified agent joins the chat. /// > - public IAsyncEnumerable InvokeAsync( - Agent agent, - CancellationToken cancellationToken = default) => - this.InvokeAsync(agent, isJoining: true, cancellationToken); - - /// - /// Process a single interaction between a given an a irregardless of - /// the defined via . Likewise, this does - /// not regard as it only takes a single turn for the specified agent. - /// - /// The agent actively interacting with the chat. - /// Optional flag to control if agent is joining the chat. - /// The to monitor for cancellation requests. The default is . - /// Asynchronous enumeration of messages. 
public async IAsyncEnumerable InvokeAsync( Agent agent, - bool isJoining, [EnumeratorCancellation] CancellationToken cancellationToken = default) { this.EnsureStrategyLoggerAssignment(); this.Logger.LogAgentGroupChatInvokingAgent(nameof(InvokeAsync), agent.GetType(), agent.Id); - if (isJoining) - { - this.AddAgent(agent); - } + this.AddAgent(agent); await foreach (var message in base.InvokeAgentAsync(agent, cancellationToken).ConfigureAwait(false)) { - if (message.Role == AuthorRole.Assistant) - { - var task = this.ExecutionSettings.TerminationStrategy.ShouldTerminateAsync(agent, this.History, cancellationToken); - this.IsComplete = await task.ConfigureAwait(false); - } - yield return message; } + this.IsComplete = await this.ExecutionSettings.TerminationStrategy.ShouldTerminateAsync(agent, this.History, cancellationToken).ConfigureAwait(false); + this.Logger.LogAgentGroupChatYield(nameof(InvokeAsync), this.IsComplete); } @@ -187,4 +145,25 @@ private void EnsureStrategyLoggerAssignment() this.ExecutionSettings.TerminationStrategy.Logger = this.LoggerFactory.CreateLogger(this.ExecutionSettings.TerminationStrategy.GetType()); } } + + private async Task SelectAgentAsync(CancellationToken cancellationToken) + { + this.Logger.LogAgentGroupChatSelectingAgent(nameof(InvokeAsync), this.ExecutionSettings.SelectionStrategy.GetType()); + + Agent agent; + + try + { + agent = await this.ExecutionSettings.SelectionStrategy.NextAsync(this.Agents, this.History, cancellationToken).ConfigureAwait(false); + } + catch (Exception exception) + { + this.Logger.LogAgentGroupChatNoAgentSelected(nameof(InvokeAsync), exception); + throw; + } + + this.Logger.LogAgentGroupChatSelectedAgent(nameof(InvokeAsync), agent.GetType(), agent.Id, this.ExecutionSettings.SelectionStrategy.GetType()); + + return agent; + } } diff --git a/dotnet/src/Agents/Core/Chat/KernelFunctionSelectionStrategy.cs b/dotnet/src/Agents/Core/Chat/KernelFunctionSelectionStrategy.cs index d912ed147eb6..00ea8c1e2965 100644 --- a/dotnet/src/Agents/Core/Chat/KernelFunctionSelectionStrategy.cs +++ b/dotnet/src/Agents/Core/Chat/KernelFunctionSelectionStrategy.cs @@ -47,6 +47,11 @@ public class KernelFunctionSelectionStrategy(KernelFunction function, Kernel ker /// public KernelFunction Function { get; } = function; + /// + /// When set, will use in the event of a failure to select an agent. + /// + public bool UseInitialAgentAsFallback { get; init; } + /// /// The used when invoking . /// @@ -59,7 +64,7 @@ public class KernelFunctionSelectionStrategy(KernelFunction function, Kernel ker public Func ResultParser { get; init; } = (result) => result.GetValue() ?? string.Empty; /// - public sealed override async Task NextAsync(IReadOnlyList agents, IReadOnlyList history, CancellationToken cancellationToken = default) + protected sealed override async Task SelectAgentAsync(IReadOnlyList agents, IReadOnlyList history, CancellationToken cancellationToken = default) { KernelArguments originalArguments = this.Arguments ?? []; KernelArguments arguments = @@ -76,13 +81,17 @@ public sealed override async Task NextAsync(IReadOnlyList agents, this.Logger.LogKernelFunctionSelectionStrategyInvokedFunction(nameof(NextAsync), this.Function.PluginName, this.Function.Name, result.ValueType); string? 
agentName = this.ResultParser.Invoke(result); - if (string.IsNullOrEmpty(agentName)) + if (string.IsNullOrEmpty(agentName) && (!this.UseInitialAgentAsFallback || this.InitialAgent == null)) { throw new KernelException("Agent Failure - Strategy unable to determine next agent."); } - return - agents.FirstOrDefault(a => (a.Name ?? a.Id) == agentName) ?? - throw new KernelException($"Agent Failure - Strategy unable to select next agent: {agentName}"); + Agent? agent = agents.FirstOrDefault(a => (a.Name ?? a.Id) == agentName); + if (agent == null && this.UseInitialAgentAsFallback) + { + agent = this.InitialAgent; + } + + return agent ?? throw new KernelException($"Agent Failure - Strategy unable to select next agent: {agentName}"); } } diff --git a/dotnet/src/Agents/Core/Chat/SelectionStrategy.cs b/dotnet/src/Agents/Core/Chat/SelectionStrategy.cs index 5aa58b99e194..1ba5fb502649 100644 --- a/dotnet/src/Agents/Core/Chat/SelectionStrategy.cs +++ b/dotnet/src/Agents/Core/Chat/SelectionStrategy.cs @@ -12,6 +12,19 @@ namespace Microsoft.SemanticKernel.Agents.Chat; /// public abstract class SelectionStrategy { + /// + /// Flag indicating if an agent has been selected (first time). + /// + protected bool HasSelected { get; private set; } + + /// + /// An optional agent for initial selection. + /// + /// + /// Useful to avoid latency in initial agent selection. + /// + public Agent? InitialAgent { get; set; } + /// /// The associated with the . /// @@ -24,5 +37,29 @@ public abstract class SelectionStrategy /// The chat history. /// The to monitor for cancellation requests. The default is . /// The agent who shall take the next turn. - public abstract Task NextAsync(IReadOnlyList agents, IReadOnlyList history, CancellationToken cancellationToken = default); + public async Task NextAsync(IReadOnlyList agents, IReadOnlyList history, CancellationToken cancellationToken = default) + { + if (agents.Count == 0 && this.InitialAgent == null) + { + throw new KernelException("Agent Failure - No agents present to select."); + } + + Agent agent = + (!this.HasSelected && this.InitialAgent != null) ? + this.InitialAgent : + await this.SelectAgentAsync(agents, history, cancellationToken).ConfigureAwait(false); + + this.HasSelected = true; + + return agent; + } + + /// + /// Determine which agent goes next. + /// + /// The agents participating in chat. + /// The chat history. + /// The to monitor for cancellation requests. The default is . + /// The agent who shall take the next turn. 
+ protected abstract Task SelectAgentAsync(IReadOnlyList agents, IReadOnlyList history, CancellationToken cancellationToken = default); } diff --git a/dotnet/src/Agents/Core/Chat/SequentialSelectionStrategy.cs b/dotnet/src/Agents/Core/Chat/SequentialSelectionStrategy.cs index 878cd7530eed..4707a5724f28 100644 --- a/dotnet/src/Agents/Core/Chat/SequentialSelectionStrategy.cs +++ b/dotnet/src/Agents/Core/Chat/SequentialSelectionStrategy.cs @@ -20,11 +20,15 @@ public sealed class SequentialSelectionStrategy : SelectionStrategy public void Reset() => this._index = 0; /// - public override Task NextAsync(IReadOnlyList agents, IReadOnlyList history, CancellationToken cancellationToken = default) + protected override Task SelectAgentAsync(IReadOnlyList agents, IReadOnlyList history, CancellationToken cancellationToken = default) { - if (agents.Count == 0) + if (this.HasSelected && + this.InitialAgent != null && + agents.Count > 0 && + agents[0] == this.InitialAgent) { - throw new KernelException("Agent Failure - No agents present to select."); + // Avoid selecting first agent twice + IncrementIndex(); } // Set of agents array may not align with previous execution, constrain index to valid range. @@ -33,12 +37,17 @@ public override Task NextAsync(IReadOnlyList agents, IReadOnlyList this._index = 0; } - var agent = agents[this._index]; + Agent agent = agents[this._index]; this.Logger.LogSequentialSelectionStrategySelectedAgent(nameof(NextAsync), this._index, agents.Count, agent.Id); - this._index = (this._index + 1) % agents.Count; + IncrementIndex(); return Task.FromResult(agent); + + void IncrementIndex() + { + this._index = (this._index + 1) % agents.Count; + } } } diff --git a/dotnet/src/Agents/Core/ChatCompletionAgent.cs b/dotnet/src/Agents/Core/ChatCompletionAgent.cs index 1e9ea3d3208e..212c56038484 100644 --- a/dotnet/src/Agents/Core/ChatCompletionAgent.cs +++ b/dotnet/src/Agents/Core/ChatCompletionAgent.cs @@ -4,6 +4,7 @@ using System.Threading; using System.Threading.Tasks; using Microsoft.SemanticKernel.ChatCompletion; +using Microsoft.SemanticKernel.Services; namespace Microsoft.SemanticKernel.Agents; @@ -12,21 +13,21 @@ namespace Microsoft.SemanticKernel.Agents; /// /// /// NOTE: Enable OpenAIPromptExecutionSettings.ToolCallBehavior for agent plugins. -/// () +/// () /// public sealed class ChatCompletionAgent : ChatHistoryKernelAgent { - /// - /// Optional execution settings for the agent. - /// - public PromptExecutionSettings? ExecutionSettings { get; set; } - /// public override async IAsyncEnumerable InvokeAsync( ChatHistory history, + KernelArguments? arguments = null, + Kernel? kernel = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - IChatCompletionService chatCompletionService = this.Kernel.GetRequiredService(); + kernel ??= this.Kernel; + arguments ??= this.Arguments; + + (IChatCompletionService chatCompletionService, PromptExecutionSettings? 
executionSettings) = this.GetChatCompletionService(kernel, arguments); ChatHistory chat = this.SetupAgentChatHistory(history); @@ -37,8 +38,8 @@ public override async IAsyncEnumerable InvokeAsync( IReadOnlyList messages = await chatCompletionService.GetChatMessageContentsAsync( chat, - this.ExecutionSettings, - this.Kernel, + executionSettings, + kernel, cancellationToken).ConfigureAwait(false); this.Logger.LogAgentChatServiceInvokedAgent(nameof(InvokeAsync), this.Id, chatCompletionService.GetType(), messages.Count); @@ -55,7 +56,6 @@ await chatCompletionService.GetChatMessageContentsAsync( foreach (ChatMessageContent message in messages ?? []) { - // TODO: MESSAGE SOURCE - ISSUE #5731 message.AuthorName = this.Name; yield return message; @@ -65,9 +65,14 @@ await chatCompletionService.GetChatMessageContentsAsync( /// public override async IAsyncEnumerable InvokeStreamingAsync( ChatHistory history, + KernelArguments? arguments = null, + Kernel? kernel = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - IChatCompletionService chatCompletionService = this.Kernel.GetRequiredService(); + kernel ??= this.Kernel; + arguments ??= this.Arguments; + + (IChatCompletionService chatCompletionService, PromptExecutionSettings? executionSettings) = this.GetChatCompletionService(kernel, arguments); ChatHistory chat = this.SetupAgentChatHistory(history); @@ -78,15 +83,14 @@ public override async IAsyncEnumerable InvokeStream IAsyncEnumerable messages = chatCompletionService.GetStreamingChatMessageContentsAsync( chat, - this.ExecutionSettings, - this.Kernel, + executionSettings, + kernel, cancellationToken); this.Logger.LogAgentChatServiceInvokedStreamingAgent(nameof(InvokeAsync), this.Id, chatCompletionService.GetType()); await foreach (StreamingChatMessageContent message in messages.ConfigureAwait(false)) { - // TODO: MESSAGE SOURCE - ISSUE #5731 message.AuthorName = this.Name; yield return message; @@ -103,6 +107,19 @@ public override async IAsyncEnumerable InvokeStream } } + private (IChatCompletionService service, PromptExecutionSettings? executionSettings) GetChatCompletionService(Kernel kernel, KernelArguments? arguments) + { + // Need to provide a KernelFunction to the service selector as a container for the execution-settings. + KernelFunction nullPrompt = KernelFunctionFactory.CreateFromPrompt("placeholder", arguments?.ExecutionSettings?.Values); + (IChatCompletionService chatCompletionService, PromptExecutionSettings? executionSettings) = + kernel.ServiceSelector.SelectAIService( + kernel, + nullPrompt, + arguments ?? []); + + return (chatCompletionService, executionSettings); + } + private ChatHistory SetupAgentChatHistory(IReadOnlyList history) { ChatHistory chat = []; diff --git a/dotnet/src/Agents/Abstractions/ChatHistoryChannel.cs b/dotnet/src/Agents/Core/ChatHistoryChannel.cs similarity index 77% rename from dotnet/src/Agents/Abstractions/ChatHistoryChannel.cs rename to dotnet/src/Agents/Core/ChatHistoryChannel.cs index 5dcb6b9b0204..0ff06a39b222 100644 --- a/dotnet/src/Agents/Abstractions/ChatHistoryChannel.cs +++ b/dotnet/src/Agents/Core/ChatHistoryChannel.cs @@ -10,22 +10,25 @@ namespace Microsoft.SemanticKernel.Agents; /// -/// A specialization for that acts upon a . +/// A specialization for that acts upon a . 
/// -public class ChatHistoryChannel : AgentChannel +public sealed class ChatHistoryChannel : AgentChannel { private readonly ChatHistory _history; /// - protected internal sealed override async IAsyncEnumerable<(bool IsVisible, ChatMessageContent Message)> InvokeAsync( + protected override async IAsyncEnumerable<(bool IsVisible, ChatMessageContent Message)> InvokeAsync( Agent agent, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - if (agent is not IChatHistoryHandler historyHandler) + if (agent is not ChatHistoryKernelAgent historyAgent) { throw new KernelException($"Invalid channel binding for agent: {agent.Id} ({agent.GetType().FullName})"); } + // Pre-process history reduction. + await historyAgent.ReduceAsync(this._history, cancellationToken).ConfigureAwait(false); + // Capture the current message count to evaluate history mutation. int messageCount = this._history.Count; HashSet mutatedHistory = []; @@ -34,7 +37,7 @@ public class ChatHistoryChannel : AgentChannel Queue messageQueue = []; ChatMessageContent? yieldMessage = null; - await foreach (ChatMessageContent responseMessage in historyHandler.InvokeAsync(this._history, cancellationToken).ConfigureAwait(false)) + await foreach (ChatMessageContent responseMessage in historyAgent.InvokeAsync(this._history, null, null, cancellationToken).ConfigureAwait(false)) { // Capture all messages that have been included in the mutated the history. for (int messageIndex = messageCount; messageIndex < this._history.Count; messageIndex++) @@ -74,7 +77,7 @@ bool IsMessageVisible(ChatMessageContent message) => } /// - protected internal sealed override Task ReceiveAsync(IEnumerable history, CancellationToken cancellationToken) + protected override Task ReceiveAsync(IEnumerable history, CancellationToken cancellationToken) { this._history.AddRange(history); @@ -82,11 +85,19 @@ protected internal sealed override Task ReceiveAsync(IEnumerable - protected internal sealed override IAsyncEnumerable GetHistoryAsync(CancellationToken cancellationToken) + protected override IAsyncEnumerable GetHistoryAsync(CancellationToken cancellationToken) { return this._history.ToDescendingAsync(); } + /// + protected override Task ResetAsync(CancellationToken cancellationToken = default) + { + this._history.Clear(); + + return Task.CompletedTask; + } + /// /// Initializes a new instance of the class. /// diff --git a/dotnet/src/Agents/Core/ChatHistoryKernelAgent.cs b/dotnet/src/Agents/Core/ChatHistoryKernelAgent.cs new file mode 100644 index 000000000000..b14363c4bb44 --- /dev/null +++ b/dotnet/src/Agents/Core/ChatHistoryKernelAgent.cs @@ -0,0 +1,80 @@ +// Copyright (c) Microsoft. All rights reserved. +using System.Collections.Generic; +using System.Globalization; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.Logging; +using Microsoft.SemanticKernel.Agents.History; +using Microsoft.SemanticKernel.ChatCompletion; + +namespace Microsoft.SemanticKernel.Agents; + +/// +/// A specialization bound to a . +/// +/// +/// NOTE: Enable OpenAIPromptExecutionSettings.ToolCallBehavior for agent plugins. +/// () +/// +public abstract class ChatHistoryKernelAgent : KernelAgent +{ + /// + /// Optional arguments for the agent. + /// + public KernelArguments? Arguments { get; init; } + + /// + public IChatHistoryReducer? HistoryReducer { get; init; } + + /// + public abstract IAsyncEnumerable InvokeAsync( + ChatHistory history, + KernelArguments? arguments = null, + Kernel? 
kernel = null, + CancellationToken cancellationToken = default); + + /// + public abstract IAsyncEnumerable InvokeStreamingAsync( + ChatHistory history, + KernelArguments? arguments = null, + Kernel? kernel = null, + CancellationToken cancellationToken = default); + + /// + /// Reduce the provided history + /// + /// The source history + /// The to monitor for cancellation requests. The default is . + /// + public Task ReduceAsync(ChatHistory history, CancellationToken cancellationToken = default) => + history.ReduceAsync(this.HistoryReducer, cancellationToken); + + /// + protected sealed override IEnumerable GetChannelKeys() + { + yield return typeof(ChatHistoryChannel).FullName!; + + // Agents with different reducers shall not share the same channel. + // Agents with the same or equivalent reducer shall share the same channel. + if (this.HistoryReducer != null) + { + // Explicitly include the reducer type to eliminate the possibility of hash collisions + // with custom implementations of IChatHistoryReducer. + yield return this.HistoryReducer.GetType().FullName!; + + yield return this.HistoryReducer.GetHashCode().ToString(CultureInfo.InvariantCulture); + } + } + + /// + protected sealed override Task CreateChannelAsync(CancellationToken cancellationToken) + { + ChatHistoryChannel channel = + new() + { + Logger = this.LoggerFactory.CreateLogger() + }; + + return Task.FromResult(channel); + } +} diff --git a/dotnet/src/Agents/Core/History/ChatHistoryReducerExtensions.cs b/dotnet/src/Agents/Core/History/ChatHistoryReducerExtensions.cs new file mode 100644 index 000000000000..c884846baafa --- /dev/null +++ b/dotnet/src/Agents/Core/History/ChatHistoryReducerExtensions.cs @@ -0,0 +1,166 @@ +// Copyright (c) Microsoft. All rights reserved. +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.ChatCompletion; + +namespace Microsoft.SemanticKernel.Agents.History; + +/// +/// Discrete operations used when reducing chat history. +/// +/// +/// Allows for improved testability. +/// +internal static class ChatHistoryReducerExtensions +{ + /// + /// Extract a range of messages from the source history. + /// + /// The source history + /// The index of the first message to extract + /// The index of the last message to extract + /// The optional filter to apply to each message + public static IEnumerable Extract(this IReadOnlyList history, int startIndex, int? finalIndex = null, Func? filter = null) + { + int maxIndex = history.Count - 1; + if (startIndex > maxIndex) + { + yield break; + } + + finalIndex ??= maxIndex; + + finalIndex = Math.Min(finalIndex.Value, maxIndex); + + for (int index = startIndex; index <= finalIndex; ++index) + { + if (filter?.Invoke(history[index]) ?? false) + { + continue; + } + + yield return history[index]; + } + } + + /// + /// Identify the index of the first message that is not a summary message, as indicated by + /// the presence of the specified metadata key. + /// + /// The source history + /// The metadata key that identifies a summary message. + public static int LocateSummarizationBoundary(this IReadOnlyList history, string summaryKey) + { + for (int index = 0; index < history.Count; ++index) + { + ChatMessageContent message = history[index]; + + if (!message.Metadata?.ContainsKey(summaryKey) ?? 
true) + { + return index; + } + } + + return history.Count; + } + + /// + /// Identify the index of the first message at or beyond the specified targetCount that + /// does not orphan sensitive content. + /// Specifically: function calls and results shall not be separated since chat-completion requires that + /// a function-call always be followed by a function-result. + /// In addition, the first user message (if present) within the threshold window will be included + /// in order to maintain context with the subsequent assistant responses. + /// + /// The source history + /// The desired message count, should reduction occur. + /// + /// The threshold, beyond targetCount, required to trigger reduction. + /// History is not reduces it the message count is less than targetCount + thresholdCount. + /// + /// + /// Optionally ignore an offset from the start of the history. + /// This is useful when messages have been injected that are not part of the raw dialog + /// (such as summarization). + /// + /// An index that identifies the starting point for a reduced history that does not orphan sensitive content. + public static int LocateSafeReductionIndex(this IReadOnlyList history, int targetCount, int? thresholdCount = null, int offsetCount = 0) + { + // Compute the index of the truncation threshold + int thresholdIndex = history.Count - (thresholdCount ?? 0) - targetCount; + + if (thresholdIndex <= offsetCount) + { + // History is too short to truncate + return 0; + } + + // Compute the index of truncation target + int messageIndex = history.Count - targetCount; + + // Skip function related content + while (messageIndex >= 0) + { + if (!history[messageIndex].Items.Any(i => i is FunctionCallContent || i is FunctionResultContent)) + { + break; + } + + --messageIndex; + } + + // Capture the earliest non-function related message + int targetIndex = messageIndex; + + // Scan for user message within truncation range to maximize chat cohesion + while (messageIndex >= thresholdIndex) + { + // A user message provides a superb truncation point + if (history[messageIndex].Role == AuthorRole.User) + { + return messageIndex; + } + + --messageIndex; + } + + // No user message found, fallback to the earliest non-function related message + return targetIndex; + } + + /// + /// Process history reduction and mutate the provided history. + /// + /// The source history + /// The target reducer + /// The to monitor for cancellation requests. The default is . + /// True if reduction has occurred. + /// + /// Using the existing for a reduction in collection size eliminates the need + /// for re-allocation (of memory). + /// + public static async Task ReduceAsync(this ChatHistory history, IChatHistoryReducer? reducer, CancellationToken cancellationToken) + { + if (reducer == null) + { + return false; + } + + IEnumerable? reducedHistory = await reducer.ReduceAsync(history, cancellationToken).ConfigureAwait(false); + + if (reducedHistory == null) + { + return false; + } + + // Mutate the history in place + ChatMessageContent[] reduced = reducedHistory.ToArray(); + history.Clear(); + history.AddRange(reduced); + + return true; + } +} diff --git a/dotnet/src/Agents/Core/History/ChatHistorySummarizationReducer.cs b/dotnet/src/Agents/Core/History/ChatHistorySummarizationReducer.cs new file mode 100644 index 000000000000..a45bfa57011d --- /dev/null +++ b/dotnet/src/Agents/Core/History/ChatHistorySummarizationReducer.cs @@ -0,0 +1,166 @@ +// Copyright (c) Microsoft. All rights reserved. 
+using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.ChatCompletion; + +namespace Microsoft.SemanticKernel.Agents.History; + +/// +/// Reduce the chat history by summarizing message past the target message count. +/// +/// +/// Summarization will always avoid orphaning function-content as the presence of +/// a function-call _must_ be followed by a function-result. When a threshold count is +/// is provided (recommended), reduction will scan within the threshold window in an attempt to +/// avoid orphaning a user message from an assistant response. +/// +public class ChatHistorySummarizationReducer : IChatHistoryReducer +{ + /// + /// Metadata key to indicate a summary message. + /// + public const string SummaryMetadataKey = "__summary__"; + + /// + /// The default summarization system instructions. + /// + public const string DefaultSummarizationPrompt = + """ + Provide a concise and complete summarizion of the entire dialog that does not exceed 5 sentences + + This summary must always: + - Consider both user and assistant interactions + - Maintain continuity for the purpose of further dialog + - Include details from any existing summary + - Focus on the most significant aspects of the dialog + + This summary must never: + - Critique, correct, interpret, presume, or assume + - Identify faults, mistakes, misunderstanding, or correctness + - Analyze what has not occurred + - Exclude details from any existing summary + """; + + /// + /// System instructions for summarization. Defaults to . + /// + public string SummarizationInstructions { get; init; } = DefaultSummarizationPrompt; + + /// + /// Flag to indicate if an exception should be thrown if summarization fails. + /// + public bool FailOnError { get; init; } = true; + + /// + /// Flag to indicate summarization is maintained in a single message, or if a series of + /// summations are generated over time. + /// + /// + /// Not using a single summary may ultimately result in a chat history that exceeds the token limit. + /// + public bool UseSingleSummary { get; init; } = true; + + /// + public async Task?> ReduceAsync(IReadOnlyList history, CancellationToken cancellationToken = default) + { + // Identify where summary messages end and regular history begins + int insertionPoint = history.LocateSummarizationBoundary(SummaryMetadataKey); + + // First pass to determine the truncation index + int truncationIndex = history.LocateSafeReductionIndex(this._targetCount, this._thresholdCount, insertionPoint); + + IEnumerable? truncatedHistory = null; + + if (truncationIndex > 0) + { + // Second pass to extract history for summarization + IEnumerable summarizedHistory = + history.Extract( + this.UseSingleSummary ? 0 : insertionPoint, + truncationIndex, + (m) => m.Items.Any(i => i is FunctionCallContent || i is FunctionResultContent)); + + try + { + // Summarize + ChatHistory summarizationRequest = [.. 
summarizedHistory, new ChatMessageContent(AuthorRole.System, this.SummarizationInstructions)]; + ChatMessageContent summary = await this._service.GetChatMessageContentAsync(summarizationRequest, cancellationToken: cancellationToken).ConfigureAwait(false); + summary.Metadata = new Dictionary { { SummaryMetadataKey, true } }; + + // Assembly the summarized history + truncatedHistory = AssemblySummarizedHistory(summary); + } + catch + { + if (this.FailOnError) + { + throw; + } + } + } + + return truncatedHistory; + + // Inner function to assemble the summarized history + IEnumerable AssemblySummarizedHistory(ChatMessageContent? summary) + { + if (insertionPoint > 0 && !this.UseSingleSummary) + { + for (int index = 0; index <= insertionPoint - 1; ++index) + { + yield return history[index]; + } + } + + if (summary != null) + { + yield return summary; + } + + for (int index = truncationIndex; index < history.Count; ++index) + { + yield return history[index]; + } + } + } + + /// + /// Initializes a new instance of the class. + /// + /// A instance to be used for summarization. + /// The desired number of target messages after reduction. + /// An optional number of messages beyond the 'targetCount' that must be present in order to trigger reduction/ + /// + /// While the 'thresholdCount' is optional, it is recommended to provided so that reduction is not triggered + /// for every incremental addition to the chat history beyond the 'targetCount'. + /// > + public ChatHistorySummarizationReducer(IChatCompletionService service, int targetCount, int? thresholdCount = null) + { + Verify.NotNull(service, nameof(service)); + Verify.True(targetCount > 0, "Target message count must be greater than zero."); + Verify.True(!thresholdCount.HasValue || thresholdCount > 0, "The reduction threshold length must be greater than zero."); + + this._service = service; + this._targetCount = targetCount; + this._thresholdCount = thresholdCount ?? 0; + } + + /// + public override bool Equals(object? obj) + { + ChatHistorySummarizationReducer? other = obj as ChatHistorySummarizationReducer; + return other != null && + this._thresholdCount == other._thresholdCount && + this._targetCount == other._targetCount; + } + + /// + public override int GetHashCode() => HashCode.Combine(nameof(ChatHistorySummarizationReducer), this._thresholdCount, this._targetCount, this.SummarizationInstructions, this.UseSingleSummary); + + private readonly IChatCompletionService _service; + private readonly int _thresholdCount; + private readonly int _targetCount; +} diff --git a/dotnet/src/Agents/Core/History/ChatHistoryTruncationReducer.cs b/dotnet/src/Agents/Core/History/ChatHistoryTruncationReducer.cs new file mode 100644 index 000000000000..be9ca7868f87 --- /dev/null +++ b/dotnet/src/Agents/Core/History/ChatHistoryTruncationReducer.cs @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft. All rights reserved. +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace Microsoft.SemanticKernel.Agents.History; + +/// +/// Truncate the chat history to the target message count. +/// +/// +/// Truncation will always avoid orphaning function-content as the presence of +/// a function-call _must_ be followed by a function-result. When a threshold count is +/// is provided (recommended), reduction will scan within the threshold window in an attempt to +/// avoid orphaning a user message from an assistant response. 
+/// +public class ChatHistoryTruncationReducer : IChatHistoryReducer +{ + /// + public Task<IEnumerable<ChatMessageContent>?> ReduceAsync(IReadOnlyList<ChatMessageContent> history, CancellationToken cancellationToken = default) + { + // First pass to determine the truncation index + int truncationIndex = history.LocateSafeReductionIndex(this._targetCount, this._thresholdCount); + + IEnumerable<ChatMessageContent>? truncatedHistory = null; + + if (truncationIndex > 0) + { + // Second pass to truncate the history + truncatedHistory = history.Extract(truncationIndex); + } + + return Task.FromResult(truncatedHistory); + } + + /// + /// Initializes a new instance of the class. + /// + /// The desired number of target messages after reduction. + /// An optional number of messages beyond the 'targetCount' that must be present in order to trigger reduction. + /// + /// While the 'thresholdCount' is optional, it is recommended to provide one so that reduction is not triggered + /// for every incremental addition to the chat history beyond the 'targetCount'. + /// + public ChatHistoryTruncationReducer(int targetCount, int? thresholdCount = null) + { + Verify.True(targetCount > 0, "Target message count must be greater than zero."); + Verify.True(!thresholdCount.HasValue || thresholdCount > 0, "The reduction threshold length must be greater than zero."); + + this._targetCount = targetCount; + + this._thresholdCount = thresholdCount ?? 0; + } + + /// + public override bool Equals(object? obj) + { + ChatHistoryTruncationReducer? other = obj as ChatHistoryTruncationReducer; + return other != null && + this._thresholdCount == other._thresholdCount && + this._targetCount == other._targetCount; + } + + /// + public override int GetHashCode() => HashCode.Combine(nameof(ChatHistoryTruncationReducer), this._thresholdCount, this._targetCount); + + private readonly int _thresholdCount; + private readonly int _targetCount; +} diff --git a/dotnet/src/Agents/Core/History/IChatHistoryReducer.cs b/dotnet/src/Agents/Core/History/IChatHistoryReducer.cs new file mode 100644 index 000000000000..884fbcf42bc1 --- /dev/null +++ b/dotnet/src/Agents/Core/History/IChatHistoryReducer.cs @@ -0,0 +1,32 @@ +// Copyright (c) Microsoft. All rights reserved. +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace Microsoft.SemanticKernel.Agents.History; + +/// +/// Defines a contract for reducing chat history. +/// +public interface IChatHistoryReducer +{ + /// + /// Each reducer shall override equality evaluation so that different reducers + /// of the same configuration can be evaluated for equivalency. + /// + bool Equals(object? obj); + + /// + /// Each reducer shall implement custom hash-code generation so that different reducers + /// of the same configuration can be evaluated for equivalency. + /// + int GetHashCode(); + + /// + /// Optionally reduces the chat history. + /// + /// The source history (which may have been previously reduced) + /// The to monitor for cancellation requests. The default is .
+ /// The reduced history, or 'null' if no reduction has occurred + Task<IEnumerable<ChatMessageContent>?> ReduceAsync(IReadOnlyList<ChatMessageContent> history, CancellationToken cancellationToken = default); +} diff --git a/dotnet/src/Agents/OpenAI/AssistantThreadActions.cs b/dotnet/src/Agents/OpenAI/AssistantThreadActions.cs index cdb5ae3faea7..cfc7a905cfc7 100644 --- a/dotnet/src/Agents/OpenAI/AssistantThreadActions.cs +++ b/dotnet/src/Agents/OpenAI/AssistantThreadActions.cs @@ -109,14 +109,21 @@ public static async IAsyncEnumerable GetMessagesAsync(Assist /// The thread identifier /// Config to utilize when polling for run state. /// The logger to utilize (might be agent or channel scoped) + /// The plugins and other state. + /// Optional arguments to pass to the agent's invocation, including any . /// The to monitor for cancellation requests. The default is . /// Asynchronous enumeration of messages. + /// + /// The `arguments` parameter is not currently used by the agent, but is provided for future extensibility. + /// public static async IAsyncEnumerable<(bool IsVisible, ChatMessageContent Message)> InvokeAsync( OpenAIAssistantAgent agent, AssistantsClient client, string threadId, OpenAIAssistantConfiguration.PollingConfiguration pollingConfiguration, ILogger logger, + Kernel kernel, + KernelArguments? arguments, [EnumeratorCancellation] CancellationToken cancellationToken) { if (agent.IsDeleted) @@ -124,7 +131,7 @@ public static async IAsyncEnumerable GetMessagesAsync(Assist throw new KernelException($"Agent Failure - {nameof(OpenAIAssistantAgent)} agent is deleted: {agent.Id}."); } - ToolDefinition[]? tools = [.. agent.Tools, .. agent.Kernel.Plugins.SelectMany(p => p.Select(f => f.ToToolDefinition(p.Name, FunctionDelimiter)))]; + ToolDefinition[]? tools = [.. agent.Tools, .. kernel.Plugins.SelectMany(p => p.Select(f => f.ToToolDefinition(p.Name, FunctionDelimiter)))]; logger.LogOpenAIAssistantCreatingRun(nameof(InvokeAsync), threadId); @@ -408,6 +415,7 @@ private static ChatMessageContent GenerateCodeInterpreterContent(string agentNam ]) { AuthorName = agentName, + Metadata = new Dictionary<string, object?> { { OpenAIAssistantAgent.CodeInterpreterMetadataKey, true } }, }; } diff --git a/dotnet/src/Agents/OpenAI/OpenAIAssistantAgent.cs b/dotnet/src/Agents/OpenAI/OpenAIAssistantAgent.cs index 8e8797fa8885..6746c6c50d9a 100644 --- a/dotnet/src/Agents/OpenAI/OpenAIAssistantAgent.cs +++ b/dotnet/src/Agents/OpenAI/OpenAIAssistantAgent.cs @@ -18,12 +18,25 @@ namespace Microsoft.SemanticKernel.Agents.OpenAI; /// /// A specialization based on Open AI Assistant / GPT. /// -public sealed partial class OpenAIAssistantAgent : KernelAgent +public sealed class OpenAIAssistantAgent : KernelAgent { + /// + /// Metadata key that identifies code-interpreter content. + /// + public const string CodeInterpreterMetadataKey = "code"; + private readonly Assistant _assistant; private readonly AssistantsClient _client; private readonly OpenAIAssistantConfiguration _config; + /// + /// Optional arguments for the agent. + /// + /// + /// This property is not currently used by the agent, but is provided for future extensibility. + /// + public KernelArguments? Arguments { get; init; } + /// /// A list of previously uploaded file IDs to attach to the assistant. /// @@ -238,15 +251,25 @@ public async Task DeleteAsync(CancellationToken cancellationToken = defaul /// Invoke the assistant on the specified thread. /// /// The thread identifier + /// Optional arguments to pass to the agent's invocation, including any .
+ /// The containing services, plugins, and other state for use by the agent. /// The to monitor for cancellation requests. The default is . /// Asynchronous enumeration of messages. + /// + /// The `arguments` parameter is not currently used by the agent, but is provided for future extensibility. + /// public async IAsyncEnumerable InvokeAsync( string threadId, + KernelArguments? arguments = null, + Kernel? kernel = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) { this.ThrowIfDeleted(); - await foreach ((bool isVisible, ChatMessageContent message) in AssistantThreadActions.InvokeAsync(this, this._client, threadId, this._config.Polling, this.Logger, cancellationToken).ConfigureAwait(false)) + kernel ??= this.Kernel; + arguments ??= this.Arguments; + + await foreach ((bool isVisible, ChatMessageContent message) in AssistantThreadActions.InvokeAsync(this, this._client, threadId, this._config.Polling, this.Logger, kernel, arguments, cancellationToken).ConfigureAwait(false)) { if (isVisible) { diff --git a/dotnet/src/Agents/OpenAI/OpenAIAssistantChannel.cs b/dotnet/src/Agents/OpenAI/OpenAIAssistantChannel.cs index 48fdefa65fe9..5b4600e64542 100644 --- a/dotnet/src/Agents/OpenAI/OpenAIAssistantChannel.cs +++ b/dotnet/src/Agents/OpenAI/OpenAIAssistantChannel.cs @@ -31,7 +31,7 @@ protected override async Task ReceiveAsync(IEnumerable histo { agent.ThrowIfDeleted(); - return AssistantThreadActions.InvokeAsync(agent, this._client, this._threadId, pollingConfiguration, this.Logger, cancellationToken); + return AssistantThreadActions.InvokeAsync(agent, this._client, this._threadId, pollingConfiguration, this.Logger, agent.Kernel, agent.Arguments, cancellationToken); } /// @@ -39,4 +39,8 @@ protected override IAsyncEnumerable GetHistoryAsync(Cancella { return AssistantThreadActions.GetMessagesAsync(this._client, this._threadId, cancellationToken); } + + /// + protected override Task ResetAsync(CancellationToken cancellationToken = default) => + this._client.DeleteThreadAsync(this._threadId, cancellationToken); } diff --git a/dotnet/src/Agents/UnitTests/AgentChannelTests.cs b/dotnet/src/Agents/UnitTests/AgentChannelTests.cs index 2a680614a54f..50aa328ebc67 100644 --- a/dotnet/src/Agents/UnitTests/AgentChannelTests.cs +++ b/dotnet/src/Agents/UnitTests/AgentChannelTests.cs @@ -61,6 +61,11 @@ protected internal override Task ReceiveAsync(IEnumerable hi { throw new NotImplementedException(); } + + protected internal override Task ResetAsync(CancellationToken cancellationToken = default) + { + throw new NotImplementedException(); + } } private sealed class NextAgent : TestAgent; diff --git a/dotnet/src/Agents/UnitTests/AgentChatTests.cs b/dotnet/src/Agents/UnitTests/AgentChatTests.cs index 89ff7f02cff2..fc295e2b5550 100644 --- a/dotnet/src/Agents/UnitTests/AgentChatTests.cs +++ b/dotnet/src/Agents/UnitTests/AgentChatTests.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. 
using System.Collections.Generic; using System.Linq; -using System.Runtime.CompilerServices; using System.Threading; using System.Threading.Tasks; using Microsoft.SemanticKernel; @@ -54,6 +53,14 @@ public async Task VerifyAgentChatLifecycleAsync() // Verify final history await this.VerifyHistoryAsync(expectedCount: 5, chat.GetChatMessagesAsync()); // Primary history await this.VerifyHistoryAsync(expectedCount: 5, chat.GetChatMessagesAsync(chat.Agent)); // Agent history + + // Reset verify + await chat.ResetAsync(); + Assert.Equal(2, chat.Agent.InvokeCount); + + // Verify final history + await this.VerifyHistoryAsync(expectedCount: 0, chat.GetChatMessagesAsync()); // Primary history + await this.VerifyHistoryAsync(expectedCount: 0, chat.GetChatMessagesAsync(chat.Agent)); // Agent history } /// @@ -110,50 +117,15 @@ async Task SynchronizedInvokeAsync() private async Task VerifyHistoryAsync(int expectedCount, IAsyncEnumerable history) { - if (expectedCount == 0) - { - Assert.Empty(history); - } - else - { - Assert.NotEmpty(history); - Assert.Equal(expectedCount, await history.CountAsync()); - } + Assert.Equal(expectedCount, await history.CountAsync()); } private sealed class TestChat : AgentChat { - public TestAgent Agent { get; } = new TestAgent(); + public MockAgent Agent { get; } = new() { Response = [new(AuthorRole.Assistant, "sup")] }; public override IAsyncEnumerable InvokeAsync( CancellationToken cancellationToken = default) => this.InvokeAgentAsync(this.Agent, cancellationToken); } - - private sealed class TestAgent : ChatHistoryKernelAgent - { - public int InvokeCount { get; private set; } - - public override async IAsyncEnumerable InvokeAsync( - ChatHistory history, - [EnumeratorCancellation] CancellationToken cancellationToken = default) - { - await Task.Delay(0, cancellationToken); - - this.InvokeCount++; - - yield return new ChatMessageContent(AuthorRole.Assistant, "sup"); - } - - public override IAsyncEnumerable InvokeStreamingAsync( - ChatHistory history, - CancellationToken cancellationToken = default) - { - this.InvokeCount++; - - StreamingChatMessageContent[] contents = [new(AuthorRole.Assistant, "sup")]; - - return contents.ToAsyncEnumerable(); - } - } } diff --git a/dotnet/src/Agents/UnitTests/AggregatorAgentTests.cs b/dotnet/src/Agents/UnitTests/AggregatorAgentTests.cs index c4a974cbadc9..1a607ea7e6c7 100644 --- a/dotnet/src/Agents/UnitTests/AggregatorAgentTests.cs +++ b/dotnet/src/Agents/UnitTests/AggregatorAgentTests.cs @@ -1,11 +1,9 @@ // Copyright (c) Microsoft. All rights reserved. 
using System.Linq; -using System.Threading; using System.Threading.Tasks; using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.Agents; using Microsoft.SemanticKernel.ChatCompletion; -using Moq; using Xunit; namespace SemanticKernel.Agents.UnitTests; @@ -23,9 +21,9 @@ public class AggregatorAgentTests [InlineData(AggregatorMode.Flat, 2)] public async Task VerifyAggregatorAgentUsageAsync(AggregatorMode mode, int modeOffset) { - Agent agent1 = CreateMockAgent().Object; - Agent agent2 = CreateMockAgent().Object; - Agent agent3 = CreateMockAgent().Object; + Agent agent1 = CreateMockAgent(); + Agent agent2 = CreateMockAgent(); + Agent agent3 = CreateMockAgent(); AgentGroupChat groupChat = new(agent1, agent2, agent3) @@ -81,13 +79,5 @@ public async Task VerifyAggregatorAgentUsageAsync(AggregatorMode mode, int modeO Assert.Equal(5, messages.Length); // Total messages on inner chat once synchronized (agent equivalent) } - private static Mock CreateMockAgent() - { - Mock agent = new(); - - ChatMessageContent[] messages = [new ChatMessageContent(AuthorRole.Assistant, "test agent")]; - agent.Setup(a => a.InvokeAsync(It.IsAny(), It.IsAny())).Returns(() => messages.ToAsyncEnumerable()); - - return agent; - } + private static MockAgent CreateMockAgent() => new() { Response = [new(AuthorRole.Assistant, "test")] }; } diff --git a/dotnet/src/Agents/UnitTests/Core/AgentGroupChatTests.cs b/dotnet/src/Agents/UnitTests/Core/AgentGroupChatTests.cs index 921e0acce016..7c3267e3ad73 100644 --- a/dotnet/src/Agents/UnitTests/Core/AgentGroupChatTests.cs +++ b/dotnet/src/Agents/UnitTests/Core/AgentGroupChatTests.cs @@ -8,7 +8,6 @@ using Microsoft.SemanticKernel.Agents; using Microsoft.SemanticKernel.Agents.Chat; using Microsoft.SemanticKernel.ChatCompletion; -using Moq; using Xunit; namespace SemanticKernel.Agents.UnitTests.Core; @@ -39,10 +38,10 @@ public void VerifyGroupAgentChatDefaultState() [Fact] public async Task VerifyGroupAgentChatAgentMembershipAsync() { - Agent agent1 = CreateMockAgent().Object; - Agent agent2 = CreateMockAgent().Object; - Agent agent3 = CreateMockAgent().Object; - Agent agent4 = CreateMockAgent().Object; + Agent agent1 = CreateMockAgent(); + Agent agent2 = CreateMockAgent(); + Agent agent3 = CreateMockAgent(); + Agent agent4 = CreateMockAgent(); AgentGroupChat chat = new(agent1, agent2); Assert.Equal(2, chat.Agents.Count); @@ -50,10 +49,7 @@ public async Task VerifyGroupAgentChatAgentMembershipAsync() chat.AddAgent(agent3); Assert.Equal(3, chat.Agents.Count); - var messages = await chat.InvokeAsync(agent4, isJoining: false).ToArrayAsync(); - Assert.Equal(3, chat.Agents.Count); - - messages = await chat.InvokeAsync(agent4).ToArrayAsync(); + ChatMessageContent[] messages = await chat.InvokeAsync(agent4).ToArrayAsync(); Assert.Equal(4, chat.Agents.Count); } @@ -63,9 +59,9 @@ public async Task VerifyGroupAgentChatAgentMembershipAsync() [Fact] public async Task VerifyGroupAgentChatMultiTurnAsync() { - Agent agent1 = CreateMockAgent().Object; - Agent agent2 = CreateMockAgent().Object; - Agent agent3 = CreateMockAgent().Object; + Agent agent1 = CreateMockAgent(); + Agent agent2 = CreateMockAgent(); + Agent agent3 = CreateMockAgent(); AgentGroupChat chat = new(agent1, agent2, agent3) @@ -162,7 +158,7 @@ public async Task VerifyGroupAgentChatMultiTurnTerminationAsync() [Fact] public async Task VerifyGroupAgentChatDiscreteTerminationAsync() { - Agent agent1 = CreateMockAgent().Object; + Agent agent1 = CreateMockAgent(); AgentGroupChat chat = new() @@ -186,22 +182,14 @@ public async Task 
VerifyGroupAgentChatDiscreteTerminationAsync() private static AgentGroupChat Create3AgentChat() { - Agent agent1 = CreateMockAgent().Object; - Agent agent2 = CreateMockAgent().Object; - Agent agent3 = CreateMockAgent().Object; + Agent agent1 = CreateMockAgent(); + Agent agent2 = CreateMockAgent(); + Agent agent3 = CreateMockAgent(); return new(agent1, agent2, agent3); } - private static Mock CreateMockAgent() - { - Mock agent = new(); - - ChatMessageContent[] messages = [new ChatMessageContent(AuthorRole.Assistant, "test")]; - agent.Setup(a => a.InvokeAsync(It.IsAny(), It.IsAny())).Returns(() => messages.ToAsyncEnumerable()); - - return agent; - } + private static MockAgent CreateMockAgent() => new() { Response = [new(AuthorRole.Assistant, "test")] }; private sealed class TestTerminationStrategy(bool shouldTerminate) : TerminationStrategy { @@ -213,7 +201,7 @@ protected override Task ShouldAgentTerminateAsync(Agent agent, IReadOnlyLi private sealed class FailedSelectionStrategy : SelectionStrategy { - public override Task NextAsync(IReadOnlyList agents, IReadOnlyList history, CancellationToken cancellationToken = default) + protected override Task SelectAgentAsync(IReadOnlyList agents, IReadOnlyList history, CancellationToken cancellationToken = default) { throw new InvalidOperationException(); } diff --git a/dotnet/src/Agents/UnitTests/Core/Chat/KernelFunctionSelectionStrategyTests.cs b/dotnet/src/Agents/UnitTests/Core/Chat/KernelFunctionSelectionStrategyTests.cs index af045e67873d..275ef0e0bf5e 100644 --- a/dotnet/src/Agents/UnitTests/Core/Chat/KernelFunctionSelectionStrategyTests.cs +++ b/dotnet/src/Agents/UnitTests/Core/Chat/KernelFunctionSelectionStrategyTests.cs @@ -27,41 +27,125 @@ public async Task VerifyKernelFunctionSelectionStrategyDefaultsAsync() KernelFunctionSelectionStrategy strategy = new(plugin.Single(), new()) { + AgentsVariableName = "_a_", + HistoryVariableName = "_h_", ResultParser = (result) => result.GetValue() ?? string.Empty, }; Assert.Null(strategy.Arguments); Assert.NotNull(strategy.Kernel); Assert.NotNull(strategy.ResultParser); + Assert.Equal("_a_", strategy.AgentsVariableName); + Assert.Equal("_h_", strategy.HistoryVariableName); Agent nextAgent = await strategy.NextAsync([mockAgent.Object], []); Assert.NotNull(nextAgent); Assert.Equal(mockAgent.Object, nextAgent); } + /// + /// Verify default state and behavior + /// + [Fact] + public async Task VerifyKernelFunctionSelectionStrategyInitialAgentAsync() + { + Mock mockAgent1 = new(); + Mock mockAgent2 = new(); + KernelPlugin plugin = KernelPluginFactory.CreateFromObject(new TestPlugin(mockAgent2.Object.Id)); + + KernelFunctionSelectionStrategy strategy = + new(plugin.Single(), new()) + { + InitialAgent = mockAgent1.Object, + ResultParser = (result) => result.GetValue() ?? string.Empty, + }; + + Agent nextAgent = await strategy.NextAsync([mockAgent2.Object], []); + + Assert.NotNull(nextAgent); + Assert.Equal(mockAgent1.Object, nextAgent); + } /// /// Verify strategy mismatch. 
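(Editorial aside: the selection-strategy tests that follow exercise the new InitialAgent and UseInitialAgentAsFallback members; a hedged configuration sketch, where the function, kernel, and agent instances are assumed to exist.)

```csharp
// InitialAgent fixes the first speaker before the selection function is
// consulted; UseInitialAgentAsFallback reuses that agent whenever the
// function result does not resolve to a known agent.
KernelFunctionSelectionStrategy strategy =
    new(selectionFunction, kernel) // assumed KernelFunction and Kernel
    {
        InitialAgent = plannerAgent, // assumed Agent instance
        UseInitialAgentAsFallback = true,
        ResultParser = (result) => result.GetValue<string>() ?? string.Empty,
    };
```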
/// [Fact] - public async Task VerifyKernelFunctionSelectionStrategyParsingAsync() + public async Task VerifyKernelFunctionSelectionStrategyNullAgentAsync() { Mock mockAgent = new(); - KernelPlugin plugin = KernelPluginFactory.CreateFromObject(new TestPlugin(string.Empty)); + KernelPlugin plugin = KernelPluginFactory.CreateFromObject(new TestPlugin(null)); KernelFunctionSelectionStrategy strategy = new(plugin.Single(), new()) { Arguments = new(new OpenAIPromptExecutionSettings()) { { "key", mockAgent.Object.Name } }, - ResultParser = (result) => result.GetValue() ?? string.Empty, }; await Assert.ThrowsAsync(() => strategy.NextAsync([mockAgent.Object], [])); + + strategy = + new(plugin.Single(), new()) + { + Arguments = new(new OpenAIPromptExecutionSettings()) { { "key", mockAgent.Object.Name } }, + UseInitialAgentAsFallback = true + }; + + await Assert.ThrowsAsync(() => strategy.NextAsync([mockAgent.Object], [])); + } + + /// + /// Verify strategy mismatch. + /// + [Fact] + public async Task VerifyKernelFunctionSelectionStrategyBadAgentFallbackWithNoInitialAgentAsync() + { + Mock mockAgent = new(); + KernelPlugin plugin = KernelPluginFactory.CreateFromObject(new TestPlugin("bad")); + + KernelFunctionSelectionStrategy strategy = + new(plugin.Single(), new()) + { + Arguments = new(new OpenAIPromptExecutionSettings()) { { "key", mockAgent.Object.Name } }, + }; + + await Assert.ThrowsAsync(() => strategy.NextAsync([mockAgent.Object], [])); + + strategy = + new(plugin.Single(), new()) + { + Arguments = new(new OpenAIPromptExecutionSettings()) { { "key", mockAgent.Object.Name } }, + UseInitialAgentAsFallback = true + }; + + await Assert.ThrowsAsync(() => strategy.NextAsync([mockAgent.Object], [])); + } + + /// + /// Verify strategy mismatch. + /// + [Fact] + public async Task VerifyKernelFunctionSelectionStrategyBadAgentFallbackAsync() + { + Mock mockAgent = new(); + KernelPlugin plugin = KernelPluginFactory.CreateFromObject(new TestPlugin("bad")); + + KernelFunctionSelectionStrategy strategy = + new(plugin.Single(), new()) + { + Arguments = new(new OpenAIPromptExecutionSettings()) { { "key", mockAgent.Object.Name } }, + InitialAgent = mockAgent.Object, + UseInitialAgentAsFallback = true + }; + + Agent nextAgent = await strategy.NextAsync([mockAgent.Object], []); + + Assert.NotNull(nextAgent); + Assert.Equal(mockAgent.Object, nextAgent); } - private sealed class TestPlugin(string agentName) + private sealed class TestPlugin(string? agentName) { [KernelFunction] - public string GetValue() => agentName; + public string? 
GetValue() => agentName; } } diff --git a/dotnet/src/Agents/UnitTests/Core/Chat/SequentialSelectionStrategyTests.cs b/dotnet/src/Agents/UnitTests/Core/Chat/SequentialSelectionStrategyTests.cs index 04339a8309e4..bb8fb4665b36 100644 --- a/dotnet/src/Agents/UnitTests/Core/Chat/SequentialSelectionStrategyTests.cs +++ b/dotnet/src/Agents/UnitTests/Core/Chat/SequentialSelectionStrategyTests.cs @@ -25,25 +25,45 @@ public async Task VerifySequentialSelectionStrategyTurnsAsync() Agent[] agents = [agent1.Object, agent2.Object]; SequentialSelectionStrategy strategy = new(); - await VerifyNextAgent(agent1.Object); - await VerifyNextAgent(agent2.Object); - await VerifyNextAgent(agent1.Object); - await VerifyNextAgent(agent2.Object); - await VerifyNextAgent(agent1.Object); + await VerifyNextAgentAsync(agent1.Object, agents, strategy); + await VerifyNextAgentAsync(agent2.Object, agents, strategy); + await VerifyNextAgentAsync(agent1.Object, agents, strategy); + await VerifyNextAgentAsync(agent2.Object, agents, strategy); + await VerifyNextAgentAsync(agent1.Object, agents, strategy); strategy.Reset(); - await VerifyNextAgent(agent1.Object); + await VerifyNextAgentAsync(agent1.Object, agents, strategy); // Verify index does not exceed current bounds. agents = [agent1.Object]; - await VerifyNextAgent(agent1.Object); - - async Task VerifyNextAgent(Agent agent1) - { - Agent? nextAgent = await strategy.NextAsync(agents, []); - Assert.NotNull(nextAgent); - Assert.Equal(agent1.Id, nextAgent.Id); - } + await VerifyNextAgentAsync(agent1.Object, agents, strategy); + } + + /// + /// Verify provides agents in expected order. + /// + [Fact] + public async Task VerifySequentialSelectionStrategyInitialAgentAsync() + { + Mock agent1 = new(); + Mock agent2 = new(); + + Agent[] agents = [agent1.Object, agent2.Object]; + SequentialSelectionStrategy strategy = + new() + { + InitialAgent = agent2.Object + }; + + await VerifyNextAgentAsync(agent2.Object, agents, strategy); + await VerifyNextAgentAsync(agent1.Object, agents, strategy); + } + + private static async Task VerifyNextAgentAsync(Agent expectedAgent, Agent[] agents, SequentialSelectionStrategy strategy) + { + Agent? 
nextAgent = await strategy.NextAsync(agents, []); + Assert.NotNull(nextAgent); + Assert.Equal(expectedAgent.Id, nextAgent.Id); } /// diff --git a/dotnet/src/Agents/UnitTests/Core/ChatCompletionAgentTests.cs b/dotnet/src/Agents/UnitTests/Core/ChatCompletionAgentTests.cs index ae7657c8189c..c8a1c0578613 100644 --- a/dotnet/src/Agents/UnitTests/Core/ChatCompletionAgentTests.cs +++ b/dotnet/src/Agents/UnitTests/Core/ChatCompletionAgentTests.cs @@ -34,7 +34,7 @@ public void VerifyChatCompletionAgentDefinition() Assert.Equal("test instructions", agent.Instructions); Assert.Equal("test description", agent.Description); Assert.Equal("test name", agent.Name); - Assert.Null(agent.ExecutionSettings); + Assert.Null(agent.Arguments); } /// @@ -56,7 +56,7 @@ public async Task VerifyChatCompletionAgentInvocationAsync() { Instructions = "test instructions", Kernel = CreateKernel(mockService.Object), - ExecutionSettings = new(), + Arguments = [], }; var result = await agent.InvokeAsync([]).ToArrayAsync(); @@ -98,7 +98,7 @@ public async Task VerifyChatCompletionAgentStreamingAsync() { Instructions = "test instructions", Kernel = CreateKernel(mockService.Object), - ExecutionSettings = new(), + Arguments = [], }; var result = await agent.InvokeStreamingAsync([]).ToArrayAsync(); diff --git a/dotnet/src/Agents/UnitTests/ChatHistoryChannelTests.cs b/dotnet/src/Agents/UnitTests/Core/ChatHistoryChannelTests.cs similarity index 91% rename from dotnet/src/Agents/UnitTests/ChatHistoryChannelTests.cs rename to dotnet/src/Agents/UnitTests/Core/ChatHistoryChannelTests.cs index 7ef624c61ab9..6732da6628e8 100644 --- a/dotnet/src/Agents/UnitTests/ChatHistoryChannelTests.cs +++ b/dotnet/src/Agents/UnitTests/Core/ChatHistoryChannelTests.cs @@ -8,7 +8,7 @@ using Microsoft.SemanticKernel.Agents; using Xunit; -namespace SemanticKernel.Agents.UnitTests; +namespace SemanticKernel.Agents.UnitTests.Core; /// /// Unit testing of . @@ -17,7 +17,7 @@ public class ChatHistoryChannelTests { /// /// Verify a throws if passed an agent that - /// does not implement . + /// does not implement . /// [Fact] public async Task VerifyAgentWithoutIChatHistoryHandlerAsync() diff --git a/dotnet/src/Agents/UnitTests/Core/History/ChatHistoryReducerExtensionsTests.cs b/dotnet/src/Agents/UnitTests/Core/History/ChatHistoryReducerExtensionsTests.cs new file mode 100644 index 000000000000..a75533474147 --- /dev/null +++ b/dotnet/src/Agents/UnitTests/Core/History/ChatHistoryReducerExtensionsTests.cs @@ -0,0 +1,228 @@ +// Copyright (c) Microsoft. All rights reserved. +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Agents.History; +using Microsoft.SemanticKernel.ChatCompletion; +using Moq; +using Xunit; + +namespace SemanticKernel.Agents.UnitTests.Core.History; + +/// +/// Unit testing of . +/// +public class ChatHistoryReducerExtensionsTests +{ + /// + /// Verify the extraction of a set of messages from an input set. + /// + [Theory] + [InlineData(100, 0, 1)] + [InlineData(100, 0, 9)] + [InlineData(100, 0, 99)] + [InlineData(100, 80)] + [InlineData(100, 80, 81)] + [InlineData(100, 0)] + [InlineData(100, int.MaxValue, null, 0)] + [InlineData(100, 0, int.MaxValue, 100)] + public void VerifyChatHistoryExtraction(int messageCount, int startIndex, int? endIndex = null, int? expectedCount = null) + { + ChatHistory history = [.. 
MockHistoryGenerator.CreateSimpleHistory(messageCount)]; + + ChatMessageContent[] extractedHistory = history.Extract(startIndex, endIndex).ToArray(); + + int finalIndex = endIndex ?? messageCount - 1; + finalIndex = Math.Min(finalIndex, messageCount - 1); + + expectedCount ??= finalIndex - startIndex + 1; + + Assert.Equal(expectedCount, extractedHistory.Length); + + if (extractedHistory.Length > 0) + { + Assert.Contains($"#{startIndex}", extractedHistory[0].Content); + Assert.Contains($"#{finalIndex}", extractedHistory[^1].Content); + } + } + + /// + /// Verify identifying the first non-summary message index. + /// + [Theory] + [InlineData(0, 100)] + [InlineData(1, 100)] + [InlineData(100, 10)] + [InlineData(100, 0)] + public void VerifyGetFinalSummaryIndex(int summaryCount, int regularCount) + { + ChatHistory summaries = [.. MockHistoryGenerator.CreateSimpleHistory(summaryCount)]; + foreach (ChatMessageContent summary in summaries) + { + summary.Metadata = new Dictionary() { { "summary", true } }; + } + + ChatHistory history = [.. summaries, .. MockHistoryGenerator.CreateSimpleHistory(regularCount)]; + + int finalSummaryIndex = history.LocateSummarizationBoundary("summary"); + + Assert.Equal(summaryCount, finalSummaryIndex); + } + + /// + /// Verify a ChatHistory instance is not reduced. + /// + [Fact] + public async Task VerifyChatHistoryNotReducedAsync() + { + ChatHistory history = []; + + bool isReduced = await history.ReduceAsync(null, default); + + Assert.False(isReduced); + Assert.Empty(history); + + Mock mockReducer = new(); + mockReducer.Setup(r => r.ReduceAsync(It.IsAny>(), default)).ReturnsAsync((IEnumerable?)null); + isReduced = await history.ReduceAsync(mockReducer.Object, default); + + Assert.False(isReduced); + Assert.Empty(history); + } + + /// + /// Verify a ChatHistory instance is reduced. + /// + [Fact] + public async Task VerifyChatHistoryReducedAsync() + { + Mock mockReducer = new(); + mockReducer.Setup(r => r.ReduceAsync(It.IsAny>(), default)).ReturnsAsync((IEnumerable?)[]); + + ChatHistory history = [.. MockHistoryGenerator.CreateSimpleHistory(10)]; + + bool isReduced = await history.ReduceAsync(mockReducer.Object, default); + + Assert.True(isReduced); + Assert.Empty(history); + } + + /// + /// Verify starting index (0) is identified when message count does not exceed the limit. + /// + [Theory] + [InlineData(0, 1)] + [InlineData(1, 1)] + [InlineData(1, 2)] + [InlineData(1, int.MaxValue)] + [InlineData(5, 1, 5)] + [InlineData(5, 4, 2)] + [InlineData(5, 5, 1)] + [InlineData(900, 500, 400)] + [InlineData(900, 500, int.MaxValue)] + public void VerifyLocateSafeReductionIndexNone(int messageCount, int targetCount, int? thresholdCount = null) + { + // Shape of history doesn't matter since reduction is not expected + ChatHistory sourceHistory = [.. MockHistoryGenerator.CreateHistoryWithUserInput(messageCount)]; + + int reductionIndex = sourceHistory.LocateSafeReductionIndex(targetCount, thresholdCount); + + Assert.Equal(0, reductionIndex); + } + + /// + /// Verify the expected index is identified when message count exceeds the limit. + /// + [Theory] + [InlineData(2, 1)] + [InlineData(3, 2)] + [InlineData(3, 1, 1)] + [InlineData(6, 1, 4)] + [InlineData(6, 4, 1)] + [InlineData(6, 5)] + [InlineData(1000, 500, 400)] + [InlineData(1000, 500, 499)] + public void VerifyLocateSafeReductionIndexFound(int messageCount, int targetCount, int? thresholdCount = null) + { + // Generate history with only assistant messages + ChatHistory sourceHistory = [..
MockHistoryGenerator.CreateSimpleHistory(messageCount)]; + + int reductionIndex = sourceHistory.LocateSafeReductionIndex(targetCount, thresholdCount); + + Assert.True(reductionIndex > 0); + Assert.Equal(targetCount, messageCount - reductionIndex); + } + + /// + /// Verify the expected index is identified when message count exceeds the limit. + /// History contains alternating user and assistant messages. + /// + [Theory] + [InlineData(2, 1)] + [InlineData(3, 2)] + [InlineData(3, 1, 1)] + [InlineData(6, 1, 4)] + [InlineData(6, 4, 1)] + [InlineData(6, 5)] + [InlineData(1000, 500, 400)] + [InlineData(1000, 500, 499)] + public void VerifyLocateSafeReductionIndexFoundWithUser(int messageCount, int targetCount, int? thresholdCount = null) + { + // Generate history with alternating user and assistant messages + ChatHistory sourceHistory = [.. MockHistoryGenerator.CreateHistoryWithUserInput(messageCount)]; + + int reductionIndex = sourceHistory.LocateSafeReductionIndex(targetCount, thresholdCount); + + Assert.True(reductionIndex > 0); + + // The reduction length should align with a user message, if threshold is specified + bool hasThreshold = thresholdCount > 0; + int expectedCount = targetCount + (hasThreshold && sourceHistory[^targetCount].Role != AuthorRole.User ? 1 : 0); + + Assert.Equal(expectedCount, messageCount - reductionIndex); + } + + /// + /// Verify the expected index is identified when message count exceeds the limit. + /// History contains alternating user and assistant messages along with function + /// related content. + /// + [Theory] + [InlineData(4)] + [InlineData(4, 3)] + [InlineData(5)] + [InlineData(5, 8)] + [InlineData(6)] + [InlineData(6, 7)] + [InlineData(7)] + [InlineData(8)] + [InlineData(9)] + public void VerifyLocateSafeReductionIndexWithFunctionContent(int targetCount, int? thresholdCount = null) + { + // Generate a history with function calls at indexes 5 and 9 and + // function results at indexes 6 and 10 (total length: 14) + ChatHistory sourceHistory = [.. MockHistoryGenerator.CreateHistoryWithFunctionContent()]; + + ChatHistoryTruncationReducer reducer = new(targetCount, thresholdCount); + + int reductionIndex = sourceHistory.LocateSafeReductionIndex(targetCount, thresholdCount); + + Assert.True(reductionIndex > 0); + + // The reduction length avoids splitting function call and result, regardless of threshold + int expectedCount = targetCount; + + if (sourceHistory[sourceHistory.Count - targetCount].Items.Any(i => i is FunctionCallContent)) + { + expectedCount += 1; + } + else if (sourceHistory[sourceHistory.Count - targetCount].Items.Any(i => i is FunctionResultContent)) + { + expectedCount += 2; + } + + Assert.Equal(expectedCount, sourceHistory.Count - reductionIndex); + } +} diff --git a/dotnet/src/Agents/UnitTests/Core/History/ChatHistorySummarizationReducerTests.cs b/dotnet/src/Agents/UnitTests/Core/History/ChatHistorySummarizationReducerTests.cs new file mode 100644 index 000000000000..f464b6a8214a --- /dev/null +++ b/dotnet/src/Agents/UnitTests/Core/History/ChatHistorySummarizationReducerTests.cs @@ -0,0 +1,207 @@ +// Copyright (c) Microsoft. All rights reserved. +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Agents.History; +using Microsoft.SemanticKernel.ChatCompletion; +using Moq; +using Xunit; + +namespace SemanticKernel.Agents.UnitTests.Core.History; + +/// +/// Unit testing of ChatHistorySummarizationReducer.
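(Editorial aside: the function-content cases above encode a boundary rule that is easy to miss; a sketch using the mock generator defined later in this diff.)

```csharp
// 14 messages with function calls at indexes 5/9 and results at 6/10.
ChatHistory history = [.. MockHistoryGenerator.CreateHistoryWithFunctionContent()];

// Keeping the last 8 messages would start retained history at index 6, an
// orphaned FunctionResultContent, so the safe index backs up before the
// call at index 5 (per the test expectations above, it lands at index 4).
int safeIndex = history.LocateSafeReductionIndex(8, null);
```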
+/// +public class ChatHistorySummarizationReducerTests +{ + /// + /// Ensure that the constructor arguments are validated. + /// + [Theory] + [InlineData(-1)] + [InlineData(-1, int.MaxValue)] + [InlineData(int.MaxValue, -1)] + public void VerifyChatHistoryConstructorArgumentValidation(int targetCount, int? thresholdCount = null) + { + Mock mockCompletionService = this.CreateMockCompletionService(); + + Assert.Throws(() => new ChatHistorySummarizationReducer(mockCompletionService.Object, targetCount, thresholdCount)); + } + + /// + /// Verify object state after initialization. + /// + [Fact] + public void VerifyChatHistoryInitializationState() + { + Mock mockCompletionService = this.CreateMockCompletionService(); + + ChatHistorySummarizationReducer reducer = new(mockCompletionService.Object, 10); + + Assert.Equal(ChatHistorySummarizationReducer.DefaultSummarizationPrompt, reducer.SummarizationInstructions); + Assert.True(reducer.FailOnError); + + reducer = + new(mockCompletionService.Object, 10) + { + FailOnError = false, + SummarizationInstructions = "instructions", + }; + + Assert.NotEqual(ChatHistorySummarizationReducer.DefaultSummarizationPrompt, reducer.SummarizationInstructions); + Assert.False(reducer.FailOnError); + } + + /// + /// Validate hash-code expresses reducer equivalency. + /// + [Fact] + public void VerifyChatHistoryHasCode() + { + HashSet reducers = []; + + Mock mockCompletionService = this.CreateMockCompletionService(); + + int hashCode1 = GenerateHashCode(3, 4); + int hashCode2 = GenerateHashCode(33, 44); + int hashCode3 = GenerateHashCode(3000, 4000); + int hashCode4 = GenerateHashCode(3000, 4000); + + Assert.NotEqual(hashCode1, hashCode2); + Assert.NotEqual(hashCode2, hashCode3); + Assert.Equal(hashCode3, hashCode4); + Assert.Equal(3, reducers.Count); + + int GenerateHashCode(int targetCount, int thresholdCount) + { + ChatHistorySummarizationReducer reducer = new(mockCompletionService.Object, targetCount, thresholdCount); + + reducers.Add(reducer); + + return reducer.GetHashCode(); + } + } + + /// + /// Validate silent summarization failure when set to 'false'. + /// + [Fact] + public async Task VerifyChatHistoryReductionSilentFailureAsync() + { + Mock mockCompletionService = this.CreateMockCompletionService(throwException: true); + IReadOnlyList sourceHistory = MockHistoryGenerator.CreateSimpleHistory(20).ToArray(); + + ChatHistorySummarizationReducer reducer = new(mockCompletionService.Object, 10) { FailOnError = false }; + IEnumerable? reducedHistory = await reducer.ReduceAsync(sourceHistory); + + Assert.Null(reducedHistory); + } + + /// + /// Validate exception on summarization failure when set to 'true'. + /// + [Fact] + public async Task VerifyChatHistoryReductionThrowsOnFailureAsync() + { + Mock mockCompletionService = this.CreateMockCompletionService(throwException: true); + IReadOnlyList sourceHistory = MockHistoryGenerator.CreateSimpleHistory(20).ToArray(); + + ChatHistorySummarizationReducer reducer = new(mockCompletionService.Object, 10); + await Assert.ThrowsAsync(() => reducer.ReduceAsync(sourceHistory)); + } + + /// + /// Validate history not reduced when source history does not exceed target threshold. + /// + [Fact] + public async Task VerifyChatHistoryNotReducedAsync() + { + Mock mockCompletionService = this.CreateMockCompletionService(); + IReadOnlyList sourceHistory = MockHistoryGenerator.CreateSimpleHistory(20).ToArray(); + + ChatHistorySummarizationReducer reducer = new(mockCompletionService.Object, 20); + IEnumerable? 
reducedHistory = await reducer.ReduceAsync(sourceHistory); + + Assert.Null(reducedHistory); + } + + /// + /// Validate history reduced when source history exceeds target threshold. + /// + [Fact] + public async Task VerifyChatHistoryReducedAsync() + { + Mock mockCompletionService = this.CreateMockCompletionService(); + IReadOnlyList sourceHistory = MockHistoryGenerator.CreateSimpleHistory(20).ToArray(); + + ChatHistorySummarizationReducer reducer = new(mockCompletionService.Object, 10); + IEnumerable? reducedHistory = await reducer.ReduceAsync(sourceHistory); + + ChatMessageContent[] messages = VerifyReducedHistory(reducedHistory, 11); + VerifySummarization(messages[0]); + } + + /// + /// Validate history re-summarized on second occurrence of source history exceeding target threshold. + /// + [Fact] + public async Task VerifyChatHistoryRereducedAsync() + { + Mock mockCompletionService = this.CreateMockCompletionService(); + IReadOnlyList sourceHistory = MockHistoryGenerator.CreateSimpleHistory(20).ToArray(); + + ChatHistorySummarizationReducer reducer = new(mockCompletionService.Object, 10); + IEnumerable? reducedHistory = await reducer.ReduceAsync(sourceHistory); + reducedHistory = await reducer.ReduceAsync([.. reducedHistory!, .. sourceHistory]); + + ChatMessageContent[] messages = VerifyReducedHistory(reducedHistory, 11); + VerifySummarization(messages[0]); + + reducer = new(mockCompletionService.Object, 10) { UseSingleSummary = false }; + reducedHistory = await reducer.ReduceAsync([.. reducedHistory!, .. sourceHistory]); + + messages = VerifyReducedHistory(reducedHistory, 12); + VerifySummarization(messages[0]); + VerifySummarization(messages[1]); + } + + private static ChatMessageContent[] VerifyReducedHistory(IEnumerable? reducedHistory, int expectedCount) + { + Assert.NotNull(reducedHistory); + ChatMessageContent[] messages = reducedHistory.ToArray(); + Assert.Equal(expectedCount, messages.Length); + + return messages; + } + + private static void VerifySummarization(ChatMessageContent message) + { + Assert.NotNull(message.Metadata); + Assert.True(message.Metadata!.ContainsKey(ChatHistorySummarizationReducer.SummaryMetadataKey)); + } + + private Mock CreateMockCompletionService(bool throwException = false) + { + Mock mock = new(); + var setup = mock.Setup( + s => + s.GetChatMessageContentsAsync( + It.IsAny(), + It.IsAny(), + It.IsAny(), + default)); + + if (throwException) + { + setup.ThrowsAsync(new HttpOperationException("whoops")); + } + else + { + setup.ReturnsAsync([new(AuthorRole.Assistant, "summary")]); + } + + return mock; + } +} diff --git a/dotnet/src/Agents/UnitTests/Core/History/ChatHistoryTruncationReducerTests.cs b/dotnet/src/Agents/UnitTests/Core/History/ChatHistoryTruncationReducerTests.cs new file mode 100644 index 000000000000..eebcf8fc6136 --- /dev/null +++ b/dotnet/src/Agents/UnitTests/Core/History/ChatHistoryTruncationReducerTests.cs @@ -0,0 +1,106 @@ +// Copyright (c) Microsoft. All rights reserved. +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Agents.History; +using Xunit; + +namespace SemanticKernel.Agents.UnitTests.Core.History; + +/// +/// Unit testing of . +/// +public class ChatHistoryTruncationReducerTests +{ + /// + /// Ensure that the constructor arguments are validated. 
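(Editorial aside: a small sketch of the equivalency contract the hash-code tests rely on; the behavior follows directly from the Equals/GetHashCode overrides shown earlier.)

```csharp
// Reducers with identical configuration compare equal, so they collapse
// into a single HashSet entry.
HashSet<ChatHistoryTruncationReducer> reducers =
[
    new ChatHistoryTruncationReducer(10, 5),
    new ChatHistoryTruncationReducer(10, 5), // duplicate configuration
    new ChatHistoryTruncationReducer(20),
];
// reducers.Count == 2
```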
+ /// + [Theory] + [InlineData(-1)] + [InlineData(-1, int.MaxValue)] + [InlineData(int.MaxValue, -1)] + public void VerifyChatHistoryConstructorArgumentValidation(int targetCount, int? thresholdCount = null) + { + Assert.Throws(() => new ChatHistoryTruncationReducer(targetCount, thresholdCount)); + } + + /// + /// Validate hash-code expresses reducer equivalency. + /// + [Fact] + public void VerifyChatHistoryHasCode() + { + HashSet reducers = []; + + int hashCode1 = GenerateHashCode(3, 4); + int hashCode2 = GenerateHashCode(33, 44); + int hashCode3 = GenerateHashCode(3000, 4000); + int hashCode4 = GenerateHashCode(3000, 4000); + + Assert.NotEqual(hashCode1, hashCode2); + Assert.NotEqual(hashCode2, hashCode3); + Assert.Equal(hashCode3, hashCode4); + Assert.Equal(3, reducers.Count); + + int GenerateHashCode(int targetCount, int thresholdCount) + { + ChatHistoryTruncationReducer reducer = new(targetCount, thresholdCount); + + reducers.Add(reducer); + + return reducer.GetHashCode(); + } + } + + /// + /// Validate history not reduced when source history does not exceed target threshold. + /// + [Fact] + public async Task VerifyChatHistoryNotReducedAsync() + { + IReadOnlyList sourceHistory = MockHistoryGenerator.CreateSimpleHistory(10).ToArray(); + + ChatHistoryTruncationReducer reducer = new(20); + IEnumerable? reducedHistory = await reducer.ReduceAsync(sourceHistory); + + Assert.Null(reducedHistory); + } + + /// + /// Validate history reduced when source history exceeds target threshold. + /// + [Fact] + public async Task VerifyChatHistoryReducedAsync() + { + IReadOnlyList sourceHistory = MockHistoryGenerator.CreateSimpleHistory(20).ToArray(); + + ChatHistoryTruncationReducer reducer = new(10); + IEnumerable? reducedHistory = await reducer.ReduceAsync(sourceHistory); + + VerifyReducedHistory(reducedHistory, 10); + } + + /// + /// Validate history re-summarized on second occurrence of source history exceeding target threshold. + /// + [Fact] + public async Task VerifyChatHistoryRereducedAsync() + { + IReadOnlyList sourceHistory = MockHistoryGenerator.CreateSimpleHistory(20).ToArray(); + + ChatHistoryTruncationReducer reducer = new(10); + IEnumerable? reducedHistory = await reducer.ReduceAsync(sourceHistory); + reducedHistory = await reducer.ReduceAsync([.. reducedHistory!, .. sourceHistory]); + + VerifyReducedHistory(reducedHistory, 10); + } + + private static void VerifyReducedHistory(IEnumerable? reducedHistory, int expectedCount) + { + Assert.NotNull(reducedHistory); + ChatMessageContent[] messages = reducedHistory.ToArray(); + Assert.Equal(expectedCount, messages.Length); + } +} diff --git a/dotnet/src/Agents/UnitTests/Core/History/MockHistoryGenerator.cs b/dotnet/src/Agents/UnitTests/Core/History/MockHistoryGenerator.cs new file mode 100644 index 000000000000..375b6fc9aa40 --- /dev/null +++ b/dotnet/src/Agents/UnitTests/Core/History/MockHistoryGenerator.cs @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft. All rights reserved. +using System.Collections.Generic; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.ChatCompletion; + +namespace SemanticKernel.Agents.UnitTests.Core.History; + +/// +/// Factory for generating chat history for various test scenarios. +/// +internal static class MockHistoryGenerator +{ + /// + /// Create a homogeneous list of assistant messages. 
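(Editorial aside: a minimal usage sketch of the truncation reducer these tests cover; `history` is an assumed pre-populated ChatHistory.)

```csharp
ChatHistoryTruncationReducer truncator = new(targetCount: 10, thresholdCount: 5);

// Null means the source stayed within target + threshold and was left as-is;
// otherwise only the most recent messages survive, and function call/result
// pairs are never split.
IEnumerable<ChatMessageContent>? trimmed = await truncator.ReduceAsync(history);
```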
+ /// + public static IEnumerable CreateSimpleHistory(int messageCount) + { + for (int index = 0; index < messageCount; ++index) + { + yield return new ChatMessageContent(AuthorRole.Assistant, $"message #{index}"); + } + } + + /// + /// Create an alternating list of user and assistant messages. + /// + public static IEnumerable CreateHistoryWithUserInput(int messageCount) + { + for (int index = 0; index < messageCount; ++index) + { + yield return + index % 2 == 1 ? + new ChatMessageContent(AuthorRole.Assistant, $"assistant response: {index}") : + new ChatMessageContent(AuthorRole.User, $"user input: {index}"); + } + } + + /// + /// Create an alternating list of user and assistant messages with function content + /// injected at indexes: + /// + /// - 5: function call + /// - 6: function result + /// - 9: function call + /// - 10: function result + /// + /// Total message count: 14 messages. + /// + public static IEnumerable CreateHistoryWithFunctionContent() + { + yield return new ChatMessageContent(AuthorRole.User, "user input: 0"); + yield return new ChatMessageContent(AuthorRole.Assistant, "assistant response: 1"); + yield return new ChatMessageContent(AuthorRole.User, "user input: 2"); + yield return new ChatMessageContent(AuthorRole.Assistant, "assistant response: 3"); + yield return new ChatMessageContent(AuthorRole.User, "user input: 4"); + yield return new ChatMessageContent(AuthorRole.Assistant, [new FunctionCallContent("function call: 5")]); + yield return new ChatMessageContent(AuthorRole.Tool, [new FunctionResultContent("function result: 6")]); + yield return new ChatMessageContent(AuthorRole.Assistant, "assistant response: 7"); + yield return new ChatMessageContent(AuthorRole.User, "user input: 8"); + yield return new ChatMessageContent(AuthorRole.Assistant, [new FunctionCallContent("function call: 9")]); + yield return new ChatMessageContent(AuthorRole.Tool, [new FunctionResultContent("function result: 10")]); + yield return new ChatMessageContent(AuthorRole.Assistant, "assistant response: 11"); + yield return new ChatMessageContent(AuthorRole.User, "user input: 12"); + yield return new ChatMessageContent(AuthorRole.Assistant, "assistant response: 13"); + } +} diff --git a/dotnet/src/Agents/UnitTests/Internal/BroadcastQueueTests.cs b/dotnet/src/Agents/UnitTests/Internal/BroadcastQueueTests.cs index 452a0566e11f..987c67fce804 100644 --- a/dotnet/src/Agents/UnitTests/Internal/BroadcastQueueTests.cs +++ b/dotnet/src/Agents/UnitTests/Internal/BroadcastQueueTests.cs @@ -148,6 +148,11 @@ protected internal override async Task ReceiveAsync(IEnumerable +/// Mock definition of an agent with a chat-history contract. +/// +internal sealed class MockAgent : ChatHistoryKernelAgent +{ + public int InvokeCount { get; private set; } + + public IReadOnlyList Response { get; set; } = []; + + public override IAsyncEnumerable InvokeAsync( + ChatHistory history, + KernelArguments? arguments = null, + Kernel? kernel = null, + CancellationToken cancellationToken = default) + { + this.InvokeCount++; + + return this.Response.ToAsyncEnumerable(); + } + + public override IAsyncEnumerable InvokeStreamingAsync( + ChatHistory history, + KernelArguments? arguments = null, + Kernel?
kernel = null, + CancellationToken cancellationToken = default) + { + this.InvokeCount++; + return this.Response.Select(m => new StreamingChatMessageContent(m.Role, m.Content)).ToAsyncEnumerable(); + } +} diff --git a/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchKernelBuilderExtensionsTests.cs b/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchKernelBuilderExtensionsTests.cs new file mode 100644 index 000000000000..740c3898ce03 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchKernelBuilderExtensionsTests.cs @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using Azure; +using Azure.Core; +using Azure.Search.Documents.Indexes; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.AzureAISearch; +using Microsoft.SemanticKernel.Data; +using Moq; +using Xunit; + +namespace SemanticKernel.Connectors.AzureAISearch.UnitTests; + +/// +/// Tests for the class. +/// +public class AzureAISearchKernelBuilderExtensionsTests +{ + private readonly IKernelBuilder _kernelBuilder; + + public AzureAISearchKernelBuilderExtensionsTests() + { + this._kernelBuilder = Kernel.CreateBuilder(); + } + + [Fact] + public void AddVectorStoreRegistersClass() + { + // Arrange. + this._kernelBuilder.Services.AddSingleton(Mock.Of()); + + // Act. + this._kernelBuilder.AddAzureAISearchVectorStore(); + + // Assert. + this.AssertVectorStoreCreated(); + } + + [Fact] + public void AddVectorStoreWithUriAndCredsRegistersClass() + { + // Act. + this._kernelBuilder.AddAzureAISearchVectorStore(new Uri("https://localhost"), new AzureKeyCredential("fakeKey")); + + // Assert. + this.AssertVectorStoreCreated(); + } + + [Fact] + public void AddVectorStoreWithUriAndTokenCredsRegistersClass() + { + // Act. + this._kernelBuilder.AddAzureAISearchVectorStore(new Uri("https://localhost"), Mock.Of()); + + // Assert. + this.AssertVectorStoreCreated(); + } + + private void AssertVectorStoreCreated() + { + var kernel = this._kernelBuilder.Build(); + var vectorStore = kernel.Services.GetRequiredService(); + Assert.NotNull(vectorStore); + Assert.IsType(vectorStore); + } +} diff --git a/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchServiceCollectionExtensionsTests.cs b/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchServiceCollectionExtensionsTests.cs new file mode 100644 index 000000000000..e021d62c8159 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchServiceCollectionExtensionsTests.cs @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using Azure; +using Azure.Core; +using Azure.Search.Documents.Indexes; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.AzureAISearch; +using Microsoft.SemanticKernel.Data; +using Moq; +using Xunit; + +namespace SemanticKernel.Connectors.AzureAISearch.UnitTests; + +/// +/// Tests for the class. +/// +public class AzureAISearchServiceCollectionExtensionsTests +{ + private readonly IServiceCollection _serviceCollection; + + public AzureAISearchServiceCollectionExtensionsTests() + { + this._serviceCollection = new ServiceCollection(); + } + + [Fact] + public void AddVectorStoreRegistersClass() + { + // Arrange. + this._serviceCollection.AddSingleton(Mock.Of()); + + // Act. 
+ this._serviceCollection.AddAzureAISearchVectorStore(); + + // Assert. + this.AssertVectorStoreCreated(); + } + + [Fact] + public void AddVectorStoreWithUriAndCredsRegistersClass() + { + // Act. + this._serviceCollection.AddAzureAISearchVectorStore(new Uri("https://localhost"), new AzureKeyCredential("fakeKey")); + + // Assert. + this.AssertVectorStoreCreated(); + } + + [Fact] + public void AddVectorStoreWithUriAndTokenCredsRegistersClass() + { + // Act. + this._serviceCollection.AddAzureAISearchVectorStore(new Uri("https://localhost"), Mock.Of()); + + // Assert. + this.AssertVectorStoreCreated(); + } + + private void AssertVectorStoreCreated() + { + var serviceProvider = this._serviceCollection.BuildServiceProvider(); + var vectorStore = serviceProvider.GetRequiredService(); + Assert.NotNull(vectorStore); + Assert.IsType(vectorStore); + } +} diff --git a/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchVectorStoreCollectionCreateMappingTests.cs b/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchVectorStoreCollectionCreateMappingTests.cs new file mode 100644 index 000000000000..075880775324 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchVectorStoreCollectionCreateMappingTests.cs @@ -0,0 +1,210 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using Azure.Search.Documents.Indexes.Models; +using Microsoft.SemanticKernel.Connectors.AzureAISearch; +using Microsoft.SemanticKernel.Data; +using Xunit; + +namespace SemanticKernel.Connectors.AzureAISearch.UnitTests; + +/// +/// Contains tests for the class. +/// +public class AzureAISearchVectorStoreCollectionCreateMappingTests +{ + [Fact] + public void MapKeyFieldCreatesSearchableField() + { + // Arrange + var keyProperty = new VectorStoreRecordKeyProperty("testkey", typeof(string)); + var storagePropertyName = "test_key"; + + // Act + var result = AzureAISearchVectorStoreCollectionCreateMapping.MapKeyField(keyProperty, storagePropertyName); + + // Assert + Assert.NotNull(result); + Assert.Equal(storagePropertyName, result.Name); + Assert.True(result.IsKey); + Assert.True(result.IsFilterable); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public void MapFilterableStringDataFieldCreatesSimpleField(bool isFilterable) + { + // Arrange + var dataProperty = new VectorStoreRecordDataProperty("testdata", typeof(string)) { IsFilterable = isFilterable }; + var storagePropertyName = "test_data"; + + // Act + var result = AzureAISearchVectorStoreCollectionCreateMapping.MapDataField(dataProperty, storagePropertyName); + + // Assert + Assert.NotNull(result); + Assert.IsType(result); + Assert.Equal(storagePropertyName, result.Name); + Assert.False(result.IsKey); + Assert.Equal(isFilterable, result.IsFilterable); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public void MapFullTextSearchableStringDataFieldCreatesSearchableField(bool isFilterable) + { + // Arrange + var dataProperty = new VectorStoreRecordDataProperty("testdata", typeof(string)) { IsFilterable = isFilterable, IsFullTextSearchable = true }; + var storagePropertyName = "test_data"; + + // Act + var result = AzureAISearchVectorStoreCollectionCreateMapping.MapDataField(dataProperty, storagePropertyName); + + // Assert + Assert.NotNull(result); + Assert.IsType(result); + Assert.Equal(storagePropertyName, result.Name); + Assert.False(result.IsKey); + Assert.Equal(isFilterable, result.IsFilterable); + } + + [Fact] + public void 
MapFullTextSearchableStringDataFieldThrowsForInvalidType() + { + // Arrange + var dataProperty = new VectorStoreRecordDataProperty("testdata", typeof(int)) { IsFullTextSearchable = true }; + var storagePropertyName = "test_data"; + + // Act & Assert + Assert.Throws(() => AzureAISearchVectorStoreCollectionCreateMapping.MapDataField(dataProperty, storagePropertyName)); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public void MapDataFieldCreatesSimpleField(bool isFilterable) + { + // Arrange + var dataProperty = new VectorStoreRecordDataProperty("testdata", typeof(int)) { IsFilterable = isFilterable }; + var storagePropertyName = "test_data"; + + // Act + var result = AzureAISearchVectorStoreCollectionCreateMapping.MapDataField(dataProperty, storagePropertyName); + + // Assert + Assert.NotNull(result); + Assert.IsType(result); + Assert.Equal(storagePropertyName, result.Name); + Assert.Equal(SearchFieldDataType.Int32, result.Type); + Assert.False(result.IsKey); + Assert.Equal(isFilterable, result.IsFilterable); + } + + [Fact] + public void MapVectorFieldCreatesVectorSearchField() + { + // Arrange + var vectorProperty = new VectorStoreRecordVectorProperty("testvector", typeof(ReadOnlyMemory)) { Dimensions = 10, IndexKind = IndexKind.Flat, DistanceFunction = DistanceFunction.DotProductSimilarity }; + var storagePropertyName = "test_vector"; + + // Act + var (vectorSearchField, algorithmConfiguration, vectorSearchProfile) = AzureAISearchVectorStoreCollectionCreateMapping.MapVectorField(vectorProperty, storagePropertyName); + + // Assert + Assert.NotNull(vectorSearchField); + Assert.NotNull(algorithmConfiguration); + Assert.NotNull(vectorSearchProfile); + Assert.Equal(storagePropertyName, vectorSearchField.Name); + Assert.Equal(vectorProperty.Dimensions, vectorSearchField.VectorSearchDimensions); + + Assert.Equal("test_vectorAlgoConfig", algorithmConfiguration.Name); + Assert.IsType(algorithmConfiguration); + var flatConfig = algorithmConfiguration as ExhaustiveKnnAlgorithmConfiguration; + Assert.Equal(VectorSearchAlgorithmMetric.DotProduct, flatConfig!.Parameters.Metric); + + Assert.Equal("test_vectorProfile", vectorSearchProfile.Name); + Assert.Equal("test_vectorAlgoConfig", vectorSearchProfile.AlgorithmConfigurationName); + } + + [Theory] + [InlineData(IndexKind.Hnsw, typeof(HnswAlgorithmConfiguration))] + [InlineData(IndexKind.Flat, typeof(ExhaustiveKnnAlgorithmConfiguration))] + public void MapVectorFieldCreatesExpectedAlgoConfigTypes(string indexKind, Type algoConfigType) + { + // Arrange + var vectorProperty = new VectorStoreRecordVectorProperty("testvector", typeof(ReadOnlyMemory)) { Dimensions = 10, IndexKind = indexKind, DistanceFunction = DistanceFunction.DotProductSimilarity }; + var storagePropertyName = "test_vector"; + + // Act + var (vectorSearchField, algorithmConfiguration, vectorSearchProfile) = AzureAISearchVectorStoreCollectionCreateMapping.MapVectorField(vectorProperty, storagePropertyName); + + // Assert + Assert.Equal("test_vectorAlgoConfig", algorithmConfiguration.Name); + Assert.Equal(algoConfigType, algorithmConfiguration.GetType()); + } + + [Fact] + public void MapVectorFieldDefaultsToHsnwAndCosine() + { + // Arrange + var vectorProperty = new VectorStoreRecordVectorProperty("testvector", typeof(ReadOnlyMemory)) { Dimensions = 10 }; + var storagePropertyName = "test_vector"; + + // Act + var (vectorSearchField, algorithmConfiguration, vectorSearchProfile) = AzureAISearchVectorStoreCollectionCreateMapping.MapVectorField(vectorProperty, 
storagePropertyName); + + // Assert + Assert.IsType(algorithmConfiguration); + var hnswConfig = algorithmConfiguration as HnswAlgorithmConfiguration; + Assert.Equal(VectorSearchAlgorithmMetric.Cosine, hnswConfig!.Parameters.Metric); + } + + [Fact] + public void MapVectorFieldThrowsForUnsupportedDistanceFunction() + { + // Arrange + var vectorProperty = new VectorStoreRecordVectorProperty("testvector", typeof(ReadOnlyMemory)) { Dimensions = 10, DistanceFunction = DistanceFunction.ManhattanDistance }; + var storagePropertyName = "test_vector"; + + // Act & Assert + Assert.Throws(() => AzureAISearchVectorStoreCollectionCreateMapping.MapVectorField(vectorProperty, storagePropertyName)); + } + + [Fact] + public void MapVectorFieldThrowsForMissingDimensionsCount() + { + // Arrange + var vectorProperty = new VectorStoreRecordVectorProperty("testvector", typeof(ReadOnlyMemory)); + var storagePropertyName = "test_vector"; + + // Act & Assert + Assert.Throws(() => AzureAISearchVectorStoreCollectionCreateMapping.MapVectorField(vectorProperty, storagePropertyName)); + } + + [Theory] + [MemberData(nameof(DataTypeMappingOptions))] + public void GetSDKFieldDataTypeMapsTypesCorrectly(Type propertyType, SearchFieldDataType searchFieldDataType) + { + // Act & Assert + Assert.Equal(searchFieldDataType, AzureAISearchVectorStoreCollectionCreateMapping.GetSDKFieldDataType(propertyType)); + } + + public static IEnumerable DataTypeMappingOptions() + { + yield return new object[] { typeof(string), SearchFieldDataType.String }; + yield return new object[] { typeof(bool), SearchFieldDataType.Boolean }; + yield return new object[] { typeof(int), SearchFieldDataType.Int32 }; + yield return new object[] { typeof(long), SearchFieldDataType.Int64 }; + yield return new object[] { typeof(float), SearchFieldDataType.Double }; + yield return new object[] { typeof(double), SearchFieldDataType.Double }; + yield return new object[] { typeof(DateTime), SearchFieldDataType.DateTimeOffset }; + yield return new object[] { typeof(DateTimeOffset), SearchFieldDataType.DateTimeOffset }; + + yield return new object[] { typeof(string[]), SearchFieldDataType.Collection(SearchFieldDataType.String) }; + yield return new object[] { typeof(IEnumerable), SearchFieldDataType.Collection(SearchFieldDataType.String) }; + yield return new object[] { typeof(List), SearchFieldDataType.Collection(SearchFieldDataType.String) }; + } +} diff --git a/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchVectorStoreRecordCollectionTests.cs b/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchVectorStoreRecordCollectionTests.cs new file mode 100644 index 000000000000..c303613248f0 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchVectorStoreRecordCollectionTests.cs @@ -0,0 +1,618 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text.Json; +using System.Text.Json.Nodes; +using System.Text.Json.Serialization; +using System.Threading; +using System.Threading.Tasks; +using Azure; +using Azure.Search.Documents; +using Azure.Search.Documents.Indexes; +using Azure.Search.Documents.Indexes.Models; +using Azure.Search.Documents.Models; +using Microsoft.SemanticKernel.Connectors.AzureAISearch; +using Microsoft.SemanticKernel.Data; +using Moq; +using Xunit; + +namespace SemanticKernel.Connectors.AzureAISearch.UnitTests; + +/// +/// Contains tests for the class. 
+/// +public class AzureAISearchVectorStoreRecordCollectionTests +{ + private const string TestCollectionName = "testcollection"; + private const string TestRecordKey1 = "testid1"; + private const string TestRecordKey2 = "testid2"; + + private readonly Mock _searchIndexClientMock; + private readonly Mock _searchClientMock; + + private readonly CancellationToken _testCancellationToken = new(false); + + public AzureAISearchVectorStoreRecordCollectionTests() + { + this._searchClientMock = new Mock(MockBehavior.Strict); + this._searchIndexClientMock = new Mock(MockBehavior.Strict); + this._searchIndexClientMock.Setup(x => x.GetSearchClient(TestCollectionName)).Returns(this._searchClientMock.Object); + } + + [Theory] + [InlineData(TestCollectionName, true)] + [InlineData("nonexistentcollection", false)] + public async Task CollectionExistsReturnsCollectionStateAsync(string collectionName, bool expectedExists) + { + this._searchIndexClientMock.Setup(x => x.GetSearchClient(collectionName)).Returns(this._searchClientMock.Object); + + // Arrange. + if (expectedExists) + { + this._searchIndexClientMock + .Setup(x => x.GetIndexAsync(collectionName, this._testCancellationToken)) + .Returns(Task.FromResult?>(null)); + } + else + { + this._searchIndexClientMock + .Setup(x => x.GetIndexAsync(collectionName, this._testCancellationToken)) + .ThrowsAsync(new RequestFailedException(404, "Index not found")); + } + + var sut = new AzureAISearchVectorStoreRecordCollection(this._searchIndexClientMock.Object, collectionName); + + // Act. + var actual = await sut.CollectionExistsAsync(this._testCancellationToken); + + // Assert. + Assert.Equal(expectedExists, actual); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task CreateCollectionCallsSDKAsync(bool useDefinition, bool useCustomJsonSerializerOptions) + { + // Arrange. + this._searchIndexClientMock + .Setup(x => x.CreateIndexAsync(It.IsAny(), this._testCancellationToken)) + .ReturnsAsync(Response.FromValue(new SearchIndex(TestCollectionName), Mock.Of())); + + var sut = this.CreateRecordCollection(useDefinition, useCustomJsonSerializerOptions); + + // Act. + await sut.CreateCollectionAsync(); + + // Assert. + var expectedFieldNames = useCustomJsonSerializerOptions ? new[] { "key", "storage_data1", "data2", "storage_vector1", "vector2" } : new[] { "Key", "storage_data1", "Data2", "storage_vector1", "Vector2" }; + this._searchIndexClientMock + .Verify( + x => x.CreateIndexAsync( + It.Is(si => si.Fields.Count == 5 && si.Fields.Select(f => f.Name).SequenceEqual(expectedFieldNames) && si.Name == TestCollectionName && si.VectorSearch.Profiles.Count == 2 && si.VectorSearch.Algorithms.Count == 2), + this._testCancellationToken), + Times.Once); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task CreateCollectionIfNotExistsSDKAsync(bool useDefinition, bool expectedExists) + { + // Arrange. 
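For orientation, the index shape asserted by the create-collection test above can be written out concretely. This is an editorial sketch, not part of the diff: it shows the SearchIndex the mapping layer is expected to produce for MultiPropsModel, using the storage names from the JsonPropertyName attributes and the default HNSW configuration, with profile and algorithm names following the {field}Profile / {field}AlgoConfig convention asserted in these tests.

    var expectedIndex = new SearchIndex("testcollection")
    {
        Fields =
        {
            new SearchableField("Key") { IsKey = true, IsFilterable = true },
            new SimpleField("storage_data1", SearchFieldDataType.String),
            new SimpleField("Data2", SearchFieldDataType.String),
            new VectorSearchField("storage_vector1", 4, "storage_vector1Profile"),
            new VectorSearchField("Vector2", 4, "Vector2Profile"),
        },
        VectorSearch = new VectorSearch
        {
            Algorithms =
            {
                new HnswAlgorithmConfiguration("storage_vector1AlgoConfig"),
                new HnswAlgorithmConfiguration("Vector2AlgoConfig"),
            },
            Profiles =
            {
                new VectorSearchProfile("storage_vector1Profile", "storage_vector1AlgoConfig"),
                new VectorSearchProfile("Vector2Profile", "Vector2AlgoConfig"),
            },
        },
    };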
+ if (expectedExists) + { + this._searchIndexClientMock + .Setup(x => x.GetIndexAsync(TestCollectionName, this._testCancellationToken)) + .Returns(Task.FromResult?>(null)); + } + else + { + this._searchIndexClientMock + .Setup(x => x.GetIndexAsync(TestCollectionName, this._testCancellationToken)) + .ThrowsAsync(new RequestFailedException(404, "Index not found")); + } + + this._searchIndexClientMock + .Setup(x => x.CreateIndexAsync(It.IsAny(), this._testCancellationToken)) + .ReturnsAsync(Response.FromValue(new SearchIndex(TestCollectionName), Mock.Of())); + + var sut = this.CreateRecordCollection(useDefinition); + + // Act. + await sut.CreateCollectionIfNotExistsAsync(); + + // Assert. + if (expectedExists) + { + this._searchIndexClientMock + .Verify( + x => x.CreateIndexAsync( + It.IsAny(), + this._testCancellationToken), + Times.Never); + } + else + { + this._searchIndexClientMock + .Verify( + x => x.CreateIndexAsync( + It.Is(si => si.Fields.Count == 5 && si.Name == TestCollectionName && si.VectorSearch.Profiles.Count == 2 && si.VectorSearch.Algorithms.Count == 2), + this._testCancellationToken), + Times.Once); + } + } + + [Fact] + public async Task CanDeleteCollectionAsync() + { + // Arrange. + this._searchIndexClientMock + .Setup(x => x.DeleteIndexAsync(TestCollectionName, this._testCancellationToken)) + .Returns(Task.FromResult(null)); + + var sut = this.CreateRecordCollection(false); + + // Act. + await sut.DeleteCollectionAsync(this._testCancellationToken); + + // Assert. + this._searchIndexClientMock.Verify(x => x.DeleteIndexAsync(TestCollectionName, this._testCancellationToken), Times.Once); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanGetRecordWithVectorsAsync(bool useDefinition) + { + // Arrange. + this._searchClientMock.Setup( + x => x.GetDocumentAsync( + TestRecordKey1, + It.Is(x => !x.SelectedFields.Any()), + this._testCancellationToken)) + .ReturnsAsync(Response.FromValue(CreateModel(TestRecordKey1, true), Mock.Of())); + + var sut = this.CreateRecordCollection(useDefinition); + + // Act. + var actual = await sut.GetAsync( + TestRecordKey1, + new() { IncludeVectors = true }, + this._testCancellationToken); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(TestRecordKey1, actual.Key); + Assert.Equal("data 1", actual.Data1); + Assert.Equal("data 2", actual.Data2); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector1!.Value.ToArray()); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector2!.Value.ToArray()); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task CanGetRecordWithoutVectorsAsync(bool useDefinition, bool useCustomJsonSerializerOptions) + { + // Arrange. + var storageObject = JsonSerializer.SerializeToNode(CreateModel(TestRecordKey1, false))!.AsObject(); + + var expectedSelectFields = useCustomJsonSerializerOptions ? new[] { "storage_data1", "data2", "key" } : new[] { "storage_data1", "Data2", "Key" }; + this._searchClientMock.Setup( + x => x.GetDocumentAsync( + TestRecordKey1, + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(Response.FromValue(CreateModel(TestRecordKey1, true), Mock.Of())); + + var sut = this.CreateRecordCollection(useDefinition, useCustomJsonSerializerOptions); + + // Act. + var actual = await sut.GetAsync( + TestRecordKey1, + new() { IncludeVectors = false }, + this._testCancellationToken); + + // Assert. 
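As a usage-level summary of the two get-record tests (an editorial sketch; `collection` stands in for any constructed AzureAISearchVectorStoreRecordCollection<MultiPropsModel>): passing IncludeVectors = false makes the connector request only the non-vector fields through GetDocumentOptions.SelectedFields, so vector properties are not populated from the service.

    var withVectors = await collection.GetAsync("testid1", new GetRecordOptions { IncludeVectors = true });
    var withoutVectors = await collection.GetAsync("testid1", new GetRecordOptions { IncludeVectors = false });
    // withoutVectors.Vector1 and withoutVectors.Vector2 remain null; those fields were never selected.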
+ Assert.NotNull(actual); + Assert.Equal(TestRecordKey1, actual.Key); + Assert.Equal("data 1", actual.Data1); + Assert.Equal("data 2", actual.Data2); + + this._searchClientMock.Verify( + x => x.GetDocumentAsync( + TestRecordKey1, + It.Is(x => x.SelectedFields.SequenceEqual(expectedSelectFields)), + this._testCancellationToken), + Times.Once); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanGetManyRecordsWithVectorsAsync(bool useDefinition) + { + // Arrange. + this._searchClientMock.Setup( + x => x.GetDocumentAsync( + It.IsAny(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync((string id, GetDocumentOptions options, CancellationToken cancellationToken) => + { + return Response.FromValue(CreateModel(id, true), Mock.Of()); + }); + + var sut = this.CreateRecordCollection(useDefinition); + + // Act. + var actual = await sut.GetBatchAsync( + [TestRecordKey1, TestRecordKey2], + new() { IncludeVectors = true }, + this._testCancellationToken).ToListAsync(); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(2, actual.Count); + Assert.Equal(TestRecordKey1, actual[0].Key); + Assert.Equal(TestRecordKey2, actual[1].Key); + } + + [Fact] + public async Task CanGetRecordWithCustomMapperAsync() + { + // Arrange. + var storageObject = JsonSerializer.SerializeToNode(CreateModel(TestRecordKey1, true))!.AsObject(); + + // Arrange GetDocumentAsync mock returning JsonObject. + this._searchClientMock.Setup( + x => x.GetDocumentAsync( + TestRecordKey1, + It.Is(x => !x.SelectedFields.Any()), + this._testCancellationToken)) + .ReturnsAsync(Response.FromValue(storageObject, Mock.Of())); + + // Arrange mapper mock from JsonObject to data model. + var mapperMock = new Mock>(MockBehavior.Strict); + mapperMock.Setup( + x => x.MapFromStorageToDataModel( + storageObject, + It.Is(x => x.IncludeVectors))) + .Returns(CreateModel(TestRecordKey1, true)); + + // Arrange target with custom mapper. + var sut = new AzureAISearchVectorStoreRecordCollection( + this._searchIndexClientMock.Object, + TestCollectionName, + new() + { + JsonObjectCustomMapper = mapperMock.Object + }); + + // Act. + var actual = await sut.GetAsync(TestRecordKey1, new() { IncludeVectors = true }, this._testCancellationToken); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(TestRecordKey1, actual.Key); + Assert.Equal("data 1", actual.Data1); + Assert.Equal("data 2", actual.Data2); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector1!.Value.ToArray()); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector2!.Value.ToArray()); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanDeleteRecordAsync(bool useDefinition) + { + // Arrange. +#pragma warning disable Moq1002 // Moq: No matching constructor + var indexDocumentsResultMock = new Mock(MockBehavior.Strict, new List()); +#pragma warning restore Moq1002 // Moq: No matching constructor + + this._searchClientMock.Setup( + x => x.DeleteDocumentsAsync( + It.IsAny(), + It.IsAny>(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(Response.FromValue(indexDocumentsResultMock.Object, Mock.Of())); + + var sut = this.CreateRecordCollection(useDefinition); + + // Act. + await sut.DeleteAsync( + TestRecordKey1, + cancellationToken: this._testCancellationToken); + + // Assert. 
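The batch-get test above fans the keys out as parallel GetDocumentAsync calls and yields results lazily. A consumption sketch (editorial; assumes the same hypothetical `collection` as above):

    await foreach (var record in collection.GetBatchAsync(
        new[] { "testid1", "testid2" },
        new GetRecordOptions { IncludeVectors = true }))
    {
        Console.WriteLine($"{record.Key}: {record.Data1}");
    }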
+ this._searchClientMock.Verify( + x => x.DeleteDocumentsAsync( + "Key", + It.Is>(x => x.Count() == 1 && x.Contains(TestRecordKey1)), + It.IsAny(), + this._testCancellationToken), + Times.Once); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanDeleteManyRecordsWithVectorsAsync(bool useDefinition) + { + // Arrange. +#pragma warning disable Moq1002 // Moq: No matching constructor + var indexDocumentsResultMock = new Mock(MockBehavior.Strict, new List()); +#pragma warning restore Moq1002 // Moq: No matching constructor + + this._searchClientMock.Setup( + x => x.DeleteDocumentsAsync( + It.IsAny(), + It.IsAny>(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(Response.FromValue(indexDocumentsResultMock.Object, Mock.Of())); + + var sut = this.CreateRecordCollection(useDefinition); + + // Act. + await sut.DeleteBatchAsync( + [TestRecordKey1, TestRecordKey2], + cancellationToken: this._testCancellationToken); + + // Assert. + this._searchClientMock.Verify( + x => x.DeleteDocumentsAsync( + "Key", + It.Is>(x => x.Count() == 2 && x.Contains(TestRecordKey1) && x.Contains(TestRecordKey2)), + It.IsAny(), + this._testCancellationToken), + Times.Once); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanUpsertRecordAsync(bool useDefinition) + { + // Arrange upload result object. +#pragma warning disable Moq1002 // Moq: No matching constructor + var indexingResult = new Mock(MockBehavior.Strict, TestRecordKey1, true, 200); + var indexingResults = new List(); + indexingResults.Add(indexingResult.Object); + var indexDocumentsResultMock = new Mock(MockBehavior.Strict, indexingResults); +#pragma warning restore Moq1002 // Moq: No matching constructor + + // Arrange upload. + this._searchClientMock.Setup( + x => x.UploadDocumentsAsync( + It.IsAny>(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(Response.FromValue(indexDocumentsResultMock.Object, Mock.Of())); + + // Arrange sut. + var sut = this.CreateRecordCollection(useDefinition); + + var model = CreateModel(TestRecordKey1, true); + + // Act. + var actual = await sut.UpsertAsync( + model, + cancellationToken: this._testCancellationToken); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(TestRecordKey1, actual); + this._searchClientMock.Verify( + x => x.UploadDocumentsAsync( + It.Is>(x => x.Count() == 1 && x.First().Key == TestRecordKey1), + It.Is(x => x.ThrowOnAnyError == true), + this._testCancellationToken), + Times.Once); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanUpsertManyRecordsAsync(bool useDefinition) + { + // Arrange upload result object. +#pragma warning disable Moq1002 // Moq: No matching constructor + var indexingResult1 = new Mock(MockBehavior.Strict, TestRecordKey1, true, 200); + var indexingResult2 = new Mock(MockBehavior.Strict, TestRecordKey2, true, 200); + + var indexingResults = new List(); + indexingResults.Add(indexingResult1.Object); + indexingResults.Add(indexingResult2.Object); + var indexDocumentsResultMock = new Mock(MockBehavior.Strict, indexingResults); +#pragma warning restore Moq1002 // Moq: No matching constructor + + // Arrange upload. + this._searchClientMock.Setup( + x => x.UploadDocumentsAsync( + It.IsAny>(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(Response.FromValue(indexDocumentsResultMock.Object, Mock.Of())); + + // Arrange sut. 
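Both delete tests confirm that deletes are routed through SearchClient.DeleteDocumentsAsync, keyed on the storage name of the key field ("Key" for MultiPropsModel). The upsert path being arranged here returns the keys acknowledged by the service; a caller-side sketch with placeholder values:

    await collection.DeleteBatchAsync(new[] { "testid1", "testid2" });

    var key = await collection.UpsertAsync(new MultiPropsModel
    {
        Key = "testid1",
        Data1 = "data 1",
        Data2 = "data 2",
        Vector1 = new float[] { 1, 2, 3, 4 },
        Vector2 = new float[] { 1, 2, 3, 4 },
    });
    // key == "testid1" once the service reports a successful upload.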
+ var sut = this.CreateRecordCollection(useDefinition); + + var model1 = CreateModel(TestRecordKey1, true); + var model2 = CreateModel(TestRecordKey2, true); + + // Act. + var actual = await sut.UpsertBatchAsync( + [model1, model2], + cancellationToken: this._testCancellationToken).ToListAsync(); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(2, actual.Count); + Assert.Equal(TestRecordKey1, actual[0]); + Assert.Equal(TestRecordKey2, actual[1]); + + this._searchClientMock.Verify( + x => x.UploadDocumentsAsync( + It.Is>(x => x.Count() == 2 && x.First().Key == TestRecordKey1 && x.ElementAt(1).Key == TestRecordKey2), + It.Is(x => x.ThrowOnAnyError == true), + this._testCancellationToken), + Times.Once); + } + + [Fact] + public async Task CanUpsertRecordWithCustomMapperAsync() + { + // Arrange. +#pragma warning disable Moq1002 // Moq: No matching constructor + var indexingResult = new Mock(MockBehavior.Strict, TestRecordKey1, true, 200); + var indexingResults = new List(); + indexingResults.Add(indexingResult.Object); + var indexDocumentsResultMock = new Mock(MockBehavior.Strict, indexingResults); +#pragma warning restore Moq1002 // Moq: No matching constructor + + var model = CreateModel(TestRecordKey1, true); + var storageObject = JsonSerializer.SerializeToNode(model)!.AsObject(); + + // Arrange UploadDocumentsAsync mock returning upsert result. + this._searchClientMock.Setup( + x => x.UploadDocumentsAsync( + It.IsAny>(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync((IEnumerable documents, IndexDocumentsOptions options, CancellationToken cancellationToken) => + { + // Need to force a materialization of the documents enumerable here, otherwise the mapper (and therefore its mock) doesn't get invoked. + var materializedDocuments = documents.ToList(); + return Response.FromValue(indexDocumentsResultMock.Object, Mock.Of()); + }); + + // Arrange mapper mock from data model to JsonObject. + var mapperMock = new Mock>(MockBehavior.Strict); + mapperMock + .Setup(x => x.MapFromDataToStorageModel(It.IsAny())) + .Returns(storageObject); + + // Arrange target with custom mapper. + var sut = new AzureAISearchVectorStoreRecordCollection( + this._searchIndexClientMock.Object, + TestCollectionName, + new() + { + JsonObjectCustomMapper = mapperMock.Object + }); + + // Act. + await sut.UpsertAsync( + model, + null, + this._testCancellationToken); + + // Assert. + mapperMock + .Verify( + x => x.MapFromDataToStorageModel(It.Is(x => x.Key == TestRecordKey1)), + Times.Once); + } + + /// + /// Tests that the collection can be created even if the definition and the type do not match. + /// In this case, the expectation is that a custom mapper will be provided to map between the + /// schema as defined by the definition and the different data model. + /// + [Fact] + public void CanCreateCollectionWithMismatchedDefinitionAndType() + { + // Arrange. + var definition = new VectorStoreRecordDefinition() + { + Properties = new List + { + new VectorStoreRecordKeyProperty("Id", typeof(string)), + new VectorStoreRecordDataProperty("Text", typeof(string)), + new VectorStoreRecordVectorProperty("Embedding", typeof(ReadOnlyMemory)) { Dimensions = 4 }, + } + }; + + // Act. 
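Since the remark above relies on a custom mapper to bridge the gap between the definition (Id/Text/Embedding) and the data model, a minimal mapper sketch may help. Everything here is illustrative: the class name is hypothetical, and the second parameter is assumed to be the IncludeVectors-carrying options type that the mapper mocks in these tests match against.

    internal sealed class HypotheticalDocumentMapper : IVectorStoreRecordMapper<MultiPropsModel, JsonObject>
    {
        public JsonObject MapFromDataToStorageModel(MultiPropsModel dataModel) => new()
        {
            ["Id"] = dataModel.Key,
            ["Text"] = dataModel.Data1,
            ["Embedding"] = JsonSerializer.SerializeToNode(dataModel.Vector1?.ToArray()),
        };

        public MultiPropsModel MapFromStorageToDataModel(JsonObject storageModel, StorageToDataModelMapperOptions options)
        {
            var model = new MultiPropsModel
            {
                Key = (string)storageModel["Id"]!,
                Data1 = (string)storageModel["Text"]!,
            };

            if (options.IncludeVectors)
            {
                // Only hydrate the vector when the caller asked for it, mirroring the mocks above.
                model.Vector1 = storageModel["Embedding"]!.Deserialize<float[]>();
            }

            return model;
        }
    }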
+ var sut = new AzureAISearchVectorStoreRecordCollection( + this._searchIndexClientMock.Object, + TestCollectionName, + new() { VectorStoreRecordDefinition = definition, JsonObjectCustomMapper = Mock.Of>() }); + } + + private AzureAISearchVectorStoreRecordCollection CreateRecordCollection(bool useDefinition, bool useCustomJsonSerializerOptions = false) + { + return new AzureAISearchVectorStoreRecordCollection( + this._searchIndexClientMock.Object, + TestCollectionName, + new() + { + VectorStoreRecordDefinition = useDefinition ? this._multiPropsDefinition : null, + JsonSerializerOptions = useCustomJsonSerializerOptions ? this._customJsonSerializerOptions : null + }); + } + + private static MultiPropsModel CreateModel(string key, bool withVectors) + { + return new MultiPropsModel + { + Key = key, + Data1 = "data 1", + Data2 = "data 2", + Vector1 = withVectors ? new float[] { 1, 2, 3, 4 } : null, + Vector2 = withVectors ? new float[] { 1, 2, 3, 4 } : null, + NotAnnotated = null, + }; + } + + private readonly JsonSerializerOptions _customJsonSerializerOptions = new() + { + PropertyNamingPolicy = JsonNamingPolicy.CamelCase + }; + + private readonly VectorStoreRecordDefinition _multiPropsDefinition = new() + { + Properties = + [ + new VectorStoreRecordKeyProperty("Key", typeof(string)), + new VectorStoreRecordDataProperty("Data1", typeof(string)), + new VectorStoreRecordDataProperty("Data2", typeof(string)), + new VectorStoreRecordVectorProperty("Vector1", typeof(ReadOnlyMemory)) { Dimensions = 4 }, + new VectorStoreRecordVectorProperty("Vector2", typeof(ReadOnlyMemory)) { Dimensions = 4 } + ] + }; + + public sealed class MultiPropsModel + { + [VectorStoreRecordKey] + public string Key { get; set; } = string.Empty; + + [JsonPropertyName("storage_data1")] + [VectorStoreRecordData] + public string Data1 { get; set; } = string.Empty; + + [VectorStoreRecordData] + public string Data2 { get; set; } = string.Empty; + + [JsonPropertyName("storage_vector1")] + [VectorStoreRecordVector(4)] + public ReadOnlyMemory? Vector1 { get; set; } + + [VectorStoreRecordVector(4)] + public ReadOnlyMemory? Vector2 { get; set; } + + public string? NotAnnotated { get; set; } + } +} diff --git a/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchVectorStoreTests.cs b/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchVectorStoreTests.cs new file mode 100644 index 000000000000..889b486da2ad --- /dev/null +++ b/dotnet/src/Connectors/Connectors.AzureAISearch.UnitTests/AzureAISearchVectorStoreTests.cs @@ -0,0 +1,121 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Azure; +using Azure.Search.Documents; +using Azure.Search.Documents.Indexes; +using Microsoft.SemanticKernel.Connectors.AzureAISearch; +using Microsoft.SemanticKernel.Data; +using Moq; +using Xunit; + +namespace SemanticKernel.Connectors.AzureAISearch.UnitTests; + +/// +/// Contains tests for the class. 
+/// +public class AzureAISearchVectorStoreTests +{ + private const string TestCollectionName = "testcollection"; + + private readonly Mock _searchIndexClientMock; + private readonly Mock _searchClientMock; + + private readonly CancellationToken _testCancellationToken = new(false); + + public AzureAISearchVectorStoreTests() + { + this._searchClientMock = new Mock(MockBehavior.Strict); + this._searchIndexClientMock = new Mock(MockBehavior.Strict); + this._searchIndexClientMock.Setup(x => x.GetSearchClient(TestCollectionName)).Returns(this._searchClientMock.Object); + } + + [Fact] + public void GetCollectionReturnsCollection() + { + // Arrange. + var sut = new AzureAISearchVectorStore(this._searchIndexClientMock.Object); + + // Act. + var actual = sut.GetCollection(TestCollectionName); + + // Assert. + Assert.NotNull(actual); + Assert.IsType>(actual); + } + + [Fact] + public void GetCollectionCallsFactoryIfProvided() + { + // Arrange. + var factoryMock = new Mock(MockBehavior.Strict); + var collectionMock = new Mock>(MockBehavior.Strict); + factoryMock + .Setup(x => x.CreateVectorStoreRecordCollection(this._searchIndexClientMock.Object, TestCollectionName, null)) + .Returns(collectionMock.Object); + var sut = new AzureAISearchVectorStore(this._searchIndexClientMock.Object, new() { VectorStoreCollectionFactory = factoryMock.Object }); + + // Act. + var actual = sut.GetCollection(TestCollectionName); + + // Assert. + Assert.Equal(collectionMock.Object, actual); + } + + [Fact] + public void GetCollectionThrowsForInvalidKeyType() + { + // Arrange. + var sut = new AzureAISearchVectorStore(this._searchIndexClientMock.Object); + + // Act & Assert. + Assert.Throws(() => sut.GetCollection(TestCollectionName)); + } + + [Fact] + public async Task ListCollectionNamesCallsSDKAsync() + { + // Arrange async enumerator mock. + var iterationCounter = 0; + var asyncEnumeratorMock = new Mock>(MockBehavior.Strict); + asyncEnumeratorMock.Setup(x => x.MoveNextAsync()).Returns(() => ValueTask.FromResult(iterationCounter++ <= 4)); + asyncEnumeratorMock.Setup(x => x.Current).Returns(() => $"testcollection{iterationCounter}"); + + // Arrange pageable mock. + var pageableMock = new Mock>(MockBehavior.Strict); + pageableMock.Setup(x => x.GetAsyncEnumerator(this._testCancellationToken)).Returns(asyncEnumeratorMock.Object); + + // Arrange search index client mock and sut. + this._searchIndexClientMock + .Setup(x => x.GetIndexNamesAsync(this._testCancellationToken)) + .Returns(pageableMock.Object); + var sut = new AzureAISearchVectorStore(this._searchIndexClientMock.Object); + + // Act. + var actual = sut.ListCollectionNamesAsync(this._testCancellationToken); + + // Assert. + Assert.NotNull(actual); + var actualList = await actual.ToListAsync(); + Assert.Equal(5, actualList.Count); + Assert.All(actualList, (value, index) => Assert.Equal($"testcollection{index + 1}", value)); + } + + public sealed class SinglePropsModel + { + [VectorStoreRecordKey] + public string Key { get; set; } = string.Empty; + + [VectorStoreRecordData] + public string Data { get; set; } = string.Empty; + + [VectorStoreRecordVector(4)] + public ReadOnlyMemory? Vector { get; set; } + + public string? 
NotAnnotated { get; set; } + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchKernelBuilderExtensions.cs b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchKernelBuilderExtensions.cs new file mode 100644 index 000000000000..f9b04e3e53da --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchKernelBuilderExtensions.cs @@ -0,0 +1,59 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using Azure; +using Azure.Core; +using Azure.Search.Documents.Indexes; +using Microsoft.SemanticKernel.Connectors.AzureAISearch; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel; + +/// +/// Extension methods to register Azure AI Search instances on the . +/// +public static class AzureAISearchKernelBuilderExtensions +{ + /// + /// Register an Azure AI Search with the specified service ID and where is retrieved from the dependency injection container. + /// + /// The builder to register the on. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The kernel builder. + public static IKernelBuilder AddAzureAISearchVectorStore(this IKernelBuilder builder, AzureAISearchVectorStoreOptions? options = default, string? serviceId = default) + { + builder.Services.AddAzureAISearchVectorStore(options, serviceId); + return builder; + } + + /// + /// Register an Azure AI Search with the provided and and the specified service ID. + /// + /// The builder to register the on. + /// The service endpoint for Azure AI Search. + /// The credential to authenticate to Azure AI Search with. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The kernel builder. + public static IKernelBuilder AddAzureAISearchVectorStore(this IKernelBuilder builder, Uri endpoint, TokenCredential tokenCredential, AzureAISearchVectorStoreOptions? options = default, string? serviceId = default) + { + builder.Services.AddAzureAISearchVectorStore(endpoint, tokenCredential, options, serviceId); + return builder; + } + + /// + /// Register an Azure AI Search with the provided and and the specified service ID. + /// + /// The builder to register the on. + /// The service endpoint for Azure AI Search. + /// The credential to authenticate to Azure AI Search with. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The kernel builder. + public static IKernelBuilder AddAzureAISearchVectorStore(this IKernelBuilder builder, Uri endpoint, AzureKeyCredential credential, AzureAISearchVectorStoreOptions? options = default, string? serviceId = default) + { + builder.Services.AddAzureAISearchVectorStore(endpoint, credential, options, serviceId); + return builder; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchServiceCollectionExtensions.cs b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchServiceCollectionExtensions.cs new file mode 100644 index 000000000000..7e2de2e8e83e --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchServiceCollectionExtensions.cs @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft. All rights reserved. 
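All three kernel builder overloads above forward to the matching IServiceCollection extensions in the file that follows. A consumption sketch (editorial; the endpoint and key are placeholders), mirroring what the DI unit tests at the top of this diff assert:

    var services = new ServiceCollection();
    services.AddAzureAISearchVectorStore(
        new Uri("https://my-search-service.search.windows.net"),
        new AzureKeyCredential("<api-key>"));

    using var provider = services.BuildServiceProvider();
    var vectorStore = provider.GetRequiredService<IVectorStore>();

With no serviceId supplied, the keyed registration effectively behaves like a normal one, which appears to be why the unit tests can resolve it with plain GetRequiredService.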
+ +using System; +using Azure; +using Azure.Core; +using Azure.Search.Documents.Indexes; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel.Connectors.AzureAISearch; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel; + +/// +/// Extension methods to register Azure AI Search instances on an . +/// +public static class AzureAISearchServiceCollectionExtensions +{ + /// + /// Register an Azure AI Search with the specified service ID and where is retrieved from the dependency injection container. + /// + /// The to register the on. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The service collection. + public static IServiceCollection AddAzureAISearchVectorStore(this IServiceCollection services, AzureAISearchVectorStoreOptions? options = default, string? serviceId = default) + { + // If we are not constructing the SearchIndexClient, add the IVectorStore as transient, since we + // cannot make assumptions about how SearchIndexClient is being managed. + services.AddKeyedTransient( + serviceId, + (sp, obj) => + { + var searchIndexClient = sp.GetRequiredService(); + var selectedOptions = options ?? sp.GetService(); + + return new AzureAISearchVectorStore( + searchIndexClient, + selectedOptions); + }); + + return services; + } + + /// + /// Register an Azure AI Search with the provided and and the specified service ID. + /// + /// The to register the on. + /// The service endpoint for Azure AI Search. + /// The credential to authenticate to Azure AI Search with. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The service collection. + public static IServiceCollection AddAzureAISearchVectorStore(this IServiceCollection services, Uri endpoint, TokenCredential tokenCredential, AzureAISearchVectorStoreOptions? options = default, string? serviceId = default) + { + Verify.NotNull(endpoint); + Verify.NotNull(tokenCredential); + + services.AddKeyedSingleton( + serviceId, + (sp, obj) => + { + var searchIndexClient = new SearchIndexClient(endpoint, tokenCredential); + var selectedOptions = options ?? sp.GetService(); + + return new AzureAISearchVectorStore( + searchIndexClient, + selectedOptions); + }); + + return services; + } + + /// + /// Register an Azure AI Search with the provided and and the specified service ID. + /// + /// The to register the on. + /// The service endpoint for Azure AI Search. + /// The credential to authenticate to Azure AI Search with. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The service collection. + public static IServiceCollection AddAzureAISearchVectorStore(this IServiceCollection services, Uri endpoint, AzureKeyCredential credential, AzureAISearchVectorStoreOptions? options = default, string? serviceId = default) + { + Verify.NotNull(endpoint); + Verify.NotNull(credential); + + services.AddKeyedSingleton( + serviceId, + (sp, obj) => + { + var searchIndexClient = new SearchIndexClient(endpoint, credential); + var selectedOptions = options ?? 
sp.GetService(); + + return new AzureAISearchVectorStore( + searchIndexClient, + selectedOptions); + }); + + return services; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStore.cs b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStore.cs new file mode 100644 index 000000000000..2ca2bf9577f5 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStore.cs @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Azure; +using Azure.Search.Documents.Indexes; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel.Connectors.AzureAISearch; + +/// +/// Class for accessing the list of collections in a Azure AI Search vector store. +/// +/// +/// This class can be used with collections of any schema type, but requires you to provide schema information when getting a collection. +/// +public sealed class AzureAISearchVectorStore : IVectorStore +{ + /// The name of this database for telemetry purposes. + private const string DatabaseName = "AzureAISearch"; + + /// Azure AI Search client that can be used to manage the list of indices in an Azure AI Search Service. + private readonly SearchIndexClient _searchIndexClient; + + /// Optional configuration options for this class. + private readonly AzureAISearchVectorStoreOptions _options; + + /// + /// Initializes a new instance of the class. + /// + /// Azure AI Search client that can be used to manage the list of indices in an Azure AI Search Service. + /// Optional configuration options for this class. + public AzureAISearchVectorStore(SearchIndexClient searchIndexClient, AzureAISearchVectorStoreOptions? options = default) + { + Verify.NotNull(searchIndexClient); + + this._searchIndexClient = searchIndexClient; + this._options = options ?? new AzureAISearchVectorStoreOptions(); + } + + /// + public IVectorStoreRecordCollection GetCollection(string name, VectorStoreRecordDefinition? 
vectorStoreRecordDefinition = null) + where TKey : notnull + where TRecord : class + { + if (typeof(TKey) != typeof(string)) + { + throw new NotSupportedException("Only string keys are supported."); + } + + if (this._options.VectorStoreCollectionFactory is not null) + { + return this._options.VectorStoreCollectionFactory.CreateVectorStoreRecordCollection(this._searchIndexClient, name, vectorStoreRecordDefinition); + } + + var directlyCreatedStore = new AzureAISearchVectorStoreRecordCollection(this._searchIndexClient, name, new AzureAISearchVectorStoreRecordCollectionOptions() { VectorStoreRecordDefinition = vectorStoreRecordDefinition }) as IVectorStoreRecordCollection; + return directlyCreatedStore!; + } + + /// + public async IAsyncEnumerable ListCollectionNamesAsync([EnumeratorCancellation] CancellationToken cancellationToken = default) + { + var indexNamesEnumerable = this._searchIndexClient.GetIndexNamesAsync(cancellationToken).ConfigureAwait(false); + var indexNamesEnumerator = indexNamesEnumerable.GetAsyncEnumerator(); + + var nextResult = await GetNextIndexNameAsync(indexNamesEnumerator).ConfigureAwait(false); + while (nextResult.more) + { + yield return nextResult.name; + nextResult = await GetNextIndexNameAsync(indexNamesEnumerator).ConfigureAwait(false); + } + } + + /// + /// Helper method to get the next index name from the enumerator with a try catch around the move next call to convert + /// any to , since try catch is not supported + /// around a yield return. + /// + /// The enumerator to get the next result from. + /// A value indicating whether there are more results and the current string if true. + private static async Task<(string name, bool more)> GetNextIndexNameAsync(ConfiguredCancelableAsyncEnumerable.Enumerator enumerator) + { + const string OperationName = "GetIndexNames"; + + try + { + var more = await enumerator.MoveNextAsync(); + return (enumerator.Current, more); + } + catch (AggregateException ex) when (ex.InnerException is RequestFailedException innerEx) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + OperationName = OperationName + }; + } + catch (RequestFailedException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + OperationName = OperationName + }; + } + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreCollectionCreateMapping.cs b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreCollectionCreateMapping.cs new file mode 100644 index 000000000000..2ee086d69d53 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreCollectionCreateMapping.cs @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using Azure.Search.Documents.Indexes.Models; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel.Connectors.AzureAISearch; + +/// +/// Contains mapping helpers to use when creating a Azure AI Search vector collection. +/// +internal static class AzureAISearchVectorStoreCollectionCreateMapping +{ + /// + /// Map from a to an Azure AI Search . + /// + /// The key property definition. + /// The name of the property in storage. + /// The for the provided property definition. 
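Because GetNextIndexNameAsync above normalizes SDK failures into VectorStoreOperationException, index listing can be consumed with a single catch. An editorial sketch, assuming an existing SearchIndexClient instance:

    var vectorStore = new AzureAISearchVectorStore(searchIndexClient);
    try
    {
        await foreach (var collectionName in vectorStore.ListCollectionNamesAsync())
        {
            Console.WriteLine(collectionName);
        }
    }
    catch (VectorStoreOperationException ex)
    {
        // For this path, VectorStoreType is "AzureAISearch" and OperationName is "GetIndexNames".
        Console.WriteLine($"Listing failed: {ex.VectorStoreType}/{ex.OperationName}");
    }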
+    public static SearchableField MapKeyField(VectorStoreRecordKeyProperty keyProperty, string storagePropertyName)
+    {
+        return new SearchableField(storagePropertyName) { IsKey = true, IsFilterable = true };
+    }
+
+    /// <summary>
+    /// Map from a <see cref="VectorStoreRecordDataProperty"/> to an Azure AI Search <see cref="SimpleField"/>.
+    /// </summary>
+    /// <param name="dataProperty">The data property definition.</param>
+    /// <param name="storagePropertyName">The name of the property in storage.</param>
+    /// <returns>The <see cref="SimpleField"/> for the provided property definition.</returns>
+    /// <exception cref="InvalidOperationException">Throws when the definition is missing required information.</exception>
+    public static SimpleField MapDataField(VectorStoreRecordDataProperty dataProperty, string storagePropertyName)
+    {
+        if (dataProperty.IsFullTextSearchable)
+        {
+            if (dataProperty.PropertyType != typeof(string))
+            {
+                throw new InvalidOperationException($"Property {nameof(dataProperty.IsFullTextSearchable)} on {nameof(VectorStoreRecordDataProperty)} '{dataProperty.DataModelPropertyName}' is set to true, but the property type is not a string. The Azure AI Search VectorStore supports {nameof(dataProperty.IsFullTextSearchable)} on string properties only.");
+            }
+
+            return new SearchableField(storagePropertyName) { IsFilterable = dataProperty.IsFilterable };
+        }
+
+        return new SimpleField(storagePropertyName, AzureAISearchVectorStoreCollectionCreateMapping.GetSDKFieldDataType(dataProperty.PropertyType)) { IsFilterable = dataProperty.IsFilterable };
+    }
+
+    /// <summary>
+    /// Map from a <see cref="VectorStoreRecordVectorProperty"/> to an Azure AI Search <see cref="VectorSearchField"/> and generate the required index configuration.
+    /// </summary>
+    /// <param name="vectorProperty">The vector property definition.</param>
+    /// <param name="storagePropertyName">The name of the property in storage.</param>
+    /// <returns>The <see cref="VectorSearchField"/> and required index configuration.</returns>
+    /// <exception cref="InvalidOperationException">Throws when the definition is missing required information, or unsupported options are configured.</exception>
+    public static (VectorSearchField vectorSearchField, VectorSearchAlgorithmConfiguration algorithmConfiguration, VectorSearchProfile vectorSearchProfile) MapVectorField(VectorStoreRecordVectorProperty vectorProperty, string storagePropertyName)
+    {
+        if (vectorProperty.Dimensions is not > 0)
+        {
+            throw new InvalidOperationException($"Property {nameof(vectorProperty.Dimensions)} on {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' must be set to a positive integer to create a collection.");
+        }
+
+        // Build a name for the profile and algorithm configuration based on the property name
+        // since we'll just create a separate one for each vector property.
+        var vectorSearchProfileName = $"{storagePropertyName}Profile";
+        var algorithmConfigName = $"{storagePropertyName}AlgoConfig";
+
+        // Read the vector index settings from the property definition and create the right index configuration.
+ var indexKind = AzureAISearchVectorStoreCollectionCreateMapping.GetSKIndexKind(vectorProperty); + var algorithmMetric = AzureAISearchVectorStoreCollectionCreateMapping.GetSDKDistanceAlgorithm(vectorProperty); + + VectorSearchAlgorithmConfiguration algorithmConfiguration = indexKind switch + { + IndexKind.Hnsw => new HnswAlgorithmConfiguration(algorithmConfigName) { Parameters = new HnswParameters { Metric = algorithmMetric } }, + IndexKind.Flat => new ExhaustiveKnnAlgorithmConfiguration(algorithmConfigName) { Parameters = new ExhaustiveKnnParameters { Metric = algorithmMetric } }, + _ => throw new InvalidOperationException($"Index kind '{indexKind}' on {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' is not supported by the Azure AI Search VectorStore.") + }; + var vectorSearchProfile = new VectorSearchProfile(vectorSearchProfileName, algorithmConfigName); + + return (new VectorSearchField(storagePropertyName, vectorProperty.Dimensions.Value, vectorSearchProfileName), algorithmConfiguration, vectorSearchProfile); + } + + /// + /// Get the configured from the given . + /// If none is configured the default is . + /// + /// The vector property definition. + /// The configured or default . + public static string GetSKIndexKind(VectorStoreRecordVectorProperty vectorProperty) + { + if (vectorProperty.IndexKind is null) + { + return IndexKind.Hnsw; + } + + return vectorProperty.IndexKind; + } + + /// + /// Get the configured from the given . + /// If none is configured, the default is . + /// + /// The vector property definition. + /// The chosen . + /// Thrown if a distance function is chosen that isn't supported by Azure AI Search. + public static VectorSearchAlgorithmMetric GetSDKDistanceAlgorithm(VectorStoreRecordVectorProperty vectorProperty) + { + if (vectorProperty.DistanceFunction is null) + { + return VectorSearchAlgorithmMetric.Cosine; + } + + return vectorProperty.DistanceFunction switch + { + DistanceFunction.CosineSimilarity => VectorSearchAlgorithmMetric.Cosine, + DistanceFunction.DotProductSimilarity => VectorSearchAlgorithmMetric.DotProduct, + DistanceFunction.EuclideanDistance => VectorSearchAlgorithmMetric.Euclidean, + _ => throw new InvalidOperationException($"Distance function '{vectorProperty.DistanceFunction}' for {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' is not supported by the Azure AI Search VectorStore.") + }; + } + + /// + /// Maps the given property type to the corresponding . + /// + /// The property type to map. + /// The that corresponds to the given property type." + /// Thrown if the given type is not supported. + public static SearchFieldDataType GetSDKFieldDataType(Type propertyType) + { + return propertyType switch + { + Type stringType when stringType == typeof(string) => SearchFieldDataType.String, + Type boolType when boolType == typeof(bool) || boolType == typeof(bool?) => SearchFieldDataType.Boolean, + Type intType when intType == typeof(int) || intType == typeof(int?) => SearchFieldDataType.Int32, + Type longType when longType == typeof(long) || longType == typeof(long?) => SearchFieldDataType.Int64, + Type floatType when floatType == typeof(float) || floatType == typeof(float?) => SearchFieldDataType.Double, + Type doubleType when doubleType == typeof(double) || doubleType == typeof(double?) => SearchFieldDataType.Double, + Type dateTimeType when dateTimeType == typeof(DateTime) || dateTimeType == typeof(DateTime?) 
=> SearchFieldDataType.DateTimeOffset, + Type dateTimeOffsetType when dateTimeOffsetType == typeof(DateTimeOffset) || dateTimeOffsetType == typeof(DateTimeOffset?) => SearchFieldDataType.DateTimeOffset, + Type collectionType when typeof(IEnumerable).IsAssignableFrom(collectionType) => SearchFieldDataType.Collection(GetSDKFieldDataType(GetEnumerableType(propertyType))), + _ => throw new InvalidOperationException($"Data type '{propertyType}' for {nameof(VectorStoreRecordDataProperty)} is not supported by the Azure AI Search VectorStore.") + }; + } + + /// + /// Gets the type of object stored in the given enumerable type. + /// + /// The enumerable to get the stored type for. + /// The type of object stored in the given enumerable type. + /// Thrown when the given type is not enumerable. + public static Type GetEnumerableType(Type type) + { + if (type is IEnumerable) + { + return typeof(object); + } + else if (type.IsArray) + { + return type.GetElementType()!; + } + + if (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(IEnumerable<>)) + { + return type.GetGenericArguments()[0]; + } + + if (type.GetInterfaces().FirstOrDefault(i => i.IsGenericType && i.GetGenericTypeDefinition() == typeof(IEnumerable<>)) is Type enumerableInterface) + { + return enumerableInterface.GetGenericArguments()[0]; + } + + throw new InvalidOperationException($"Data type '{type}' for {nameof(VectorStoreRecordDataProperty)} is not supported by the Azure AI Search VectorStore."); + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreOptions.cs b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreOptions.cs new file mode 100644 index 000000000000..e8d54c8b7740 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreOptions.cs @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace Microsoft.SemanticKernel.Connectors.AzureAISearch; + +/// +/// Options when creating a . +/// +public sealed class AzureAISearchVectorStoreOptions +{ + /// + /// An optional factory to use for constructing instances, if custom options are required. + /// + public IAzureAISearchVectorStoreRecordCollectionFactory? VectorStoreCollectionFactory { get; init; } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreRecordCollection.cs b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreRecordCollection.cs new file mode 100644 index 000000000000..21018b39c223 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreRecordCollection.cs @@ -0,0 +1,450 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Text.Json; +using System.Text.Json.Nodes; +using System.Threading; +using System.Threading.Tasks; +using Azure; +using Azure.Search.Documents; +using Azure.Search.Documents.Indexes; +using Azure.Search.Documents.Indexes.Models; +using Azure.Search.Documents.Models; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel.Connectors.AzureAISearch; + +/// +/// Service for storing and retrieving vector records, that uses Azure AI Search as the underlying storage. +/// +/// The data model to use for adding, updating and retrieving data from storage. 
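Looking back at the mapping class that just closed: GetEnumerableType supplies the element type that GetSDKFieldDataType wraps in SearchFieldDataType.Collection. A few expected resolutions as an editorial sketch (the array and generic-list cases mirror the unit-test data earlier in this diff; the HashSet case exercises the implemented-interface branch):

    var fromArray = AzureAISearchVectorStoreCollectionCreateMapping.GetEnumerableType(typeof(string[]));        // typeof(string)
    var fromList = AzureAISearchVectorStoreCollectionCreateMapping.GetEnumerableType(typeof(List<string>));     // typeof(string)
    var fromHashSet = AzureAISearchVectorStoreCollectionCreateMapping.GetEnumerableType(typeof(HashSet<int>));  // typeof(int)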
+#pragma warning disable CA1711 // Identifiers should not have incorrect suffix +public sealed class AzureAISearchVectorStoreRecordCollection : IVectorStoreRecordCollection +#pragma warning restore CA1711 // Identifiers should not have incorrect suffix + where TRecord : class +{ + /// The name of this database for telemetry purposes. + private const string DatabaseName = "AzureAISearch"; + + /// A set of types that a key on the provided model may have. + private static readonly HashSet s_supportedKeyTypes = + [ + typeof(string) + ]; + + /// A set of types that data properties on the provided model may have. + private static readonly HashSet s_supportedDataTypes = + [ + typeof(string), + typeof(int), + typeof(long), + typeof(double), + typeof(float), + typeof(bool), + typeof(DateTimeOffset), + typeof(int?), + typeof(long?), + typeof(double?), + typeof(float?), + typeof(bool?), + typeof(DateTimeOffset?), + ]; + + /// A set of types that vectors on the provided model may have. + /// + /// Azure AI Search is adding support for more types than just float32, but these are not available for use via the + /// SDK yet. We will update this list as the SDK is updated. + /// + /// + private static readonly HashSet s_supportedVectorTypes = + [ + typeof(ReadOnlyMemory), + typeof(ReadOnlyMemory?) + ]; + + /// Azure AI Search client that can be used to manage the list of indices in an Azure AI Search Service. + private readonly SearchIndexClient _searchIndexClient; + + /// Azure AI Search client that can be used to manage data in an Azure AI Search Service index. + private readonly SearchClient _searchClient; + + /// The name of the collection that this will access. + private readonly string _collectionName; + + /// Optional configuration options for this class. + private readonly AzureAISearchVectorStoreRecordCollectionOptions _options; + + /// A definition of the current storage model. + private readonly VectorStoreRecordDefinition _vectorStoreRecordDefinition; + + /// The storage name of the key field for the collections that this class is used with. + private readonly string _keyStoragePropertyName; + + /// The storage names of all non vector fields on the current model. + private readonly List _nonVectorStoragePropertyNames = new(); + + /// A dictionary that maps from a property name to the storage name that should be used when serializing it to json for data and vector properties. + private readonly Dictionary _storagePropertyNames = new(); + + /// + /// Initializes a new instance of the class. + /// + /// Azure AI Search client that can be used to manage the list of indices in an Azure AI Search Service. + /// The name of the collection that this will access. + /// Optional configuration options for this class. + /// Thrown when is null. + /// Thrown when options are misconfigured. + public AzureAISearchVectorStoreRecordCollection(SearchIndexClient searchIndexClient, string collectionName, AzureAISearchVectorStoreRecordCollectionOptions? options = default) + { + // Verify. + Verify.NotNull(searchIndexClient); + Verify.NotNullOrWhiteSpace(collectionName); + + // Assign. + this._searchIndexClient = searchIndexClient; + this._collectionName = collectionName; + this._options = options ?? new AzureAISearchVectorStoreRecordCollectionOptions(); + this._searchClient = this._searchIndexClient.GetSearchClient(collectionName); + this._vectorStoreRecordDefinition = this._options.VectorStoreRecordDefinition ?? 
VectorStoreRecordPropertyReader.CreateVectorStoreRecordDefinitionFromType(typeof(TRecord), true); + var jsonSerializerOptions = this._options.JsonSerializerOptions ?? JsonSerializerOptions.Default; + + // Validate property types. + var properties = VectorStoreRecordPropertyReader.SplitDefinitionAndVerify(typeof(TRecord).Name, this._vectorStoreRecordDefinition, supportsMultipleVectors: true, requiresAtLeastOneVector: false); + VectorStoreRecordPropertyReader.VerifyPropertyTypes([properties.KeyProperty], s_supportedKeyTypes, "Key"); + VectorStoreRecordPropertyReader.VerifyPropertyTypes(properties.DataProperties, s_supportedDataTypes, "Data", supportEnumerable: true); + VectorStoreRecordPropertyReader.VerifyPropertyTypes(properties.VectorProperties, s_supportedVectorTypes, "Vector"); + + // Get storage names and store for later use. + this._storagePropertyNames = VectorStoreRecordPropertyReader.BuildPropertyNameToJsonPropertyNameMap(properties, typeof(TRecord), jsonSerializerOptions); + this._keyStoragePropertyName = this._storagePropertyNames[properties.KeyProperty.DataModelPropertyName]; + this._nonVectorStoragePropertyNames = properties.DataProperties + .Cast() + .Concat([properties.KeyProperty]) + .Select(x => this._storagePropertyNames[x.DataModelPropertyName]) + .ToList(); + } + + /// + public string CollectionName => this._collectionName; + + /// + public async Task CollectionExistsAsync(CancellationToken cancellationToken = default) + { + try + { + await this._searchIndexClient.GetIndexAsync(this._collectionName, cancellationToken).ConfigureAwait(false); + return true; + } + catch (RequestFailedException ex) when (ex.Status == 404) + { + return false; + } + catch (RequestFailedException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = "GetIndex" + }; + } + } + + /// + public Task CreateCollectionAsync(CancellationToken cancellationToken = default) + { + var vectorSearchConfig = new VectorSearch(); + var searchFields = new List(); + + // Loop through all properties and create the search fields. + foreach (var property in this._vectorStoreRecordDefinition.Properties) + { + // Key property. + if (property is VectorStoreRecordKeyProperty keyProperty) + { + searchFields.Add(AzureAISearchVectorStoreCollectionCreateMapping.MapKeyField(keyProperty, this._keyStoragePropertyName)); + } + + // Data property. + if (property is VectorStoreRecordDataProperty dataProperty) + { + searchFields.Add(AzureAISearchVectorStoreCollectionCreateMapping.MapDataField(dataProperty, this._storagePropertyNames[dataProperty.DataModelPropertyName])); + } + + // Vector property. + if (property is VectorStoreRecordVectorProperty vectorProperty) + { + (VectorSearchField vectorSearchField, VectorSearchAlgorithmConfiguration algorithmConfiguration, VectorSearchProfile vectorSearchProfile) = AzureAISearchVectorStoreCollectionCreateMapping.MapVectorField( + vectorProperty, + this._storagePropertyNames[vectorProperty.DataModelPropertyName]); + + // Add the search field, plus its profile and algorithm configuration to the search config. + searchFields.Add(vectorSearchField); + vectorSearchConfig.Algorithms.Add(algorithmConfiguration); + vectorSearchConfig.Profiles.Add(vectorSearchProfile); + } + } + + // Create the index. 
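Stepping back from the field-mapping loop, the whole class composes into a short end-to-end flow. An editorial sketch against the public surface, assuming a real SearchIndexClient and the MultiPropsModel from the unit tests:

    var collection = new AzureAISearchVectorStoreRecordCollection<MultiPropsModel>(
        searchIndexClient, "testcollection");

    await collection.CreateCollectionIfNotExistsAsync();
    await collection.UpsertAsync(new MultiPropsModel { Key = "testid1", Data1 = "data 1", Data2 = "data 2" });
    var record = await collection.GetAsync("testid1");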
+ var searchIndex = new SearchIndex(this._collectionName, searchFields); + searchIndex.VectorSearch = vectorSearchConfig; + + return this.RunOperationAsync( + "CreateIndex", + () => this._searchIndexClient.CreateIndexAsync(searchIndex, cancellationToken)); + } + + /// + public async Task CreateCollectionIfNotExistsAsync(CancellationToken cancellationToken = default) + { + if (!await this.CollectionExistsAsync(cancellationToken).ConfigureAwait(false)) + { + await this.CreateCollectionAsync(cancellationToken).ConfigureAwait(false); + } + } + + /// + public Task DeleteCollectionAsync(CancellationToken cancellationToken = default) + { + return this.RunOperationAsync( + "DeleteIndex", + () => this._searchIndexClient.DeleteIndexAsync(this._collectionName, cancellationToken)); + } + + /// + public Task GetAsync(string key, GetRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNullOrWhiteSpace(key); + + // Create Options. + var innerOptions = this.ConvertGetDocumentOptions(options); + var includeVectors = options?.IncludeVectors ?? false; + + // Get record. + return this.GetDocumentAndMapToDataModelAsync(key, includeVectors, innerOptions, cancellationToken); + } + + /// + public async IAsyncEnumerable GetBatchAsync(IEnumerable keys, GetRecordOptions? options = default, [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + Verify.NotNull(keys); + + // Create Options + var innerOptions = this.ConvertGetDocumentOptions(options); + var includeVectors = options?.IncludeVectors ?? false; + + // Get records in parallel. + var tasks = keys.Select(key => this.GetDocumentAndMapToDataModelAsync(key, includeVectors, innerOptions, cancellationToken)); + var results = await Task.WhenAll(tasks).ConfigureAwait(false); + foreach (var result in results) + { + if (result is not null) + { + yield return result; + } + } + } + + /// + public Task DeleteAsync(string key, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNullOrWhiteSpace(key); + + // Remove record. + return this.RunOperationAsync( + "DeleteDocuments", + () => this._searchClient.DeleteDocumentsAsync(this._keyStoragePropertyName, [key], new IndexDocumentsOptions(), cancellationToken)); + } + + /// + public Task DeleteBatchAsync(IEnumerable keys, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(keys); + + // Remove records. + return this.RunOperationAsync( + "DeleteDocuments", + () => this._searchClient.DeleteDocumentsAsync(this._keyStoragePropertyName, keys, new IndexDocumentsOptions(), cancellationToken)); + } + + /// + public async Task UpsertAsync(TRecord record, UpsertRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(record); + + // Create options. + var innerOptions = new IndexDocumentsOptions { ThrowOnAnyError = true }; + + // Upsert record. + var results = await this.MapToStorageModelAndUploadDocumentAsync([record], innerOptions, cancellationToken).ConfigureAwait(false); + return results.Value.Results[0].Key; + } + + /// + public async IAsyncEnumerable UpsertBatchAsync(IEnumerable records, UpsertRecordOptions? 
options = default, [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + Verify.NotNull(records); + + // Create Options + var innerOptions = new IndexDocumentsOptions { ThrowOnAnyError = true }; + + // Upsert records + var results = await this.MapToStorageModelAndUploadDocumentAsync(records, innerOptions, cancellationToken).ConfigureAwait(false); + + // Get results + var resultKeys = results.Value.Results.Select(x => x.Key).ToList(); + foreach (var resultKey in resultKeys) { yield return resultKey; } + } + + /// + /// Get the document with the given key and map it to the data model using the configured mapper type. + /// + /// The key of the record to get. + /// A value indicating whether to include vectors in the result or not. + /// The Azure AI Search sdk options for getting a document. + /// The to monitor for cancellation requests. The default is . + /// The retrieved document, mapped to the consumer data model. + private async Task GetDocumentAndMapToDataModelAsync( + string key, + bool includeVectors, + GetDocumentOptions innerOptions, + CancellationToken cancellationToken) + { + const string OperationName = "GetDocument"; + + // Use the user provided mapper. + if (this._options.JsonObjectCustomMapper is not null) + { + var jsonObject = await this.RunOperationAsync( + OperationName, + () => GetDocumentWithNotFoundHandlingAsync(this._searchClient, key, innerOptions, cancellationToken)).ConfigureAwait(false); + + if (jsonObject is null) + { + return null; + } + + return VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + OperationName, + () => this._options.JsonObjectCustomMapper!.MapFromStorageToDataModel(jsonObject, new() { IncludeVectors = includeVectors })); + } + + // Use the built in Azure AI Search mapper. + return await this.RunOperationAsync( + OperationName, + () => GetDocumentWithNotFoundHandlingAsync(this._searchClient, key, innerOptions, cancellationToken)).ConfigureAwait(false); + } + + /// + /// Map the data model to the storage model and upload the document. + /// + /// The records to upload. + /// The Azure AI Search sdk options for uploading a document. + /// The to monitor for cancellation requests. The default is . + /// The document upload result. + private Task> MapToStorageModelAndUploadDocumentAsync( + IEnumerable records, + IndexDocumentsOptions innerOptions, + CancellationToken cancellationToken) + { + const string OperationName = "UploadDocuments"; + + // Use the user provided mapper. + if (this._options.JsonObjectCustomMapper is not null) + { + var jsonObjects = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + OperationName, + () => records.Select(this._options.JsonObjectCustomMapper!.MapFromDataToStorageModel)); + + return this.RunOperationAsync( + OperationName, + () => this._searchClient.UploadDocumentsAsync(jsonObjects, innerOptions, cancellationToken)); + } + + // Use the built in Azure AI Search mapper. + return this.RunOperationAsync( + OperationName, + () => this._searchClient.UploadDocumentsAsync(records, innerOptions, cancellationToken)); + } + + /// + /// Convert the public options model to the Azure AI Search options model. + /// + /// The public options model. + /// The Azure AI Search options model. + private GetDocumentOptions ConvertGetDocumentOptions(GetRecordOptions? 
options) + { + var innerOptions = new GetDocumentOptions(); + if (options?.IncludeVectors is false) + { + innerOptions.SelectedFields.AddRange(this._nonVectorStoragePropertyNames); + } + + return innerOptions; + } + + /// + /// Get a document with the given key, and return null if it is not found. + /// + /// The type to deserialize the document to. + /// The search client to use when fetching the document. + /// The key of the record to get. + /// The Azure AI Search sdk options for getting a document. + /// The to monitor for cancellation requests. The default is . + /// The retrieved document, mapped to the consumer data model, or null if not found. + private static async Task GetDocumentWithNotFoundHandlingAsync( + SearchClient searchClient, + string key, + GetDocumentOptions innerOptions, + CancellationToken cancellationToken) + { + try + { + return await searchClient.GetDocumentAsync(key, innerOptions, cancellationToken).ConfigureAwait(false); + } + catch (RequestFailedException ex) when (ex.Status == 404) + { + return default; + } + } + + /// + /// Run the given operation and wrap any with . + /// + /// The response type of the operation. + /// The type of database operation being run. + /// The operation to run. + /// The result of the operation. + private async Task RunOperationAsync(string operationName, Func> operation) + { + try + { + return await operation.Invoke().ConfigureAwait(false); + } + catch (AggregateException ex) when (ex.InnerException is RequestFailedException innerEx) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = operationName + }; + } + catch (RequestFailedException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = operationName + }; + } + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreRecordCollectionOptions.cs b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreRecordCollectionOptions.cs new file mode 100644 index 000000000000..462dcd5d6e66 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/AzureAISearchVectorStoreRecordCollectionOptions.cs @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; +using System.Text.Json.Nodes; +using Azure.Search.Documents.Indexes; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel.Connectors.AzureAISearch; + +/// +/// Options when creating a . +/// +public sealed class AzureAISearchVectorStoreRecordCollectionOptions + where TRecord : class +{ + /// + /// Gets or sets an optional custom mapper to use when converting between the data model and the Azure AI Search record. + /// + /// + /// If not set, the default mapper that is provided by the Azure AI Search client SDK will be used. + /// + public IVectorStoreRecordMapper? JsonObjectCustomMapper { get; init; } = null; + + /// + /// Gets or sets an optional record definition that defines the schema of the record type. + /// + /// + /// If not provided, the schema will be inferred from the record model class using reflection. + /// In this case, the record model properties must be annotated with the appropriate attributes to indicate their usage. + /// See , and . + /// + public VectorStoreRecordDefinition?
VectorStoreRecordDefinition { get; init; } = null; + + /// + /// Gets or sets the JSON serializer options to use when converting between the data model and the Azure AI Search record. + /// Note that when using the default mapper, you will need to provide the same set of both here and when constructing the . + /// + public JsonSerializerOptions? JsonSerializerOptions { get; init; } = null; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/IAzureAISearchVectorStoreRecordCollectionFactory.cs b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/IAzureAISearchVectorStoreRecordCollectionFactory.cs new file mode 100644 index 000000000000..3e7dc2d82bc9 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.AzureAISearch/IAzureAISearchVectorStoreRecordCollectionFactory.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Azure.Search.Documents.Indexes; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel.Connectors.AzureAISearch; + +/// +/// Interface for constructing Azure AI Search instances when using to retrieve these. +/// +public interface IAzureAISearchVectorStoreRecordCollectionFactory +{ + /// + /// Constructs a new instance of the . + /// + /// The data type of the record key. + /// The data model to use for adding, updating and retrieving data from storage. + /// Azure AI Search client that can be used to manage the list of indices in an Azure AI Search Service. + /// The name of the collection to connect to. + /// An optional record definition that defines the schema of the record type. If not present, attributes on will be used. + /// The new instance of . + IVectorStoreRecordCollection CreateVectorStoreRecordCollection(SearchIndexClient searchIndexClient, string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition) + where TKey : notnull + where TRecord : class; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Pinecone/Connectors.Memory.Pinecone.csproj b/dotnet/src/Connectors/Connectors.Memory.Pinecone/Connectors.Memory.Pinecone.csproj index 462a89b0bd8b..69b47fe172f0 100644 --- a/dotnet/src/Connectors/Connectors.Memory.Pinecone/Connectors.Memory.Pinecone.csproj +++ b/dotnet/src/Connectors/Connectors.Memory.Pinecone/Connectors.Memory.Pinecone.csproj @@ -19,6 +19,7 @@ + diff --git a/dotnet/src/Connectors/Connectors.Memory.Pinecone/IPineconeVectorStoreRecordCollectionFactory.cs b/dotnet/src/Connectors/Connectors.Memory.Pinecone/IPineconeVectorStoreRecordCollectionFactory.cs new file mode 100644 index 000000000000..965639e93c8e --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Pinecone/IPineconeVectorStoreRecordCollectionFactory.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.SemanticKernel.Data; +using Sdk = Pinecone; + +namespace Microsoft.SemanticKernel.Connectors.Pinecone; + +/// +/// Interface for constructing Pinecone instances when using to retrieve these. +/// +public interface IPineconeVectorStoreRecordCollectionFactory +{ + /// + /// Constructs a new instance of the . + /// + /// The data type of the record key. + /// The data model to use for adding, updating and retrieving data from storage. + /// Pinecone client that can be used to manage the collections and points in a Pinecone store. + /// The name of the collection to connect to. + /// An optional record definition that defines the schema of the record type. If not present, attributes on will be used. + /// The new instance of . 
+ IVectorStoreRecordCollection CreateVectorStoreRecordCollection(Sdk.PineconeClient pineconeClient, string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition) + where TKey : notnull + where TRecord : class; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeKernelBuilderExtensions.cs b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeKernelBuilderExtensions.cs new file mode 100644 index 000000000000..0c992f789d2a --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeKernelBuilderExtensions.cs @@ -0,0 +1,40 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.SemanticKernel.Connectors.Pinecone; +using Microsoft.SemanticKernel.Data; +using Sdk = Pinecone; + +namespace Microsoft.SemanticKernel; + +/// +/// Extension methods to register Pinecone instances on the . +/// +public static class PineconeKernelBuilderExtensions +{ + /// + /// Register a Pinecone with the specified service ID and where is retrieved from the dependency injection container. + /// + /// The builder to register the on. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The kernel builder. + public static IKernelBuilder AddPineconeVectorStore(this IKernelBuilder builder, PineconeVectorStoreOptions? options = default, string? serviceId = default) + { + builder.Services.AddPineconeVectorStore(options, serviceId); + return builder; + } + + /// + /// Register a Pinecone with the specified service ID and where is constructed using the provided apikey. + /// + /// The builder to register the on. + /// The api key for Pinecone. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The kernel builder. + public static IKernelBuilder AddPineconeVectorStore(this IKernelBuilder builder, string apiKey, PineconeVectorStoreOptions? options = default, string? serviceId = default) + { + builder.Services.AddPineconeVectorStore(apiKey, options, serviceId); + return builder; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeServiceCollectionExtensions.cs b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeServiceCollectionExtensions.cs new file mode 100644 index 000000000000..d6f76f9beaa7 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeServiceCollectionExtensions.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel.Connectors.Pinecone; +using Microsoft.SemanticKernel.Data; +using Sdk = Pinecone; + +namespace Microsoft.SemanticKernel; + +/// +/// Extension methods to register Pinecone instances on an . +/// +public static class PineconeServiceCollectionExtensions +{ + /// + /// Register a Pinecone with the specified service ID and where is retrieved from the dependency injection container. + /// + /// The to register the on. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The service collection. + public static IServiceCollection AddPineconeVectorStore(this IServiceCollection services, PineconeVectorStoreOptions? options = default, string? serviceId = default) + { + // If we are not constructing the PineconeClient, add the IVectorStore as transient, since we + // cannot make assumptions about how PineconeClient is being managed. 
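As a consumption sketch for the two registrations above (the API key is a placeholder; resolving a keyed service registered with a null serviceId via GetRequiredService assumes the standard .NET 8 keyed-services behavior):

using Microsoft.Extensions.DependencyInjection;
using Microsoft.SemanticKernel.Data;

var services = new ServiceCollection();

// Registers a PineconeVectorStore backed by a client constructed from the key.
services.AddPineconeVectorStore("PINECONE-API-KEY-PLACEHOLDER");

using var provider = services.BuildServiceProvider();
var vectorStore = provider.GetRequiredService<IVectorStore>();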
+ services.AddKeyedTransient( + serviceId, + (sp, obj) => + { + var pineconeClient = sp.GetRequiredService(); + var selectedOptions = options ?? sp.GetService(); + + return new PineconeVectorStore( + pineconeClient, + selectedOptions); + }); + + return services; + } + + /// + /// Register a Pinecone with the specified service ID and where is constructed using the provided apikey. + /// + /// The to register the on. + /// The api key for Pinecone. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The service collection. + public static IServiceCollection AddPineconeVectorStore(this IServiceCollection services, string apiKey, PineconeVectorStoreOptions? options = default, string? serviceId = default) + { + services.AddKeyedSingleton( + serviceId, + (sp, obj) => + { + var pineconeClient = new Sdk.PineconeClient(apiKey); + var selectedOptions = options ?? sp.GetService(); + + return new PineconeVectorStore( + pineconeClient, + selectedOptions); + }); + + return services; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStore.cs b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStore.cs new file mode 100644 index 000000000000..ec5b6114c801 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStore.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading; +using Grpc.Core; +using Microsoft.SemanticKernel.Data; +using Pinecone; +using Sdk = Pinecone; + +namespace Microsoft.SemanticKernel.Connectors.Pinecone; + +/// +/// Class for accessing the list of collections in a Pinecone vector store. +/// +/// +/// This class can be used with collections of any schema type, but requires you to provide schema information when getting a collection. +/// +public sealed class PineconeVectorStore : IVectorStore +{ + private const string DatabaseName = "Pinecone"; + private const string ListCollectionsName = "ListCollections"; + + private readonly Sdk.PineconeClient _pineconeClient; + private readonly PineconeVectorStoreOptions _options; + + /// + /// Initializes a new instance of the class. + /// + /// Pinecone client that can be used to manage the collections and points in a Pinecone store. + /// Optional configuration options for this class. + public PineconeVectorStore(Sdk.PineconeClient pineconeClient, PineconeVectorStoreOptions? options = default) + { + Verify.NotNull(pineconeClient); + + this._pineconeClient = pineconeClient; + this._options = options ?? new PineconeVectorStoreOptions(); + } + + /// + public IVectorStoreRecordCollection GetCollection(string name, VectorStoreRecordDefinition? 
vectorStoreRecordDefinition = null) + where TKey : notnull + where TRecord : class + { + if (typeof(TKey) != typeof(string)) + { + throw new NotSupportedException("Only string keys are supported."); + } + + if (this._options.VectorStoreCollectionFactory is not null) + { + return this._options.VectorStoreCollectionFactory.CreateVectorStoreRecordCollection(this._pineconeClient, name, vectorStoreRecordDefinition); + } + + return (new PineconeVectorStoreRecordCollection( + this._pineconeClient, + name, + new PineconeVectorStoreRecordCollectionOptions() { VectorStoreRecordDefinition = vectorStoreRecordDefinition }) as IVectorStoreRecordCollection)!; + } + + /// + public async IAsyncEnumerable ListCollectionNamesAsync([EnumeratorCancellation] CancellationToken cancellationToken = default) + { + IndexDetails[] collections; + + try + { + collections = await this._pineconeClient.ListIndexes(cancellationToken).ConfigureAwait(false); + } + catch (RpcException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + OperationName = ListCollectionsName + }; + } + + foreach (var collection in collections) + { + yield return collection.Name; + } + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreCollectionCreateMapping.cs b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreCollectionCreateMapping.cs new file mode 100644 index 000000000000..0a50cf2ac399 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreCollectionCreateMapping.cs @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using Microsoft.SemanticKernel.Data; +using Pinecone; + +namespace Microsoft.SemanticKernel.Connectors.Pinecone; + +/// +/// Contains mapping helpers to use when creating a Pinecone vector collection. +/// +internal static class PineconeVectorStoreCollectionCreateMapping +{ + /// + /// Maps information stored in to a structure used by Pinecone SDK to create a serverless index. + /// + /// The property to map. + /// The structure containing settings used to create a serverless index. + /// Thrown if the property is missing information or has unsupported options specified. + public static (uint Dimension, Metric Metric) MapServerlessIndex(VectorStoreRecordVectorProperty vectorProperty) + { + if (vectorProperty!.Dimensions is not > 0) + { + throw new InvalidOperationException($"Property {nameof(vectorProperty.Dimensions)} on {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' must be set to a positive integer to create a collection."); + } + + return (Dimension: (uint)vectorProperty.Dimensions, Metric: GetSDKMetricAlgorithm(vectorProperty)); + } + + /// + /// Get the configured from the given . + /// If none is configured, the default is . + /// + /// The vector property definition. + /// The chosen . + /// Thrown if a distance function is chosen that isn't supported by Pinecone. 
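To make the serverless-index mapping concrete, a sketch of a call site (the property name, the dimension count, and the (name, type) constructor shape of VectorStoreRecordVectorProperty are assumptions for illustration):

var vectorProperty = new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(ReadOnlyMemory<float>))
{
    Dimensions = 1536,
    DistanceFunction = DistanceFunction.DotProductSimilarity,
};

// Yields (Dimension: 1536, Metric: Metric.DotProduct); with no distance function
// configured, the metric falls back to Metric.Cosine as described above.
var (dimension, metric) = PineconeVectorStoreCollectionCreateMapping.MapServerlessIndex(vectorProperty);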
+ public static Metric GetSDKMetricAlgorithm(VectorStoreRecordVectorProperty vectorProperty) + => vectorProperty.DistanceFunction switch + { + DistanceFunction.CosineSimilarity => Metric.Cosine, + DistanceFunction.DotProductSimilarity => Metric.DotProduct, + DistanceFunction.EuclideanDistance => Metric.Euclidean, + null => Metric.Cosine, + _ => throw new InvalidOperationException($"Distance function '{vectorProperty.DistanceFunction}' for {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' is not supported by the Pinecone VectorStore.") + }; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreOptions.cs b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreOptions.cs new file mode 100644 index 000000000000..7a6fc9767f62 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreOptions.cs @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace Microsoft.SemanticKernel.Connectors.Pinecone; + +/// +/// Options when creating a . +/// +public sealed class PineconeVectorStoreOptions +{ + /// + /// An optional factory to use for constructing instances, if custom options are required. + /// + public IPineconeVectorStoreRecordCollectionFactory? VectorStoreCollectionFactory { get; init; } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreRecordCollection.cs b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreRecordCollection.cs new file mode 100644 index 000000000000..323681f629be --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreRecordCollection.cs @@ -0,0 +1,273 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Grpc.Core; +using Microsoft.SemanticKernel.Data; +using Pinecone.Grpc; +using Sdk = Pinecone; + +namespace Microsoft.SemanticKernel.Connectors.Pinecone; + +/// +/// Service for storing and retrieving vector records, that uses Pinecone as the underlying storage. +/// +/// The data model to use for adding, updating and retrieving data from storage. +#pragma warning disable CA1711 // Identifiers should not have incorrect suffix +public sealed class PineconeVectorStoreRecordCollection : IVectorStoreRecordCollection +#pragma warning restore CA1711 // Identifiers should not have incorrect suffix + where TRecord : class +{ + private const string DatabaseName = "Pinecone"; + private const string CreateCollectionName = "CreateCollection"; + private const string CollectionExistsName = "CollectionExists"; + private const string DeleteCollectionName = "DeleteCollection"; + + private const string UpsertOperationName = "Upsert"; + private const string DeleteOperationName = "Delete"; + private const string GetOperationName = "Get"; + + private readonly Sdk.PineconeClient _pineconeClient; + private readonly PineconeVectorStoreRecordCollectionOptions _options; + private readonly VectorStoreRecordDefinition _vectorStoreRecordDefinition; + private readonly IVectorStoreRecordMapper _mapper; + + private Sdk.Index? _index; + + /// + public string CollectionName { get; } + + /// + /// Initializes a new instance of the class. + /// + /// Pinecone client that can be used to manage the collections and vectors in a Pinecone store. + /// Optional configuration options for this class. + /// Thrown if the is null. 
+ /// The name of the collection that this will access. + /// Thrown for any misconfigured options. + public PineconeVectorStoreRecordCollection(Sdk.PineconeClient pineconeClient, string collectionName, PineconeVectorStoreRecordCollectionOptions? options = null) + { + Verify.NotNull(pineconeClient); + + this._pineconeClient = pineconeClient; + this.CollectionName = collectionName; + this._options = options ?? new PineconeVectorStoreRecordCollectionOptions(); + this._vectorStoreRecordDefinition = this._options.VectorStoreRecordDefinition ?? VectorStoreRecordPropertyReader.CreateVectorStoreRecordDefinitionFromType(typeof(TRecord), true); + + if (this._options.VectorCustomMapper is null) + { + this._mapper = new PineconeVectorStoreRecordMapper(this._vectorStoreRecordDefinition); + } + else + { + this._mapper = this._options.VectorCustomMapper; + } + } + + /// + public async Task CollectionExistsAsync(CancellationToken cancellationToken = default) + { + var result = await this.RunOperationAsync( + CollectionExistsName, + async () => + { + var collections = await this._pineconeClient.ListIndexes(cancellationToken).ConfigureAwait(false); + + return collections.Any(x => x.Name == this.CollectionName); + }).ConfigureAwait(false); + + return result; + } + + /// + public async Task CreateCollectionAsync(CancellationToken cancellationToken = default) + { + // we already run through record property validation, so a single VectorStoreRecordVectorProperty is guaranteed. + var vectorProperty = this._vectorStoreRecordDefinition.Properties.OfType().First(); + var (dimension, metric) = PineconeVectorStoreCollectionCreateMapping.MapServerlessIndex(vectorProperty); + + await this.RunOperationAsync( + CreateCollectionName, + () => this._pineconeClient.CreateServerlessIndex( + this.CollectionName, + dimension, + metric, + this._options.ServerlessIndexCloud, + this._options.ServerlessIndexRegion, + cancellationToken)).ConfigureAwait(false); + } + + /// + public async Task CreateCollectionIfNotExistsAsync(CancellationToken cancellationToken = default) + { + if (!await this.CollectionExistsAsync(cancellationToken).ConfigureAwait(false)) + { + await this.CreateCollectionAsync(cancellationToken).ConfigureAwait(false); + } + } + + /// + public Task DeleteCollectionAsync(CancellationToken cancellationToken = default) + => this.RunOperationAsync( + DeleteCollectionName, + () => this._pineconeClient.DeleteIndex(this.CollectionName, cancellationToken)); + + /// + public async Task GetAsync(string key, GetRecordOptions? options = null, CancellationToken cancellationToken = default) + { + Verify.NotNull(key); + + var records = await this.GetBatchAsync([key], options, cancellationToken).ToListAsync(cancellationToken).ConfigureAwait(false); + + return records.FirstOrDefault(); + } + + /// + public async IAsyncEnumerable GetBatchAsync( + IEnumerable keys, + GetRecordOptions? options = default, + [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + Verify.NotNull(keys); + + var indexNamespace = this.GetIndexNamespace(); + var mapperOptions = new StorageToDataModelMapperOptions { IncludeVectors = options?.IncludeVectors ?? 
false }; + + var index = await this.GetIndexAsync(this.CollectionName, cancellationToken).ConfigureAwait(false); + + var results = await this.RunOperationAsync( + GetOperationName, + () => index.Fetch(keys, indexNamespace, cancellationToken)).ConfigureAwait(false); + + var records = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this.CollectionName, + GetOperationName, + () => results.Values.Select(x => this._mapper.MapFromStorageToDataModel(x, mapperOptions)).ToList()); + + foreach (var record in records) + { + yield return record; + } + } + + /// + public Task DeleteAsync(string key, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNullOrWhiteSpace(key); + + return this.DeleteBatchAsync([key], options, cancellationToken); + } + + /// + public async Task DeleteBatchAsync(IEnumerable keys, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(keys); + + var indexNamespace = this.GetIndexNamespace(); + + var index = await this.GetIndexAsync(this.CollectionName, cancellationToken).ConfigureAwait(false); + + await this.RunOperationAsync( + DeleteOperationName, + () => index.Delete(keys, indexNamespace, cancellationToken)).ConfigureAwait(false); + } + + /// + public async Task UpsertAsync(TRecord record, UpsertRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(record); + + var indexNamespace = this.GetIndexNamespace(); + + var index = await this.GetIndexAsync(this.CollectionName, cancellationToken).ConfigureAwait(false); + + var vector = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this.CollectionName, + UpsertOperationName, + () => this._mapper.MapFromDataToStorageModel(record)); + + await this.RunOperationAsync( + UpsertOperationName, + () => index.Upsert([vector], indexNamespace, cancellationToken)).ConfigureAwait(false); + + return vector.Id; + } + + /// + public async IAsyncEnumerable UpsertBatchAsync( + IEnumerable records, + UpsertRecordOptions? 
options = default, + [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + Verify.NotNull(records); + + var indexNamespace = this.GetIndexNamespace(); + + var index = await this.GetIndexAsync(this.CollectionName, cancellationToken).ConfigureAwait(false); + + var vectors = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this.CollectionName, + UpsertOperationName, + () => records.Select(this._mapper.MapFromDataToStorageModel).ToList()); + + await this.RunOperationAsync( + UpsertOperationName, + () => index.Upsert(vectors, indexNamespace, cancellationToken)).ConfigureAwait(false); + + foreach (var vector in vectors) + { + yield return vector.Id; + } + } + + private async Task RunOperationAsync(string operationName, Func> operation) + { + try + { + return await operation.Invoke().ConfigureAwait(false); + } + catch (RpcException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this.CollectionName, + OperationName = operationName + }; + } + } + + private async Task RunOperationAsync(string operationName, Func operation) + { + try + { + await operation.Invoke().ConfigureAwait(false); + } + catch (RpcException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this.CollectionName, + OperationName = operationName + }; + } + } + + private async Task> GetIndexAsync(string indexName, CancellationToken cancellationToken) + { + this._index ??= await this._pineconeClient.GetIndex(indexName, cancellationToken).ConfigureAwait(false); + + return this._index; + } + + private string? GetIndexNamespace() + => this._options.IndexNamespace; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreRecordCollectionOptions.cs b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreRecordCollectionOptions.cs new file mode 100644 index 000000000000..f328524ec758 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreRecordCollectionOptions.cs @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.SemanticKernel.Data; +using Pinecone; + +namespace Microsoft.SemanticKernel.Connectors.Pinecone; + +/// +/// Options when creating a . +/// +public sealed class PineconeVectorStoreRecordCollectionOptions + where TRecord : class +{ + /// + /// Gets or sets an optional custom mapper to use when converting between the data model and the Pinecone vector. + /// + public IVectorStoreRecordMapper? VectorCustomMapper { get; init; } = null; + + /// + /// Gets or sets an optional record definition that defines the schema of the record type. + /// + /// + /// If not provided, the schema will be inferred from the record model class using reflection. + /// In this case, the record model properties must be annotated with the appropriate attributes to indicate their usage. + /// See , and . + /// + public VectorStoreRecordDefinition? VectorStoreRecordDefinition { get; init; } = null; + + /// + /// Gets or sets the value for a namespace within the Pinecone index that will be used for operations involving records (Get, Upsert, Delete). + /// + public string? IndexNamespace { get; init; } = null; + + /// + /// Gets or sets the value for public cloud where the serverless index is hosted. + /// + /// + /// This value is only used when creating a new Pinecone index. Default value is 'aws'.
+ /// + public string ServerlessIndexCloud { get; init; } = "aws"; + + /// + /// Gets or sets the value for region where the serverless index is created. + /// + /// + /// This option is only used when creating a new Pinecone index. Default value is 'us-east-1'. + /// + public string ServerlessIndexRegion { get; init; } = "us-east-1"; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreRecordMapper.cs b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreRecordMapper.cs new file mode 100644 index 000000000000..da1d95ad6de9 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Pinecone/PineconeVectorStoreRecordMapper.cs @@ -0,0 +1,192 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using System.Text.Json; +using System.Text.Json.Nodes; +using Microsoft.SemanticKernel.Data; +using Pinecone; + +namespace Microsoft.SemanticKernel.Connectors.Pinecone; + +/// +/// Mapper between a Pinecone record and the consumer data model that uses json as an intermediary to allow supporting a wide range of models. +/// +/// The consumer data model to map to or from. +internal sealed class PineconeVectorStoreRecordMapper : IVectorStoreRecordMapper + where TRecord : class +{ + /// A set of types that a key on the provided model may have. + private static readonly HashSet s_supportedKeyTypes = [typeof(string)]; + + /// A set of types that data properties on the provided model may have. + private static readonly HashSet s_supportedDataTypes = + [ + typeof(bool), + typeof(bool?), + typeof(string), + typeof(int), + typeof(int?), + typeof(long), + typeof(long?), + typeof(float), + typeof(float?), + typeof(double), + typeof(double?), + typeof(decimal), + typeof(decimal?), + ]; + + /// A set of types that enumerable data properties on the provided model may use as their element types. + private static readonly HashSet s_supportedEnumerableDataElementTypes = + [ + typeof(string) + ]; + + /// A set of types that vectors on the provided model may have. + private static readonly HashSet s_supportedVectorTypes = + [ + typeof(ReadOnlyMemory), + typeof(ReadOnlyMemory?), + ]; + + private readonly PropertyInfo _keyPropertyInfo; + + private readonly List _dataPropertiesInfo; + + private readonly PropertyInfo _vectorPropertyInfo; + + private readonly Dictionary _storagePropertyNames = []; + + private readonly Dictionary _jsonPropertyNames = []; + + /// + /// Initializes a new instance of the class. + /// + /// The record definition that defines the schema of the record type. + public PineconeVectorStoreRecordMapper( + VectorStoreRecordDefinition vectorStoreRecordDefinition) + { + // Validate property types. + var propertiesInfo = VectorStoreRecordPropertyReader.FindProperties(typeof(TRecord), vectorStoreRecordDefinition, supportsMultipleVectors: false); + VectorStoreRecordPropertyReader.VerifyPropertyTypes([propertiesInfo.KeyProperty], s_supportedKeyTypes, "Key"); + VectorStoreRecordPropertyReader.VerifyPropertyTypes(propertiesInfo.DataProperties, s_supportedDataTypes, s_supportedEnumerableDataElementTypes, "Data"); + VectorStoreRecordPropertyReader.VerifyPropertyTypes(propertiesInfo.VectorProperties, s_supportedVectorTypes, "Vector"); + + // Assign. 
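// The reflected members captured below drive both mapping directions: the key
// property supplies Vector.Id, the data properties are copied to and from the
// metadata map, and the single vector property supplies Vector.Values.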
+ this._keyPropertyInfo = propertiesInfo.KeyProperty; + this._dataPropertiesInfo = propertiesInfo.DataProperties; + this._vectorPropertyInfo = propertiesInfo.VectorProperties[0]; + + // Get storage names and store for later use. + var properties = VectorStoreRecordPropertyReader.SplitDefinitionAndVerify(typeof(TRecord).Name, vectorStoreRecordDefinition, supportsMultipleVectors: false, requiresAtLeastOneVector: true); + this._jsonPropertyNames = VectorStoreRecordPropertyReader.BuildPropertyNameToJsonPropertyNameMap(properties, typeof(TRecord), JsonSerializerOptions.Default); + this._storagePropertyNames = VectorStoreRecordPropertyReader.BuildPropertyNameToStorageNameMap(properties); + } + + /// + public Vector MapFromDataToStorageModel(TRecord dataModel) + { + var keyObject = this._keyPropertyInfo.GetValue(dataModel); + if (keyObject is null) + { + throw new VectorStoreRecordMappingException($"Key property {this._keyPropertyInfo.Name} on provided record of type {typeof(TRecord).FullName} may not be null."); + } + + var metadata = new MetadataMap(); + foreach (var dataPropertyInfo in this._dataPropertiesInfo) + { + var propertyName = this._storagePropertyNames[dataPropertyInfo.Name]; + var propertyValue = dataPropertyInfo.GetValue(dataModel); + if (propertyValue != null) + { + metadata[propertyName] = ConvertToMetadataValue(propertyValue); + } + } + + var valuesObject = this._vectorPropertyInfo.GetValue(dataModel); + if (valuesObject is not ReadOnlyMemory values) + { + throw new VectorStoreRecordMappingException($"Vector property {this._vectorPropertyInfo.Name} on provided record of type {typeof(TRecord).FullName} may not be null."); + } + + // TODO: what about sparse values? + var result = new Vector + { + Id = (string)keyObject, + Values = values.ToArray(), + Metadata = metadata, + SparseValues = null + }; + + return result; + } + + /// + public TRecord MapFromStorageToDataModel(Vector storageModel, StorageToDataModelMapperOptions options) + { + var keyJsonName = this._jsonPropertyNames[this._keyPropertyInfo.Name]; + var outputJsonObject = new JsonObject + { + { keyJsonName, JsonValue.Create(storageModel.Id) }, + }; + + if (options?.IncludeVectors is true) + { + var propertyName = this._storagePropertyNames[this._vectorPropertyInfo.Name]; + var jsonName = this._jsonPropertyNames[this._vectorPropertyInfo.Name]; + outputJsonObject.Add(jsonName, new JsonArray(storageModel.Values.Select(x => JsonValue.Create(x)).ToArray())); + } + + if (storageModel.Metadata != null) + { + foreach (var dataProperty in this._dataPropertiesInfo) + { + var propertyName = this._storagePropertyNames[dataProperty.Name]; + var jsonName = this._jsonPropertyNames[dataProperty.Name]; + + if (storageModel.Metadata.TryGetValue(propertyName, out var value)) + { + outputJsonObject.Add(jsonName, ConvertFromMetadataValueToJsonNode(value)); + } + } + } + + return outputJsonObject.Deserialize()!; + } + + private static JsonNode? 
ConvertFromMetadataValueToJsonNode(MetadataValue metadataValue) + => metadataValue.Inner switch + { + null => null, + bool boolValue => JsonValue.Create(boolValue), + string stringValue => JsonValue.Create(stringValue), + int intValue => JsonValue.Create(intValue), + long longValue => JsonValue.Create(longValue), + float floatValue => JsonValue.Create(floatValue), + double doubleValue => JsonValue.Create(doubleValue), + decimal decimalValue => JsonValue.Create(decimalValue), + MetadataValue[] array => new JsonArray(array.Select(ConvertFromMetadataValueToJsonNode).ToArray()), + List list => new JsonArray(list.Select(ConvertFromMetadataValueToJsonNode).ToArray()), + _ => throw new VectorStoreRecordMappingException($"Unsupported metadata type: '{metadataValue.Inner?.GetType().FullName}'."), + }; + + // TODO: take advantage of MetadataValue.TryCreate once we upgrade the version of Pinecone.NET + private static MetadataValue ConvertToMetadataValue(object? sourceValue) + => sourceValue switch + { + bool boolValue => boolValue, + string stringValue => stringValue, + int intValue => intValue, + long longValue => longValue, + float floatValue => floatValue, + double doubleValue => doubleValue, + decimal decimalValue => decimalValue, + string[] stringArray => stringArray, + List stringList => stringList, + IEnumerable stringEnumerable => stringEnumerable.ToArray(), + _ => throw new VectorStoreRecordMappingException($"Unsupported source value type '{sourceValue?.GetType().FullName}'.") + }; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/Connectors.Memory.Qdrant.csproj b/dotnet/src/Connectors/Connectors.Memory.Qdrant/Connectors.Memory.Qdrant.csproj index d9037605f6e5..322a58d22400 100644 --- a/dotnet/src/Connectors/Connectors.Memory.Qdrant/Connectors.Memory.Qdrant.csproj +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/Connectors.Memory.Qdrant.csproj @@ -20,10 +20,12 @@ + + diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/IQdrantVectorStoreRecordCollectionFactory.cs b/dotnet/src/Connectors/Connectors.Memory.Qdrant/IQdrantVectorStoreRecordCollectionFactory.cs new file mode 100644 index 000000000000..2f93e14dfb82 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/IQdrantVectorStoreRecordCollectionFactory.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.SemanticKernel.Data; +using Qdrant.Client; + +namespace Microsoft.SemanticKernel.Connectors.Qdrant; + +/// +/// Interface for constructing Qdrant instances when using to retrieve these. +/// +public interface IQdrantVectorStoreRecordCollectionFactory +{ + /// + /// Constructs a new instance of the . + /// + /// The data type of the record key. + /// The data model to use for adding, updating and retrieving data from storage. + /// Qdrant client that can be used to manage the collections and points in a Qdrant store. + /// The name of the collection to connect to. + /// An optional record definition that defines the schema of the record type. If not present, attributes on will be used. + /// The new instance of . + IVectorStoreRecordCollection CreateVectorStoreRecordCollection(QdrantClient qdrantClient, string name, VectorStoreRecordDefinition? 
vectorStoreRecordDefinition) + where TKey : notnull + where TRecord : class; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/MockableQdrantClient.cs b/dotnet/src/Connectors/Connectors.Memory.Qdrant/MockableQdrantClient.cs new file mode 100644 index 000000000000..020455558b7d --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/MockableQdrantClient.cs @@ -0,0 +1,258 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using Qdrant.Client; +using Qdrant.Client.Grpc; + +namespace Microsoft.SemanticKernel.Connectors.Qdrant; + +/// +/// Decorator class for that exposes the required methods as virtual allowing for mocking in unit tests. +/// +internal class MockableQdrantClient +{ + /// Qdrant client that can be used to manage the collections and points in a Qdrant store. + private readonly QdrantClient _qdrantClient; + + /// + /// Initializes a new instance of the class. + /// + /// Qdrant client that can be used to manage the collections and points in a Qdrant store. + public MockableQdrantClient(QdrantClient qdrantClient) + { + Verify.NotNull(qdrantClient); + this._qdrantClient = qdrantClient; + } + +#pragma warning disable CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable. + + /// + /// Constructor for mocking purposes only. + /// + internal MockableQdrantClient() + { + } + +#pragma warning restore CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable. + + /// + /// Gets the internal that this mockable instance wraps. + /// + public QdrantClient QdrantClient => this._qdrantClient; + + /// + /// Check if a collection exists. + /// + /// The name of the collection. + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task CollectionExistsAsync( + string collectionName, + CancellationToken cancellationToken = default) + => this._qdrantClient.CollectionExistsAsync(collectionName, cancellationToken); + + /// + /// Creates a new collection with the given parameters. + /// + /// The name of the collection to be created. + /// + /// Configuration of the vector storage. Vector params contains size and distance for the vector storage. + /// This overload creates a single anonymous vector storage. + /// + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task CreateCollectionAsync( + string collectionName, + VectorParams vectorsConfig, + CancellationToken cancellationToken = default) + => this._qdrantClient.CreateCollectionAsync( + collectionName, + vectorsConfig, + cancellationToken: cancellationToken); + + /// + /// Creates a new collection with the given parameters. + /// + /// The name of the collection to be created. + /// + /// Configuration of the vector storage. Vector params contains size and distance for the vector storage. + /// This overload creates a vector storage for each key in the provided map. + /// + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task CreateCollectionAsync( + string collectionName, + VectorParamsMap? 
vectorsConfig = null, + CancellationToken cancellationToken = default) + => this._qdrantClient.CreateCollectionAsync( + collectionName, + vectorsConfig, + cancellationToken: cancellationToken); + + /// + /// Creates a payload field index in a collection. + /// + /// The name of the collection. + /// Field name to index. + /// The schema type of the field. + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task CreatePayloadIndexAsync( + string collectionName, + string fieldName, + PayloadSchemaType schemaType = PayloadSchemaType.Keyword, + CancellationToken cancellationToken = default) + => this._qdrantClient.CreatePayloadIndexAsync(collectionName, fieldName, schemaType, cancellationToken: cancellationToken); + + /// + /// Drop a collection and all its associated data. + /// + /// The name of the collection. + /// Wait timeout for operation commit in seconds, if not specified - default value will be supplied + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task DeleteCollectionAsync( + string collectionName, + TimeSpan? timeout = null, + CancellationToken cancellationToken = default) + => this._qdrantClient.DeleteCollectionAsync(collectionName, timeout, cancellationToken); + + /// + /// Gets the names of all existing collections. + /// + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task> ListCollectionsAsync(CancellationToken cancellationToken = default) + => this._qdrantClient.ListCollectionsAsync(cancellationToken); + + /// + /// Delete a point. + /// + /// The name of the collection. + /// The ID to delete. + /// Whether to wait until the changes have been applied. Defaults to true. + /// Write ordering guarantees. Defaults to Weak. + /// Option for custom sharding to specify used shard keys. + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task DeleteAsync( + string collectionName, + ulong id, + bool wait = true, + WriteOrderingType? ordering = null, + ShardKeySelector? shardKeySelector = null, + CancellationToken cancellationToken = default) + => this._qdrantClient.DeleteAsync(collectionName, id, wait, ordering, shardKeySelector, cancellationToken: cancellationToken); + + /// + /// Delete a point. + /// + /// The name of the collection. + /// The ID to delete. + /// Whether to wait until the changes have been applied. Defaults to true. + /// Write ordering guarantees. Defaults to Weak. + /// Option for custom sharding to specify used shard keys. + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task DeleteAsync( + string collectionName, + Guid id, + bool wait = true, + WriteOrderingType? ordering = null, + ShardKeySelector? shardKeySelector = null, + CancellationToken cancellationToken = default) + => this._qdrantClient.DeleteAsync(collectionName, id, wait, ordering, shardKeySelector, cancellationToken: cancellationToken); + + /// + /// Delete a point. + /// + /// The name of the collection. + /// The IDs to delete. + /// Whether to wait until the changes have been applied. Defaults to true. + /// Write ordering guarantees. Defaults to Weak. + /// Option for custom sharding to specify used shard keys. + /// + /// The token to monitor for cancellation requests. The default value is . 
+ /// + public virtual Task DeleteAsync( + string collectionName, + IReadOnlyList ids, + bool wait = true, + WriteOrderingType? ordering = null, + ShardKeySelector? shardKeySelector = null, + CancellationToken cancellationToken = default) + => this._qdrantClient.DeleteAsync(collectionName, ids, wait, ordering, shardKeySelector, cancellationToken: cancellationToken); + + /// + /// Delete a point. + /// + /// The name of the collection. + /// The IDs to delete. + /// Whether to wait until the changes have been applied. Defaults to true. + /// Write ordering guarantees. Defaults to Weak. + /// Option for custom sharding to specify used shard keys. + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task DeleteAsync( + string collectionName, + IReadOnlyList ids, + bool wait = true, + WriteOrderingType? ordering = null, + ShardKeySelector? shardKeySelector = null, + CancellationToken cancellationToken = default) + => this._qdrantClient.DeleteAsync(collectionName, ids, wait, ordering, shardKeySelector, cancellationToken: cancellationToken); + + /// + /// Perform insert and updates on points. If a point with a given ID already exists, it will be overwritten. + /// + /// The name of the collection. + /// The points to be upserted. + /// Whether to wait until the changes have been applied. Defaults to true. + /// Write ordering guarantees. + /// Option for custom sharding to specify used shard keys. + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task UpsertAsync( + string collectionName, + IReadOnlyList points, + bool wait = true, + WriteOrderingType? ordering = null, + ShardKeySelector? shardKeySelector = null, + CancellationToken cancellationToken = default) + => this._qdrantClient.UpsertAsync(collectionName, points, wait, ordering, shardKeySelector, cancellationToken); + + /// + /// Retrieve points. + /// + /// The name of the collection. + /// List of points to retrieve. + /// Whether to include the payload or not. + /// Whether to include the vectors or not. + /// Options for specifying read consistency guarantees. + /// Option for custom sharding to specify used shard keys. + /// + /// The token to monitor for cancellation requests. The default value is . + /// + public virtual Task> RetrieveAsync( + string collectionName, + IReadOnlyList ids, + bool withPayload = true, + bool withVectors = false, + ReadConsistency? readConsistency = null, + ShardKeySelector? shardKeySelector = null, + CancellationToken cancellationToken = default) + => this._qdrantClient.RetrieveAsync(collectionName, ids, withPayload, withVectors, readConsistency, shardKeySelector, cancellationToken); +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantKernelBuilderExtensions.cs b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantKernelBuilderExtensions.cs new file mode 100644 index 000000000000..0f32f044832f --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantKernelBuilderExtensions.cs @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.SemanticKernel.Connectors.Qdrant; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client; + +namespace Microsoft.SemanticKernel; + +/// +/// Extension methods to register Qdrant instances on the . +/// +public static class QdrantKernelBuilderExtensions +{ + /// + /// Register a Qdrant with the specified service ID and where is retrieved from the dependency injection container. 
+ /// + /// The builder to register the on. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The kernel builder. + public static IKernelBuilder AddQdrantVectorStore(this IKernelBuilder builder, QdrantVectorStoreOptions? options = default, string? serviceId = default) + { + builder.Services.AddQdrantVectorStore(options, serviceId); + return builder; + } + /// + /// Register a Qdrant with the specified service ID and where is constructed using the provided parameters. + /// + /// The builder to register the on. + /// The Qdrant service host name. + /// The Qdrant service port. + /// A value indicating whether to use HTTPS for communicating with Qdrant. + /// The Qdrant service API key. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The kernel builder. + public static IKernelBuilder AddQdrantVectorStore(this IKernelBuilder builder, string host, int port = 6334, bool https = false, string? apiKey = default, QdrantVectorStoreOptions? options = default, string? serviceId = default) + { + builder.Services.AddQdrantVectorStore(host, port, https, apiKey, options, serviceId); + return builder; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantServiceCollectionExtensions.cs b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantServiceCollectionExtensions.cs new file mode 100644 index 000000000000..5fd908153e9a --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantServiceCollectionExtensions.cs @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel.Connectors.Qdrant; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client; + +namespace Microsoft.SemanticKernel; + +/// +/// Extension methods to register Qdrant instances on an . +/// +public static class QdrantServiceCollectionExtensions +{ + /// + /// Register a Qdrant with the specified service ID and where is retrieved from the dependency injection container. + /// + /// The to register the on. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The service collection. + public static IServiceCollection AddQdrantVectorStore(this IServiceCollection services, QdrantVectorStoreOptions? options = default, string? serviceId = default) + { + // If we are not constructing the QdrantClient, add the IVectorStore as transient, since we + // cannot make assumptions about how QdrantClient is being managed. + services.AddKeyedTransient( + serviceId, + (sp, obj) => + { + var qdrantClient = sp.GetRequiredService(); + var selectedOptions = options ?? sp.GetService(); + + return new QdrantVectorStore( + qdrantClient, + selectedOptions); + }); + + return services; + } + /// + /// Register a Qdrant with the specified service ID and where is constructed using the provided parameters. + /// + /// The to register the on. + /// The Qdrant service host name. + /// The Qdrant service port. + /// A value indicating whether to use HTTPS for communicating with Qdrant. + /// The Qdrant service API key. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The service collection. + public static IServiceCollection AddQdrantVectorStore(this IServiceCollection services, string host, int port = 6334, bool https = false, string? apiKey = default, QdrantVectorStoreOptions? options = default, string? 
serviceId = default) + { + services.AddKeyedSingleton( + serviceId, + (sp, obj) => + { + var qdrantClient = new QdrantClient(host, port, https, apiKey); + var selectedOptions = options ?? sp.GetService(); + + return new QdrantVectorStore( + qdrantClient, + selectedOptions); + }); + + return services; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStore.cs b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStore.cs new file mode 100644 index 000000000000..ef9c9f1593f0 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStore.cs @@ -0,0 +1,96 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading; +using Grpc.Core; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client; + +namespace Microsoft.SemanticKernel.Connectors.Qdrant; + +/// +/// Class for accessing the list of collections in a Qdrant vector store. +/// +/// +/// This class can be used with collections of any schema type, but requires you to provide schema information when getting a collection. +/// +public sealed class QdrantVectorStore : IVectorStore +{ + /// The name of this database for telemetry purposes. + private const string DatabaseName = "Qdrant"; + + /// Qdrant client that can be used to manage the collections and points in a Qdrant store. + private readonly MockableQdrantClient _qdrantClient; + + /// Optional configuration options for this class. + private readonly QdrantVectorStoreOptions _options; + + /// + /// Initializes a new instance of the class. + /// + /// Qdrant client that can be used to manage the collections and points in a Qdrant store. + /// Optional configuration options for this class. + public QdrantVectorStore(QdrantClient qdrantClient, QdrantVectorStoreOptions? options = default) + : this(new MockableQdrantClient(qdrantClient), options) + { + } + + /// + /// Initializes a new instance of the class. + /// + /// Qdrant client that can be used to manage the collections and points in a Qdrant store. + /// Optional configuration options for this class. + internal QdrantVectorStore(MockableQdrantClient qdrantClient, QdrantVectorStoreOptions? options = default) + { + Verify.NotNull(qdrantClient); + + this._qdrantClient = qdrantClient; + this._options = options ?? new QdrantVectorStoreOptions(); + } + + /// + public IVectorStoreRecordCollection GetCollection(string name, VectorStoreRecordDefinition? 
vectorStoreRecordDefinition = null) + where TKey : notnull + where TRecord : class + { + if (typeof(TKey) != typeof(ulong) && typeof(TKey) != typeof(Guid)) + { + throw new NotSupportedException("Only ulong and Guid keys are supported."); + } + + if (this._options.VectorStoreCollectionFactory is not null) + { + return this._options.VectorStoreCollectionFactory.CreateVectorStoreRecordCollection(this._qdrantClient.QdrantClient, name, vectorStoreRecordDefinition); + } + + var directlyCreatedStore = new QdrantVectorStoreRecordCollection(this._qdrantClient, name, new QdrantVectorStoreRecordCollectionOptions() { VectorStoreRecordDefinition = vectorStoreRecordDefinition }); + var castCreatedStore = directlyCreatedStore as IVectorStoreRecordCollection; + return castCreatedStore!; + } + + /// + public async IAsyncEnumerable ListCollectionNamesAsync([EnumeratorCancellation] CancellationToken cancellationToken = default) + { + IReadOnlyList collections; + + try + { + collections = await this._qdrantClient.ListCollectionsAsync(cancellationToken).ConfigureAwait(false); + } + catch (RpcException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + OperationName = "ListCollections" + }; + } + + foreach (var collection in collections) + { + yield return collection; + } + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreCollectionCreateMapping.cs b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreCollectionCreateMapping.cs new file mode 100644 index 000000000000..e637ae2e06ab --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreCollectionCreateMapping.cs @@ -0,0 +1,118 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client.Grpc; + +namespace Microsoft.SemanticKernel.Connectors.Qdrant; + +/// +/// Contains mapping helpers to use when creating a qdrant vector collection. +/// +internal static class QdrantVectorStoreCollectionCreateMapping +{ + /// A dictionary of types and their matching qdrant index schema type. 
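Taken together, the registration extensions and GetCollection form the consumer entry point for this connector. Below is a minimal sketch of that path. The Hotel record type, its property names, and the attribute names are illustrative assumptions not shown in this diff, and resolving IVectorStore without a key assumes the default null serviceId behaves as a non-keyed registration:

```csharp
using System;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Data;

// Register the store; this overload constructs the QdrantClient from the given host.
var services = new ServiceCollection();
services.AddQdrantVectorStore("localhost");

var provider = services.BuildServiceProvider();
var vectorStore = provider.GetRequiredService<IVectorStore>();

// Only ulong and Guid keys are supported; any other TKey throws NotSupportedException.
var collection = vectorStore.GetCollection<Guid, Hotel>("hotels");

// Hypothetical record type; the attribute names are assumed from the
// Microsoft.SemanticKernel.Data annotations and are not part of this diff.
public sealed class Hotel
{
    [VectorStoreRecordKey]
    public Guid HotelId { get; init; }

    [VectorStoreRecordData(IsFilterable = true)]
    public string Name { get; init; } = string.Empty;

    [VectorStoreRecordVector(4)]
    public ReadOnlyMemory<float>? DescriptionEmbedding { get; init; }
}
```

Note the split in lifetimes: the parameterless overload registers the store as transient because it cannot assume how the container-supplied QdrantClient is managed, while the host/port overload owns the client it constructs and registers a singleton.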
+ public static readonly Dictionary s_schemaTypeMap = new() + { + { typeof(short), PayloadSchemaType.Integer }, + { typeof(sbyte), PayloadSchemaType.Integer }, + { typeof(byte), PayloadSchemaType.Integer }, + { typeof(ushort), PayloadSchemaType.Integer }, + { typeof(int), PayloadSchemaType.Integer }, + { typeof(uint), PayloadSchemaType.Integer }, + { typeof(long), PayloadSchemaType.Integer }, + { typeof(ulong), PayloadSchemaType.Integer }, + { typeof(float), PayloadSchemaType.Float }, + { typeof(double), PayloadSchemaType.Float }, + { typeof(decimal), PayloadSchemaType.Float }, + + { typeof(short?), PayloadSchemaType.Integer }, + { typeof(sbyte?), PayloadSchemaType.Integer }, + { typeof(byte?), PayloadSchemaType.Integer }, + { typeof(ushort?), PayloadSchemaType.Integer }, + { typeof(int?), PayloadSchemaType.Integer }, + { typeof(uint?), PayloadSchemaType.Integer }, + { typeof(long?), PayloadSchemaType.Integer }, + { typeof(ulong?), PayloadSchemaType.Integer }, + { typeof(float?), PayloadSchemaType.Float }, + { typeof(double?), PayloadSchemaType.Float }, + { typeof(decimal?), PayloadSchemaType.Float }, + + { typeof(string), PayloadSchemaType.Keyword }, + { typeof(DateTime), PayloadSchemaType.Datetime }, + { typeof(bool), PayloadSchemaType.Bool }, + + { typeof(DateTime?), PayloadSchemaType.Datetime }, + { typeof(bool?), PayloadSchemaType.Bool }, + }; + + /// + /// Maps a single to a qdrant . + /// + /// The property to map. + /// The mapped . + /// Thrown if the property is missing information or has unsupported options specified. + public static VectorParams MapSingleVector(VectorStoreRecordVectorProperty vectorProperty) + { + if (vectorProperty!.Dimensions is not > 0) + { + throw new InvalidOperationException($"Property {nameof(vectorProperty.Dimensions)} on {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' must be set to a positive integer to create a collection."); + } + + if (vectorProperty!.IndexKind is not null && vectorProperty!.IndexKind != IndexKind.Hnsw) + { + throw new InvalidOperationException($"Index kind '{vectorProperty!.IndexKind}' for {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' is not supported by the Qdrant VectorStore."); + } + + return new VectorParams { Size = (ulong)vectorProperty.Dimensions, Distance = QdrantVectorStoreCollectionCreateMapping.GetSDKDistanceAlgorithm(vectorProperty) }; + } + + /// + /// Maps a collection of to a qdrant . + /// + /// The properties to map. + /// The mapping of property names to storage names. + /// THe mapped . + /// Thrown if the property is missing information or has unsupported options specified. + public static VectorParamsMap MapNamedVectors(IEnumerable vectorProperties, Dictionary storagePropertyNames) + { + var vectorParamsMap = new VectorParamsMap(); + + foreach (var vectorProperty in vectorProperties) + { + var storageName = storagePropertyNames[vectorProperty.DataModelPropertyName]; + + // Add each vector property to the vectors map. + vectorParamsMap.Map.Add( + storageName, + MapSingleVector(vectorProperty)); + } + + return vectorParamsMap; + } + + /// + /// Get the configured from the given . + /// If none is configured, the default is . + /// + /// The vector property definition. + /// The chosen . + /// Thrown if a distance function is chosen that isn't supported by qdrant. 
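The practical effect of s_schemaTypeMap and MapSingleVector is easiest to see with concrete values. A sketch of the output for a typical embedding property, with the dimension count illustrative:

```csharp
using Qdrant.Client.Grpc;

// What MapSingleVector produces for a 1536-dimension vector property with no
// DistanceFunction set: Size copied from Dimensions, Distance defaulting to Cosine.
var vectorParams = new VectorParams
{
    Size = 1536,
    Distance = Distance.Cosine,
};

// CreateCollectionAsync then adds one payload index per filterable or full-text
// data property, using the schema map above, e.g.:
//   string   (IsFilterable)         -> PayloadSchemaType.Keyword
//   int/long (IsFilterable)         -> PayloadSchemaType.Integer
//   string   (IsFullTextSearchable) -> PayloadSchemaType.Text
```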
+ public static Distance GetSDKDistanceAlgorithm(VectorStoreRecordVectorProperty vectorProperty) + { + if (vectorProperty.DistanceFunction is null) + { + return Distance.Cosine; + } + + return vectorProperty.DistanceFunction switch + { + DistanceFunction.CosineSimilarity => Distance.Cosine, + DistanceFunction.DotProductSimilarity => Distance.Dot, + DistanceFunction.EuclideanDistance => Distance.Euclid, + DistanceFunction.ManhattanDistance => Distance.Manhattan, + _ => throw new InvalidOperationException($"Distance function '{vectorProperty.DistanceFunction}' for {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' is not supported by the Qdrant VectorStore.") + }; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreOptions.cs b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreOptions.cs new file mode 100644 index 000000000000..c3ead1bdee2d --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreOptions.cs @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace Microsoft.SemanticKernel.Connectors.Qdrant; + +/// +/// Options when creating a . +/// +public sealed class QdrantVectorStoreOptions +{ + /// + /// Gets or sets a value indicating whether the vectors in the store are named and multiple vectors are supported, or whether there is just a single unnamed vector per qdrant point. + /// Defaults to single vector per point. + /// + public bool HasNamedVectors { get; set; } = false; + + /// + /// An optional factory to use for constructing instances, if custom options are required. + /// + public IQdrantVectorStoreRecordCollectionFactory? VectorStoreCollectionFactory { get; init; } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreRecordCollection.cs b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreRecordCollection.cs new file mode 100644 index 000000000000..a49c530b2cdb --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreRecordCollection.cs @@ -0,0 +1,481 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Grpc.Core; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client; +using Qdrant.Client.Grpc; + +namespace Microsoft.SemanticKernel.Connectors.Qdrant; + +/// +/// Service for storing and retrieving vector records, that uses Qdrant as the underlying storage. +/// +/// The data model to use for adding, updating and retrieving data from storage. +#pragma warning disable CA1711 // Identifiers should not have incorrect suffix +public sealed class QdrantVectorStoreRecordCollection : IVectorStoreRecordCollection, IVectorStoreRecordCollection +#pragma warning restore CA1711 // Identifiers should not have incorrect suffix + where TRecord : class +{ + /// A set of types that a key on the provided model may have. + private static readonly HashSet s_supportedKeyTypes = + [ + typeof(ulong), + typeof(Guid) + ]; + + /// The name of this database for telemetry purposes. + private const string DatabaseName = "Qdrant"; + + /// The name of the upsert operation for telemetry purposes. + private const string UpsertName = "Upsert"; + + /// The name of the Delete operation for telemetry purposes. 
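Because HasNamedVectors changes both collection creation and point layout, the option is worth seeing in use. A minimal construction sketch, assuming a Qdrant instance reachable on localhost:

```csharp
using Microsoft.SemanticKernel.Connectors.Qdrant;
using Qdrant.Client;

var client = new QdrantClient("localhost");

// Default: one unnamed vector per point.
var singleVectorStore = new QdrantVectorStore(client);

// Opt in to named vectors; collections created through this store use a
// VectorParamsMap keyed by storage property name instead of a single VectorParams.
var namedVectorStore = new QdrantVectorStore(
    client,
    new QdrantVectorStoreOptions { HasNamedVectors = true });
```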
+ private const string DeleteName = "Delete"; + + /// Qdrant client that can be used to manage the collections and points in a Qdrant store. + private readonly MockableQdrantClient _qdrantClient; + + /// The name of the collection that this will access. + private readonly string _collectionName; + + /// Optional configuration options for this class. + private readonly QdrantVectorStoreRecordCollectionOptions _options; + + /// A definition of the current storage model. + private readonly VectorStoreRecordDefinition _vectorStoreRecordDefinition; + + /// A mapper to use for converting between qdrant point and consumer models. + private readonly IVectorStoreRecordMapper _mapper; + + /// A dictionary that maps from a property name to the configured name that should be used when storing it. + private readonly Dictionary _storagePropertyNames = new(); + + /// + /// Initializes a new instance of the class. + /// + /// Qdrant client that can be used to manage the collections and points in a Qdrant store. + /// The name of the collection that this will access. + /// Optional configuration options for this class. + /// Thrown if the is null. + /// Thrown for any misconfigured options. + public QdrantVectorStoreRecordCollection(QdrantClient qdrantClient, string collectionName, QdrantVectorStoreRecordCollectionOptions? options = null) + : this(new MockableQdrantClient(qdrantClient), collectionName, options) + { + } + + /// + /// Initializes a new instance of the class. + /// + /// Qdrant client that can be used to manage the collections and points in a Qdrant store. + /// The name of the collection that this will access. + /// Optional configuration options for this class. + /// Thrown if the is null. + /// Thrown for any misconfigured options. + internal QdrantVectorStoreRecordCollection(MockableQdrantClient qdrantClient, string collectionName, QdrantVectorStoreRecordCollectionOptions? options = null) + { + // Verify. + Verify.NotNull(qdrantClient); + Verify.NotNullOrWhiteSpace(collectionName); + + // Assign. + this._qdrantClient = qdrantClient; + this._collectionName = collectionName; + this._options = options ?? new QdrantVectorStoreRecordCollectionOptions(); + this._vectorStoreRecordDefinition = this._options.VectorStoreRecordDefinition ?? VectorStoreRecordPropertyReader.CreateVectorStoreRecordDefinitionFromType(typeof(TRecord), true); + + // Validate property types. + var properties = VectorStoreRecordPropertyReader.SplitDefinitionAndVerify(typeof(TRecord).Name, this._vectorStoreRecordDefinition, supportsMultipleVectors: this._options.HasNamedVectors, requiresAtLeastOneVector: !this._options.HasNamedVectors); + VectorStoreRecordPropertyReader.VerifyPropertyTypes([properties.KeyProperty], s_supportedKeyTypes, "Key"); + + // Build a map of property names to storage names. + this._storagePropertyNames = VectorStoreRecordPropertyReader.BuildPropertyNameToStorageNameMap(properties); + + // Assign Mapper. + if (this._options.PointStructCustomMapper is not null) + { + // Custom Mapper. + this._mapper = this._options.PointStructCustomMapper; + } + else + { + // Default Mapper. 
+ this._mapper = new QdrantVectorStoreRecordMapper( + this._vectorStoreRecordDefinition, + this._options.HasNamedVectors, + this._storagePropertyNames); + } + } + + /// + public string CollectionName => this._collectionName; + + /// + public Task CollectionExistsAsync(CancellationToken cancellationToken = default) + { + return this.RunOperationAsync( + "CollectionExists", + () => this._qdrantClient.CollectionExistsAsync(this._collectionName, cancellationToken)); + } + + /// + public async Task CreateCollectionAsync(CancellationToken cancellationToken = default) + { + if (!this._options.HasNamedVectors) + { + // If we are not using named vectors, we can only have one vector property. We can assume we have exactly one, since this is already verified in the constructor. + var singleVectorProperty = this._vectorStoreRecordDefinition.Properties.OfType().First(); + + // Map the single vector property to the qdrant config. + var vectorParams = QdrantVectorStoreCollectionCreateMapping.MapSingleVector(singleVectorProperty!); + + // Create the collection with the single unnamed vector. + await this.RunOperationAsync( + "CreateCollection", + () => this._qdrantClient.CreateCollectionAsync( + this._collectionName, + vectorParams, + cancellationToken: cancellationToken)).ConfigureAwait(false); + } + else + { + // Since we are using named vectors, iterate over all vector properties. + var vectorProperties = this._vectorStoreRecordDefinition.Properties.OfType(); + + // Map the named vectors to the qdrant config. + var vectorParamsMap = QdrantVectorStoreCollectionCreateMapping.MapNamedVectors(vectorProperties, this._storagePropertyNames); + + // Create the collection with named vectors. + await this.RunOperationAsync( + "CreateCollection", + () => this._qdrantClient.CreateCollectionAsync( + this._collectionName, + vectorParamsMap, + cancellationToken: cancellationToken)).ConfigureAwait(false); + } + + // Add indexes for each of the data properties that require filtering. + var dataProperties = this._vectorStoreRecordDefinition.Properties.OfType().Where(x => x.IsFilterable); + foreach (var dataProperty in dataProperties) + { + var storageFieldName = this._storagePropertyNames[dataProperty.DataModelPropertyName]; + var schemaType = QdrantVectorStoreCollectionCreateMapping.s_schemaTypeMap[dataProperty.PropertyType!]; + + await this.RunOperationAsync( + "CreatePayloadIndex", + () => this._qdrantClient.CreatePayloadIndexAsync( + this._collectionName, + storageFieldName, + schemaType, + cancellationToken: cancellationToken)).ConfigureAwait(false); + } + + // Add indexes for each of the data properties that require full text search. + dataProperties = this._vectorStoreRecordDefinition.Properties.OfType().Where(x => x.IsFullTextSearchable); + foreach (var dataProperty in dataProperties) + { + if (dataProperty.PropertyType != typeof(string)) + { + throw new InvalidOperationException($"Property {nameof(dataProperty.IsFullTextSearchable)} on {nameof(VectorStoreRecordDataProperty)} '{dataProperty.DataModelPropertyName}' is set to true, but the property type is not a string. 
The Qdrant VectorStore supports {nameof(dataProperty.IsFullTextSearchable)} on string properties only."); + } + + var storageFieldName = this._storagePropertyNames[dataProperty.DataModelPropertyName]; + + await this.RunOperationAsync( + "CreatePayloadIndex", + () => this._qdrantClient.CreatePayloadIndexAsync( + this._collectionName, + storageFieldName, + PayloadSchemaType.Text, + cancellationToken: cancellationToken)).ConfigureAwait(false); + } + } + + /// + public async Task CreateCollectionIfNotExistsAsync(CancellationToken cancellationToken = default) + { + if (!await this.CollectionExistsAsync(cancellationToken).ConfigureAwait(false)) + { + await this.CreateCollectionAsync(cancellationToken).ConfigureAwait(false); + } + } + + /// + public Task DeleteCollectionAsync(CancellationToken cancellationToken = default) + { + return this.RunOperationAsync( + "DeleteCollection", + () => this._qdrantClient.DeleteCollectionAsync(this._collectionName, null, cancellationToken)); + } + + /// + public async Task GetAsync(ulong key, GetRecordOptions? options = null, CancellationToken cancellationToken = default) + { + Verify.NotNull(key); + + var retrievedPoints = await this.GetBatchAsync([key], options, cancellationToken).ToListAsync(cancellationToken).ConfigureAwait(false); + return retrievedPoints.FirstOrDefault(); + } + + /// + public async Task GetAsync(Guid key, GetRecordOptions? options = null, CancellationToken cancellationToken = default) + { + Verify.NotNull(key); + + var retrievedPoints = await this.GetBatchAsync([key], options, cancellationToken).ToListAsync(cancellationToken).ConfigureAwait(false); + return retrievedPoints.FirstOrDefault(); + } + + /// + public IAsyncEnumerable GetBatchAsync(IEnumerable keys, GetRecordOptions? options = default, CancellationToken cancellationToken = default) + { + return this.GetBatchByPointIdAsync(keys, key => new PointId { Num = key }, options, cancellationToken); + } + + /// + public IAsyncEnumerable GetBatchAsync(IEnumerable keys, GetRecordOptions? options = default, CancellationToken cancellationToken = default) + { + return this.GetBatchByPointIdAsync(keys, key => new PointId { Uuid = key.ToString("D") }, options, cancellationToken); + } + + /// + public Task DeleteAsync(ulong key, DeleteRecordOptions? options = null, CancellationToken cancellationToken = default) + { + Verify.NotNull(key); + + return this.RunOperationAsync( + DeleteName, + () => this._qdrantClient.DeleteAsync( + this._collectionName, + key, + wait: true, + cancellationToken: cancellationToken)); + } + + /// + public Task DeleteAsync(Guid key, DeleteRecordOptions? options = null, CancellationToken cancellationToken = default) + { + Verify.NotNull(key); + + return this.RunOperationAsync( + DeleteName, + () => this._qdrantClient.DeleteAsync( + this._collectionName, + key, + wait: true, + cancellationToken: cancellationToken)); + } + + /// + public Task DeleteBatchAsync(IEnumerable keys, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(keys); + + return this.RunOperationAsync( + DeleteName, + () => this._qdrantClient.DeleteAsync( + this._collectionName, + keys.ToList(), + wait: true, + cancellationToken: cancellationToken)); + } + + /// + public Task DeleteBatchAsync(IEnumerable keys, DeleteRecordOptions? 
options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(keys); + + return this.RunOperationAsync( + DeleteName, + () => this._qdrantClient.DeleteAsync( + this._collectionName, + keys.ToList(), + wait: true, + cancellationToken: cancellationToken)); + } + + /// + public async Task UpsertAsync(TRecord record, UpsertRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(record); + + // Create point from record. + var pointStruct = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + UpsertName, + () => this._mapper.MapFromDataToStorageModel(record)); + + // Upsert. + await this.RunOperationAsync( + UpsertName, + () => this._qdrantClient.UpsertAsync(this._collectionName, [pointStruct], true, cancellationToken: cancellationToken)).ConfigureAwait(false); + return pointStruct.Id.Num; + } + + /// + async Task IVectorStoreRecordCollection.UpsertAsync(TRecord record, UpsertRecordOptions? options, CancellationToken cancellationToken) + { + Verify.NotNull(record); + + // Create point from record. + var pointStruct = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + UpsertName, + () => this._mapper.MapFromDataToStorageModel(record)); + + // Upsert. + await this.RunOperationAsync( + UpsertName, + () => this._qdrantClient.UpsertAsync(this._collectionName, [pointStruct], true, cancellationToken: cancellationToken)).ConfigureAwait(false); + return Guid.Parse(pointStruct.Id.Uuid); + } + + /// + public async IAsyncEnumerable UpsertBatchAsync(IEnumerable records, UpsertRecordOptions? options = default, [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + Verify.NotNull(records); + + // Create points from records. + var pointStructs = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + UpsertName, + () => records.Select(this._mapper.MapFromDataToStorageModel).ToList()); + + // Upsert. + await this.RunOperationAsync( + UpsertName, + () => this._qdrantClient.UpsertAsync(this._collectionName, pointStructs, true, cancellationToken: cancellationToken)).ConfigureAwait(false); + + foreach (var pointStruct in pointStructs) + { + yield return pointStruct.Id.Num; + } + } + + /// + async IAsyncEnumerable IVectorStoreRecordCollection.UpsertBatchAsync(IEnumerable records, UpsertRecordOptions? options, [EnumeratorCancellation] CancellationToken cancellationToken) + { + Verify.NotNull(records); + + // Create points from records. + var pointStructs = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + UpsertName, + () => records.Select(this._mapper.MapFromDataToStorageModel).ToList()); + + // Upsert. + await this.RunOperationAsync( + UpsertName, + () => this._qdrantClient.UpsertAsync(this._collectionName, pointStructs, true, cancellationToken: cancellationToken)).ConfigureAwait(false); + + foreach (var pointStruct in pointStructs) + { + yield return Guid.Parse(pointStruct.Id.Uuid); + } + } + + /// + /// Get the requested records from the Qdrant store using the provided keys. + /// + /// The keys of the points to retrieve. + /// Function to convert the provided keys to point ids. + /// The retrieval options. + /// The to monitor for cancellation requests. The default is . + /// The retrieved points. + private async IAsyncEnumerable GetBatchByPointIdAsync( + IEnumerable keys, + Func keyConverter, + GetRecordOptions? 
options, + [EnumeratorCancellation] CancellationToken cancellationToken) + { + const string OperationName = "Retrieve"; + Verify.NotNull(keys); + + // Create options. + var pointsIds = keys.Select(key => keyConverter(key)).ToArray(); + var includeVectors = options?.IncludeVectors ?? false; + + // Retrieve data points. + var retrievedPoints = await this.RunOperationAsync( + OperationName, + () => this._qdrantClient.RetrieveAsync(this._collectionName, pointsIds, true, includeVectors, cancellationToken: cancellationToken)).ConfigureAwait(false); + + // Convert the retrieved points to the target data model. + foreach (var retrievedPoint in retrievedPoints) + { + var pointStruct = new PointStruct + { + Id = retrievedPoint.Id, + Vectors = retrievedPoint.Vectors, + Payload = { } + }; + + foreach (KeyValuePair payloadEntry in retrievedPoint.Payload) + { + pointStruct.Payload.Add(payloadEntry.Key, payloadEntry.Value); + } + + yield return VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + OperationName, + () => this._mapper.MapFromStorageToDataModel(pointStruct, new() { IncludeVectors = includeVectors })); + } + } + + /// + /// Run the given operation and wrap any with ."/> + /// + /// The type of database operation being run. + /// The operation to run. + /// The result of the operation. + private async Task RunOperationAsync(string operationName, Func operation) + { + try + { + await operation.Invoke().ConfigureAwait(false); + } + catch (RpcException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = operationName + }; + } + } + + /// + /// Run the given operation and wrap any with ."/> + /// + /// The response type of the operation. + /// The type of database operation being run. + /// The operation to run. + /// The result of the operation. + private async Task RunOperationAsync(string operationName, Func> operation) + { + try + { + return await operation.Invoke().ConfigureAwait(false); + } + catch (RpcException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = operationName + }; + } + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreRecordCollectionOptions.cs b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreRecordCollectionOptions.cs new file mode 100644 index 000000000000..e6c51c97f6a6 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreRecordCollectionOptions.cs @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.SemanticKernel.Data; +using Qdrant.Client.Grpc; + +namespace Microsoft.SemanticKernel.Connectors.Qdrant; + +/// +/// Options when creating a . +/// +public sealed class QdrantVectorStoreRecordCollectionOptions + where TRecord : class +{ + /// + /// Gets or sets a value indicating whether the vectors in the store are named and multiple vectors are supported, or whether there is just a single unnamed vector per qdrant point. + /// Defaults to single vector per point. + /// + public bool HasNamedVectors { get; set; } = false; + + /// + /// Gets or sets an optional custom mapper to use when converting between the data model and the qdrant point. 
+ /// + /// + /// If not set, a default mapper that uses json as an intermediary to allow automatic mapping to a wide variety of types will be used. + /// + public IVectorStoreRecordMapper? PointStructCustomMapper { get; init; } = null; + + /// + /// Gets or sets an optional record definition that defines the schema of the record type. + /// + /// + /// If not provided, the schema will be inferred from the record model class using reflection. + /// In this case, the record model properties must be annotated with the appropriate attributes to indicate their usage. + /// See , and . + /// + public VectorStoreRecordDefinition? VectorStoreRecordDefinition { get; init; } = null; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreRecordMapper.cs b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreRecordMapper.cs new file mode 100644 index 000000000000..2c4238982391 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Qdrant/QdrantVectorStoreRecordMapper.cs @@ -0,0 +1,298 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using System.Text.Json; +using System.Text.Json.Nodes; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client.Grpc; + +namespace Microsoft.SemanticKernel.Connectors.Qdrant; + +/// +/// Mapper between a Qdrant record and the consumer data model that uses json as an intermediary to allow supporting a wide range of models. +/// +/// The consumer data model to map to or from. +internal sealed class QdrantVectorStoreRecordMapper : IVectorStoreRecordMapper + where TRecord : class +{ + /// A set of types that data properties on the provided model may have. + private static readonly HashSet s_supportedDataTypes = + [ + typeof(string), + typeof(int), + typeof(long), + typeof(double), + typeof(float), + typeof(bool), + typeof(int?), + typeof(long?), + typeof(double?), + typeof(float?), + typeof(bool?) + ]; + + /// A set of types that vectors on the provided model may have. + /// + /// While qdrant supports float32 and uint64, the api only supports float64, therefore + /// any float32 vectors will be converted to float64 before being sent to qdrant. + /// + private static readonly HashSet s_supportedVectorTypes = + [ + typeof(ReadOnlyMemory), + typeof(ReadOnlyMemory?), + typeof(ReadOnlyMemory), + typeof(ReadOnlyMemory?) + ]; + + /// A property info object that points at the key property for the current model, allowing easy reading and writing of this property. + private readonly PropertyInfo _keyPropertyInfo; + + /// A list of property info objects that point at the data properties in the current model, and allows easy reading and writing of these properties. + private readonly List _dataPropertiesInfo; + + /// A list of property info objects that point at the vector properties in the current model, and allows easy reading and writing of these properties. + private readonly List _vectorPropertiesInfo; + + /// A dictionary that maps from a property name to the configured name that should be used when storing it. + private readonly Dictionary _storagePropertyNames; + + /// A dictionary that maps from a property name to the configured name that should be used when serializing it to json. + private readonly Dictionary _jsonPropertyNames = new(); + + /// A value indicating whether the vectors in the store are named, or whether there is just a single unnamed vector per qdrant point. 
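When the default json-based mapper does not fit, PointStructCustomMapper replaces it wholesale. A skeletal sketch for the hypothetical Guid-keyed Hotel record used earlier; the interface and PointStruct shapes are the ones used elsewhere in this diff, and the payload field name is illustrative:

```csharp
using System;
using System.Linq;
using Microsoft.SemanticKernel.Data;
using Qdrant.Client.Grpc;

// Wired in via QdrantVectorStoreRecordCollectionOptions.PointStructCustomMapper.
internal sealed class HotelPointStructMapper : IVectorStoreRecordMapper<Hotel, PointStruct>
{
    public PointStruct MapFromDataToStorageModel(Hotel dataModel)
    {
        var point = new PointStruct
        {
            Id = new PointId { Uuid = dataModel.HotelId.ToString("D") },
            Vectors = new Vectors(),
        };

        // Payload fields are gRPC Value instances; vectors are float arrays.
        point.Payload.Add("hotel_name", new Value { StringValue = dataModel.Name });
        point.Vectors.Vector = dataModel.DescriptionEmbedding?.ToArray() ?? Array.Empty<float>();
        return point;
    }

    public Hotel MapFromStorageToDataModel(PointStruct storageModel, StorageToDataModelMapperOptions options)
    {
        return new Hotel
        {
            HotelId = Guid.Parse(storageModel.Id.Uuid),
            Name = storageModel.Payload["hotel_name"].StringValue,
            DescriptionEmbedding = options.IncludeVectors
                ? (ReadOnlyMemory<float>?)storageModel.Vectors.Vector.Data.ToArray()
                : null,
        };
    }
}
```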
+ private readonly bool _hasNamedVectors; + + /// + /// Initializes a new instance of the class. + /// + /// The record definition that defines the schema of the record type. + /// A value indicating whether the vectors in the store are named, or whether there is just a single unnamed vector per qdrant point. + /// A dictionary that maps from a property name to the configured name that should be used when storing it. + public QdrantVectorStoreRecordMapper( + VectorStoreRecordDefinition vectorStoreRecordDefinition, + bool hasNamedVectors, + Dictionary storagePropertyNames) + { + Verify.NotNull(vectorStoreRecordDefinition); + Verify.NotNull(storagePropertyNames); + + // Validate property types. + var propertiesInfo = VectorStoreRecordPropertyReader.FindProperties(typeof(TRecord), vectorStoreRecordDefinition, supportsMultipleVectors: hasNamedVectors); + VectorStoreRecordPropertyReader.VerifyPropertyTypes(propertiesInfo.DataProperties, s_supportedDataTypes, "Data", supportEnumerable: true); + VectorStoreRecordPropertyReader.VerifyPropertyTypes(propertiesInfo.VectorProperties, s_supportedVectorTypes, "Vector"); + + // Assign. + this._hasNamedVectors = hasNamedVectors; + this._keyPropertyInfo = propertiesInfo.KeyProperty; + this._dataPropertiesInfo = propertiesInfo.DataProperties; + this._vectorPropertiesInfo = propertiesInfo.VectorProperties; + this._storagePropertyNames = storagePropertyNames; + + // Get json storage names and store for later use. + this._jsonPropertyNames = VectorStoreRecordPropertyReader.BuildPropertyNameToJsonPropertyNameMap(propertiesInfo, typeof(TRecord), JsonSerializerOptions.Default); + } + + /// + public PointStruct MapFromDataToStorageModel(TRecord dataModel) + { + PointId pointId; + if (this._keyPropertyInfo.PropertyType == typeof(ulong)) + { + var key = this._keyPropertyInfo.GetValue(dataModel) as ulong? ?? throw new VectorStoreRecordMappingException($"Missing key property {this._keyPropertyInfo.Name} on provided record of type {typeof(TRecord).FullName}."); + pointId = new PointId { Num = key }; + } + else if (this._keyPropertyInfo.PropertyType == typeof(Guid)) + { + var key = this._keyPropertyInfo.GetValue(dataModel) as Guid? ?? throw new VectorStoreRecordMappingException($"Missing key property {this._keyPropertyInfo.Name} on provided record of type {typeof(TRecord).FullName}."); + pointId = new PointId { Uuid = key.ToString("D") }; + } + else + { + throw new VectorStoreRecordMappingException($"Unsupported key type {this._keyPropertyInfo.PropertyType.FullName} for key property {this._keyPropertyInfo.Name} on provided record of type {typeof(TRecord).FullName}."); + } + + // Create point. + var pointStruct = new PointStruct + { + Id = pointId, + Vectors = new Vectors(), + Payload = { }, + }; + + // Add point payload. + foreach (var dataPropertyInfo in this._dataPropertiesInfo) + { + var propertyName = this._storagePropertyNames[dataPropertyInfo.Name]; + var propertyValue = dataPropertyInfo.GetValue(dataModel); + pointStruct.Payload.Add(propertyName, ConvertToGrpcFieldValue(propertyValue)); + } + + // Add vectors. 
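The branch that follows produces one of two gRPC point layouts; a side-by-side sketch, with the vector values and storage name illustrative:

```csharp
using Qdrant.Client.Grpc;

// HasNamedVectors = true: each vector keyed by its storage property name.
var named = new Vectors { Vectors_ = new NamedVectors() };
named.Vectors_.Vectors.Add("description_embedding", new float[] { 0.1f, 0.2f, 0.3f, 0.4f });

// HasNamedVectors = false: a single unnamed vector on the point.
var unnamed = new Vectors();
unnamed.Vector = new float[] { 0.1f, 0.2f, 0.3f, 0.4f };
```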
+ if (this._hasNamedVectors) + { + var namedVectors = new NamedVectors(); + foreach (var vectorPropertyInfo in this._vectorPropertiesInfo) + { + var propertyName = this._storagePropertyNames[vectorPropertyInfo.Name]; + var propertyValue = vectorPropertyInfo.GetValue(dataModel); + if (propertyValue is not null) + { + var castPropertyValue = (ReadOnlyMemory)propertyValue; + namedVectors.Vectors.Add(propertyName, castPropertyValue.ToArray()); + } + } + + pointStruct.Vectors.Vectors_ = namedVectors; + } + else + { + // We already verified in the constructor via FindProperties that there is exactly one vector property when not using named vectors. + var vectorPropertyInfo = this._vectorPropertiesInfo.First(); + if (vectorPropertyInfo.GetValue(dataModel) is ReadOnlyMemory floatROM) + { + pointStruct.Vectors.Vector = floatROM.ToArray(); + } + else + { + throw new VectorStoreRecordMappingException($"Vector property {vectorPropertyInfo.Name} on provided record of type {typeof(TRecord).FullName} may not be null when not using named vectors."); + } + } + + return pointStruct; + } + + /// + public TRecord MapFromStorageToDataModel(PointStruct storageModel, StorageToDataModelMapperOptions options) + { + // Get the key property name and value. + var keyJsonName = this._jsonPropertyNames[this._keyPropertyInfo.Name]; + var keyPropertyValue = storageModel.Id.HasNum ? storageModel.Id.Num as object : storageModel.Id.Uuid as object; + + // Create a json object to represent the point. + var outputJsonObject = new JsonObject + { + { keyJsonName, JsonValue.Create(keyPropertyValue) }, + }; + + // Add each vector property if embeddings are included in the point. + if (options?.IncludeVectors is true) + { + foreach (var vectorProperty in this._vectorPropertiesInfo) + { + var propertyName = this._storagePropertyNames[vectorProperty.Name]; + var jsonName = this._jsonPropertyNames[vectorProperty.Name]; + + if (this._hasNamedVectors) + { + if (storageModel.Vectors.Vectors_.Vectors.TryGetValue(propertyName, out var vector)) + { + outputJsonObject.Add(jsonName, new JsonArray(vector.Data.Select(x => JsonValue.Create(x)).ToArray())); + } + } + else + { + outputJsonObject.Add(jsonName, new JsonArray(storageModel.Vectors.Vector.Data.Select(x => JsonValue.Create(x)).ToArray())); + } + } + } + + // Add each data property. + foreach (var dataProperty in this._dataPropertiesInfo) + { + var propertyName = this._storagePropertyNames[dataProperty.Name]; + var jsonName = this._jsonPropertyNames[dataProperty.Name]; + + if (storageModel.Payload.TryGetValue(propertyName, out var value)) + { + outputJsonObject.Add(jsonName, ConvertFromGrpcFieldValueToJsonNode(value)); + } + } + + // Convert from json object to the target data model. + return JsonSerializer.Deserialize(outputJsonObject)!; + } + + /// + /// Convert the given to the correct native type based on its properties. + /// + /// The value to convert to a native type. + /// The converted native value. + /// Thrown when an unsupported type is encountered. + private static JsonNode? 
ConvertFromGrpcFieldValueToJsonNode(Value payloadValue) + { + return payloadValue.KindCase switch + { + Value.KindOneofCase.NullValue => null, + Value.KindOneofCase.IntegerValue => JsonValue.Create(payloadValue.IntegerValue), + Value.KindOneofCase.StringValue => JsonValue.Create(payloadValue.StringValue), + Value.KindOneofCase.DoubleValue => JsonValue.Create(payloadValue.DoubleValue), + Value.KindOneofCase.BoolValue => JsonValue.Create(payloadValue.BoolValue), + Value.KindOneofCase.ListValue => new JsonArray(payloadValue.ListValue.Values.Select(x => ConvertFromGrpcFieldValueToJsonNode(x)).ToArray()), + Value.KindOneofCase.StructValue => new JsonObject(payloadValue.StructValue.Fields.ToDictionary(x => x.Key, x => ConvertFromGrpcFieldValueToJsonNode(x.Value))), + _ => throw new VectorStoreRecordMappingException($"Unsupported grpc value kind {payloadValue.KindCase}."), + }; + } + + /// + /// Convert the given to a object that can be stored in Qdrant. + /// + /// The object to convert. + /// The converted Qdrant value. + /// Thrown when an unsupported type is encountered. + private static Value ConvertToGrpcFieldValue(object? sourceValue) + { + var value = new Value(); + if (sourceValue is null) + { + value.NullValue = NullValue.NullValue; + } + else if (sourceValue is int intValue) + { + value.IntegerValue = intValue; + } + else if (sourceValue is long longValue) + { + value.IntegerValue = longValue; + } + else if (sourceValue is string stringValue) + { + value.StringValue = stringValue; + } + else if (sourceValue is float floatValue) + { + value.DoubleValue = floatValue; + } + else if (sourceValue is double doubleValue) + { + value.DoubleValue = doubleValue; + } + else if (sourceValue is bool boolValue) + { + value.BoolValue = boolValue; + } + else if (sourceValue is IEnumerable || + sourceValue is IEnumerable || + sourceValue is IEnumerable || + sourceValue is IEnumerable || + sourceValue is IEnumerable || + sourceValue is IEnumerable) + { + var listValue = sourceValue as IEnumerable; + value.ListValue = new ListValue(); + foreach (var item in listValue!) + { + value.ListValue.Values.Add(ConvertToGrpcFieldValue(item)); + } + } + else + { + throw new VectorStoreRecordMappingException($"Unsupported source value type {sourceValue?.GetType().FullName}."); + } + + return value; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/IRedisVectorStoreRecordCollectionFactory.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/IRedisVectorStoreRecordCollectionFactory.cs new file mode 100644 index 000000000000..f4eae7661b7a --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/IRedisVectorStoreRecordCollectionFactory.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.SemanticKernel.Data; +using StackExchange.Redis; + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Interface for constructing Redis instances when using to retrieve these. +/// +public interface IRedisVectorStoreRecordCollectionFactory +{ + /// + /// Constructs a new instance of the . + /// + /// The data type of the record key. + /// The data model to use for adding, updating and retrieving data from storage. + /// The Redis database to read/write records from. + /// The name of the collection to connect to. + /// An optional record definition that defines the schema of the record type. If not present, attributes on will be used. + /// The new instance of . 
+ IVectorStoreRecordCollection CreateVectorStoreRecordCollection(IDatabase database, string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition) + where TKey : notnull + where TRecord : class; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisHashSetVectorStoreRecordCollection.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisHashSetVectorStoreRecordCollection.cs new file mode 100644 index 000000000000..e68edb98870e --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisHashSetVectorStoreRecordCollection.cs @@ -0,0 +1,374 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Data; +using NRedisStack.RedisStackCommands; +using NRedisStack.Search; +using NRedisStack.Search.Literals.Enums; +using StackExchange.Redis; + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Service for storing and retrieving vector records, that uses Redis HashSets as the underlying storage. +/// +/// The data model to use for adding, updating and retrieving data from storage. +#pragma warning disable CA1711 // Identifiers should not have incorrect suffix +public sealed class RedisHashSetVectorStoreRecordCollection : IVectorStoreRecordCollection +#pragma warning restore CA1711 // Identifiers should not have incorrect suffix + where TRecord : class +{ + /// The name of this database for telemetry purposes. + private const string DatabaseName = "Redis"; + + /// A set of types that a key on the provided model may have. + private static readonly HashSet s_supportedKeyTypes = + [ + typeof(string) + ]; + + /// A set of types that data properties on the provided model may have. + private static readonly HashSet s_supportedDataTypes = + [ + typeof(string), + typeof(int), + typeof(uint), + typeof(long), + typeof(ulong), + typeof(double), + typeof(float), + typeof(bool), + typeof(int?), + typeof(uint?), + typeof(long?), + typeof(ulong?), + typeof(double?), + typeof(float?), + typeof(bool?) + ]; + + /// A set of types that vectors on the provided model may have. + private static readonly HashSet s_supportedVectorTypes = + [ + typeof(ReadOnlyMemory), + typeof(ReadOnlyMemory), + typeof(ReadOnlyMemory?), + typeof(ReadOnlyMemory?) + ]; + + /// The Redis database to read/write records from. + private readonly IDatabase _database; + + /// The name of the collection that this will access. + private readonly string _collectionName; + + /// Optional configuration options for this class. + private readonly RedisHashSetVectorStoreRecordCollectionOptions _options; + + /// A definition of the current storage model. + private readonly VectorStoreRecordDefinition _vectorStoreRecordDefinition; + + /// An array of the names of all the data properties that are part of the Redis payload, i.e. all properties except the key and vector properties. + private readonly RedisValue[] _dataStoragePropertyNames; + + /// A dictionary that maps from a property name to the storage name that should be used when serializing it to json for data and vector properties. + private readonly Dictionary _storagePropertyNames = new(); + + /// The mapper to use when mapping between the consumer data model and the Redis record. + private readonly IVectorStoreRecordMapper _mapper; + + /// + /// Initializes a new instance of the class. + /// + /// The Redis database to read/write records from. 
+ /// The name of the collection that this will access. + /// Optional configuration options for this class. + /// Throw when parameters are invalid. + public RedisHashSetVectorStoreRecordCollection(IDatabase database, string collectionName, RedisHashSetVectorStoreRecordCollectionOptions? options = null) + { + // Verify. + Verify.NotNull(database); + Verify.NotNullOrWhiteSpace(collectionName); + + // Assign. + this._database = database; + this._collectionName = collectionName; + this._options = options ?? new RedisHashSetVectorStoreRecordCollectionOptions(); + this._vectorStoreRecordDefinition = this._options.VectorStoreRecordDefinition ?? VectorStoreRecordPropertyReader.CreateVectorStoreRecordDefinitionFromType(typeof(TRecord), true); + + // Validate property types. + var properties = VectorStoreRecordPropertyReader.SplitDefinitionAndVerify(typeof(TRecord).Name, this._vectorStoreRecordDefinition, supportsMultipleVectors: true, requiresAtLeastOneVector: false); + VectorStoreRecordPropertyReader.VerifyPropertyTypes([properties.KeyProperty], s_supportedKeyTypes, "Key"); + VectorStoreRecordPropertyReader.VerifyPropertyTypes(properties.DataProperties, s_supportedDataTypes, "Data"); + VectorStoreRecordPropertyReader.VerifyPropertyTypes(properties.VectorProperties, s_supportedVectorTypes, "Vector"); + + // Lookup storage property names. + this._storagePropertyNames = VectorStoreRecordPropertyReader.BuildPropertyNameToStorageNameMap(properties); + this._dataStoragePropertyNames = properties + .DataProperties + .Select(x => this._storagePropertyNames[x.DataModelPropertyName]) + .Select(RedisValue.Unbox) + .ToArray(); + + // Assign Mapper. + if (this._options.HashEntriesCustomMapper is not null) + { + this._mapper = this._options.HashEntriesCustomMapper; + } + else + { + this._mapper = new RedisHashSetVectorStoreRecordMapper(this._vectorStoreRecordDefinition, this._storagePropertyNames); + } + } + + /// + public string CollectionName => this._collectionName; + + /// + public async Task CollectionExistsAsync(CancellationToken cancellationToken = default) + { + try + { + await this._database.FT().InfoAsync(this._collectionName).ConfigureAwait(false); + return true; + } + catch (RedisServerException ex) when (ex.Message.Contains("Unknown index name")) + { + return false; + } + catch (RedisConnectionException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = "FT.INFO" + }; + } + } + + /// + public Task CreateCollectionAsync(CancellationToken cancellationToken = default) + { + // Map the record definition to a schema. + var schema = RedisVectorStoreCollectionCreateMapping.MapToSchema(this._vectorStoreRecordDefinition.Properties, this._storagePropertyNames); + + // Create the index creation params. + // Add the collection name and colon as the index prefix, which means that any record where the key is prefixed with this text will be indexed by this index + var createParams = new FTCreateParams() + .AddPrefix($"{this._collectionName}:") + .On(IndexDataType.HASH); + + // Create the index. 
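Because the index is created with the "{collection}:" prefix, only records whose keys carry that prefix are indexed. A construction sketch that opts into automatic prefixing, assuming a local Redis Stack instance; the record type and attribute names are illustrative assumptions:

```csharp
using System;
using Microsoft.SemanticKernel.Connectors.Redis;
using Microsoft.SemanticKernel.Data;
using StackExchange.Redis;

var database = ConnectionMultiplexer.Connect("localhost:6379").GetDatabase();

// With PrefixCollectionNameToKeyNames set, keys are stored and read as "hotels:{key}",
// matching the prefix the index was created with.
var collection = new RedisHashSetVectorStoreRecordCollection<RedisHotel>(
    database,
    "hotels",
    new RedisHashSetVectorStoreRecordCollectionOptions<RedisHotel>
    {
        PrefixCollectionNameToKeyNames = true,
    });

// Hypothetical string-keyed record; hashset collections only support string keys.
public sealed class RedisHotel
{
    [VectorStoreRecordKey]
    public string HotelId { get; init; } = string.Empty;

    [VectorStoreRecordData(IsFilterable = true)]
    public string Name { get; init; } = string.Empty;

    [VectorStoreRecordVector(4)]
    public ReadOnlyMemory<float> DescriptionEmbedding { get; init; }
}
```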
+ return this.RunOperationAsync("FT.CREATE", () => this._database.FT().CreateAsync(this._collectionName, createParams, schema)); + } + + /// + public async Task CreateCollectionIfNotExistsAsync(CancellationToken cancellationToken = default) + { + if (!await this.CollectionExistsAsync(cancellationToken).ConfigureAwait(false)) + { + await this.CreateCollectionAsync(cancellationToken).ConfigureAwait(false); + } + } + + /// + public Task DeleteCollectionAsync(CancellationToken cancellationToken = default) + { + return this.RunOperationAsync("FT.DROPINDEX", () => this._database.FT().DropIndexAsync(this._collectionName)); + } + + /// + public async Task GetAsync(string key, GetRecordOptions? options = null, CancellationToken cancellationToken = default) + { + Verify.NotNullOrWhiteSpace(key); + + // Create Options + var maybePrefixedKey = this.PrefixKeyIfNeeded(key); + var includeVectors = options?.IncludeVectors ?? false; + var operationName = includeVectors ? "HGETALL" : "HMGET"; + + // Get the Redis value. + HashEntry[] retrievedHashEntries; + if (includeVectors) + { + retrievedHashEntries = await this.RunOperationAsync( + operationName, + () => this._database.HashGetAllAsync(maybePrefixedKey)).ConfigureAwait(false); + } + else + { + var fieldKeys = this._dataStoragePropertyNames; + var retrievedValues = await this.RunOperationAsync( + operationName, + () => this._database.HashGetAsync(maybePrefixedKey, fieldKeys)).ConfigureAwait(false); + retrievedHashEntries = fieldKeys.Zip(retrievedValues, (field, value) => new HashEntry(field, value)).Where(x => x.Value.HasValue).ToArray(); + } + + // Return null if we found nothing. + if (retrievedHashEntries == null || retrievedHashEntries.Length == 0) + { + return null; + } + + // Convert to the caller's data model. + return VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + operationName, + () => + { + return this._mapper.MapFromStorageToDataModel((key, retrievedHashEntries), new() { IncludeVectors = includeVectors }); + }); + } + + /// + public async IAsyncEnumerable GetBatchAsync(IEnumerable keys, GetRecordOptions? options = default, [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + Verify.NotNull(keys); + + // Get records in parallel. + var tasks = keys.Select(x => this.GetAsync(x, options, cancellationToken)); + var results = await Task.WhenAll(tasks).ConfigureAwait(false); + foreach (var result in results) + { + if (result is not null) + { + yield return result; + } + } + } + + /// + public Task DeleteAsync(string key, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNullOrWhiteSpace(key); + + // Create Options + var maybePrefixedKey = this.PrefixKeyIfNeeded(key); + + // Remove. + return this.RunOperationAsync( + "DEL", + () => this._database + .KeyDeleteAsync(maybePrefixedKey)); + } + + /// + public Task DeleteBatchAsync(IEnumerable keys, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(keys); + + // Remove records in parallel. + var tasks = keys.Select(key => this.DeleteAsync(key, options, cancellationToken)); + return Task.WhenAll(tasks); + } + + /// + public async Task UpsertAsync(TRecord record, UpsertRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(record); + + // Map. 
+ var redisHashSetRecord = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + "HSET", + () => this._mapper.MapFromDataToStorageModel(record)); + + // Upsert. + var maybePrefixedKey = this.PrefixKeyIfNeeded(redisHashSetRecord.Key); + await this.RunOperationAsync( + "HSET", + () => this._database + .HashSetAsync( + maybePrefixedKey, + redisHashSetRecord.HashEntries)).ConfigureAwait(false); + + return redisHashSetRecord.Key; + } + + /// + public async IAsyncEnumerable UpsertBatchAsync(IEnumerable records, UpsertRecordOptions? options = default, [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + Verify.NotNull(records); + + // Upsert records in parallel. + var tasks = records.Select(x => this.UpsertAsync(x, options, cancellationToken)); + var results = await Task.WhenAll(tasks).ConfigureAwait(false); + foreach (var result in results) + { + if (result is not null) + { + yield return result; + } + } + } + + /// + /// Prefix the key with the collection name if the option is set. + /// + /// The key to prefix. + /// The updated key if updating is required, otherwise the input key. + private string PrefixKeyIfNeeded(string key) + { + if (this._options.PrefixCollectionNameToKeyNames) + { + return $"{this._collectionName}:{key}"; + } + + return key; + } + + /// + /// Run the given operation and wrap any Redis exceptions with ."/> + /// + /// The response type of the operation. + /// The type of database operation being run. + /// The operation to run. + /// The result of the operation. + private async Task RunOperationAsync(string operationName, Func> operation) + { + try + { + return await operation.Invoke().ConfigureAwait(false); + } + catch (RedisConnectionException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = operationName + }; + } + } + + /// + /// Run the given operation and wrap any Redis exceptions with ."/> + /// + /// The type of database operation being run. + /// The operation to run. + /// The result of the operation. + private async Task RunOperationAsync(string operationName, Func operation) + { + try + { + await operation.Invoke().ConfigureAwait(false); + } + catch (RedisConnectionException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = operationName + }; + } + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisHashSetVectorStoreRecordCollectionOptions.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisHashSetVectorStoreRecordCollectionOptions.cs new file mode 100644 index 000000000000..7e17859ae0c9 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisHashSetVectorStoreRecordCollectionOptions.cs @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.SemanticKernel.Data; +using StackExchange.Redis; + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Options when creating a . +/// +public sealed class RedisHashSetVectorStoreRecordCollectionOptions + where TRecord : class +{ + /// + /// Gets or sets a value indicating whether the collection name should be prefixed to the + /// key names before reading or writing to the Redis store. Default is false. 
+ /// + /// + /// For a record to be indexed by a specific Redis index, the key name must be prefixed with the matching prefix configured on the Redis index. + /// You can either pass in keys that are already prefixed, or set this option to true to have the collection name prefixed to the key names automatically. + /// + public bool PrefixCollectionNameToKeyNames { get; init; } = false; + + /// + /// Gets or sets an optional custom mapper to use when converting between the data model and the Redis record. + /// + public IVectorStoreRecordMapper? HashEntriesCustomMapper { get; init; } = null; + + /// + /// Gets or sets an optional record definition that defines the schema of the record type. + /// + /// + /// If not provided, the schema will be inferred from the record model class using reflection. + /// In this case, the record model properties must be annotated with the appropriate attributes to indicate their usage. + /// See , and . + /// + public VectorStoreRecordDefinition? VectorStoreRecordDefinition { get; init; } = null; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisHashSetVectorStoreRecordMapper.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisHashSetVectorStoreRecordMapper.cs new file mode 100644 index 000000000000..ef31bf09f475 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisHashSetVectorStoreRecordMapper.cs @@ -0,0 +1,169 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using System.Runtime.InteropServices; +using System.Text.Json; +using System.Text.Json.Nodes; +using Microsoft.SemanticKernel.Data; +using StackExchange.Redis; + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Class for mapping between a hashset stored in redis, and the consumer data model. +/// +/// The consumer data model to map to or from. +internal sealed class RedisHashSetVectorStoreRecordMapper : IVectorStoreRecordMapper + where TConsumerDataModel : class +{ + /// A property info object that points at the key property for the current model, allowing easy reading and writing of this property. + private readonly PropertyInfo _keyPropertyInfo; + + /// The name of the temporary json property that the key field will be serialized / parsed from. + private readonly string _keyFieldJsonPropertyName; + + /// A list of property info objects that point at the data properties in the current model, and allows easy reading and writing of these properties. + private readonly IEnumerable _dataPropertiesInfo; + + /// A list of property info objects that point at the vector properties in the current model, and allows easy reading and writing of these properties. + private readonly IEnumerable _vectorPropertiesInfo; + + /// A dictionary that maps from a property name to the configured name that should be used when storing it. + private readonly Dictionary _storagePropertyNames; + + /// A dictionary that maps from a property name to the configured name that should be used when serializing it to json for data and vector properties. + private readonly Dictionary _jsonPropertyNames = new(); + + /// + /// Initializes a new instance of the class. + /// + /// The record definition that defines the schema of the record type. + /// A dictionary that maps from a property name to the configured name that should be used when storing it. 
+ public RedisHashSetVectorStoreRecordMapper( + VectorStoreRecordDefinition vectorStoreRecordDefinition, + Dictionary<string, string> storagePropertyNames) + { + Verify.NotNull(vectorStoreRecordDefinition); + Verify.NotNull(storagePropertyNames); + + (PropertyInfo keyPropertyInfo, List<PropertyInfo> dataPropertiesInfo, List<PropertyInfo> vectorPropertiesInfo) = VectorStoreRecordPropertyReader.FindProperties(typeof(TConsumerDataModel), vectorStoreRecordDefinition, supportsMultipleVectors: true); + + this._keyPropertyInfo = keyPropertyInfo; + this._dataPropertiesInfo = dataPropertiesInfo; + this._vectorPropertiesInfo = vectorPropertiesInfo; + this._storagePropertyNames = storagePropertyNames; + + this._keyFieldJsonPropertyName = VectorStoreRecordPropertyReader.GetJsonPropertyName(JsonSerializerOptions.Default, keyPropertyInfo); + foreach (var property in dataPropertiesInfo.Concat(vectorPropertiesInfo)) + { + this._jsonPropertyNames[property.Name] = VectorStoreRecordPropertyReader.GetJsonPropertyName(JsonSerializerOptions.Default, property); + } + } + + /// + public (string Key, HashEntry[] HashEntries) MapFromDataToStorageModel(TConsumerDataModel dataModel) + { + var keyValue = this._keyPropertyInfo.GetValue(dataModel) as string ?? throw new VectorStoreRecordMappingException($"Missing key property {this._keyPropertyInfo.Name} on provided record of type {typeof(TConsumerDataModel).FullName}."); + + var hashEntries = new List<HashEntry>(); + foreach (var property in this._dataPropertiesInfo) + { + var storageName = this._storagePropertyNames[property.Name]; + var value = property.GetValue(dataModel); + hashEntries.Add(new HashEntry(storageName, RedisValue.Unbox(value))); + } + + foreach (var property in this._vectorPropertiesInfo) + { + var storageName = this._storagePropertyNames[property.Name]; + var value = property.GetValue(dataModel); + if (value is not null) + { + // Convert the vector to a byte array and store it in the hash entry. + // We only support float and double vectors and we do checking in the + // collection constructor to ensure that the model has no other vector types. + if (value is ReadOnlyMemory<float> rom) + { + hashEntries.Add(new HashEntry(storageName, ConvertVectorToBytes(rom))); + } + else if (value is ReadOnlyMemory<double> rod) + { + hashEntries.Add(new HashEntry(storageName, ConvertVectorToBytes(rod))); + } + } + } + + return (keyValue, hashEntries.ToArray()); + } + + /// + public TConsumerDataModel MapFromStorageToDataModel((string Key, HashEntry[] HashEntries) storageModel, StorageToDataModelMapperOptions options) + { + var jsonObject = new JsonObject(); + + foreach (var property in this._dataPropertiesInfo) + { + var storageName = this._storagePropertyNames[property.Name]; + var jsonName = this._jsonPropertyNames[property.Name]; + var hashEntry = storageModel.HashEntries.FirstOrDefault(x => x.Name == storageName); + if (hashEntry.Name.HasValue) + { + var typeOrNullableType = Nullable.GetUnderlyingType(property.PropertyType) ?? property.PropertyType; + var convertedValue = Convert.ChangeType(hashEntry.Value, typeOrNullableType); + jsonObject.Add(jsonName, JsonValue.Create(convertedValue)); + } + } + + if (options.IncludeVectors) + { + foreach (var property in this._vectorPropertiesInfo) + { + var storageName = this._storagePropertyNames[property.Name]; + var jsonName = this._jsonPropertyNames[property.Name]; + + var hashEntry = storageModel.HashEntries.FirstOrDefault(x => x.Name == storageName); + if (hashEntry.Name.HasValue) + { + if (property.PropertyType == typeof(ReadOnlyMemory<float>) || property.PropertyType == typeof(ReadOnlyMemory<float>?)) + { + var array = MemoryMarshal.Cast<byte, float>((byte[])hashEntry.Value!).ToArray(); + jsonObject.Add(jsonName, JsonValue.Create(array)); + } + else if (property.PropertyType == typeof(ReadOnlyMemory<double>) || property.PropertyType == typeof(ReadOnlyMemory<double>?)) + { + var array = MemoryMarshal.Cast<byte, double>((byte[])hashEntry.Value!).ToArray(); + jsonObject.Add(jsonName, JsonValue.Create(array)); + } + else + { + throw new VectorStoreRecordMappingException($"Invalid vector type '{property.PropertyType.Name}' found on property '{property.Name}' on provided record of type '{typeof(TConsumerDataModel).FullName}'. Only float and double vectors are supported."); + } + } + } + } + + // Check that the key field is not already present in the redis value. + if (jsonObject.ContainsKey(this._keyFieldJsonPropertyName)) + { + throw new VectorStoreRecordMappingException($"Invalid data format for document with key '{storageModel.Key}'. Key property '{this._keyFieldJsonPropertyName}' is already present on retrieved object."); + } + + // Since the key is not stored in the redis value, add it back in before deserializing into the data model. + jsonObject.Add(this._keyFieldJsonPropertyName, storageModel.Key); + + return JsonSerializer.Deserialize<TConsumerDataModel>(jsonObject)!; + } + + private static byte[] ConvertVectorToBytes(ReadOnlyMemory<float> vector) + { + return MemoryMarshal.AsBytes(vector.Span).ToArray(); + } + + private static byte[] ConvertVectorToBytes(ReadOnlyMemory<double> vector) + { + return MemoryMarshal.AsBytes(vector.Span).ToArray(); + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisJsonVectorStoreRecordCollection.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisJsonVectorStoreRecordCollection.cs new file mode 100644 index 000000000000..44a6bc41d195 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisJsonVectorStoreRecordCollection.cs @@ -0,0 +1,426 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Text.Json; +using System.Text.Json.Nodes; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Data; +using NRedisStack.Json.DataTypes; +using NRedisStack.RedisStackCommands; +using NRedisStack.Search; +using NRedisStack.Search.Literals.Enums; +using StackExchange.Redis; + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Service for storing and retrieving vector records, that uses Redis JSON as the underlying storage. +/// +/// The data model to use for adding, updating and retrieving data from storage.
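For illustration (not part of this patch): the raw-byte vector layout used by ConvertVectorToBytes and the MemoryMarshal.Cast read path above can be verified in isolation. This standalone sketch round-trips a FLOAT32 vector the same way, using host endianness.

using System;
using System.Runtime.InteropServices;

// Write path: ReadOnlyMemory<float> -> raw bytes (what ConvertVectorToBytes does).
ReadOnlyMemory<float> vector = new float[] { 1.0f, 2.0f, 3.0f };
byte[] blob = MemoryMarshal.AsBytes(vector.Span).ToArray();           // 12 bytes

// Read path: raw bytes -> float[] (what the storage-to-data mapping does).
float[] restored = MemoryMarshal.Cast<byte, float>(blob).ToArray();   // { 1, 2, 3 }
Console.WriteLine(string.Join(", ", restored));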
+#pragma warning disable CA1711 // Identifiers should not have incorrect suffix +public sealed class RedisJsonVectorStoreRecordCollection<TRecord> : IVectorStoreRecordCollection<string, TRecord> +#pragma warning restore CA1711 // Identifiers should not have incorrect suffix + where TRecord : class +{ + /// The name of this database for telemetry purposes. + private const string DatabaseName = "Redis"; + + /// A set of types that a key on the provided model may have. + private static readonly HashSet<Type> s_supportedKeyTypes = + [ + typeof(string) + ]; + + /// A set of types that vectors on the provided model may have. + private static readonly HashSet<Type> s_supportedVectorTypes = + [ + typeof(ReadOnlyMemory<float>), + typeof(ReadOnlyMemory<double>), + typeof(ReadOnlyMemory<float>?), + typeof(ReadOnlyMemory<double>?) + ]; + + /// The Redis database to read/write records from. + private readonly IDatabase _database; + + /// The name of the collection that this will access. + private readonly string _collectionName; + + /// Optional configuration options for this class. + private readonly RedisJsonVectorStoreRecordCollectionOptions<TRecord> _options; + + /// A definition of the current storage model. + private readonly VectorStoreRecordDefinition _vectorStoreRecordDefinition; + + /// An array of the storage names of all the data properties that are part of the Redis payload, i.e. all properties except the key and vector properties. + private readonly string[] _dataStoragePropertyNames; + + /// A dictionary that maps from a property name to the storage name that should be used when serializing it to json for data and vector properties. + private readonly Dictionary<string, string> _storagePropertyNames = new(); + + /// The mapper to use when mapping between the consumer data model and the Redis record. + private readonly IVectorStoreRecordMapper<TRecord, (string Key, JsonNode Node)> _mapper; + + /// The JSON serializer options to use when converting between the data model and the Redis record. + private readonly JsonSerializerOptions _jsonSerializerOptions; + + /// + /// Initializes a new instance of the class. + /// + /// The Redis database to read/write records from. + /// The name of the collection that this will access. + /// Optional configuration options for this class. + /// Thrown when parameters are invalid. + public RedisJsonVectorStoreRecordCollection(IDatabase database, string collectionName, RedisJsonVectorStoreRecordCollectionOptions<TRecord>? options = null) + { + // Verify. + Verify.NotNull(database); + Verify.NotNullOrWhiteSpace(collectionName); + + // Assign. + this._database = database; + this._collectionName = collectionName; + this._options = options ?? new RedisJsonVectorStoreRecordCollectionOptions<TRecord>(); + this._jsonSerializerOptions = this._options.JsonSerializerOptions ?? JsonSerializerOptions.Default; + this._vectorStoreRecordDefinition = this._options.VectorStoreRecordDefinition ?? VectorStoreRecordPropertyReader.CreateVectorStoreRecordDefinitionFromType(typeof(TRecord), true); + + // Validate property types. + var properties = VectorStoreRecordPropertyReader.SplitDefinitionAndVerify(typeof(TRecord).Name, this._vectorStoreRecordDefinition, supportsMultipleVectors: true, requiresAtLeastOneVector: false); + VectorStoreRecordPropertyReader.VerifyPropertyTypes([properties.KeyProperty], s_supportedKeyTypes, "Key"); + VectorStoreRecordPropertyReader.VerifyPropertyTypes(properties.VectorProperties, s_supportedVectorTypes, "Vector"); + + // Lookup json storage property names.
+ var keyJsonPropertyName = VectorStoreRecordPropertyReader.GetJsonPropertyName(properties.KeyProperty, typeof(TRecord), this._jsonSerializerOptions); + + // Lookup storage property names. + this._storagePropertyNames = VectorStoreRecordPropertyReader.BuildPropertyNameToJsonPropertyNameMap(properties, typeof(TRecord), this._jsonSerializerOptions); + this._dataStoragePropertyNames = properties + .DataProperties + .Select(x => this._storagePropertyNames[x.DataModelPropertyName]) + .ToArray(); + + // Assign Mapper. + if (this._options.JsonNodeCustomMapper is not null) + { + this._mapper = this._options.JsonNodeCustomMapper; + } + else + { + this._mapper = new RedisJsonVectorStoreRecordMapper<TRecord>(keyJsonPropertyName, this._jsonSerializerOptions); + } + } + + /// + public string CollectionName => this._collectionName; + + /// + public async Task<bool> CollectionExistsAsync(CancellationToken cancellationToken = default) + { + try + { + await this._database.FT().InfoAsync(this._collectionName).ConfigureAwait(false); + return true; + } + catch (RedisServerException ex) when (ex.Message.Contains("Unknown index name")) + { + return false; + } + catch (RedisConnectionException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = "FT.INFO" + }; + } + } + + /// + public Task CreateCollectionAsync(CancellationToken cancellationToken = default) + { + // Map the record definition to a schema. + var schema = RedisVectorStoreCollectionCreateMapping.MapToSchema(this._vectorStoreRecordDefinition.Properties, this._storagePropertyNames); + + // Create the index creation params. + // Add the collection name and colon as the index prefix, which means that any record where the key is prefixed with this text will be indexed by this index. + var createParams = new FTCreateParams() + .AddPrefix($"{this._collectionName}:") + .On(IndexDataType.JSON); + + // Create the index. + return this.RunOperationAsync("FT.CREATE", () => this._database.FT().CreateAsync(this._collectionName, createParams, schema)); + } + + /// + public async Task CreateCollectionIfNotExistsAsync(CancellationToken cancellationToken = default) + { + if (!await this.CollectionExistsAsync(cancellationToken).ConfigureAwait(false)) + { + await this.CreateCollectionAsync(cancellationToken).ConfigureAwait(false); + } + } + + /// + public Task DeleteCollectionAsync(CancellationToken cancellationToken = default) + { + return this.RunOperationAsync("FT.DROPINDEX", () => this._database.FT().DropIndexAsync(this._collectionName)); + } + + /// + public async Task<TRecord?> GetAsync(string key, GetRecordOptions? options = null, CancellationToken cancellationToken = default) + { + Verify.NotNullOrWhiteSpace(key); + + // Create Options + var maybePrefixedKey = this.PrefixKeyIfNeeded(key); + var includeVectors = options?.IncludeVectors ?? false; + + // Get the Redis value. + var redisResult = await this.RunOperationAsync( + "GET", + () => options?.IncludeVectors is true ? + this._database + .JSON() + .GetAsync(maybePrefixedKey) : + this._database + .JSON() + .GetAsync(maybePrefixedKey, this._dataStoragePropertyNames)).ConfigureAwait(false); + + // Check if the key was found before trying to parse the result. + if (redisResult is null || redisResult.IsNull) + { + return null; + } + + // Check if the value contained any JSON text before trying to parse the result.
+ var redisResultString = redisResult.ToString(); + if (redisResultString is null) + { + throw new VectorStoreRecordMappingException($"Document with key '{key}' does not contain any JSON."); + } + + // Convert to the caller's data model. + return VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + "GET", + () => + { + var node = JsonSerializer.Deserialize<JsonNode>(redisResultString, this._jsonSerializerOptions)!; + return this._mapper.MapFromStorageToDataModel((key, node), new() { IncludeVectors = includeVectors }); + }); + } + + /// + public async IAsyncEnumerable<TRecord> GetBatchAsync(IEnumerable<string> keys, GetRecordOptions? options = default, [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + Verify.NotNull(keys); + var keysList = keys.ToList(); + + // Create Options + var maybePrefixedKeys = keysList.Select(key => this.PrefixKeyIfNeeded(key)); + var redisKeys = maybePrefixedKeys.Select(x => new RedisKey(x)).ToArray(); + var includeVectors = options?.IncludeVectors ?? false; + + // Get the list of Redis results. + var redisResults = await this.RunOperationAsync( + "MGET", + () => this._database + .JSON() + .MGetAsync(redisKeys, "$")).ConfigureAwait(false); + + // Loop through each key and result and convert to the caller's data model. + for (int i = 0; i < keysList.Count; i++) + { + var key = keysList[i]; + var redisResult = redisResults[i]; + + // Check if the key was found before trying to parse the result. + if (redisResult is null || redisResult.IsNull) + { + continue; + } + + // Check if the value contained any JSON text before trying to parse the result. + var redisResultString = redisResult.ToString(); + if (redisResultString is null) + { + throw new VectorStoreRecordMappingException($"Document with key '{key}' does not contain any JSON."); + } + + // Convert to the caller's data model. + yield return VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + "MGET", + () => + { + var node = JsonSerializer.Deserialize<JsonNode>(redisResultString, this._jsonSerializerOptions)!; + return this._mapper.MapFromStorageToDataModel((key, node), new() { IncludeVectors = includeVectors }); + }); + } + } + + /// + public Task DeleteAsync(string key, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNullOrWhiteSpace(key); + + // Create Options + var maybePrefixedKey = this.PrefixKeyIfNeeded(key); + + // Remove. + return this.RunOperationAsync( + "DEL", + () => this._database + .JSON() + .DelAsync(maybePrefixedKey)); + } + + /// + public Task DeleteBatchAsync(IEnumerable<string> keys, DeleteRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(keys); + + // Remove records in parallel. + var tasks = keys.Select(key => this.DeleteAsync(key, options, cancellationToken)); + return Task.WhenAll(tasks); + } + + /// + public async Task<string> UpsertAsync(TRecord record, UpsertRecordOptions? options = default, CancellationToken cancellationToken = default) + { + Verify.NotNull(record); + + // Map. + var redisJsonRecord = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + "SET", + () => + { + var mapResult = this._mapper.MapFromDataToStorageModel(record); + var serializedRecord = JsonSerializer.Serialize(mapResult.Node, this._jsonSerializerOptions); + return new { Key = mapResult.Key, SerializedRecord = serializedRecord }; + }); + + // Upsert.
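For illustration (not part of this patch): a usage sketch for the batch read above, given an already-constructed collection instance for the hypothetical Hotel model. JSON.MGET returns a slot per requested key, but missing keys are skipped here, so the yielded stream can be shorter than the key list.

// Keys are passed unprefixed; with PrefixCollectionNameToKeyNames (default true for
// JSON collections) they are stored and fetched as "hotels:h1" and "hotels:h2".
await foreach (Hotel hotel in collection.GetBatchAsync(new[] { "h1", "h2" }))
{
    Console.WriteLine(hotel.HotelName);
}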
+ var maybePrefixedKey = this.PrefixKeyIfNeeded(redisJsonRecord.Key); + await this.RunOperationAsync( + "SET", + () => this._database + .JSON() + .SetAsync( + maybePrefixedKey, + "$", + redisJsonRecord.SerializedRecord)).ConfigureAwait(false); + + return redisJsonRecord.Key; + } + + /// + public async IAsyncEnumerable<string> UpsertBatchAsync(IEnumerable<TRecord> records, UpsertRecordOptions? options = default, [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + Verify.NotNull(records); + + // Map. + var redisRecords = new List<(string maybePrefixedKey, string originalKey, string serializedRecord)>(); + foreach (var record in records) + { + var redisJsonRecord = VectorStoreErrorHandler.RunModelConversion( + DatabaseName, + this._collectionName, + "MSET", + () => + { + var mapResult = this._mapper.MapFromDataToStorageModel(record); + var serializedRecord = JsonSerializer.Serialize(mapResult.Node, this._jsonSerializerOptions); + return new { Key = mapResult.Key, SerializedRecord = serializedRecord }; + }); + + var maybePrefixedKey = this.PrefixKeyIfNeeded(redisJsonRecord.Key); + redisRecords.Add((maybePrefixedKey, redisJsonRecord.Key, redisJsonRecord.SerializedRecord)); + } + + // Upsert. + var keyPathValues = redisRecords.Select(x => new KeyPathValue(x.maybePrefixedKey, "$", x.serializedRecord)).ToArray(); + await this.RunOperationAsync( + "MSET", + () => this._database + .JSON() + .MSetAsync(keyPathValues)).ConfigureAwait(false); + + // Return keys of upserted records. + foreach (var record in redisRecords) + { + yield return record.originalKey; + } + } + + /// + /// Prefix the key with the collection name if the option is set. + /// + /// The key to prefix. + /// The updated key if updating is required, otherwise the input key. + private string PrefixKeyIfNeeded(string key) + { + if (this._options.PrefixCollectionNameToKeyNames) + { + return $"{this._collectionName}:{key}"; + } + + return key; + } + + /// + /// Run the given operation and wrap any Redis exceptions with a VectorStoreOperationException. + /// + /// The type of database operation being run. + /// The operation to run. + /// The result of the operation. + private async Task RunOperationAsync(string operationName, Func<Task> operation) + { + try + { + await operation.Invoke().ConfigureAwait(false); + } + catch (RedisException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = operationName + }; + } + } + + /// + /// Run the given operation and wrap any Redis exceptions with a VectorStoreOperationException. + /// + /// The response type of the operation. + /// The type of database operation being run. + /// The operation to run. + /// The result of the operation.
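For illustration (not part of this patch): because every Redis call is funneled through these helpers, consumers can handle failures uniformly. A sketch, reusing the hypothetical collection instance from the earlier examples:

try
{
    await collection.DeleteAsync("h1");
}
catch (VectorStoreOperationException ex)
{
    // VectorStoreType, CollectionName and OperationName identify the failing call.
    Console.WriteLine($"{ex.VectorStoreType}/{ex.CollectionName}/{ex.OperationName} failed: {ex.InnerException?.Message}");
}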
+ private async Task<T> RunOperationAsync<T>(string operationName, Func<Task<T>> operation) + { + try + { + return await operation.Invoke().ConfigureAwait(false); + } + catch (RedisException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + CollectionName = this._collectionName, + OperationName = operationName + }; + } + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisJsonVectorStoreRecordCollectionOptions.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisJsonVectorStoreRecordCollectionOptions.cs new file mode 100644 index 000000000000..382484e9cea9 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisJsonVectorStoreRecordCollectionOptions.cs @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; +using System.Text.Json.Nodes; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Options when creating a RedisJsonVectorStoreRecordCollection. +/// +public sealed class RedisJsonVectorStoreRecordCollectionOptions<TRecord> + where TRecord : class +{ + /// + /// Gets or sets a value indicating whether the collection name should be prefixed to the + /// key names before reading or writing to the Redis store. Default is true. + /// + /// + /// For a record to be indexed by a specific Redis index, the key name must be prefixed with the matching prefix configured on the Redis index. + /// You can either pass in keys that are already prefixed, or set this option to true to have the collection name prefixed to the key names automatically. + /// + public bool PrefixCollectionNameToKeyNames { get; init; } = true; + + /// + /// Gets or sets an optional custom mapper to use when converting between the data model and the Redis record. + /// + /// + /// If not set, the default built-in mapper will be used, which uses record attributes or the provided VectorStoreRecordDefinition to map the record. + /// + public IVectorStoreRecordMapper<TRecord, (string Key, JsonNode Node)>? JsonNodeCustomMapper { get; init; } = null; + + /// + /// Gets or sets an optional record definition that defines the schema of the record type. + /// + /// + /// If not provided, the schema will be inferred from the record model class using reflection. + /// In this case, the record model properties must be annotated with the appropriate attributes to indicate their usage. + /// See VectorStoreRecordKeyAttribute, VectorStoreRecordDataAttribute and VectorStoreRecordVectorAttribute. + /// + public VectorStoreRecordDefinition? VectorStoreRecordDefinition { get; init; } = null; + + /// + /// Gets or sets the JSON serializer options to use when converting between the data model and the Redis record. + /// + public JsonSerializerOptions? JsonSerializerOptions { get; init; } = null; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisJsonVectorStoreRecordMapper.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisJsonVectorStoreRecordMapper.cs new file mode 100644 index 000000000000..3237c50c992e --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisJsonVectorStoreRecordMapper.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; +using System.Text.Json.Nodes; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Class for mapping between a JSON node stored in Redis and the consumer data model. +/// +/// The consumer data model to map to or from.
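For illustration (not part of this patch): a configuration sketch for the options type above. The camelCase policy is an arbitrary choice to show that the serializer options drive both the stored document shape and the derived storage property names; database is assumed to be an existing StackExchange.Redis IDatabase.

using System.Text.Json;

var jsonOptions = new RedisJsonVectorStoreRecordCollectionOptions<Hotel>
{
    JsonSerializerOptions = new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase },
};
var collection = new RedisJsonVectorStoreRecordCollection<Hotel>(database, "hotels", jsonOptions);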
+internal sealed class RedisJsonVectorStoreRecordMapper : IVectorStoreRecordMapper + where TConsumerDataModel : class +{ + /// The name of the temporary json property that the key field will be serialized / parsed from. + private readonly string _keyFieldJsonPropertyName; + + /// The JSON serializer options to use when converting between the data model and the Redis record. + private readonly JsonSerializerOptions _jsonSerializerOptions; + + /// + /// Initializes a new instance of the class. + /// + /// The name of the key field on the model when serialized to json. + /// The JSON serializer options to use when converting between the data model and the Redis record. + public RedisJsonVectorStoreRecordMapper(string keyFieldJsonPropertyName, JsonSerializerOptions jsonSerializerOptions) + { + Verify.NotNullOrWhiteSpace(keyFieldJsonPropertyName); + Verify.NotNull(jsonSerializerOptions); + + this._keyFieldJsonPropertyName = keyFieldJsonPropertyName; + this._jsonSerializerOptions = jsonSerializerOptions; + } + + /// + public (string Key, JsonNode Node) MapFromDataToStorageModel(TConsumerDataModel dataModel) + { + // Convert the provided record into a JsonNode object and try to get the key field for it. + // Since we already checked that the key field is a string in the constructor, and that it exists on the model, + // the only edge case we have to be concerned about is if the key field is null. + var jsonNode = JsonSerializer.SerializeToNode(dataModel, this._jsonSerializerOptions); + if (jsonNode!.AsObject().TryGetPropertyValue(this._keyFieldJsonPropertyName, out var keyField) && keyField is JsonValue jsonValue) + { + // Remove the key field from the JSON object since we don't want to store it in the redis payload. + var keyValue = jsonValue.ToString(); + jsonNode.AsObject().Remove(this._keyFieldJsonPropertyName); + + return (keyValue, jsonNode); + } + + throw new VectorStoreRecordMappingException($"Missing key field {this._keyFieldJsonPropertyName} on provided record of type {typeof(TConsumerDataModel).FullName}."); + } + + /// + public TConsumerDataModel MapFromStorageToDataModel((string Key, JsonNode Node) storageModel, StorageToDataModelMapperOptions options) + { + JsonObject jsonObject; + + // The redis result can be either a single object or an array with a single object in the case where we are doing an MGET. + if (storageModel.Node is JsonObject topLevelJsonObject) + { + jsonObject = topLevelJsonObject; + } + else if (storageModel.Node is JsonArray jsonArray && jsonArray.Count == 1 && jsonArray[0] is JsonObject arrayEntryJsonObject) + { + jsonObject = arrayEntryJsonObject; + } + else + { + throw new VectorStoreRecordMappingException($"Invalid data format for document with key '{storageModel.Key}'"); + } + + // Check that the key field is not already present in the redis value. + if (jsonObject.ContainsKey(this._keyFieldJsonPropertyName)) + { + throw new VectorStoreRecordMappingException($"Invalid data format for document with key '{storageModel.Key}'. Key property '{this._keyFieldJsonPropertyName}' is already present on retrieved object."); + } + + // Since the key is not stored in the redis value, add it back in before deserializing into the data model. 
+ jsonObject.Add(this._keyFieldJsonPropertyName, storageModel.Key); + + return JsonSerializer.Deserialize(jsonObject, this._jsonSerializerOptions)!; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisKernelBuilderExtensions.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisKernelBuilderExtensions.cs new file mode 100644 index 000000000000..d338f0589e16 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisKernelBuilderExtensions.cs @@ -0,0 +1,40 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.SemanticKernel.Connectors.Redis; +using Microsoft.SemanticKernel.Data; +using StackExchange.Redis; + +namespace Microsoft.SemanticKernel; + +/// +/// Extension methods to register Redis instances on the . +/// +public static class RedisKernelBuilderExtensions +{ + /// + /// Register a Redis with the specified service ID and where the Redis is retrieved from the dependency injection container. + /// + /// The builder to register the on. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The kernel builder. + public static IKernelBuilder AddRedisVectorStore(this IKernelBuilder builder, RedisVectorStoreOptions? options = default, string? serviceId = default) + { + builder.Services.AddRedisVectorStore(options, serviceId); + return builder; + } + + /// + /// Register a Redis with the specified service ID and where the Redis is constructed using the provided . + /// + /// The builder to register the on. + /// The Redis connection configuration string. If not provided, an instance will be requested from the dependency injection container. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The kernel builder. + public static IKernelBuilder AddRedisVectorStore(this IKernelBuilder builder, string redisConnectionConfiguration, RedisVectorStoreOptions? options = default, string? serviceId = default) + { + builder.Services.AddRedisVectorStore(redisConnectionConfiguration, options, serviceId); + return builder; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisServiceCollectionExtensions.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisServiceCollectionExtensions.cs new file mode 100644 index 000000000000..3666d05871c4 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisServiceCollectionExtensions.cs @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel.Connectors.Redis; +using Microsoft.SemanticKernel.Data; +using StackExchange.Redis; + +namespace Microsoft.SemanticKernel; + +/// +/// Extension methods to register Redis instances on an . +/// +public static class RedisServiceCollectionExtensions +{ + /// + /// Register a Redis with the specified service ID and where the Redis is retrieved from the dependency injection container. + /// + /// The to register the on. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The service collection. + public static IServiceCollection AddRedisVectorStore(this IServiceCollection services, RedisVectorStoreOptions? options = default, string? serviceId = default) + { + // If we are not constructing the ConnectionMultiplexer, add the IVectorStore as transient, since we + // cannot make assumptions about how IDatabase is being managed. 
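For illustration (not part of this patch): a wiring sketch for the registration flavors above. The connection string is a placeholder and Hotel is the hypothetical model from the earlier example.

using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Data;

// Connection-string route: the store owns a shared ConnectionMultiplexer (singleton).
var builder = Kernel.CreateBuilder();
builder.AddRedisVectorStore("localhost:6379");
var kernel = builder.Build();

var vectorStore = kernel.GetRequiredService<IVectorStore>();
var hotels = vectorStore.GetCollection<string, Hotel>("hotels");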
+ services.AddKeyedTransient( + serviceId, + (sp, obj) => + { + var database = sp.GetRequiredService(); + var selectedOptions = options ?? sp.GetService(); + + return new RedisVectorStore( + database, + selectedOptions); + }); + + return services; + } + + /// + /// Register a Redis with the specified service ID and where the Redis is constructed using the provided . + /// + /// The to register the on. + /// The Redis connection configuration string. If not provided, an instance will be requested from the dependency injection container. + /// Optional options to further configure the . + /// An optional service id to use as the service key. + /// The service collection. + public static IServiceCollection AddRedisVectorStore(this IServiceCollection services, string redisConnectionConfiguration, RedisVectorStoreOptions? options = default, string? serviceId = default) + { + // If we are constructing the ConnectionMultiplexer, add the IVectorStore as singleton, since we are managing the lifetime + // of the ConnectionMultiplexer, and the recommendation from StackExchange.Redis is to share the ConnectionMultiplexer. + services.AddKeyedSingleton( + serviceId, + (sp, obj) => + { + var database = ConnectionMultiplexer.Connect(redisConnectionConfiguration).GetDatabase(); + var selectedOptions = options ?? sp.GetService(); + + return new RedisVectorStore( + database, + selectedOptions); + }); + + return services; + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisStorageType.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisStorageType.cs new file mode 100644 index 000000000000..9360fe448998 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisStorageType.cs @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Indicates the way in which data is stored in redis. +/// +public enum RedisStorageType +{ + /// + /// Data is stored as JSON. + /// + Json, + + /// + /// Data is stored as collections of field-value pairs. + /// + HashSet +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisVectorStore.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisVectorStore.cs new file mode 100644 index 000000000000..51a933d36062 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisVectorStore.cs @@ -0,0 +1,98 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Runtime.CompilerServices; +using System.Threading; +using Microsoft.SemanticKernel.Data; +using NRedisStack.RedisStackCommands; +using StackExchange.Redis; + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Class for accessing the list of collections in a Redis vector store. +/// +/// +/// This class can be used with collections of any schema type, but requires you to provide schema information when getting a collection. +/// +public sealed class RedisVectorStore : IVectorStore +{ + /// The name of this database for telemetry purposes. + private const string DatabaseName = "Redis"; + + /// The redis database to read/write indices from. + private readonly IDatabase _database; + + /// Optional configuration options for this class. + private readonly RedisVectorStoreOptions _options; + + /// + /// Initializes a new instance of the class. + /// + /// The redis database to read/write indices from. + /// Optional configuration options for this class. + public RedisVectorStore(IDatabase database, RedisVectorStoreOptions? 
options = default) + { + Verify.NotNull(database); + + this._database = database; + this._options = options ?? new RedisVectorStoreOptions(); + } + + /// + public IVectorStoreRecordCollection<TKey, TRecord> GetCollection<TKey, TRecord>(string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition = null) + where TKey : notnull + where TRecord : class + { + if (typeof(TKey) != typeof(string)) + { + throw new NotSupportedException("Only string keys are supported."); + } + + if (this._options.VectorStoreCollectionFactory is not null) + { + return this._options.VectorStoreCollectionFactory.CreateVectorStoreRecordCollection<TKey, TRecord>(this._database, name, vectorStoreRecordDefinition); + } + + if (this._options.StorageType == RedisStorageType.HashSet) + { + var directlyCreatedStore = new RedisHashSetVectorStoreRecordCollection<TRecord>(this._database, name, new RedisHashSetVectorStoreRecordCollectionOptions<TRecord>() { VectorStoreRecordDefinition = vectorStoreRecordDefinition }) as IVectorStoreRecordCollection<TKey, TRecord>; + return directlyCreatedStore!; + } + else + { + var directlyCreatedStore = new RedisJsonVectorStoreRecordCollection<TRecord>(this._database, name, new RedisJsonVectorStoreRecordCollectionOptions<TRecord>() { VectorStoreRecordDefinition = vectorStoreRecordDefinition }) as IVectorStoreRecordCollection<TKey, TRecord>; + return directlyCreatedStore!; + } + } + + /// + public async IAsyncEnumerable<string> ListCollectionNamesAsync([EnumeratorCancellation] CancellationToken cancellationToken = default) + { + const string OperationName = "FT._LIST"; + RedisResult[] listResult; + + try + { + listResult = await this._database.FT()._ListAsync().ConfigureAwait(false); + } + catch (RedisException ex) + { + throw new VectorStoreOperationException("Call to vector store failed.", ex) + { + VectorStoreType = DatabaseName, + OperationName = OperationName + }; + } + + foreach (var item in listResult) + { + var name = item.ToString(); + if (name != null) + { + yield return name; + } + } + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisVectorStoreCollectionCreateMapping.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisVectorStoreCollectionCreateMapping.cs new file mode 100644 index 000000000000..2bdb6a67b5ef --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisVectorStoreCollectionCreateMapping.cs @@ -0,0 +1,212 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Globalization; +using System.Linq; +using Microsoft.SemanticKernel.Data; +using NRedisStack.Search; + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Contains mapping helpers to use when creating a Redis vector collection. +/// +internal static class RedisVectorStoreCollectionCreateMapping +{ + /// A set of number types that are supported for filtering. + public static readonly HashSet<Type> s_supportedFilterableNumericDataTypes = + [ + typeof(short), + typeof(sbyte), + typeof(byte), + typeof(ushort), + typeof(int), + typeof(uint), + typeof(long), + typeof(ulong), + typeof(float), + typeof(double), + typeof(decimal), + + typeof(short?), + typeof(sbyte?), + typeof(byte?), + typeof(ushort?), + typeof(int?), + typeof(uint?), + typeof(long?), + typeof(ulong?), + typeof(float?), + typeof(double?), + typeof(decimal?), + ]; + + /// + /// Map from the given list of VectorStoreRecordProperty items to the Redis Schema. + /// + /// The property definitions to map from. + /// A dictionary that maps from a property name to the storage name that should be used when serializing it to json for data and vector properties.
+ /// The mapped Redis Schema. + /// Thrown if there are missing required or unsupported configuration options set. + public static Schema MapToSchema(IEnumerable<VectorStoreRecordProperty> properties, Dictionary<string, string> storagePropertyNames) + { + var schema = new Schema(); + + // Loop through all properties and create the index fields. + foreach (var property in properties) + { + // Key property. + if (property is VectorStoreRecordKeyProperty) + { + // Do nothing, since the key is not stored as part of the payload and therefore doesn't have to be added to the index. + continue; + } + + // Data property. + if (property is VectorStoreRecordDataProperty dataProperty && (dataProperty.IsFilterable || dataProperty.IsFullTextSearchable)) + { + var storageName = storagePropertyNames[dataProperty.DataModelPropertyName]; + + if (dataProperty.IsFilterable && dataProperty.IsFullTextSearchable) + { + throw new InvalidOperationException($"Property '{dataProperty.DataModelPropertyName}' has both {nameof(VectorStoreRecordDataProperty.IsFilterable)} and {nameof(VectorStoreRecordDataProperty.IsFullTextSearchable)} set to true, and this is not supported by the Redis VectorStore."); + } + + // Add full text search field index. + if (dataProperty.IsFullTextSearchable) + { + if (dataProperty.PropertyType == typeof(string) || (typeof(IEnumerable).IsAssignableFrom(dataProperty.PropertyType) && GetEnumerableType(dataProperty.PropertyType) == typeof(string))) + { + schema.AddTextField(new FieldName($"$.{storageName}", storageName)); + } + else + { + throw new InvalidOperationException($"Property {nameof(dataProperty.IsFullTextSearchable)} on {nameof(VectorStoreRecordDataProperty)} '{dataProperty.DataModelPropertyName}' is set to true, but the property type is not a string or IEnumerable<string>. The Redis VectorStore supports {nameof(dataProperty.IsFullTextSearchable)} on string or IEnumerable<string> properties only."); + } + } + + // Add filter field index. + if (dataProperty.IsFilterable) + { + if (dataProperty.PropertyType == typeof(string)) + { + schema.AddTagField(new FieldName($"$.{storageName}", storageName)); + } + else if (typeof(IEnumerable).IsAssignableFrom(dataProperty.PropertyType) && GetEnumerableType(dataProperty.PropertyType) == typeof(string)) + { + schema.AddTagField(new FieldName($"$.{storageName}.*", storageName)); + } + else if (RedisVectorStoreCollectionCreateMapping.s_supportedFilterableNumericDataTypes.Contains(dataProperty.PropertyType)) + { + schema.AddNumericField(new FieldName($"$.{storageName}", storageName)); + } + else + { + throw new InvalidOperationException($"Property '{dataProperty.DataModelPropertyName}' is marked as {nameof(VectorStoreRecordDataProperty.IsFilterable)}, but the property type '{dataProperty.PropertyType}' is not supported. Only string, IEnumerable<string> and numeric properties are supported for filtering by the Redis VectorStore."); + } + } + + continue; + } + + // Vector property.
+ if (property is VectorStoreRecordVectorProperty vectorProperty) + { + if (vectorProperty.Dimensions is not > 0) + { + throw new InvalidOperationException($"Property {nameof(vectorProperty.Dimensions)} on {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' must be set to a positive integer to create a collection."); + } + + var storageName = storagePropertyNames[vectorProperty.DataModelPropertyName]; + var indexKind = GetSDKIndexKind(vectorProperty); + var distanceAlgorithm = GetSDKDistanceAlgorithm(vectorProperty); + var dimensions = vectorProperty.Dimensions.Value.ToString(CultureInfo.InvariantCulture); + schema.AddVectorField(new FieldName($"$.{storageName}", storageName), indexKind, new Dictionary<string, object>() + { + ["TYPE"] = "FLOAT32", + ["DIM"] = dimensions, + ["DISTANCE_METRIC"] = distanceAlgorithm + }); + } + } + + return schema; + } + + /// + /// Get the configured index kind from the given vector property. + /// If none is configured, the default is HNSW. + /// + /// The vector property definition. + /// The chosen index algorithm. + /// Thrown if an index kind was chosen that isn't supported by Redis. + public static Schema.VectorField.VectorAlgo GetSDKIndexKind(VectorStoreRecordVectorProperty vectorProperty) + { + if (vectorProperty.IndexKind is null) + { + return Schema.VectorField.VectorAlgo.HNSW; + } + + return vectorProperty.IndexKind switch + { + IndexKind.Hnsw => Schema.VectorField.VectorAlgo.HNSW, + IndexKind.Flat => Schema.VectorField.VectorAlgo.FLAT, + _ => throw new InvalidOperationException($"Index kind '{vectorProperty.IndexKind}' for {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' is not supported by the Redis VectorStore.") + }; + } + + /// + /// Get the configured distance metric from the given vector property. + /// If none is configured, the default is cosine. + /// + /// The vector property definition. + /// The chosen distance metric. + /// Thrown if a distance function is chosen that isn't supported by Redis. + public static string GetSDKDistanceAlgorithm(VectorStoreRecordVectorProperty vectorProperty) + { + if (vectorProperty.DistanceFunction is null) + { + return "COSINE"; + } + + return vectorProperty.DistanceFunction switch + { + DistanceFunction.CosineSimilarity => "COSINE", + DistanceFunction.DotProductSimilarity => "IP", + DistanceFunction.EuclideanDistance => "L2", + _ => throw new InvalidOperationException($"Distance function '{vectorProperty.DistanceFunction}' for {nameof(VectorStoreRecordVectorProperty)} '{vectorProperty.DataModelPropertyName}' is not supported by the Redis VectorStore.") + }; + } + + /// + /// Gets the type of object stored in the given enumerable type. + /// + /// The enumerable to get the stored type for. + /// The type of object stored in the given enumerable type. + /// Thrown when the given type is not enumerable.
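For illustration (not part of this patch): to make the emitted index concrete, this is approximately the NRedisStack schema the mapping above produces for the hypothetical Hotel model from earlier, namely a TAG field for the filterable string plus an HNSW FLOAT32 vector field with the COSINE default.

using System.Collections.Generic;
using NRedisStack.Search;

var schema = new Schema()
    .AddTagField(new FieldName("$.HotelName", "HotelName"))
    .AddVectorField(
        new FieldName("$.DescriptionEmbedding", "DescriptionEmbedding"),
        Schema.VectorField.VectorAlgo.HNSW,
        new Dictionary<string, object>
        {
            ["TYPE"] = "FLOAT32",
            ["DIM"] = "4",
            ["DISTANCE_METRIC"] = "COSINE",
        });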
+ private static Type GetEnumerableType(Type type) + { + if (type == typeof(IEnumerable)) + { + return typeof(object); + } + else if (type.IsArray) + { + return type.GetElementType()!; + } + + if (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(IEnumerable<>)) + { + return type.GetGenericArguments()[0]; + } + + if (type.GetInterfaces().FirstOrDefault(i => i.IsGenericType && i.GetGenericTypeDefinition() == typeof(IEnumerable<>)) is Type enumerableInterface) + { + return enumerableInterface.GetGenericArguments()[0]; + } + + throw new InvalidOperationException($"Data type '{type}' for {nameof(VectorStoreRecordDataProperty)} is not supported by the Redis VectorStore."); + } +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Redis/RedisVectorStoreOptions.cs b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisVectorStoreOptions.cs new file mode 100644 index 000000000000..0434b3c633ec --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Memory.Redis/RedisVectorStoreOptions.cs @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft. All rights reserved. + +namespace Microsoft.SemanticKernel.Connectors.Redis; + +/// +/// Options when creating a RedisVectorStore. +/// +public sealed class RedisVectorStoreOptions +{ + /// + /// An optional factory to use for constructing IVectorStoreRecordCollection instances, if custom options are required. + /// + public IRedisVectorStoreRecordCollectionFactory? VectorStoreCollectionFactory { get; init; } + + /// + /// Indicates the way in which data should be stored in Redis. Default is RedisStorageType.Json. + /// + public RedisStorageType? StorageType { get; init; } = RedisStorageType.Json; +} diff --git a/dotnet/src/Connectors/Connectors.Memory.Sqlite/SqliteMemoryStore.cs b/dotnet/src/Connectors/Connectors.Memory.Sqlite/SqliteMemoryStore.cs index 3891df9c4de9..232b0e97b9dc 100644 --- a/dotnet/src/Connectors/Connectors.Memory.Sqlite/SqliteMemoryStore.cs +++ b/dotnet/src/Connectors/Connectors.Memory.Sqlite/SqliteMemoryStore.cs @@ -130,7 +130,6 @@ public async Task RemoveBatchAsync(string collectionName, IEnumerable ke yield break; } - var collectionMemories = new List(); List<(MemoryRecord Record, double Score)> embeddings = []; await foreach (var record in this.GetAllAsync(collectionName, cancellationToken).ConfigureAwait(false)) diff --git a/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/Client/MistralClientTests.cs b/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/Client/MistralClientTests.cs index 0394f7590b24..fbd082eb077c 100644 --- a/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/Client/MistralClientTests.cs +++ b/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/Client/MistralClientTests.cs @@ -566,7 +566,7 @@ private MistralClient CreateMistralClient(string modelId, string requestUri, par private MistralClient CreateMistralClientStreaming(string modelId, string requestUri, params string[] responseData) { var responses = responseData.Select(this.GetTestResponseAsBytes).ToArray(); - this.DelegatingHandler = new AssertingDelegatingHandler(requestUri, responses); + this.DelegatingHandler = new AssertingDelegatingHandler(requestUri, true, responses); this.HttpClient = new HttpClient(this.DelegatingHandler, false); var client = new MistralClient(modelId, this.HttpClient, "key"); return client; diff --git a/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/MistralTestBase.cs b/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/MistralTestBase.cs index d29adbe59ac6..ad60ce84b6f6 100644 ---
a/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/MistralTestBase.cs +++
b/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/MistralTestBase.cs @@ -81,10 +81,10 @@ internal AssertingDelegatingHandler(string requestUri, params string[] responseS this._responseStringArray = responseStringArray; } - internal AssertingDelegatingHandler(string requestUri, params byte[][] responseBytesArray) + internal AssertingDelegatingHandler(string requestUri, bool stream = true, params byte[][] responseBytesArray) { this.RequestUri = new Uri(requestUri); - this.RequestHeaders = GetDefaultRequestHeaders("key", true); + this.RequestHeaders = GetDefaultRequestHeaders("key", stream); this._responseBytesArray = responseBytesArray; } diff --git a/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/Services/MistralAIChatCompletionServiceTests.cs b/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/Services/MistralAIChatCompletionServiceTests.cs index 061a4ee14fbd..2c282c2b6f08 100644 --- a/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/Services/MistralAIChatCompletionServiceTests.cs +++ b/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/Services/MistralAIChatCompletionServiceTests.cs @@ -1,7 +1,9 @@ // Copyright (c) Microsoft. All rights reserved. +using System; using System.Collections.Generic; using System.Net.Http; +using System.Text.Json; using System.Threading.Tasks; using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.ChatCompletion; @@ -42,7 +44,7 @@ public async Task ValidateGetStreamingChatMessageContentsAsync() { // Arrange var content = this.GetTestResponseAsBytes("chat_completions_streaming_response.txt"); - this.DelegatingHandler = new AssertingDelegatingHandler("https://api.mistral.ai/v1/chat/completions", content); + this.DelegatingHandler = new AssertingDelegatingHandler("https://api.mistral.ai/v1/chat/completions", true, content); this.HttpClient = new HttpClient(this.DelegatingHandler, false); var service = new MistralAIChatCompletionService("mistral-small-latest", "key", httpClient: this.HttpClient); @@ -70,4 +72,170 @@ public async Task ValidateGetStreamingChatMessageContentsAsync() Assert.NotNull(chunk.Metadata); } } + + [Fact] + public async Task GetChatMessageContentShouldSendMutatedChatHistoryToLLMAsync() + { + // Arrange + static void MutateChatHistory(AutoFunctionInvocationContext context, Func next) + { + // Remove the function call messages from the chat history to reduce token count. + context.ChatHistory.RemoveRange(1, 2); // Remove the `Date` function call and function result messages. 
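+ // Hand control to the next filter in the chain (and ultimately to the function invocation itself); without this call the auto function invocation would be suppressed.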
+ + next(context); + } + + var kernel = new Kernel(); + kernel.ImportPluginFromFunctions("WeatherPlugin", [KernelFunctionFactory.CreateFromMethod((string location) => "rainy", "GetWeather")]); + kernel.AutoFunctionInvocationFilters.Add(new AutoFunctionInvocationFilter(MutateChatHistory)); + + var firstResponse = this.GetTestResponseAsBytes("chat_completions_function_call_response.json"); + var secondResponse = this.GetTestResponseAsBytes("chat_completions_function_called_response.json"); + + this.DelegatingHandler = new AssertingDelegatingHandler("https://api.mistral.ai/v1/chat/completions", false, firstResponse, secondResponse); + this.HttpClient = new HttpClient(this.DelegatingHandler, false); + + var sut = new MistralAIChatCompletionService("mistral-small-latest", "key", httpClient: this.HttpClient); + + var chatHistory = new ChatHistory + { + new ChatMessageContent(AuthorRole.User, "What time is it?"), + new ChatMessageContent(AuthorRole.Assistant, [ + new FunctionCallContent("Date", "TimePlugin", "2") + ]), + new ChatMessageContent(AuthorRole.Tool, [ + new FunctionResultContent("Date", "TimePlugin", "2", "rainy") + ]), + new ChatMessageContent(AuthorRole.Assistant, "08/06/2024 00:00:00"), + new ChatMessageContent(AuthorRole.User, "Given the current time of day and weather, what is the likely color of the sky in Boston?") + }; + + // Act + await sut.GetChatMessageContentAsync(chatHistory, new MistralAIPromptExecutionSettings() { ToolCallBehavior = MistralAIToolCallBehavior.AutoInvokeKernelFunctions }, kernel); + + // Assert + var actualRequestContent = this.DelegatingHandler.RequestContent!; + Assert.NotNull(actualRequestContent); + + var optionsJson = JsonSerializer.Deserialize(actualRequestContent); + + var messages = optionsJson.GetProperty("messages"); + Assert.Equal(5, messages.GetArrayLength()); + + var userFirstPrompt = messages[0]; + Assert.Equal("user", userFirstPrompt.GetProperty("role").GetString()); + Assert.Equal("What time is it?", userFirstPrompt.GetProperty("content").ToString()); + + var assistantFirstResponse = messages[1]; + Assert.Equal("assistant", assistantFirstResponse.GetProperty("role").GetString()); + Assert.Equal("08/06/2024 00:00:00", assistantFirstResponse.GetProperty("content").GetString()); + + var userSecondPrompt = messages[2]; + Assert.Equal("user", userSecondPrompt.GetProperty("role").GetString()); + Assert.Equal("Given the current time of day and weather, what is the likely color of the sky in Boston?", userSecondPrompt.GetProperty("content").ToString()); + + var assistantSecondResponse = messages[3]; + Assert.Equal("assistant", assistantSecondResponse.GetProperty("role").GetString()); + Assert.Equal("ejOH4Z1A2", assistantSecondResponse.GetProperty("tool_calls")[0].GetProperty("id").GetString()); + Assert.Equal("WeatherPlugin-GetWeather", assistantSecondResponse.GetProperty("tool_calls")[0].GetProperty("function").GetProperty("name").GetString()); + + var functionResult = messages[4]; + Assert.Equal("tool", functionResult.GetProperty("role").GetString()); + Assert.Equal("rainy", functionResult.GetProperty("content").GetString()); + } + + [Fact] + public async Task GetStreamingChatMessageContentsShouldSendMutatedChatHistoryToLLMAsync() + { + // Arrange + static void MutateChatHistory(AutoFunctionInvocationContext context, Func next) + { + // Remove the function call messages from the chat history to reduce token count. + context.ChatHistory.RemoveRange(1, 2); // Remove the `Date` function call and function result messages. 
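+ // Forward to the rest of the auto function invocation pipeline; omitting this call would short-circuit the invocation.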
+ + next(context); + } + + var kernel = new Kernel(); + kernel.ImportPluginFromFunctions("WeatherPlugin", [KernelFunctionFactory.CreateFromMethod(() => "rainy", "GetWeather")]); + kernel.AutoFunctionInvocationFilters.Add(new AutoFunctionInvocationFilter(MutateChatHistory)); + + var firstResponse = this.GetTestResponseAsBytes("chat_completions_streaming_function_call_response.txt"); + var secondResponse = this.GetTestResponseAsBytes("chat_completions_streaming_function_called_response.txt"); + + this.DelegatingHandler = new AssertingDelegatingHandler("https://api.mistral.ai/v1/chat/completions", true, firstResponse, secondResponse); + this.HttpClient = new HttpClient(this.DelegatingHandler, false); + + var sut = new MistralAIChatCompletionService("mistral-small-latest", "key", httpClient: this.HttpClient); + + var chatHistory = new ChatHistory + { + new ChatMessageContent(AuthorRole.User, "What time is it?"), + new ChatMessageContent(AuthorRole.Assistant, [ + new FunctionCallContent("Date", "TimePlugin", "2") + ]), + new ChatMessageContent(AuthorRole.Tool, [ + new FunctionResultContent("Date", "TimePlugin", "2", "rainy") + ]), + new ChatMessageContent(AuthorRole.Assistant, "08/06/2024 00:00:00"), + new ChatMessageContent(AuthorRole.User, "Given the current time of day and weather, what is the likely color of the sky in Boston?") + }; + + // Act + await foreach (var update in sut.GetStreamingChatMessageContentsAsync(chatHistory, new MistralAIPromptExecutionSettings() { ToolCallBehavior = MistralAIToolCallBehavior.AutoInvokeKernelFunctions }, kernel)) + { + } + + // Assert + var actualRequestContent = this.DelegatingHandler.RequestContent!; + Assert.NotNull(actualRequestContent); + + var optionsJson = JsonSerializer.Deserialize(actualRequestContent); + + var messages = optionsJson.GetProperty("messages"); + Assert.Equal(5, messages.GetArrayLength()); + + var userFirstPrompt = messages[0]; + Assert.Equal("user", userFirstPrompt.GetProperty("role").GetString()); + Assert.Equal("What time is it?", userFirstPrompt.GetProperty("content").ToString()); + + var assistantFirstResponse = messages[1]; + Assert.Equal("assistant", assistantFirstResponse.GetProperty("role").GetString()); + Assert.Equal("08/06/2024 00:00:00", assistantFirstResponse.GetProperty("content").GetString()); + + var userSecondPrompt = messages[2]; + Assert.Equal("user", userSecondPrompt.GetProperty("role").GetString()); + Assert.Equal("Given the current time of day and weather, what is the likely color of the sky in Boston?", userSecondPrompt.GetProperty("content").ToString()); + + var assistantSecondResponse = messages[3]; + Assert.Equal("assistant", assistantSecondResponse.GetProperty("role").GetString()); + Assert.Equal("u2ef3Udel", assistantSecondResponse.GetProperty("tool_calls")[0].GetProperty("id").GetString()); + Assert.Equal("WeatherPlugin-GetWeather", assistantSecondResponse.GetProperty("tool_calls")[0].GetProperty("function").GetProperty("name").GetString()); + + var functionResult = messages[4]; + Assert.Equal("tool", functionResult.GetProperty("role").GetString()); + Assert.Equal("rainy", functionResult.GetProperty("content").GetString()); + } + + private sealed class AutoFunctionInvocationFilter : IAutoFunctionInvocationFilter + { + private readonly Func, Task> _callback; + + public AutoFunctionInvocationFilter(Func, Task> callback) + { + Verify.NotNull(callback, nameof(callback)); + this._callback = callback; + } + + public AutoFunctionInvocationFilter(Action> callback) + { + Verify.NotNull(callback, 
nameof(callback)); + this._callback = (c, n) => { callback(c, n); return Task.CompletedTask; }; + } + + public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext context, Func next) + { + await this._callback(context, next); + } + } } diff --git a/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/TestData/chat_completions_function_call_response.json b/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/TestData/chat_completions_function_call_response.json index 7840b8e4d1d3..9aa1ab8556b9 100644 --- a/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/TestData/chat_completions_function_call_response.json +++ b/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/TestData/chat_completions_function_call_response.json @@ -11,7 +11,7 @@ "content": "", "tool_calls": [ { - "id": "ejOH4ZAso", + "id": "ejOH4Z1A2", "function": { "name": "WeatherPlugin-GetWeather", "arguments": "{\"location\": \"Paris, 75\"}" diff --git a/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/TestData/chat_completions_streaming_function_call_response.txt b/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/TestData/chat_completions_streaming_function_call_response.txt index 69d374d3773e..31701ba58f22 100644 --- a/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/TestData/chat_completions_streaming_function_call_response.txt +++ b/dotnet/src/Connectors/Connectors.MistralAI.UnitTests/TestData/chat_completions_streaming_function_call_response.txt @@ -1,5 +1,5 @@ data: {"id":"355a4e457cfb44348d5feda493ce2102","object":"chat.completion.chunk","created":1712601685,"model":"mistral-small-latest","choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null,"logprobs":null}]} -data: {"id":"355a4e457cfb44348d5feda493ce2102","object":"chat.completion.chunk","created":1712601685,"model":"mistral-small-latest","choices":[{"index":0,"delta":{"content":null,"tool_calls":[{"function":{"name":"WeatherPlugin-GetWeather","arguments":"{\"location\": \"Paris\", \"unit\": \"celsius\"}"}}]},"finish_reason":"tool_calls","logprobs":null}],"usage":{"prompt_tokens":118,"total_tokens":149,"completion_tokens":31}} +data: {"id":"355a4e457cfb44348d5feda493ce2102","object":"chat.completion.chunk","created":1712601685,"model":"mistral-small-latest","choices":[{"index":0,"delta":{"content":null,"tool_calls":[{"id":"u2ef3Udel", "function":{"name":"WeatherPlugin-GetWeather","arguments":"{\"location\": \"Paris\", \"unit\": \"celsius\"}"}}]},"finish_reason":"tool_calls","logprobs":null}],"usage":{"prompt_tokens":118,"total_tokens":149,"completion_tokens":31}} data: [DONE] \ No newline at end of file diff --git a/dotnet/src/Connectors/Connectors.MistralAI/Client/MistralClient.cs b/dotnet/src/Connectors/Connectors.MistralAI/Client/MistralClient.cs index d3007fef63d5..532bc94e6150 100644 --- a/dotnet/src/Connectors/Connectors.MistralAI/Client/MistralClient.cs +++ b/dotnet/src/Connectors/Connectors.MistralAI/Client/MistralClient.cs @@ -52,12 +52,13 @@ internal async Task> GetChatMessageContentsAsy string modelId = executionSettings?.ModelId ?? 
this._modelId; var mistralExecutionSettings = MistralAIPromptExecutionSettings.FromExecutionSettings(executionSettings); - var chatRequest = this.CreateChatCompletionRequest(modelId, stream: false, chatHistory, mistralExecutionSettings, kernel); var endpoint = this.GetEndpoint(mistralExecutionSettings, path: "chat/completions"); var autoInvoke = kernel is not null && mistralExecutionSettings.ToolCallBehavior?.MaximumAutoInvokeAttempts > 0 && s_inflightAutoInvokes.Value < MaxInflightAutoInvokes; for (int requestIndex = 1; ; requestIndex++) { + var chatRequest = this.CreateChatCompletionRequest(modelId, stream: false, chatHistory, mistralExecutionSettings, kernel); + ChatCompletionResponse? responseData = null; List responseContent; using (var activity = ModelDiagnostics.StartCompletionActivity(this._endpoint, this._modelId, ModelProvider, chatHistory, mistralExecutionSettings)) @@ -133,12 +134,8 @@ internal async Task> GetChatMessageContentsAsy Debug.Assert(kernel is not null); - // Add the original assistant message to the chatRequest; this is required for the service - // to understand the tool call responses. Also add the result message to the caller's chat - // history: if they don't want it, they can remove it, but this makes the data available, - // including metadata like usage. - chatRequest.AddMessage(chatChoice.Message!); - + // Add the result message to the caller's chat history; + // this is required for the service to understand the tool call responses. var chatMessageContent = this.ToChatMessageContent(modelId, responseData, chatChoice); chatHistory.Add(chatMessageContent); @@ -151,7 +148,7 @@ internal async Task> GetChatMessageContentsAsy // We currently only know about function tool calls. If it's anything else, we'll respond with an error. if (toolCall.Function is null) { - this.AddResponseMessage(chatRequest, chatHistory, toolCall, result: null, "Error: Tool call was not a function call."); + this.AddResponseMessage(chatHistory, toolCall, result: null, "Error: Tool call was not a function call."); continue; } @@ -161,14 +158,14 @@ internal async Task> GetChatMessageContentsAsy if (mistralExecutionSettings.ToolCallBehavior?.AllowAnyRequestedKernelFunction is not true && !IsRequestableTool(chatRequest, toolCall.Function!)) { - this.AddResponseMessage(chatRequest, chatHistory, toolCall, result: null, "Error: Function call chatRequest for a function that wasn't defined."); + this.AddResponseMessage(chatHistory, toolCall, result: null, "Error: Function call chatRequest for a function that wasn't defined."); continue; } // Find the function in the kernel and populate the arguments. if (!kernel!.Plugins.TryGetFunctionAndArguments(toolCall.Function, out KernelFunction? function, out KernelArguments? functionArgs)) { - this.AddResponseMessage(chatRequest, chatHistory, toolCall, result: null, "Error: Requested function could not be found."); + this.AddResponseMessage(chatHistory, toolCall, result: null, "Error: Requested function could not be found."); continue; } @@ -204,7 +201,7 @@ internal async Task> GetChatMessageContentsAsy catch (Exception e) #pragma warning restore CA1031 { - this.AddResponseMessage(chatRequest, chatHistory, toolCall, result: null, $"Error: Exception while invoking function. {e.Message}"); + this.AddResponseMessage(chatHistory, toolCall, result: null, $"Error: Exception while invoking function. {e.Message}"); continue; } finally @@ -218,7 +215,7 @@ internal async Task> GetChatMessageContentsAsy object functionResultValue = functionResult.GetValue() ?? 
string.Empty; var stringResult = ProcessFunctionResult(functionResultValue, mistralExecutionSettings.ToolCallBehavior); - this.AddResponseMessage(chatRequest, chatHistory, toolCall, result: stringResult, errorMessage: null); + this.AddResponseMessage(chatHistory, toolCall, result: stringResult, errorMessage: null); // If filter requested termination, returning latest function result. if (invocationContext.Terminate) @@ -272,12 +269,13 @@ internal async IAsyncEnumerable GetStreamingChatMes var mistralExecutionSettings = MistralAIPromptExecutionSettings.FromExecutionSettings(executionSettings); string modelId = mistralExecutionSettings.ModelId ?? this._modelId; - var chatRequest = this.CreateChatCompletionRequest(modelId, stream: true, chatHistory, mistralExecutionSettings, kernel); var autoInvoke = kernel is not null && mistralExecutionSettings.ToolCallBehavior?.MaximumAutoInvokeAttempts > 0 && s_inflightAutoInvokes.Value < MaxInflightAutoInvokes; List? toolCalls = null; for (int requestIndex = 1; ; requestIndex++) { + var chatRequest = this.CreateChatCompletionRequest(modelId, stream: true, chatHistory, mistralExecutionSettings, kernel); + // Reset state toolCalls?.Clear(); @@ -333,11 +331,7 @@ internal async IAsyncEnumerable GetStreamingChatMes // Create a copy of the tool calls to avoid modifying the original list toolCalls = new List(chatChoice.ToolCalls!); - // Add the original assistant message to the chatRequest; this is required for the service - // to understand the tool call responses. Also add the result message to the caller's chat - // history: if they don't want it, they can remove it, but this makes the data available, - // including metadata like usage. - chatRequest.AddMessage(new MistralChatMessage(streamedRole, completionChunk.GetContent(0)) { ToolCalls = chatChoice.ToolCalls }); + // Add the result message to the caller's chat history; this is required for the service to understand the tool call responses. chatHistory.Add(this.ToChatMessageContent(modelId, streamedRole!, completionChunk, chatChoice)); } } @@ -384,7 +378,7 @@ internal async IAsyncEnumerable GetStreamingChatMes // We currently only know about function tool calls. If it's anything else, we'll respond with an error. if (toolCall.Function is null) { - this.AddResponseMessage(chatRequest, chatHistory, toolCall, result: null, "Error: Tool call was not a function call."); + this.AddResponseMessage(chatHistory, toolCall, result: null, "Error: Tool call was not a function call."); continue; } @@ -394,14 +388,14 @@ internal async IAsyncEnumerable GetStreamingChatMes if (mistralExecutionSettings.ToolCallBehavior?.AllowAnyRequestedKernelFunction is not true && !IsRequestableTool(chatRequest, toolCall.Function!)) { - this.AddResponseMessage(chatRequest, chatHistory, toolCall, result: null, "Error: Function call chatRequest for a function that wasn't defined."); + this.AddResponseMessage(chatHistory, toolCall, result: null, "Error: Function call chatRequest for a function that wasn't defined."); continue; } // Find the function in the kernel and populate the arguments. if (!kernel!.Plugins.TryGetFunctionAndArguments(toolCall.Function, out KernelFunction? function, out KernelArguments? 
functionArgs)) { - this.AddResponseMessage(chatRequest, chatHistory, toolCall, result: null, "Error: Requested function could not be found."); + this.AddResponseMessage(chatHistory, toolCall, result: null, "Error: Requested function could not be found."); continue; } @@ -437,7 +431,7 @@ internal async IAsyncEnumerable GetStreamingChatMes catch (Exception e) #pragma warning restore CA1031 { - this.AddResponseMessage(chatRequest, chatHistory, toolCall, result: null, $"Error: Exception while invoking function. {e.Message}"); + this.AddResponseMessage(chatHistory, toolCall, result: null, $"Error: Exception while invoking function. {e.Message}"); continue; } finally @@ -451,7 +445,7 @@ internal async IAsyncEnumerable GetStreamingChatMes object functionResultValue = functionResult.GetValue() ?? string.Empty; var stringResult = ProcessFunctionResult(functionResultValue, mistralExecutionSettings.ToolCallBehavior); - this.AddResponseMessage(chatRequest, chatHistory, toolCall, result: stringResult, errorMessage: null); + this.AddResponseMessage(chatHistory, toolCall, result: stringResult, errorMessage: null); // If filter requested termination, returning latest function result and breaking request iteration loop. if (invocationContext.Terminate) @@ -924,7 +918,7 @@ private void AddFunctionCallContent(ChatMessageContent message, MistralToolCall message.Items.Add(functionCallContent); } - private void AddResponseMessage(ChatCompletionRequest chatRequest, ChatHistory chat, MistralToolCall toolCall, string? result, string? errorMessage) + private void AddResponseMessage(ChatHistory chat, MistralToolCall toolCall, string? result, string? errorMessage) { // Log any error if (errorMessage is not null && this._logger.IsEnabled(LogLevel.Debug)) @@ -933,9 +927,7 @@ private void AddResponseMessage(ChatCompletionRequest chatRequest, ChatHistory c this._logger.LogDebug("Failed to handle tool request ({ToolId}). {Error}", toolCall.Function?.Name, errorMessage); } - // Add the tool response message to both the chat options result ??= errorMessage ?? string.Empty; - chatRequest.AddMessage(new MistralChatMessage(AuthorRole.Tool.ToString(), result)); // Add the tool response message to the chat history var message = new ChatMessageContent(AuthorRole.Tool, result, metadata: new Dictionary { { nameof(MistralToolCall.Function), toolCall.Function } }); diff --git a/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantKernelBuilderExtensionsTests.cs b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantKernelBuilderExtensionsTests.cs new file mode 100644 index 000000000000..f0b4f327c0f0 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantKernelBuilderExtensionsTests.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.Qdrant; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client; +using Xunit; + +namespace SemanticKernel.Connectors.Qdrant.UnitTests; + +/// +/// Tests for the class. +/// +public class QdrantKernelBuilderExtensionsTests +{ + private readonly IKernelBuilder _kernelBuilder; + + public QdrantKernelBuilderExtensionsTests() + { + this._kernelBuilder = Kernel.CreateBuilder(); + } + + [Fact] + public void AddVectorStoreRegistersClass() + { + // Arrange. + using var qdrantClient = new QdrantClient("localhost"); + this._kernelBuilder.Services.AddSingleton(qdrantClient); + + // Act. 
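+        // The parameterless AddQdrantVectorStore overload used below resolves the QdrantClient
+        // registered in the Arrange step from the service collection. A consumer-side sketch of
+        // the same wiring (assumes a Qdrant instance reachable on localhost):
+        //
+        //     var builder = Kernel.CreateBuilder();
+        //     builder.Services.AddSingleton(new QdrantClient("localhost"));
+        //     builder.AddQdrantVectorStore();
+        //     var vectorStore = builder.Build().Services.GetRequiredService<IVectorStore>();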
+ this._kernelBuilder.AddQdrantVectorStore(); + + // Assert. + this.AssertVectorStoreCreated(); + } + + [Fact] + public void AddVectorStoreWithHostAndPortAndCredsRegistersClass() + { + // Act. + this._kernelBuilder.AddQdrantVectorStore("localhost", 8080, true, "apikey"); + + // Assert. + this.AssertVectorStoreCreated(); + } + + [Fact] + public void AddVectorStoreWithHostRegistersClass() + { + // Act. + this._kernelBuilder.AddQdrantVectorStore("localhost"); + + // Assert. + this.AssertVectorStoreCreated(); + } + + private void AssertVectorStoreCreated() + { + var kernel = this._kernelBuilder.Build(); + var vectorStore = kernel.Services.GetRequiredService(); + Assert.NotNull(vectorStore); + Assert.IsType(vectorStore); + } +} diff --git a/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantServiceCollectionExtensionsTests.cs b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantServiceCollectionExtensionsTests.cs new file mode 100644 index 000000000000..d2219e395a79 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantServiceCollectionExtensionsTests.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.Qdrant; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client; +using Xunit; + +namespace SemanticKernel.Connectors.Qdrant.UnitTests; + +/// +/// Tests for the class. +/// +public class QdrantServiceCollectionExtensionsTests +{ + private readonly IServiceCollection _serviceCollection; + + public QdrantServiceCollectionExtensionsTests() + { + this._serviceCollection = new ServiceCollection(); + } + + [Fact] + public void AddVectorStoreRegistersClass() + { + // Arrange. + using var qdrantClient = new QdrantClient("localhost"); + this._serviceCollection.AddSingleton(qdrantClient); + + // Act. + this._serviceCollection.AddQdrantVectorStore(); + + // Assert. + this.AssertVectorStoreCreated(); + } + + [Fact] + public void AddVectorStoreWithHostAndPortAndCredsRegistersClass() + { + // Act. + this._serviceCollection.AddQdrantVectorStore("localhost", 8080, true, "apikey"); + + // Assert. + this.AssertVectorStoreCreated(); + } + + [Fact] + public void AddVectorStoreWithHostRegistersClass() + { + // Act. + this._serviceCollection.AddQdrantVectorStore("localhost"); + + // Assert. + this.AssertVectorStoreCreated(); + } + + private void AssertVectorStoreCreated() + { + var serviceProvider = this._serviceCollection.BuildServiceProvider(); + var vectorStore = serviceProvider.GetRequiredService(); + Assert.NotNull(vectorStore); + Assert.IsType(vectorStore); + } +} diff --git a/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreCollectionCreateMappingTests.cs b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreCollectionCreateMappingTests.cs new file mode 100644 index 000000000000..37cd1d8af53f --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreCollectionCreateMappingTests.cs @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client.Grpc; +using Xunit; + +namespace Microsoft.SemanticKernel.Connectors.Qdrant.UnitTests; + +/// +/// Contains tests for the class. +/// +public class QdrantVectorStoreCollectionCreateMappingTests +{ + [Fact] + public void MapSingleVectorCreatesVectorParams() + { + // Arrange. 
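+        // MapSingleVector converts the connector-agnostic property definition into Qdrant's
+        // VectorParams, translating DistanceFunction values to the gRPC Distance enum
+        // (DotProductSimilarity -> Dot, EuclideanDistance -> Euclid, unset -> Cosine, as the
+        // tests in this file assert). A minimal sketch of the call shape:
+        //
+        //     var prop = new VectorStoreRecordVectorProperty("vec", typeof(ReadOnlyMemory<float>)) { Dimensions = 4 };
+        //     VectorParams p = QdrantVectorStoreCollectionCreateMapping.MapSingleVector(prop);
+        //     // p.Size == 4, p.Distance == Distance.Cosine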
+ var vectorProperty = new VectorStoreRecordVectorProperty("testvector", typeof(ReadOnlyMemory)) { Dimensions = 4, DistanceFunction = DistanceFunction.DotProductSimilarity }; + + // Act. + var actual = QdrantVectorStoreCollectionCreateMapping.MapSingleVector(vectorProperty); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(Distance.Dot, actual.Distance); + Assert.Equal(4ul, actual.Size); + } + + [Fact] + public void MapSingleVectorDefaultsToCosine() + { + // Arrange. + var vectorProperty = new VectorStoreRecordVectorProperty("testvector", typeof(ReadOnlyMemory)) { Dimensions = 4 }; + + // Act. + var actual = QdrantVectorStoreCollectionCreateMapping.MapSingleVector(vectorProperty); + + // Assert. + Assert.Equal(Distance.Cosine, actual.Distance); + } + + [Fact] + public void MapSingleVectorThrowsForUnsupportedDistanceFunction() + { + // Arrange. + var vectorProperty = new VectorStoreRecordVectorProperty("testvector", typeof(ReadOnlyMemory)) { Dimensions = 4, DistanceFunction = DistanceFunction.CosineDistance }; + + // Act and assert. + Assert.Throws(() => QdrantVectorStoreCollectionCreateMapping.MapSingleVector(vectorProperty)); + } + + [Theory] + [InlineData(null)] + [InlineData(0)] + public void MapSingleVectorThrowsIfDimensionsIsInvalid(int? dimensions) + { + // Arrange. + var vectorProperty = new VectorStoreRecordVectorProperty("testvector", typeof(ReadOnlyMemory)) { Dimensions = dimensions }; + + // Act and assert. + Assert.Throws(() => QdrantVectorStoreCollectionCreateMapping.MapSingleVector(vectorProperty)); + } + + [Fact] + public void MapNamedVectorsCreatesVectorParamsMap() + { + // Arrange. + var vectorProperties = new VectorStoreRecordVectorProperty[] + { + new("testvector1", typeof(ReadOnlyMemory)) { Dimensions = 10, DistanceFunction = DistanceFunction.EuclideanDistance }, + new("testvector2", typeof(ReadOnlyMemory)) { Dimensions = 20 } + }; + + var storagePropertyNames = new Dictionary + { + { "testvector1", "storage_testvector1" }, + { "testvector2", "storage_testvector2" } + }; + + // Act. + var actual = QdrantVectorStoreCollectionCreateMapping.MapNamedVectors(vectorProperties, storagePropertyNames); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(2, actual.Map.Count); + Assert.Equal(10ul, actual.Map["storage_testvector1"].Size); + Assert.Equal(Distance.Euclid, actual.Map["storage_testvector1"].Distance); + Assert.Equal(20ul, actual.Map["storage_testvector2"].Size); + Assert.Equal(Distance.Cosine, actual.Map["storage_testvector2"].Distance); + } +} diff --git a/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreRecordCollectionTests.cs b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreRecordCollectionTests.cs new file mode 100644 index 000000000000..1889ceef5fef --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreRecordCollectionTests.cs @@ -0,0 +1,757 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text.Json.Serialization; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Data; +using Moq; +using Qdrant.Client.Grpc; +using Xunit; + +namespace Microsoft.SemanticKernel.Connectors.Qdrant.UnitTests; + +/// +/// Contains tests for the class. 
+/// +public class QdrantVectorStoreRecordCollectionTests +{ + private const string TestCollectionName = "testcollection"; + private const ulong UlongTestRecordKey1 = 1; + private const ulong UlongTestRecordKey2 = 2; + private static readonly Guid s_guidTestRecordKey1 = Guid.Parse("11111111-1111-1111-1111-111111111111"); + private static readonly Guid s_guidTestRecordKey2 = Guid.Parse("22222222-2222-2222-2222-222222222222"); + + private readonly Mock _qdrantClientMock; + + private readonly CancellationToken _testCancellationToken = new(false); + + public QdrantVectorStoreRecordCollectionTests() + { + this._qdrantClientMock = new Mock(MockBehavior.Strict); + } + + [Theory] + [InlineData(TestCollectionName, true)] + [InlineData("nonexistentcollection", false)] + public async Task CollectionExistsReturnsCollectionStateAsync(string collectionName, bool expectedExists) + { + // Arrange. + var sut = new QdrantVectorStoreRecordCollection>(this._qdrantClientMock.Object, collectionName); + + this._qdrantClientMock + .Setup(x => x.CollectionExistsAsync( + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(expectedExists); + + // Act. + var actual = await sut.CollectionExistsAsync(this._testCancellationToken); + + // Assert. + Assert.Equal(expectedExists, actual); + } + + [Fact] + public async Task CanCreateCollectionAsync() + { + // Arrange. + var sut = new QdrantVectorStoreRecordCollection>(this._qdrantClientMock.Object, TestCollectionName); + + this._qdrantClientMock + .Setup(x => x.CreateCollectionAsync( + It.IsAny(), + It.IsAny(), + this._testCancellationToken)) + .Returns(Task.CompletedTask); + + this._qdrantClientMock + .Setup(x => x.CreatePayloadIndexAsync( + It.IsAny(), + It.IsAny(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(new UpdateResult()); + + // Act. + await sut.CreateCollectionAsync(this._testCancellationToken); + + // Assert. + this._qdrantClientMock + .Verify( + x => x.CreateCollectionAsync( + TestCollectionName, + It.Is(x => x.Size == 4), + this._testCancellationToken), + Times.Once); + + this._qdrantClientMock + .Verify( + x => x.CreatePayloadIndexAsync( + TestCollectionName, + "OriginalNameData", + PayloadSchemaType.Keyword, + this._testCancellationToken), + Times.Once); + + this._qdrantClientMock + .Verify( + x => x.CreatePayloadIndexAsync( + TestCollectionName, + "OriginalNameData", + PayloadSchemaType.Text, + this._testCancellationToken), + Times.Once); + + this._qdrantClientMock + .Verify( + x => x.CreatePayloadIndexAsync( + TestCollectionName, + "data_storage_name", + PayloadSchemaType.Keyword, + this._testCancellationToken), + Times.Once); + } + + [Fact] + public async Task CanDeleteCollectionAsync() + { + // Arrange. + var sut = new QdrantVectorStoreRecordCollection>(this._qdrantClientMock.Object, TestCollectionName); + + this._qdrantClientMock + .Setup(x => x.DeleteCollectionAsync( + It.IsAny(), + null, + this._testCancellationToken)) + .Returns(Task.CompletedTask); + + // Act. + await sut.DeleteCollectionAsync(this._testCancellationToken); + + // Assert. + this._qdrantClientMock + .Verify( + x => x.DeleteCollectionAsync( + TestCollectionName, + null, + this._testCancellationToken), + Times.Once); + } + + [Theory] + [MemberData(nameof(TestOptions))] + public async Task CanGetRecordWithVectorsAsync(bool useDefinition, bool hasNamedVectors, TKey testRecordKey) + where TKey : notnull + { + var sut = this.CreateRecordCollection(useDefinition, hasNamedVectors); + + // Arrange. 
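+        // TestOptions (built by GenerateAllCombinations at the bottom of this file) is the cross
+        // product of { useDefinition } x { hasNamedVectors } x { ulong key, Guid key }, so this
+        // theory runs eight times: attribute-based and definition-based mapping, named and
+        // unnamed vectors, and both Qdrant point id types.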
+ var retrievedPoint = CreateRetrievedPoint(hasNamedVectors, testRecordKey); + this.SetupRetrieveMock([retrievedPoint]); + + // Act. + var actual = await sut.GetAsync( + testRecordKey, + new() { IncludeVectors = true }, + this._testCancellationToken); + + // Assert. + this._qdrantClientMock + .Verify( + x => x.RetrieveAsync( + TestCollectionName, + It.Is>(x => x.Count == 1 && (testRecordKey!.GetType() == typeof(ulong) && x[0].Num == (testRecordKey as ulong?) || testRecordKey!.GetType() == typeof(Guid) && x[0].Uuid == (testRecordKey as Guid?).ToString())), + true, + true, + null, + null, + this._testCancellationToken), + Times.Once); + + Assert.NotNull(actual); + Assert.Equal(testRecordKey, actual.Key); + Assert.Equal("data 1", actual.OriginalNameData); + Assert.Equal("data 1", actual.Data); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector!.Value.ToArray()); + } + + [Theory] + [MemberData(nameof(TestOptions))] + public async Task CanGetRecordWithoutVectorsAsync(bool useDefinition, bool hasNamedVectors, TKey testRecordKey) + where TKey : notnull + { + // Arrange. + var sut = this.CreateRecordCollection(useDefinition, hasNamedVectors); + var retrievedPoint = CreateRetrievedPoint(hasNamedVectors, testRecordKey); + this.SetupRetrieveMock([retrievedPoint]); + + // Act. + var actual = await sut.GetAsync( + testRecordKey, + new() { IncludeVectors = false }, + this._testCancellationToken); + + // Assert. + this._qdrantClientMock + .Verify( + x => x.RetrieveAsync( + TestCollectionName, + It.Is>(x => x.Count == 1 && (testRecordKey!.GetType() == typeof(ulong) && x[0].Num == (testRecordKey as ulong?) || testRecordKey!.GetType() == typeof(Guid) && x[0].Uuid == (testRecordKey as Guid?).ToString())), + true, + false, + null, + null, + this._testCancellationToken), + Times.Once); + + Assert.NotNull(actual); + Assert.Equal(testRecordKey, actual.Key); + Assert.Equal("data 1", actual.OriginalNameData); + Assert.Equal("data 1", actual.Data); + Assert.Null(actual.Vector); + } + + [Theory] + [MemberData(nameof(MultiRecordTestOptions))] + public async Task CanGetManyRecordsWithVectorsAsync(bool useDefinition, bool hasNamedVectors, TKey[] testRecordKeys) + where TKey : notnull + { + // Arrange. + var sut = this.CreateRecordCollection(useDefinition, hasNamedVectors); + var retrievedPoint1 = CreateRetrievedPoint(hasNamedVectors, UlongTestRecordKey1); + var retrievedPoint2 = CreateRetrievedPoint(hasNamedVectors, UlongTestRecordKey2); + this.SetupRetrieveMock(testRecordKeys.Select(x => CreateRetrievedPoint(hasNamedVectors, x)).ToList()); + + // Act. + var actual = await sut.GetBatchAsync( + testRecordKeys, + new() { IncludeVectors = true }, + this._testCancellationToken).ToListAsync(); + + // Assert. + this._qdrantClientMock + .Verify( + x => x.RetrieveAsync( + TestCollectionName, + It.Is>(x => + x.Count == 2 && + (testRecordKeys[0]!.GetType() == typeof(ulong) && x[0].Num == (testRecordKeys[0] as ulong?) || testRecordKeys[0]!.GetType() == typeof(Guid) && x[0].Uuid == (testRecordKeys[0] as Guid?).ToString()) && + (testRecordKeys[1]!.GetType() == typeof(ulong) && x[1].Num == (testRecordKeys[1] as ulong?) 
|| testRecordKeys[1]!.GetType() == typeof(Guid) && x[1].Uuid == (testRecordKeys[1] as Guid?).ToString())), + true, + true, + null, + null, + this._testCancellationToken), + Times.Once); + + Assert.NotNull(actual); + Assert.Equal(2, actual.Count); + Assert.Equal(testRecordKeys[0], actual[0].Key); + Assert.Equal(testRecordKeys[1], actual[1].Key); + } + + [Fact] + public async Task CanGetRecordWithCustomMapperAsync() + { + // Arrange. + var retrievedPoint = CreateRetrievedPoint(true, UlongTestRecordKey1); + this.SetupRetrieveMock([retrievedPoint]); + + // Arrange mapper mock from PointStruct to data model. + var mapperMock = new Mock, PointStruct>>(MockBehavior.Strict); + mapperMock.Setup( + x => x.MapFromStorageToDataModel( + It.IsAny(), + It.IsAny())) + .Returns(CreateModel(UlongTestRecordKey1, true)); + + // Arrange target with custom mapper. + var sut = new QdrantVectorStoreRecordCollection>( + this._qdrantClientMock.Object, + TestCollectionName, + new() + { + HasNamedVectors = true, + PointStructCustomMapper = mapperMock.Object + }); + + // Act + var actual = await sut.GetAsync( + UlongTestRecordKey1, + new() { IncludeVectors = true }, + this._testCancellationToken); + + // Assert + Assert.NotNull(actual); + Assert.Equal(UlongTestRecordKey1, actual.Key); + Assert.Equal("data 1", actual.OriginalNameData); + Assert.Equal("data 1", actual.Data); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector!.Value.ToArray()); + + mapperMock + .Verify( + x => x.MapFromStorageToDataModel( + It.Is(x => x.Id.Num == UlongTestRecordKey1), + It.Is(x => x.IncludeVectors)), + Times.Once); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task CanDeleteUlongRecordAsync(bool useDefinition, bool hasNamedVectors) + { + // Arrange + var sut = this.CreateRecordCollection(useDefinition, hasNamedVectors); + this.SetupDeleteMocks(); + + // Act + await sut.DeleteAsync( + UlongTestRecordKey1, + cancellationToken: this._testCancellationToken); + + // Assert + this._qdrantClientMock + .Verify( + x => x.DeleteAsync( + TestCollectionName, + It.Is(x => x == UlongTestRecordKey1), + true, + null, + null, + this._testCancellationToken), + Times.Once); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task CanDeleteGuidRecordAsync(bool useDefinition, bool hasNamedVectors) + { + // Arrange + var sut = this.CreateRecordCollection(useDefinition, hasNamedVectors); + this.SetupDeleteMocks(); + + // Act + await sut.DeleteAsync( + s_guidTestRecordKey1, + cancellationToken: this._testCancellationToken); + + // Assert + this._qdrantClientMock + .Verify( + x => x.DeleteAsync( + TestCollectionName, + It.Is(x => x == s_guidTestRecordKey1), + true, + null, + null, + this._testCancellationToken), + Times.Once); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task CanDeleteManyUlongRecordsAsync(bool useDefinition, bool hasNamedVectors) + { + // Arrange + var sut = this.CreateRecordCollection(useDefinition, hasNamedVectors); + this.SetupDeleteMocks(); + + // Act + await sut.DeleteBatchAsync( + [UlongTestRecordKey1, UlongTestRecordKey2], + cancellationToken: this._testCancellationToken); + + // Assert + this._qdrantClientMock + .Verify( + x => x.DeleteAsync( + TestCollectionName, + It.Is>(x => x.Count == 2 && x.Contains(UlongTestRecordKey1) && 
x.Contains(UlongTestRecordKey2)), + true, + null, + null, + this._testCancellationToken), + Times.Once); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task CanDeleteManyGuidRecordsAsync(bool useDefinition, bool hasNamedVectors) + { + // Arrange + var sut = this.CreateRecordCollection(useDefinition, hasNamedVectors); + this.SetupDeleteMocks(); + + // Act + await sut.DeleteBatchAsync( + [s_guidTestRecordKey1, s_guidTestRecordKey2], + cancellationToken: this._testCancellationToken); + + // Assert + this._qdrantClientMock + .Verify( + x => x.DeleteAsync( + TestCollectionName, + It.Is>(x => x.Count == 2 && x.Contains(s_guidTestRecordKey1) && x.Contains(s_guidTestRecordKey2)), + true, + null, + null, + this._testCancellationToken), + Times.Once); + } + + [Theory] + [MemberData(nameof(TestOptions))] + public async Task CanUpsertRecordAsync(bool useDefinition, bool hasNamedVectors, TKey testRecordKey) + where TKey : notnull + { + // Arrange + var sut = this.CreateRecordCollection(useDefinition, hasNamedVectors); + this.SetupUpsertMock(); + + // Act + await sut.UpsertAsync( + CreateModel(testRecordKey, true), + cancellationToken: this._testCancellationToken); + + // Assert + this._qdrantClientMock + .Verify( + x => x.UpsertAsync( + TestCollectionName, + It.Is>(x => x.Count == 1 && (testRecordKey!.GetType() == typeof(ulong) && x[0].Id.Num == (testRecordKey as ulong?) || testRecordKey!.GetType() == typeof(Guid) && x[0].Id.Uuid == (testRecordKey as Guid?).ToString())), + true, + null, + null, + this._testCancellationToken), + Times.Once); + } + + [Theory] + [MemberData(nameof(MultiRecordTestOptions))] + public async Task CanUpsertManyRecordsAsync(bool useDefinition, bool hasNamedVectors, TKey[] testRecordKeys) + where TKey : notnull + { + // Arrange + var sut = this.CreateRecordCollection(useDefinition, hasNamedVectors); + this.SetupUpsertMock(); + + var models = testRecordKeys.Select(x => CreateModel(x, true)); + + // Act + var actual = await sut.UpsertBatchAsync( + models, + cancellationToken: this._testCancellationToken).ToListAsync(); + + // Assert + Assert.NotNull(actual); + Assert.Equal(2, actual.Count); + Assert.Equal(testRecordKeys[0], actual[0]); + Assert.Equal(testRecordKeys[1], actual[1]); + + this._qdrantClientMock + .Verify( + x => x.UpsertAsync( + TestCollectionName, + It.Is>(x => + x.Count == 2 && + (testRecordKeys[0]!.GetType() == typeof(ulong) && x[0].Id.Num == (testRecordKeys[0] as ulong?) || testRecordKeys[0]!.GetType() == typeof(Guid) && x[0].Id.Uuid == (testRecordKeys[0] as Guid?).ToString()) && + (testRecordKeys[1]!.GetType() == typeof(ulong) && x[1].Id.Num == (testRecordKeys[1] as ulong?) || testRecordKeys[1]!.GetType() == typeof(Guid) && x[1].Id.Uuid == (testRecordKeys[1] as Guid?).ToString())), + true, + null, + null, + this._testCancellationToken), + Times.Once); + } + + [Fact] + public async Task CanUpsertRecordWithCustomMapperAsync() + { + // Arrange. + this.SetupUpsertMock(); + var pointStruct = new PointStruct + { + Id = new() { Num = UlongTestRecordKey1 }, + Payload = { ["OriginalNameData"] = "data 1", ["data_storage_name"] = "data 1" }, + Vectors = new[] { 1f, 2f, 3f, 4f } + }; + + // Arrange mapper mock from data model to PointStruct. + var mapperMock = new Mock, PointStruct>>(MockBehavior.Strict); + mapperMock + .Setup(x => x.MapFromDataToStorageModel(It.IsAny>())) + .Returns(pointStruct); + + // Arrange target with custom mapper. 
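+        // PointStructCustomMapper replaces the built-in attribute/definition-based mapping with
+        // a caller-owned implementation. A sketch of such a mapper (member signatures inferred
+        // from the mocks in this file; the class name is hypothetical):
+        //
+        //     private sealed class MyPointStructMapper : IVectorStoreRecordMapper<SinglePropsModel<ulong>, PointStruct>
+        //     {
+        //         public PointStruct MapFromDataToStorageModel(SinglePropsModel<ulong> dataModel) => /* build PointStruct */;
+        //         public SinglePropsModel<ulong> MapFromStorageToDataModel(PointStruct storageModel, StorageToDataModelMapperOptions options) => /* read payload and vectors */;
+        //     }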
+ var sut = new QdrantVectorStoreRecordCollection>( + this._qdrantClientMock.Object, + TestCollectionName, + new() + { + HasNamedVectors = false, + PointStructCustomMapper = mapperMock.Object + }); + + var model = CreateModel(UlongTestRecordKey1, true); + + // Act + await sut.UpsertAsync( + model, + null, + this._testCancellationToken); + + // Assert + mapperMock + .Verify( + x => x.MapFromDataToStorageModel(It.Is>(x => x == model)), + Times.Once); + } + + /// + /// Tests that the collection can be created even if the definition and the type do not match. + /// In this case, the expectation is that a custom mapper will be provided to map between the + /// schema as defined by the definition and the different data model. + /// + [Fact] + public void CanCreateCollectionWithMismatchedDefinitionAndType() + { + // Arrange. + var definition = new VectorStoreRecordDefinition() + { + Properties = new List + { + new VectorStoreRecordKeyProperty("Id", typeof(ulong)), + new VectorStoreRecordDataProperty("Text", typeof(string)), + new VectorStoreRecordVectorProperty("Embedding", typeof(ReadOnlyMemory)) { Dimensions = 4 }, + } + }; + + // Act. + var sut = new QdrantVectorStoreRecordCollection>( + this._qdrantClientMock.Object, + TestCollectionName, + new() { VectorStoreRecordDefinition = definition, PointStructCustomMapper = Mock.Of, PointStruct>>() }); + } + + private void SetupRetrieveMock(List retrievedPoints) + { + this._qdrantClientMock + .Setup(x => x.RetrieveAsync( + It.IsAny(), + It.IsAny>(), + It.IsAny(), // With Payload + It.IsAny(), // With Vectors + It.IsAny(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(retrievedPoints); + } + + private void SetupDeleteMocks() + { + this._qdrantClientMock + .Setup(x => x.DeleteAsync( + It.IsAny(), + It.IsAny(), + It.IsAny(), // wait + It.IsAny(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(new UpdateResult()); + + this._qdrantClientMock + .Setup(x => x.DeleteAsync( + It.IsAny(), + It.IsAny(), + It.IsAny(), // wait + It.IsAny(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(new UpdateResult()); + + this._qdrantClientMock + .Setup(x => x.DeleteAsync( + It.IsAny(), + It.IsAny>(), + It.IsAny(), // wait + It.IsAny(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(new UpdateResult()); + + this._qdrantClientMock + .Setup(x => x.DeleteAsync( + It.IsAny(), + It.IsAny>(), + It.IsAny(), // wait + It.IsAny(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(new UpdateResult()); + } + + private void SetupUpsertMock() + { + this._qdrantClientMock + .Setup(x => x.UpsertAsync( + It.IsAny(), + It.IsAny>(), + It.IsAny(), // wait + It.IsAny(), + It.IsAny(), + this._testCancellationToken)) + .ReturnsAsync(new UpdateResult()); + } + + private static RetrievedPoint CreateRetrievedPoint(bool hasNamedVectors, TKey recordKey) + { + RetrievedPoint point; + if (hasNamedVectors) + { + var namedVectors = new NamedVectors(); + namedVectors.Vectors.Add("vector_storage_name", new[] { 1f, 2f, 3f, 4f }); + point = new RetrievedPoint() + { + Payload = { ["OriginalNameData"] = "data 1", ["data_storage_name"] = "data 1" }, + Vectors = new Vectors { Vectors_ = namedVectors } + }; + } + else + { + point = new RetrievedPoint() + { + Payload = { ["OriginalNameData"] = "data 1", ["data_storage_name"] = "data 1" }, + Vectors = new[] { 1f, 2f, 3f, 4f } + }; + } + + if (recordKey is ulong ulongKey) + { + point.Id = ulongKey; + } + + if (recordKey is Guid guidKey) + { + point.Id = guidKey; + } + + return point; + } + + 
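+    // CreateRetrievedPoint above exercises Qdrant's two vector layouts: with named vectors each
+    // embedding is addressed by name, while the unnamed layout stores one anonymous vector per
+    // point. Condensed from the method body:
+    //
+    //     var named = new NamedVectors();
+    //     named.Vectors.Add("vector_storage_name", new[] { 1f, 2f, 3f, 4f });
+    //     point.Vectors = new Vectors { Vectors_ = named };   // HasNamedVectors = true
+    //     point.Vectors = new[] { 1f, 2f, 3f, 4f };           // HasNamedVectors = false
+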
private IVectorStoreRecordCollection> CreateRecordCollection(bool useDefinition, bool hasNamedVectors) + where T : notnull + { + var store = new QdrantVectorStoreRecordCollection>( + this._qdrantClientMock.Object, + TestCollectionName, + new() + { + VectorStoreRecordDefinition = useDefinition ? CreateSinglePropsDefinition(typeof(T)) : null, + HasNamedVectors = hasNamedVectors + }) as IVectorStoreRecordCollection>; + return store!; + } + + private static SinglePropsModel CreateModel(T key, bool withVectors) + { + return new SinglePropsModel + { + Key = key, + OriginalNameData = "data 1", + Data = "data 1", + Vector = withVectors ? new float[] { 1, 2, 3, 4 } : null, + NotAnnotated = null, + }; + } + + private static VectorStoreRecordDefinition CreateSinglePropsDefinition(Type keyType) + { + return new() + { + Properties = + [ + new VectorStoreRecordKeyProperty("Key", keyType), + new VectorStoreRecordDataProperty("OriginalNameData", typeof(string)) { IsFilterable = true, IsFullTextSearchable = true }, + new VectorStoreRecordDataProperty("Data", typeof(string)) { IsFilterable = true, StoragePropertyName = "data_storage_name" }, + new VectorStoreRecordVectorProperty("Vector", typeof(ReadOnlyMemory)) { StoragePropertyName = "vector_storage_name" } + ] + }; + } + + public sealed class SinglePropsModel + { + [VectorStoreRecordKey] + public required T Key { get; set; } + + [VectorStoreRecordData(IsFilterable = true, IsFullTextSearchable = true)] + public string OriginalNameData { get; set; } = string.Empty; + + [JsonPropertyName("ignored_data_json_name")] + [VectorStoreRecordData(IsFilterable = true, StoragePropertyName = "data_storage_name")] + public string Data { get; set; } = string.Empty; + + [JsonPropertyName("ignored_vector_json_name")] + [VectorStoreRecordVector(4, StoragePropertyName = "vector_storage_name")] + public ReadOnlyMemory? Vector { get; set; } + + public string? NotAnnotated { get; set; } + } + + public static IEnumerable TestOptions + => GenerateAllCombinations(new object[][] { + new object[] { true, false }, + new object[] { true, false }, + new object[] { UlongTestRecordKey1, s_guidTestRecordKey1 } + }); + + public static IEnumerable MultiRecordTestOptions + => GenerateAllCombinations(new object[][] { + new object[] { true, false }, + new object[] { true, false }, + new object[] { new ulong[] { UlongTestRecordKey1, UlongTestRecordKey2 }, new Guid[] { s_guidTestRecordKey1, s_guidTestRecordKey2 } } + }); + + private static object[][] GenerateAllCombinations(object[][] input) + { + var counterArray = Enumerable.Range(0, input.Length).Select(x => 0).ToArray(); + + // Add each item from the first option set as a separate row. + object[][] currentCombinations = input[0].Select(x => new object[1] { x }).ToArray(); + + // Loop through each additional option set. + for (int currentOptionSetIndex = 1; currentOptionSetIndex < input.Length; currentOptionSetIndex++) + { + var iterationCombinations = new List(); + var currentOptionSet = input[currentOptionSetIndex]; + + // Loop through each row we have already. + foreach (var currentCombination in currentCombinations) + { + // Add each of the values from the new options set to the current row to generate a new row. 
+ for (var currentColumnRow = 0; currentColumnRow < currentOptionSet.Length; currentColumnRow++) + { + iterationCombinations.Add(currentCombination.Append(currentOptionSet[currentColumnRow]).ToArray()); + } + } + + currentCombinations = iterationCombinations.ToArray(); + } + + return currentCombinations; + } +} diff --git a/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreRecordMapperTests.cs b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreRecordMapperTests.cs new file mode 100644 index 000000000000..68ff1d46a86b --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreRecordMapperTests.cs @@ -0,0 +1,440 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text.Json.Serialization; +using Microsoft.SemanticKernel.Connectors.Qdrant; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client.Grpc; +using Xunit; + +namespace SemanticKernel.Connectors.Qdrant.UnitTests; + +/// +/// Contains tests for the class. +/// +public class QdrantVectorStoreRecordMapperTests +{ + [Theory] + [InlineData(true)] + [InlineData(false)] + public void MapsSinglePropsFromDataToStorageModelWithUlong(bool hasNamedVectors) + { + // Arrange. + var definition = CreateSinglePropsVectorStoreRecordDefinition(typeof(ulong)); + var sut = new QdrantVectorStoreRecordMapper>(definition, hasNamedVectors, s_singlePropsModelStorageNamesMap); + + // Act. + var actual = sut.MapFromDataToStorageModel(CreateSinglePropsModel(5ul)); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(5ul, actual.Id.Num); + Assert.Single(actual.Payload); + Assert.Equal("data value", actual.Payload["data"].StringValue); + + if (hasNamedVectors) + { + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vectors.Vectors_.Vectors["vector"].Data.ToArray()); + } + else + { + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vectors.Vector.Data.ToArray()); + } + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public void MapsSinglePropsFromDataToStorageModelWithGuid(bool hasNamedVectors) + { + // Arrange. + var definition = CreateSinglePropsVectorStoreRecordDefinition(typeof(Guid)); + var sut = new QdrantVectorStoreRecordMapper>(definition, hasNamedVectors, s_singlePropsModelStorageNamesMap); + + // Act. + var actual = sut.MapFromDataToStorageModel(CreateSinglePropsModel(Guid.Parse("11111111-1111-1111-1111-111111111111"))); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(Guid.Parse("11111111-1111-1111-1111-111111111111"), Guid.Parse(actual.Id.Uuid)); + Assert.Single(actual.Payload); + Assert.Equal("data value", actual.Payload["data"].StringValue); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public void MapsSinglePropsFromStorageToDataModelWithUlong(bool hasNamedVectors, bool includeVectors) + { + // Arrange. + var definition = CreateSinglePropsVectorStoreRecordDefinition(typeof(ulong)); + var sut = new QdrantVectorStoreRecordMapper>(definition, hasNamedVectors, s_singlePropsModelStorageNamesMap); + + // Act. + var actual = sut.MapFromStorageToDataModel(CreateSinglePropsPointStruct(5, hasNamedVectors), new() { IncludeVectors = includeVectors }); + + // Assert. 
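+        // The assertions below pin down the IncludeVectors contract: when the caller opts out,
+        // the mapper leaves Vector null instead of materializing an empty embedding, so read
+        // paths can skip vector payloads entirely.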
+ Assert.NotNull(actual); + Assert.Equal(5ul, actual.Key); + Assert.Equal("data value", actual.Data); + + if (includeVectors) + { + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector!.Value.ToArray()); + } + else + { + Assert.Null(actual.Vector); + } + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public void MapsSinglePropsFromStorageToDataModelWithGuid(bool hasNamedVectors, bool includeVectors) + { + // Arrange. + var definition = CreateSinglePropsVectorStoreRecordDefinition(typeof(Guid)); + var sut = new QdrantVectorStoreRecordMapper>(definition, hasNamedVectors, s_singlePropsModelStorageNamesMap); + + // Act. + var actual = sut.MapFromStorageToDataModel(CreateSinglePropsPointStruct(Guid.Parse("11111111-1111-1111-1111-111111111111"), hasNamedVectors), new() { IncludeVectors = includeVectors }); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(Guid.Parse("11111111-1111-1111-1111-111111111111"), actual.Key); + Assert.Equal("data value", actual.Data); + + if (includeVectors) + { + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector!.Value.ToArray()); + } + else + { + Assert.Null(actual.Vector); + } + } + + [Fact] + public void MapsMultiPropsFromDataToStorageModelWithUlong() + { + // Arrange. + var definition = CreateMultiPropsVectorStoreRecordDefinition(typeof(ulong)); + var sut = new QdrantVectorStoreRecordMapper>(definition, true, s_multiPropsModelStorageNamesMap); + + // Act. + var actual = sut.MapFromDataToStorageModel(CreateMultiPropsModel(5ul)); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(5ul, actual.Id.Num); + Assert.Equal(7, actual.Payload.Count); + Assert.Equal("data 1", actual.Payload["dataString"].StringValue); + Assert.Equal(5, actual.Payload["dataInt"].IntegerValue); + Assert.Equal(5, actual.Payload["dataLong"].IntegerValue); + Assert.Equal(5.5f, actual.Payload["dataFloat"].DoubleValue); + Assert.Equal(5.5d, actual.Payload["dataDouble"].DoubleValue); + Assert.True(actual.Payload["dataBool"].BoolValue); + Assert.Equal(new int[] { 1, 2, 3, 4 }, actual.Payload["dataArrayInt"].ListValue.Values.Select(x => (int)x.IntegerValue).ToArray()); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vectors.Vectors_.Vectors["vector1"].Data.ToArray()); + Assert.Equal(new float[] { 5, 6, 7, 8 }, actual.Vectors.Vectors_.Vectors["vector2"].Data.ToArray()); + } + + [Fact] + public void MapsMultiPropsFromDataToStorageModelWithGuid() + { + // Arrange. + var definition = CreateMultiPropsVectorStoreRecordDefinition(typeof(Guid)); + var sut = new QdrantVectorStoreRecordMapper>(definition, true, s_multiPropsModelStorageNamesMap); + + // Act. + var actual = sut.MapFromDataToStorageModel(CreateMultiPropsModel(Guid.Parse("11111111-1111-1111-1111-111111111111"))); + + // Assert. 
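+        // The payload assertions below also document how CLR scalars surface through the Qdrant
+        // gRPC Value union: string -> StringValue, int and long -> IntegerValue (64-bit),
+        // float and double -> DoubleValue, bool -> BoolValue, collections -> ListValue.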
+ Assert.NotNull(actual); + Assert.Equal(Guid.Parse("11111111-1111-1111-1111-111111111111"), Guid.Parse(actual.Id.Uuid)); + Assert.Equal(7, actual.Payload.Count); + Assert.Equal("data 1", actual.Payload["dataString"].StringValue); + Assert.Equal(5, actual.Payload["dataInt"].IntegerValue); + Assert.Equal(5, actual.Payload["dataLong"].IntegerValue); + Assert.Equal(5.5f, actual.Payload["dataFloat"].DoubleValue); + Assert.Equal(5.5d, actual.Payload["dataDouble"].DoubleValue); + Assert.True(actual.Payload["dataBool"].BoolValue); + Assert.Equal(new int[] { 1, 2, 3, 4 }, actual.Payload["dataArrayInt"].ListValue.Values.Select(x => (int)x.IntegerValue).ToArray()); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vectors.Vectors_.Vectors["vector1"].Data.ToArray()); + Assert.Equal(new float[] { 5, 6, 7, 8 }, actual.Vectors.Vectors_.Vectors["vector2"].Data.ToArray()); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public void MapsMultiPropsFromStorageToDataModelWithUlong(bool includeVectors) + { + // Arrange. + var definition = CreateMultiPropsVectorStoreRecordDefinition(typeof(ulong)); + var sut = new QdrantVectorStoreRecordMapper>(definition, true, s_multiPropsModelStorageNamesMap); + + // Act. + var actual = sut.MapFromStorageToDataModel(CreateMultiPropsPointStruct(5), new() { IncludeVectors = includeVectors }); + + // Assert. + Assert.NotNull(actual); + Assert.Equal(5ul, actual.Key); + Assert.Equal("data 1", actual.DataString); + Assert.Equal(5, actual.DataInt); + Assert.Equal(5L, actual.DataLong); + Assert.Equal(5.5f, actual.DataFloat); + Assert.Equal(5.5d, actual.DataDouble); + Assert.True(actual.DataBool); + Assert.Equal(new int[] { 1, 2, 3, 4 }, actual.DataArrayInt); + + if (includeVectors) + { + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector1!.Value.ToArray()); + Assert.Equal(new float[] { 5, 6, 7, 8 }, actual.Vector2!.Value.ToArray()); + } + else + { + Assert.Null(actual.Vector1); + Assert.Null(actual.Vector2); + } + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public void MapsMultiPropsFromStorageToDataModelWithGuid(bool includeVectors) + { + // Arrange. + var definition = CreateMultiPropsVectorStoreRecordDefinition(typeof(Guid)); + var sut = new QdrantVectorStoreRecordMapper>(definition, true, s_multiPropsModelStorageNamesMap); + + // Act. + var actual = sut.MapFromStorageToDataModel(CreateMultiPropsPointStruct(Guid.Parse("11111111-1111-1111-1111-111111111111")), new() { IncludeVectors = includeVectors }); + + // Assert. 
+ Assert.NotNull(actual); + Assert.Equal(Guid.Parse("11111111-1111-1111-1111-111111111111"), actual.Key); + Assert.Equal("data 1", actual.DataString); + Assert.Equal(5, actual.DataInt); + Assert.Equal(5L, actual.DataLong); + Assert.Equal(5.5f, actual.DataFloat); + Assert.Equal(5.5d, actual.DataDouble); + Assert.True(actual.DataBool); + Assert.Equal(new int[] { 1, 2, 3, 4 }, actual.DataArrayInt); + + if (includeVectors) + { + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector1!.Value.ToArray()); + Assert.Equal(new float[] { 5, 6, 7, 8 }, actual.Vector2!.Value.ToArray()); + } + else + { + Assert.Null(actual.Vector1); + Assert.Null(actual.Vector2); + } + } + + private static SinglePropsModel CreateSinglePropsModel(TKey key) + { + return new SinglePropsModel + { + Key = key, + Data = "data value", + Vector = new float[] { 1, 2, 3, 4 }, + NotAnnotated = "notAnnotated", + }; + } + + private static MultiPropsModel CreateMultiPropsModel(TKey key) + { + return new MultiPropsModel + { + Key = key, + DataString = "data 1", + DataInt = 5, + DataLong = 5L, + DataFloat = 5.5f, + DataDouble = 5.5d, + DataBool = true, + DataArrayInt = new List { 1, 2, 3, 4 }, + Vector1 = new float[] { 1, 2, 3, 4 }, + Vector2 = new float[] { 5, 6, 7, 8 }, + NotAnnotated = "notAnnotated", + }; + } + + private static PointStruct CreateSinglePropsPointStruct(ulong id, bool hasNamedVectors) + { + var pointStruct = new PointStruct(); + pointStruct.Id = new PointId() { Num = id }; + AddDataToSinglePropsPointStruct(pointStruct, hasNamedVectors); + return pointStruct; + } + + private static PointStruct CreateSinglePropsPointStruct(Guid id, bool hasNamedVectors) + { + var pointStruct = new PointStruct(); + pointStruct.Id = new PointId() { Uuid = id.ToString() }; + AddDataToSinglePropsPointStruct(pointStruct, hasNamedVectors); + return pointStruct; + } + + private static void AddDataToSinglePropsPointStruct(PointStruct pointStruct, bool hasNamedVectors) + { + pointStruct.Payload.Add("data", "data value"); + + if (hasNamedVectors) + { + var namedVectors = new NamedVectors(); + namedVectors.Vectors.Add("vector", new[] { 1f, 2f, 3f, 4f }); + pointStruct.Vectors = new Vectors() { Vectors_ = namedVectors }; + } + else + { + pointStruct.Vectors = new[] { 1f, 2f, 3f, 4f }; + } + } + + private static PointStruct CreateMultiPropsPointStruct(ulong id) + { + var pointStruct = new PointStruct(); + pointStruct.Id = new PointId() { Num = id }; + AddDataToMultiPropsPointStruct(pointStruct); + return pointStruct; + } + + private static PointStruct CreateMultiPropsPointStruct(Guid id) + { + var pointStruct = new PointStruct(); + pointStruct.Id = new PointId() { Uuid = id.ToString() }; + AddDataToMultiPropsPointStruct(pointStruct); + return pointStruct; + } + + private static void AddDataToMultiPropsPointStruct(PointStruct pointStruct) + { + pointStruct.Payload.Add("dataString", "data 1"); + pointStruct.Payload.Add("dataInt", 5); + pointStruct.Payload.Add("dataLong", 5L); + pointStruct.Payload.Add("dataFloat", 5.5f); + pointStruct.Payload.Add("dataDouble", 5.5d); + pointStruct.Payload.Add("dataBool", true); + + var dataIntArray = new ListValue(); + dataIntArray.Values.Add(1); + dataIntArray.Values.Add(2); + dataIntArray.Values.Add(3); + dataIntArray.Values.Add(4); + pointStruct.Payload.Add("dataArrayInt", new Value { ListValue = dataIntArray }); + + var namedVectors = new NamedVectors(); + namedVectors.Vectors.Add("vector1", new[] { 1f, 2f, 3f, 4f }); + namedVectors.Vectors.Add("vector2", new[] { 5f, 6f, 7f, 8f }); + pointStruct.Vectors = new 
Vectors() { Vectors_ = namedVectors }; + } + + private static readonly Dictionary s_singlePropsModelStorageNamesMap = new() + { + { "Key", "key" }, + { "Data", "data" }, + { "Vector", "vector" }, + }; + + private static VectorStoreRecordDefinition CreateSinglePropsVectorStoreRecordDefinition(Type keyType) => new() + { + Properties = new List + { + new VectorStoreRecordKeyProperty("Key", keyType), + new VectorStoreRecordDataProperty("Data", typeof(string)), + new VectorStoreRecordVectorProperty("Vector", typeof(ReadOnlyMemory)), + }, + }; + + private sealed class SinglePropsModel + { + [VectorStoreRecordKey] + public TKey? Key { get; set; } = default; + + [VectorStoreRecordData] + public string Data { get; set; } = string.Empty; + + [VectorStoreRecordVector] + public ReadOnlyMemory? Vector { get; set; } + + public string NotAnnotated { get; set; } = string.Empty; + } + + private static readonly Dictionary s_multiPropsModelStorageNamesMap = new() + { + { "Key", "key" }, + { "DataString", "dataString" }, + { "DataInt", "dataInt" }, + { "DataLong", "dataLong" }, + { "DataFloat", "dataFloat" }, + { "DataDouble", "dataDouble" }, + { "DataBool", "dataBool" }, + { "DataArrayInt", "dataArrayInt" }, + { "Vector1", "vector1" }, + { "Vector2", "vector2" }, + }; + + private static VectorStoreRecordDefinition CreateMultiPropsVectorStoreRecordDefinition(Type keyType) => new() + { + Properties = new List + { + new VectorStoreRecordKeyProperty("Key", keyType), + new VectorStoreRecordDataProperty("DataString", typeof(string)), + new VectorStoreRecordDataProperty("DataInt", typeof(int)), + new VectorStoreRecordDataProperty("DataLong", typeof(long)), + new VectorStoreRecordDataProperty("DataFloat", typeof(float)), + new VectorStoreRecordDataProperty("DataDouble", typeof(double)), + new VectorStoreRecordDataProperty("DataBool", typeof(bool)), + new VectorStoreRecordDataProperty("DataArrayInt", typeof(List)), + new VectorStoreRecordVectorProperty("Vector1", typeof(ReadOnlyMemory)), + new VectorStoreRecordVectorProperty("Vector2", typeof(ReadOnlyMemory)), + }, + }; + + private sealed class MultiPropsModel + { + [VectorStoreRecordKey] + public TKey? Key { get; set; } = default; + + [VectorStoreRecordData] + public string DataString { get; set; } = string.Empty; + + [JsonPropertyName("data_int_json")] + [VectorStoreRecordData] + public int DataInt { get; set; } = 0; + + [VectorStoreRecordData] + public long DataLong { get; set; } = 0; + + [VectorStoreRecordData] + public float DataFloat { get; set; } = 0; + + [VectorStoreRecordData] + public double DataDouble { get; set; } = 0; + + [VectorStoreRecordData] + public bool DataBool { get; set; } = false; + + [VectorStoreRecordData] + public List? DataArrayInt { get; set; } + + [VectorStoreRecordVector] + public ReadOnlyMemory? Vector1 { get; set; } + + [VectorStoreRecordVector] + public ReadOnlyMemory? Vector2 { get; set; } + + public string NotAnnotated { get; set; } = string.Empty; + } +} diff --git a/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreTests.cs b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreTests.cs new file mode 100644 index 000000000000..2a234f08442a --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Qdrant.UnitTests/QdrantVectorStoreTests.cs @@ -0,0 +1,103 @@ +// Copyright (c) Microsoft. All rights reserved. 
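+
+// QdrantVectorStore.GetCollection is constrained to Qdrant's point id types, so only ulong and
+// Guid keys are accepted (see GetCollectionThrowsForInvalidKeyType below). Typical usage,
+// sketched with the test types from this file:
+//
+//     var store = new QdrantVectorStore(new QdrantClient("localhost"));
+//     var collection = store.GetCollection<ulong, SinglePropsModel<ulong>>("testcollection");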
+ +using System; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Data; +using Moq; +using Qdrant.Client; +using Xunit; + +namespace Microsoft.SemanticKernel.Connectors.Qdrant.UnitTests; + +/// +/// Contains tests for the class. +/// +public class QdrantVectorStoreTests +{ + private const string TestCollectionName = "testcollection"; + + private readonly Mock _qdrantClientMock; + + private readonly CancellationToken _testCancellationToken = new(false); + + public QdrantVectorStoreTests() + { + this._qdrantClientMock = new Mock(MockBehavior.Strict); + } + + [Fact] + public void GetCollectionReturnsCollection() + { + // Arrange. + var sut = new QdrantVectorStore(this._qdrantClientMock.Object); + + // Act. + var actual = sut.GetCollection>(TestCollectionName); + + // Assert. + Assert.NotNull(actual); + Assert.IsType>>(actual); + } + + [Fact] + public void GetCollectionCallsFactoryIfProvided() + { + // Arrange. + var factoryMock = new Mock(MockBehavior.Strict); + var collectionMock = new Mock>>(MockBehavior.Strict); + factoryMock + .Setup(x => x.CreateVectorStoreRecordCollection>(It.IsAny(), TestCollectionName, null)) + .Returns(collectionMock.Object); + var sut = new QdrantVectorStore(this._qdrantClientMock.Object, new() { VectorStoreCollectionFactory = factoryMock.Object }); + + // Act. + var actual = sut.GetCollection>(TestCollectionName); + + // Assert. + Assert.Equal(collectionMock.Object, actual); + factoryMock.Verify(x => x.CreateVectorStoreRecordCollection>(It.IsAny(), TestCollectionName, null), Times.Once); + } + + [Fact] + public void GetCollectionThrowsForInvalidKeyType() + { + // Arrange. + var sut = new QdrantVectorStore(this._qdrantClientMock.Object); + + // Act & Assert. + Assert.Throws(() => sut.GetCollection>(TestCollectionName)); + } + + [Fact] + public async Task ListCollectionNamesCallsSDKAsync() + { + // Arrange. + this._qdrantClientMock + .Setup(x => x.ListCollectionsAsync(It.IsAny())) + .ReturnsAsync(new[] { "collection1", "collection2" }); + var sut = new QdrantVectorStore(this._qdrantClientMock.Object); + + // Act. + var collectionNames = sut.ListCollectionNamesAsync(this._testCancellationToken); + + // Assert. + var collectionNamesList = await collectionNames.ToListAsync(); + Assert.Equal(new[] { "collection1", "collection2" }, collectionNamesList); + } + + public sealed class SinglePropsModel + { + [VectorStoreRecordKey] + public required TKey Key { get; set; } + + [VectorStoreRecordData] + public string Data { get; set; } = string.Empty; + + [VectorStoreRecordVector(4)] + public ReadOnlyMemory? Vector { get; set; } + + public string? NotAnnotated { get; set; } + } +} diff --git a/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisHashSetVectorStoreRecordCollectionTests.cs b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisHashSetVectorStoreRecordCollectionTests.cs new file mode 100644 index 000000000000..a95179e86346 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisHashSetVectorStoreRecordCollectionTests.cs @@ -0,0 +1,534 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.InteropServices; +using System.Text.Json.Serialization; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Data; +using Moq; +using NRedisStack; +using StackExchange.Redis; +using Xunit; + +namespace Microsoft.SemanticKernel.Connectors.Redis.UnitTests; + +/// +/// Contains tests for the class. 
+/// +public class RedisHashSetVectorStoreRecordCollectionTests +{ + private const string TestCollectionName = "testcollection"; + private const string TestRecordKey1 = "testid1"; + private const string TestRecordKey2 = "testid2"; + + private readonly Mock _redisDatabaseMock; + + public RedisHashSetVectorStoreRecordCollectionTests() + { + this._redisDatabaseMock = new Mock(MockBehavior.Strict); + + var batchMock = new Mock(); + this._redisDatabaseMock.Setup(x => x.CreateBatch(It.IsAny())).Returns(batchMock.Object); + } + + [Theory] + [InlineData(TestCollectionName, true)] + [InlineData("nonexistentcollection", false)] + public async Task CollectionExistsReturnsCollectionStateAsync(string collectionName, bool expectedExists) + { + // Arrange + if (expectedExists) + { + SetupExecuteMock(this._redisDatabaseMock, ["index_name", collectionName]); + } + else + { + SetupExecuteMock(this._redisDatabaseMock, new RedisServerException("Unknown index name")); + } + var sut = new RedisHashSetVectorStoreRecordCollection( + this._redisDatabaseMock.Object, + collectionName); + + // Act + var actual = await sut.CollectionExistsAsync(); + + // Assert + var expectedArgs = new object[] { collectionName }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "FT.INFO", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + Assert.Equal(expectedExists, actual); + } + + [Fact] + public async Task CanCreateCollectionAsync() + { + // Arrange. + SetupExecuteMock(this._redisDatabaseMock, string.Empty); + var sut = new RedisHashSetVectorStoreRecordCollection(this._redisDatabaseMock.Object, TestCollectionName); + + // Act. + await sut.CreateCollectionAsync(); + + // Assert. + var expectedArgs = new object[] { + "testcollection", + "PREFIX", + 1, + "testcollection:", + "SCHEMA", + "$.OriginalNameData", + "AS", + "OriginalNameData", + "TAG", + "$.data_storage_name", + "AS", + "data_storage_name", + "TAG", + "$.vector_storage_name", + "AS", + "vector_storage_name", + "VECTOR", + "HNSW", + 6, + "TYPE", + "FLOAT32", + "DIM", + "4", + "DISTANCE_METRIC", + "COSINE" }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "FT.CREATE", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + } + + [Fact] + public async Task CanDeleteCollectionAsync() + { + // Arrange + SetupExecuteMock(this._redisDatabaseMock, string.Empty); + var sut = this.CreateRecordCollection(false); + + // Act + await sut.DeleteCollectionAsync(); + + // Assert + var expectedArgs = new object[] { TestCollectionName }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "FT.DROPINDEX", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanGetRecordWithVectorsAsync(bool useDefinition) + { + // Arrange + var hashEntries = new HashEntry[] + { + new("OriginalNameData", "data 1"), + new("data_storage_name", "data 1"), + new("vector_storage_name", MemoryMarshal.AsBytes(new ReadOnlySpan(new float[] { 1, 2, 3, 4 })).ToArray()) + }; + this._redisDatabaseMock.Setup(x => x.HashGetAllAsync(It.IsAny(), CommandFlags.None)).ReturnsAsync(hashEntries); + var sut = this.CreateRecordCollection(useDefinition); + + // Act + var actual = await sut.GetAsync( + TestRecordKey1, + new() { IncludeVectors = true }); + + // Assert + this._redisDatabaseMock.Verify(x => x.HashGetAllAsync(TestRecordKey1, CommandFlags.None), Times.Once); + + Assert.NotNull(actual); + Assert.Equal(TestRecordKey1, actual.Key); + Assert.Equal("data 1", actual.OriginalNameData); + 
+        Assert.Equal("data 1", actual.Data);
+        Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector!.Value.ToArray());
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public async Task CanGetRecordWithoutVectorsAsync(bool useDefinition)
+    {
+        // Arrange
+        var redisValues = new RedisValue[] { new("data 1"), new("data 1") };
+        this._redisDatabaseMock.Setup(x => x.HashGetAsync(It.IsAny<RedisKey>(), It.IsAny<RedisValue[]>(), CommandFlags.None)).ReturnsAsync(redisValues);
+        var sut = this.CreateRecordCollection(useDefinition);
+
+        // Act
+        var actual = await sut.GetAsync(
+            TestRecordKey1,
+            new() { IncludeVectors = false });
+
+        // Assert
+        var fieldNames = new RedisValue[] { "OriginalNameData", "data_storage_name" };
+        this._redisDatabaseMock.Verify(x => x.HashGetAsync(TestRecordKey1, fieldNames, CommandFlags.None), Times.Once);
+
+        Assert.NotNull(actual);
+        Assert.Equal(TestRecordKey1, actual.Key);
+        Assert.Equal("data 1", actual.OriginalNameData);
+        Assert.Equal("data 1", actual.Data);
+        Assert.False(actual.Vector.HasValue);
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public async Task CanGetManyRecordsWithVectorsAsync(bool useDefinition)
+    {
+        // Arrange
+        var hashEntries1 = new HashEntry[]
+        {
+            new("OriginalNameData", "data 1"),
+            new("data_storage_name", "data 1"),
+            new("vector_storage_name", MemoryMarshal.AsBytes(new ReadOnlySpan<float>(new float[] { 1, 2, 3, 4 })).ToArray())
+        };
+        var hashEntries2 = new HashEntry[]
+        {
+            new("OriginalNameData", "data 2"),
+            new("data_storage_name", "data 2"),
+            new("vector_storage_name", MemoryMarshal.AsBytes(new ReadOnlySpan<float>(new float[] { 5, 6, 7, 8 })).ToArray())
+        };
+        this._redisDatabaseMock.Setup(x => x.HashGetAllAsync(It.IsAny<RedisKey>(), CommandFlags.None)).Returns((RedisKey key, CommandFlags flags) =>
+        {
+            return key switch
+            {
+                RedisKey k when k == TestRecordKey1 => Task.FromResult(hashEntries1),
+                RedisKey k when k == TestRecordKey2 => Task.FromResult(hashEntries2),
+                _ => throw new ArgumentException("Unexpected key."),
+            };
+        });
+        var sut = this.CreateRecordCollection(useDefinition);
+
+        // Act
+        var actual = await sut.GetBatchAsync(
+            [TestRecordKey1, TestRecordKey2],
+            new() { IncludeVectors = true }).ToListAsync();
+
+        // Assert
+        this._redisDatabaseMock.Verify(x => x.HashGetAllAsync(TestRecordKey1, CommandFlags.None), Times.Once);
+        this._redisDatabaseMock.Verify(x => x.HashGetAllAsync(TestRecordKey2, CommandFlags.None), Times.Once);
+
+        Assert.NotNull(actual);
+        Assert.Equal(2, actual.Count);
+        Assert.Equal(TestRecordKey1, actual[0].Key);
+        Assert.Equal("data 1", actual[0].OriginalNameData);
+        Assert.Equal("data 1", actual[0].Data);
+        Assert.Equal(new float[] { 1, 2, 3, 4 }, actual[0].Vector!.Value.ToArray());
+        Assert.Equal(TestRecordKey2, actual[1].Key);
+        Assert.Equal("data 2", actual[1].OriginalNameData);
+        Assert.Equal("data 2", actual[1].Data);
+        Assert.Equal(new float[] { 5, 6, 7, 8 }, actual[1].Vector!.Value.ToArray());
+    }
+
+    [Fact]
+    public async Task CanGetRecordWithCustomMapperAsync()
+    {
+        // Arrange.
+        var hashEntries = new HashEntry[]
+        {
+            new("OriginalNameData", "data 1"),
+            new("data_storage_name", "data 1"),
+            new("vector_storage_name", MemoryMarshal.AsBytes(new ReadOnlySpan<float>(new float[] { 1, 2, 3, 4 })).ToArray())
+        };
+        this._redisDatabaseMock.Setup(x => x.HashGetAllAsync(It.IsAny<RedisKey>(), CommandFlags.None)).ReturnsAsync(hashEntries);
+
+        // Arrange mapper mock from hash entries to data model.
+        var mapperMock = new Mock<IVectorStoreRecordMapper<SinglePropsModel, (string key, HashEntry[] hashEntries)>>(MockBehavior.Strict);
+        mapperMock.Setup(
+            x => x.MapFromStorageToDataModel(
+                It.IsAny<(string key, HashEntry[] hashEntries)>(),
+                It.IsAny<StorageToDataModelMapperOptions>()))
+            .Returns(CreateModel(TestRecordKey1, true));
+
+        // Arrange target with custom mapper.
+        var sut = new RedisHashSetVectorStoreRecordCollection<SinglePropsModel>(
+            this._redisDatabaseMock.Object,
+            TestCollectionName,
+            new()
+            {
+                HashEntriesCustomMapper = mapperMock.Object
+            });
+
+        // Act
+        var actual = await sut.GetAsync(
+            TestRecordKey1,
+            new() { IncludeVectors = true });
+
+        // Assert
+        Assert.NotNull(actual);
+        Assert.Equal(TestRecordKey1, actual.Key);
+        Assert.Equal("data 1", actual.OriginalNameData);
+        Assert.Equal("data 1", actual.Data);
+        Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector!.Value.ToArray());
+
+        mapperMock
+            .Verify(
+                x => x.MapFromStorageToDataModel(
+                    It.Is<(string key, HashEntry[] hashEntries)>(x => x.key == TestRecordKey1),
+                    It.Is<StorageToDataModelMapperOptions>(x => x.IncludeVectors)),
+                Times.Once);
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public async Task CanDeleteRecordAsync(bool useDefinition)
+    {
+        // Arrange
+        this._redisDatabaseMock.Setup(x => x.KeyDeleteAsync(It.IsAny<RedisKey>(), CommandFlags.None)).ReturnsAsync(true);
+        var sut = this.CreateRecordCollection(useDefinition);
+
+        // Act
+        await sut.DeleteAsync(TestRecordKey1);
+
+        // Assert
+        this._redisDatabaseMock.Verify(x => x.KeyDeleteAsync(TestRecordKey1, CommandFlags.None), Times.Once);
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public async Task CanDeleteManyRecordsWithVectorsAsync(bool useDefinition)
+    {
+        // Arrange
+        this._redisDatabaseMock.Setup(x => x.KeyDeleteAsync(It.IsAny<RedisKey>(), CommandFlags.None)).ReturnsAsync(true);
+        var sut = this.CreateRecordCollection(useDefinition);
+
+        // Act
+        await sut.DeleteBatchAsync([TestRecordKey1, TestRecordKey2]);
+
+        // Assert
+        this._redisDatabaseMock.Verify(x => x.KeyDeleteAsync(TestRecordKey1, CommandFlags.None), Times.Once);
+        this._redisDatabaseMock.Verify(x => x.KeyDeleteAsync(TestRecordKey2, CommandFlags.None), Times.Once);
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public async Task CanUpsertRecordAsync(bool useDefinition)
+    {
+        // Arrange
+        this._redisDatabaseMock.Setup(x => x.HashSetAsync(It.IsAny<RedisKey>(), It.IsAny<HashEntry[]>(), CommandFlags.None)).Returns(Task.CompletedTask);
+        var sut = this.CreateRecordCollection(useDefinition);
+        var model = CreateModel(TestRecordKey1, true);
+
+        // Act
+        await sut.UpsertAsync(model);
+
+        // Assert
+        this._redisDatabaseMock.Verify(
+            x => x.HashSetAsync(
+                TestRecordKey1,
+                It.Is<HashEntry[]>(x => x.Length == 3 && x[0].Name == "OriginalNameData" && x[1].Name == "data_storage_name" && x[2].Name == "vector_storage_name"),
+                CommandFlags.None),
+            Times.Once);
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public async Task CanUpsertManyRecordsAsync(bool useDefinition)
+    {
+        // Arrange
+        this._redisDatabaseMock.Setup(x => x.HashSetAsync(It.IsAny<RedisKey>(), It.IsAny<HashEntry[]>(), CommandFlags.None)).Returns(Task.CompletedTask);
+        var sut = this.CreateRecordCollection(useDefinition);
+
+        var model1 = CreateModel(TestRecordKey1, true);
+        var model2 = CreateModel(TestRecordKey2, true);
+
+        // Act
+        var actual = await sut.UpsertBatchAsync([model1, model2]).ToListAsync();
+
+        // Assert
+        Assert.NotNull(actual);
+        Assert.Equal(2, actual.Count);
+        Assert.Equal(TestRecordKey1, actual[0]);
+        Assert.Equal(TestRecordKey2, actual[1]);
+
+        this._redisDatabaseMock.Verify(
+            x => x.HashSetAsync(
+                TestRecordKey1,
+                It.Is<HashEntry[]>(x => x.Length == 3 && x[0].Name == "OriginalNameData" && x[1].Name == "data_storage_name" && x[2].Name == "vector_storage_name"),
+                CommandFlags.None),
+            Times.Once);
+        this._redisDatabaseMock.Verify(
+            x => x.HashSetAsync(
+                TestRecordKey2,
+                It.Is<HashEntry[]>(x => x.Length == 3 && x[0].Name == "OriginalNameData" && x[1].Name == "data_storage_name" && x[2].Name == "vector_storage_name"),
+                CommandFlags.None),
+            Times.Once);
+    }
+
+    [Fact]
+    public async Task CanUpsertRecordWithCustomMapperAsync()
+    {
+        // Arrange.
+        this._redisDatabaseMock.Setup(x => x.HashSetAsync(It.IsAny<RedisKey>(), It.IsAny<HashEntry[]>(), CommandFlags.None)).Returns(Task.CompletedTask);
+
+        // Arrange mapper mock from data model to hash entries.
+        var mapperMock = new Mock<IVectorStoreRecordMapper<SinglePropsModel, (string key, HashEntry[] hashEntries)>>(MockBehavior.Strict);
+        var hashEntries = new HashEntry[]
+        {
+            new("OriginalNameData", "data 1"),
+            new("data_storage_name", "data 1"),
+            new("vector_storage_name", "[1,2,3,4]"),
+            new("NotAnnotated", RedisValue.Null)
+        };
+        mapperMock
+            .Setup(x => x.MapFromDataToStorageModel(It.IsAny<SinglePropsModel>()))
+            .Returns((TestRecordKey1, hashEntries));
+
+        // Arrange target with custom mapper.
+        var sut = new RedisHashSetVectorStoreRecordCollection<SinglePropsModel>(
+            this._redisDatabaseMock.Object,
+            TestCollectionName,
+            new()
+            {
+                HashEntriesCustomMapper = mapperMock.Object
+            });
+
+        var model = CreateModel(TestRecordKey1, true);
+
+        // Act
+        await sut.UpsertAsync(model);
+
+        // Assert
+        mapperMock
+            .Verify(
+                x => x.MapFromDataToStorageModel(It.Is<SinglePropsModel>(x => x == model)),
+                Times.Once);
+    }
+
+    /// <summary>
+    /// Tests that the collection can be created even if the definition and the type do not match.
+    /// In this case, the expectation is that a custom mapper will be provided to map between the
+    /// schema as defined by the definition and the different data model.
+    /// </summary>
+    [Fact]
+    public void CanCreateCollectionWithMismatchedDefinitionAndType()
+    {
+        // Arrange.
+        var definition = new VectorStoreRecordDefinition()
+        {
+            Properties = new List<VectorStoreRecordProperty>
+            {
+                new VectorStoreRecordKeyProperty("Id", typeof(string)),
+                new VectorStoreRecordDataProperty("Text", typeof(string)),
+                new VectorStoreRecordVectorProperty("Embedding", typeof(ReadOnlyMemory<float>)) { Dimensions = 4 },
+            }
+        };
+
+        // Act.
+        var sut = new RedisHashSetVectorStoreRecordCollection<SinglePropsModel>(
+            this._redisDatabaseMock.Object,
+            TestCollectionName,
+            new() { VectorStoreRecordDefinition = definition, HashEntriesCustomMapper = Mock.Of<IVectorStoreRecordMapper<SinglePropsModel, (string key, HashEntry[] hashEntries)>>() });
+    }
+
+    private RedisHashSetVectorStoreRecordCollection<SinglePropsModel> CreateRecordCollection(bool useDefinition)
+    {
+        return new RedisHashSetVectorStoreRecordCollection<SinglePropsModel>(
+            this._redisDatabaseMock.Object,
+            TestCollectionName,
+            new()
+            {
+                PrefixCollectionNameToKeyNames = false,
+                VectorStoreRecordDefinition = useDefinition ?
this._singlePropsDefinition : null + }); + } + + private static void SetupExecuteMock(Mock redisDatabaseMock, Exception exception) + { + redisDatabaseMock + .Setup( + x => x.ExecuteAsync( + It.IsAny(), + It.IsAny())) + .ThrowsAsync(exception); + } + + private static void SetupExecuteMock(Mock redisDatabaseMock, IEnumerable redisResultStrings) + { + var results = redisResultStrings + .Select(x => RedisResult.Create(new RedisValue(x))) + .ToArray(); + redisDatabaseMock + .Setup( + x => x.ExecuteAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(RedisResult.Create(results)); + } + + private static void SetupExecuteMock(Mock redisDatabaseMock, string redisResultString) + { + redisDatabaseMock + .Setup( + x => x.ExecuteAsync( + It.IsAny(), + It.IsAny())) + .Callback((string command, object[] args) => + { + Console.WriteLine(args); + }) + .ReturnsAsync(RedisResult.Create(new RedisValue(redisResultString))); + } + + private static SinglePropsModel CreateModel(string key, bool withVectors) + { + return new SinglePropsModel + { + Key = key, + OriginalNameData = "data 1", + Data = "data 1", + Vector = withVectors ? new float[] { 1, 2, 3, 4 } : null, + NotAnnotated = null, + }; + } + + private readonly VectorStoreRecordDefinition _singlePropsDefinition = new() + { + Properties = + [ + new VectorStoreRecordKeyProperty("Key", typeof(string)), + new VectorStoreRecordDataProperty("OriginalNameData", typeof(string)), + new VectorStoreRecordDataProperty("Data", typeof(string)) { StoragePropertyName = "data_storage_name" }, + new VectorStoreRecordVectorProperty("Vector", typeof(ReadOnlyMemory)) { StoragePropertyName = "vector_storage_name" } + ] + }; + + public sealed class SinglePropsModel + { + [VectorStoreRecordKey] + public string Key { get; set; } = string.Empty; + + [VectorStoreRecordData(IsFilterable = true)] + public string OriginalNameData { get; set; } = string.Empty; + + [JsonPropertyName("ignored_data_json_name")] + [VectorStoreRecordData(IsFilterable = true, StoragePropertyName = "data_storage_name")] + public string Data { get; set; } = string.Empty; + + [JsonPropertyName("ignored_vector_json_name")] + [VectorStoreRecordVector(4, StoragePropertyName = "vector_storage_name")] + public ReadOnlyMemory? Vector { get; set; } + + public string? NotAnnotated { get; set; } + } +} diff --git a/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisHashSetVectorStoreRecordMapperTests.cs b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisHashSetVectorStoreRecordMapperTests.cs new file mode 100644 index 000000000000..fd7a56d8765c --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisHashSetVectorStoreRecordMapperTests.cs @@ -0,0 +1,268 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Runtime.InteropServices; +using Microsoft.SemanticKernel.Connectors.Redis; +using Microsoft.SemanticKernel.Data; +using StackExchange.Redis; +using Xunit; + +namespace SemanticKernel.Connectors.Redis.UnitTests; + +/// +/// Contains tests for the class. +/// +public sealed class RedisHashSetVectorStoreRecordMapperTests +{ + [Fact] + public void MapsAllFieldsFromDataToStorageModel() + { + // Arrange. + var sut = new RedisHashSetVectorStoreRecordMapper(s_vectorStoreRecordDefinition, s_storagePropertyNames); + + // Act. + var actual = sut.MapFromDataToStorageModel(CreateModel("test key")); + + // Assert. 
+ Assert.NotNull(actual.HashEntries); + Assert.Equal("test key", actual.Key); + + Assert.Equal("storage_string_data", actual.HashEntries[0].Name.ToString()); + Assert.Equal("data 1", actual.HashEntries[0].Value.ToString()); + + Assert.Equal("IntData", actual.HashEntries[1].Name.ToString()); + Assert.Equal(1, (int)actual.HashEntries[1].Value); + + Assert.Equal("UIntData", actual.HashEntries[2].Name.ToString()); + Assert.Equal(2u, (uint)actual.HashEntries[2].Value); + + Assert.Equal("LongData", actual.HashEntries[3].Name.ToString()); + Assert.Equal(3, (long)actual.HashEntries[3].Value); + + Assert.Equal("ULongData", actual.HashEntries[4].Name.ToString()); + Assert.Equal(4ul, (ulong)actual.HashEntries[4].Value); + + Assert.Equal("DoubleData", actual.HashEntries[5].Name.ToString()); + Assert.Equal(5.5d, (double)actual.HashEntries[5].Value); + + Assert.Equal("FloatData", actual.HashEntries[6].Name.ToString()); + Assert.Equal(6.6f, (float)actual.HashEntries[6].Value); + + Assert.Equal("BoolData", actual.HashEntries[7].Name.ToString()); + Assert.True((bool)actual.HashEntries[7].Value); + + Assert.Equal("NullableIntData", actual.HashEntries[8].Name.ToString()); + Assert.Equal(7, (int)actual.HashEntries[8].Value); + + Assert.Equal("NullableUIntData", actual.HashEntries[9].Name.ToString()); + Assert.Equal(8u, (uint)actual.HashEntries[9].Value); + + Assert.Equal("NullableLongData", actual.HashEntries[10].Name.ToString()); + Assert.Equal(9, (long)actual.HashEntries[10].Value); + + Assert.Equal("NullableULongData", actual.HashEntries[11].Name.ToString()); + Assert.Equal(10ul, (ulong)actual.HashEntries[11].Value); + + Assert.Equal("NullableDoubleData", actual.HashEntries[12].Name.ToString()); + Assert.Equal(11.1d, (double)actual.HashEntries[12].Value); + + Assert.Equal("NullableFloatData", actual.HashEntries[13].Name.ToString()); + Assert.Equal(12.2f, (float)actual.HashEntries[13].Value); + + Assert.Equal("NullableBoolData", actual.HashEntries[14].Name.ToString()); + Assert.False((bool)actual.HashEntries[14].Value); + + Assert.Equal("FloatVector", actual.HashEntries[15].Name.ToString()); + Assert.Equal(new float[] { 1, 2, 3, 4 }, MemoryMarshal.Cast((byte[])actual.HashEntries[15].Value!).ToArray()); + + Assert.Equal("DoubleVector", actual.HashEntries[16].Name.ToString()); + Assert.Equal(new double[] { 5, 6, 7, 8 }, MemoryMarshal.Cast((byte[])actual.HashEntries[16].Value!).ToArray()); + } + + [Fact] + public void MapsAllFieldsFromStorageToDataModel() + { + // Arrange. + var sut = new RedisHashSetVectorStoreRecordMapper(s_vectorStoreRecordDefinition, s_storagePropertyNames); + + // Act. + var actual = sut.MapFromStorageToDataModel(("test key", CreateHashSet()), new() { IncludeVectors = true }); + + // Assert. 
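+        // Note (illustrative, not part of the original change): the vector assertions in this file
+        // rely on vectors being stored as their raw little-endian bytes, which round-trip losslessly:
+        //
+        //     byte[] bytes = MemoryMarshal.AsBytes(new ReadOnlySpan<float>(new float[] { 1, 2, 3, 4 })).ToArray();
+        //     float[] roundTripped = MemoryMarshal.Cast<byte, float>(bytes).ToArray(); // { 1, 2, 3, 4 }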
+ Assert.NotNull(actual); + Assert.Equal("test key", actual.Key); + Assert.Equal("data 1", actual.StringData); + Assert.Equal(1, actual.IntData); + Assert.Equal(2u, actual.UIntData); + Assert.Equal(3, actual.LongData); + Assert.Equal(4ul, actual.ULongData); + Assert.Equal(5.5d, actual.DoubleData); + Assert.Equal(6.6f, actual.FloatData); + Assert.True(actual.BoolData); + Assert.Equal(7, actual.NullableIntData); + Assert.Equal(8u, actual.NullableUIntData); + Assert.Equal(9, actual.NullableLongData); + Assert.Equal(10ul, actual.NullableULongData); + Assert.Equal(11.1d, actual.NullableDoubleData); + Assert.Equal(12.2f, actual.NullableFloatData); + Assert.False(actual.NullableBoolData); + + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.FloatVector!.Value.ToArray()); + Assert.Equal(new double[] { 5, 6, 7, 8 }, actual.DoubleVector!.Value.ToArray()); + } + + private static AllTypesModel CreateModel(string key) + { + return new AllTypesModel + { + Key = key, + StringData = "data 1", + IntData = 1, + UIntData = 2, + LongData = 3, + ULongData = 4, + DoubleData = 5.5d, + FloatData = 6.6f, + BoolData = true, + NullableIntData = 7, + NullableUIntData = 8, + NullableLongData = 9, + NullableULongData = 10, + NullableDoubleData = 11.1d, + NullableFloatData = 12.2f, + NullableBoolData = false, + FloatVector = new float[] { 1, 2, 3, 4 }, + DoubleVector = new double[] { 5, 6, 7, 8 }, + NotAnnotated = "notAnnotated", + }; + } + + private static HashEntry[] CreateHashSet() + { + var hashSet = new HashEntry[17]; + hashSet[0] = new HashEntry("storage_string_data", "data 1"); + hashSet[1] = new HashEntry("IntData", 1); + hashSet[2] = new HashEntry("UIntData", 2); + hashSet[3] = new HashEntry("LongData", 3); + hashSet[4] = new HashEntry("ULongData", 4); + hashSet[5] = new HashEntry("DoubleData", 5.5); + hashSet[6] = new HashEntry("FloatData", 6.6); + hashSet[7] = new HashEntry("BoolData", true); + hashSet[8] = new HashEntry("NullableIntData", 7); + hashSet[9] = new HashEntry("NullableUIntData", 8); + hashSet[10] = new HashEntry("NullableLongData", 9); + hashSet[11] = new HashEntry("NullableULongData", 10); + hashSet[12] = new HashEntry("NullableDoubleData", 11.1); + hashSet[13] = new HashEntry("NullableFloatData", 12.2); + hashSet[14] = new HashEntry("NullableBoolData", false); + hashSet[15] = new HashEntry("FloatVector", MemoryMarshal.AsBytes(new ReadOnlySpan(new float[] { 1, 2, 3, 4 })).ToArray()); + hashSet[16] = new HashEntry("DoubleVector", MemoryMarshal.AsBytes(new ReadOnlySpan(new double[] { 5, 6, 7, 8 })).ToArray()); + return hashSet; + } + + private static readonly Dictionary s_storagePropertyNames = new() + { + ["StringData"] = "storage_string_data", + ["IntData"] = "IntData", + ["UIntData"] = "UIntData", + ["LongData"] = "LongData", + ["ULongData"] = "ULongData", + ["DoubleData"] = "DoubleData", + ["FloatData"] = "FloatData", + ["BoolData"] = "BoolData", + ["NullableIntData"] = "NullableIntData", + ["NullableUIntData"] = "NullableUIntData", + ["NullableLongData"] = "NullableLongData", + ["NullableULongData"] = "NullableULongData", + ["NullableDoubleData"] = "NullableDoubleData", + ["NullableFloatData"] = "NullableFloatData", + ["NullableBoolData"] = "NullableBoolData", + ["FloatVector"] = "FloatVector", + ["DoubleVector"] = "DoubleVector", + }; + + private static readonly VectorStoreRecordDefinition s_vectorStoreRecordDefinition = new() + { + Properties = new List() + { + new VectorStoreRecordKeyProperty("Key", typeof(string)), + new VectorStoreRecordDataProperty("StringData", typeof(string)), + new 
VectorStoreRecordDataProperty("IntData", typeof(int)), + new VectorStoreRecordDataProperty("UIntData", typeof(uint)), + new VectorStoreRecordDataProperty("LongData", typeof(long)), + new VectorStoreRecordDataProperty("ULongData", typeof(ulong)), + new VectorStoreRecordDataProperty("DoubleData", typeof(double)), + new VectorStoreRecordDataProperty("FloatData", typeof(float)), + new VectorStoreRecordDataProperty("BoolData", typeof(bool)), + new VectorStoreRecordDataProperty("NullableIntData", typeof(int?)), + new VectorStoreRecordDataProperty("NullableUIntData", typeof(uint?)), + new VectorStoreRecordDataProperty("NullableLongData", typeof(long?)), + new VectorStoreRecordDataProperty("NullableULongData", typeof(ulong?)), + new VectorStoreRecordDataProperty("NullableDoubleData", typeof(double?)), + new VectorStoreRecordDataProperty("NullableFloatData", typeof(float?)), + new VectorStoreRecordDataProperty("NullableBoolData", typeof(bool?)), + new VectorStoreRecordVectorProperty("FloatVector", typeof(float)), + new VectorStoreRecordVectorProperty("DoubleVector", typeof(double)), + } + }; + + private sealed class AllTypesModel + { + [VectorStoreRecordKey] + public string Key { get; set; } = string.Empty; + + [VectorStoreRecordData] + public string StringData { get; set; } = string.Empty; + + [VectorStoreRecordData] + public int IntData { get; set; } + + [VectorStoreRecordData] + public uint UIntData { get; set; } + + [VectorStoreRecordData] + public long LongData { get; set; } + + [VectorStoreRecordData] + public ulong ULongData { get; set; } + + [VectorStoreRecordData] + public double DoubleData { get; set; } + + [VectorStoreRecordData] + public float FloatData { get; set; } + + [VectorStoreRecordData] + public bool BoolData { get; set; } + + [VectorStoreRecordData] + public int? NullableIntData { get; set; } + + [VectorStoreRecordData] + public uint? NullableUIntData { get; set; } + + [VectorStoreRecordData] + public long? NullableLongData { get; set; } + + [VectorStoreRecordData] + public ulong? NullableULongData { get; set; } + + [VectorStoreRecordData] + public double? NullableDoubleData { get; set; } + + [VectorStoreRecordData] + public float? NullableFloatData { get; set; } + + [VectorStoreRecordData] + public bool? NullableBoolData { get; set; } + + [VectorStoreRecordVector] + public ReadOnlyMemory? FloatVector { get; set; } + + [VectorStoreRecordVector] + public ReadOnlyMemory? DoubleVector { get; set; } + + public string NotAnnotated { get; set; } = string.Empty; + } +} diff --git a/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisJsonVectorStoreRecordCollectionTests.cs b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisJsonVectorStoreRecordCollectionTests.cs new file mode 100644 index 000000000000..58cda992db4d --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisJsonVectorStoreRecordCollectionTests.cs @@ -0,0 +1,568 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text.Json; +using System.Text.Json.Nodes; +using System.Text.Json.Serialization; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Data; +using Moq; +using NRedisStack; +using StackExchange.Redis; +using Xunit; + +namespace Microsoft.SemanticKernel.Connectors.Redis.UnitTests; + +/// +/// Contains tests for the class. 
+/// +public class RedisJsonVectorStoreRecordCollectionTests +{ + private const string TestCollectionName = "testcollection"; + private const string TestRecordKey1 = "testid1"; + private const string TestRecordKey2 = "testid2"; + + private readonly Mock _redisDatabaseMock; + + public RedisJsonVectorStoreRecordCollectionTests() + { + this._redisDatabaseMock = new Mock(MockBehavior.Strict); + + var batchMock = new Mock(); + this._redisDatabaseMock.Setup(x => x.CreateBatch(It.IsAny())).Returns(batchMock.Object); + } + + [Theory] + [InlineData(TestCollectionName, true)] + [InlineData("nonexistentcollection", false)] + public async Task CollectionExistsReturnsCollectionStateAsync(string collectionName, bool expectedExists) + { + // Arrange + if (expectedExists) + { + SetupExecuteMock(this._redisDatabaseMock, ["index_name", collectionName]); + } + else + { + SetupExecuteMock(this._redisDatabaseMock, new RedisServerException("Unknown index name")); + } + var sut = new RedisJsonVectorStoreRecordCollection( + this._redisDatabaseMock.Object, + collectionName); + + // Act + var actual = await sut.CollectionExistsAsync(); + + // Assert + var expectedArgs = new object[] { collectionName }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "FT.INFO", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + Assert.Equal(expectedExists, actual); + } + + [Theory] + [InlineData(true, true, "data2", "vector2")] + [InlineData(true, false, "Data2", "Vector2")] + [InlineData(false, true, "data2", "vector2")] + [InlineData(false, false, "Data2", "Vector2")] + public async Task CanCreateCollectionAsync(bool useDefinition, bool useCustomJsonSerializerOptions, string expectedData2Name, string expectedVector2Name) + { + // Arrange. + SetupExecuteMock(this._redisDatabaseMock, string.Empty); + var sut = this.CreateRecordCollection(useDefinition, useCustomJsonSerializerOptions); + + // Act. + await sut.CreateCollectionAsync(); + + // Assert. 
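+        // Note (illustrative, not part of the original change): for the JSON storage type the index
+        // is declared with "ON JSON", and each schema entry pairs a "$.<json name>" path with an
+        // alias, e.g. "$.data1_json_name" AS "data1_json_name", which is why the serializer-resolved
+        // names (data2 vs Data2) flow straight into the FT.CREATE arguments verified below.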
+ var expectedArgs = new object[] { + "testcollection", + "ON", + "JSON", + "PREFIX", + 1, + "testcollection:", + "SCHEMA", + "$.data1_json_name", + "AS", + "data1_json_name", + "TAG", + $"$.{expectedData2Name}", + "AS", + expectedData2Name, + "TAG", + "$.vector1_json_name", + "AS", + "vector1_json_name", + "VECTOR", + "HNSW", + 6, + "TYPE", + "FLOAT32", + "DIM", + "4", + "DISTANCE_METRIC", + "COSINE", + $"$.{expectedVector2Name}", + "AS", + expectedVector2Name, + "VECTOR", + "HNSW", + 6, + "TYPE", + "FLOAT32", + "DIM", + "4", + "DISTANCE_METRIC", + "COSINE" }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "FT.CREATE", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + } + + [Fact] + public async Task CanDeleteCollectionAsync() + { + // Arrange + SetupExecuteMock(this._redisDatabaseMock, string.Empty); + var sut = this.CreateRecordCollection(false); + + // Act + await sut.DeleteCollectionAsync(); + + // Assert + var expectedArgs = new object[] { TestCollectionName }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "FT.DROPINDEX", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + } + + [Theory] + [InlineData(true, true, """{ "data1_json_name": "data 1", "data2": "data 2", "vector1_json_name": [1, 2, 3, 4], "vector2": [1, 2, 3, 4] }""")] + [InlineData(true, false, """{ "data1_json_name": "data 1", "Data2": "data 2", "vector1_json_name": [1, 2, 3, 4], "Vector2": [1, 2, 3, 4] }""")] + [InlineData(false, true, """{ "data1_json_name": "data 1", "data2": "data 2", "vector1_json_name": [1, 2, 3, 4], "vector2": [1, 2, 3, 4] }""")] + [InlineData(false, false, """{ "data1_json_name": "data 1", "Data2": "data 2", "vector1_json_name": [1, 2, 3, 4], "Vector2": [1, 2, 3, 4] }""")] + public async Task CanGetRecordWithVectorsAsync(bool useDefinition, bool useCustomJsonSerializerOptions, string redisResultString) + { + // Arrange + SetupExecuteMock(this._redisDatabaseMock, redisResultString); + var sut = this.CreateRecordCollection(useDefinition, useCustomJsonSerializerOptions); + + // Act + var actual = await sut.GetAsync( + TestRecordKey1, + new() { IncludeVectors = true }); + + // Assert + var expectedArgs = new object[] { TestRecordKey1 }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "JSON.GET", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + + Assert.NotNull(actual); + Assert.Equal(TestRecordKey1, actual.Key); + Assert.Equal("data 1", actual.Data1); + Assert.Equal("data 2", actual.Data2); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector1!.Value.ToArray()); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector2!.Value.ToArray()); + } + + [Theory] + [InlineData(true, true, """{ "data1_json_name": "data 1", "data2": "data 2" }""", "data2")] + [InlineData(true, false, """{ "data1_json_name": "data 1", "Data2": "data 2" }""", "Data2")] + [InlineData(false, true, """{ "data1_json_name": "data 1", "data2": "data 2" }""", "data2")] + [InlineData(false, false, """{ "data1_json_name": "data 1", "Data2": "data 2" }""", "Data2")] + public async Task CanGetRecordWithoutVectorsAsync(bool useDefinition, bool useCustomJsonSerializerOptions, string redisResultString, string expectedData2Name) + { + // Arrange + SetupExecuteMock(this._redisDatabaseMock, redisResultString); + var sut = this.CreateRecordCollection(useDefinition, useCustomJsonSerializerOptions); + + // Act + var actual = await sut.GetAsync( + TestRecordKey1, + new() { IncludeVectors = false }); + + // Assert + var expectedArgs = new object[] { TestRecordKey1, 
"data1_json_name", expectedData2Name }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "JSON.GET", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + + Assert.NotNull(actual); + Assert.Equal(TestRecordKey1, actual.Key); + Assert.Equal("data 1", actual.Data1); + Assert.Equal("data 2", actual.Data2); + Assert.False(actual.Vector1.HasValue); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanGetManyRecordsWithVectorsAsync(bool useDefinition) + { + // Arrange + var redisResultString1 = """{ "data1_json_name": "data 1", "Data2": "data 2", "vector1_json_name": [1, 2, 3, 4], "Vector2": [1, 2, 3, 4] }"""; + var redisResultString2 = """{ "data1_json_name": "data 1", "Data2": "data 2", "vector1_json_name": [5, 6, 7, 8], "Vector2": [1, 2, 3, 4] }"""; + SetupExecuteMock(this._redisDatabaseMock, [redisResultString1, redisResultString2]); + var sut = this.CreateRecordCollection(useDefinition); + + // Act + var actual = await sut.GetBatchAsync( + [TestRecordKey1, TestRecordKey2], + new() { IncludeVectors = true }).ToListAsync(); + + // Assert + var expectedArgs = new object[] { TestRecordKey1, TestRecordKey2, "$" }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "JSON.MGET", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + + Assert.NotNull(actual); + Assert.Equal(2, actual.Count); + Assert.Equal(TestRecordKey1, actual[0].Key); + Assert.Equal("data 1", actual[0].Data1); + Assert.Equal("data 2", actual[0].Data2); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual[0].Vector1!.Value.ToArray()); + Assert.Equal(TestRecordKey2, actual[1].Key); + Assert.Equal("data 1", actual[1].Data1); + Assert.Equal("data 2", actual[1].Data2); + Assert.Equal(new float[] { 5, 6, 7, 8 }, actual[1].Vector1!.Value.ToArray()); + } + + [Fact] + public async Task CanGetRecordWithCustomMapperAsync() + { + // Arrange. + var redisResultString = """{ "data1_json_name": "data 1", "Data2": "data 2", "vector1_json_name": [1, 2, 3, 4], "Vector2": [1, 2, 3, 4] }"""; + SetupExecuteMock(this._redisDatabaseMock, redisResultString); + + // Arrange mapper mock from JsonNode to data model. + var mapperMock = new Mock>(MockBehavior.Strict); + mapperMock.Setup( + x => x.MapFromStorageToDataModel( + It.IsAny<(string key, JsonNode node)>(), + It.IsAny())) + .Returns(CreateModel(TestRecordKey1, true)); + + // Arrange target with custom mapper. 
+ var sut = new RedisJsonVectorStoreRecordCollection( + this._redisDatabaseMock.Object, + TestCollectionName, + new() + { + JsonNodeCustomMapper = mapperMock.Object + }); + + // Act + var actual = await sut.GetAsync( + TestRecordKey1, + new() { IncludeVectors = true }); + + // Assert + Assert.NotNull(actual); + Assert.Equal(TestRecordKey1, actual.Key); + Assert.Equal("data 1", actual.Data1); + Assert.Equal("data 2", actual.Data2); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector1!.Value.ToArray()); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector2!.Value.ToArray()); + + mapperMock + .Verify( + x => x.MapFromStorageToDataModel( + It.Is<(string key, JsonNode node)>(x => x.key == TestRecordKey1), + It.Is(x => x.IncludeVectors)), + Times.Once); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanDeleteRecordAsync(bool useDefinition) + { + // Arrange + SetupExecuteMock(this._redisDatabaseMock, "200"); + var sut = this.CreateRecordCollection(useDefinition); + + // Act + await sut.DeleteAsync(TestRecordKey1); + + // Assert + var expectedArgs = new object[] { TestRecordKey1 }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "JSON.DEL", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanDeleteManyRecordsWithVectorsAsync(bool useDefinition) + { + // Arrange + SetupExecuteMock(this._redisDatabaseMock, "200"); + var sut = this.CreateRecordCollection(useDefinition); + + // Act + await sut.DeleteBatchAsync([TestRecordKey1, TestRecordKey2]); + + // Assert + var expectedArgs1 = new object[] { TestRecordKey1 }; + var expectedArgs2 = new object[] { TestRecordKey2 }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "JSON.DEL", + It.Is(x => x.SequenceEqual(expectedArgs1))), + Times.Once); + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "JSON.DEL", + It.Is(x => x.SequenceEqual(expectedArgs2))), + Times.Once); + } + + [Theory] + [InlineData(true, true, """{"data1_json_name":"data 1","data2":"data 2","vector1_json_name":[1,2,3,4],"vector2":[1,2,3,4],"notAnnotated":null}""")] + [InlineData(true, false, """{"data1_json_name":"data 1","Data2":"data 2","vector1_json_name":[1,2,3,4],"Vector2":[1,2,3,4],"NotAnnotated":null}""")] + [InlineData(false, true, """{"data1_json_name":"data 1","data2":"data 2","vector1_json_name":[1,2,3,4],"vector2":[1,2,3,4],"notAnnotated":null}""")] + [InlineData(false, false, """{"data1_json_name":"data 1","Data2":"data 2","vector1_json_name":[1,2,3,4],"Vector2":[1,2,3,4],"NotAnnotated":null}""")] + public async Task CanUpsertRecordAsync(bool useDefinition, bool useCustomJsonSerializerOptions, string expectedUpsertedJson) + { + // Arrange + SetupExecuteMock(this._redisDatabaseMock, "OK"); + var sut = this.CreateRecordCollection(useDefinition, useCustomJsonSerializerOptions); + var model = CreateModel(TestRecordKey1, true); + + // Act + await sut.UpsertAsync(model); + + // Assert + // TODO: Fix issue where NotAnnotated is being included in the JSON. 
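+        // Note (illustrative, not part of the original change): the leak flagged in the TODO above is
+        // consistent with System.Text.Json serializing every public property of the model; one
+        // possible fix, assuming the connector keeps using the plain serializer, would be:
+        //
+        //     [System.Text.Json.Serialization.JsonIgnore]
+        //     public string? NotAnnotated { get; set; }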
+ var expectedArgs = new object[] { TestRecordKey1, "$", expectedUpsertedJson }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "JSON.SET", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + } + + [Theory] + [InlineData(true)] + [InlineData(false)] + public async Task CanUpsertManyRecordsAsync(bool useDefinition) + { + // Arrange + SetupExecuteMock(this._redisDatabaseMock, "OK"); + var sut = this.CreateRecordCollection(useDefinition); + + var model1 = CreateModel(TestRecordKey1, true); + var model2 = CreateModel(TestRecordKey2, true); + + // Act + var actual = await sut.UpsertBatchAsync([model1, model2]).ToListAsync(); + + // Assert + Assert.NotNull(actual); + Assert.Equal(2, actual.Count); + Assert.Equal(TestRecordKey1, actual[0]); + Assert.Equal(TestRecordKey2, actual[1]); + + // TODO: Fix issue where NotAnnotated is being included in the JSON. + var expectedArgs = new object[] { TestRecordKey1, "$", """{"data1_json_name":"data 1","Data2":"data 2","vector1_json_name":[1,2,3,4],"Vector2":[1,2,3,4],"NotAnnotated":null}""", TestRecordKey2, "$", """{"data1_json_name":"data 1","Data2":"data 2","vector1_json_name":[1,2,3,4],"Vector2":[1,2,3,4],"NotAnnotated":null}""" }; + this._redisDatabaseMock + .Verify( + x => x.ExecuteAsync( + "JSON.MSET", + It.Is(x => x.SequenceEqual(expectedArgs))), + Times.Once); + } + + [Fact] + public async Task CanUpsertRecordWithCustomMapperAsync() + { + // Arrange. + SetupExecuteMock(this._redisDatabaseMock, "OK"); + + // Arrange mapper mock from data model to JsonNode. + var mapperMock = new Mock>(MockBehavior.Strict); + var jsonNode = """{"data1_json_name":"data 1","Data2": "data 2","vector1_json_name":[1,2,3,4],"Vector2":[1,2,3,4],"NotAnnotated":null}"""; + mapperMock + .Setup(x => x.MapFromDataToStorageModel(It.IsAny())) + .Returns((TestRecordKey1, JsonNode.Parse(jsonNode)!)); + + // Arrange target with custom mapper. + var sut = new RedisJsonVectorStoreRecordCollection( + this._redisDatabaseMock.Object, + TestCollectionName, + new() + { + JsonNodeCustomMapper = mapperMock.Object + }); + + var model = CreateModel(TestRecordKey1, true); + + // Act + await sut.UpsertAsync(model); + + // Assert + mapperMock + .Verify( + x => x.MapFromDataToStorageModel(It.Is(x => x == model)), + Times.Once); + } + + /// + /// Tests that the collection can be created even if the definition and the type do not match. + /// In this case, the expectation is that a custom mapper will be provided to map between the + /// schema as defined by the definition and the different data model. + /// + [Fact] + public void CanCreateCollectionWithMismatchedDefinitionAndType() + { + // Arrange. + var definition = new VectorStoreRecordDefinition() + { + Properties = new List + { + new VectorStoreRecordKeyProperty("Id", typeof(string)), + new VectorStoreRecordDataProperty("Text", typeof(string)), + new VectorStoreRecordVectorProperty("Embedding", typeof(ReadOnlyMemory)) { Dimensions = 4 }, + } + }; + + // Act. + var sut = new RedisJsonVectorStoreRecordCollection( + this._redisDatabaseMock.Object, + TestCollectionName, + new() { VectorStoreRecordDefinition = definition, JsonNodeCustomMapper = Mock.Of>() }); + } + + private RedisJsonVectorStoreRecordCollection CreateRecordCollection(bool useDefinition, bool useCustomJsonSerializerOptions = false) + { + return new RedisJsonVectorStoreRecordCollection( + this._redisDatabaseMock.Object, + TestCollectionName, + new() + { + PrefixCollectionNameToKeyNames = false, + VectorStoreRecordDefinition = useDefinition ? 
this._multiPropsDefinition : null, + JsonSerializerOptions = useCustomJsonSerializerOptions ? this._customJsonSerializerOptions : null + }); + } + + private static void SetupExecuteMock(Mock redisDatabaseMock, Exception exception) + { + redisDatabaseMock + .Setup( + x => x.ExecuteAsync( + It.IsAny(), + It.IsAny())) + .ThrowsAsync(exception); + } + + private static void SetupExecuteMock(Mock redisDatabaseMock, IEnumerable redisResultStrings) + { + var results = redisResultStrings + .Select(x => RedisResult.Create(new RedisValue(x))) + .ToArray(); + redisDatabaseMock + .Setup( + x => x.ExecuteAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(RedisResult.Create(results)); + } + + private static void SetupExecuteMock(Mock redisDatabaseMock, string redisResultString) + { + redisDatabaseMock + .Setup( + x => x.ExecuteAsync( + It.IsAny(), + It.IsAny())) + .Callback((string command, object[] args) => + { + Console.WriteLine(args); + }) + .ReturnsAsync(RedisResult.Create(new RedisValue(redisResultString))); + } + + private static MultiPropsModel CreateModel(string key, bool withVectors) + { + return new MultiPropsModel + { + Key = key, + Data1 = "data 1", + Data2 = "data 2", + Vector1 = withVectors ? new float[] { 1, 2, 3, 4 } : null, + Vector2 = withVectors ? new float[] { 1, 2, 3, 4 } : null, + NotAnnotated = null, + }; + } + + private readonly JsonSerializerOptions _customJsonSerializerOptions = new() + { + PropertyNamingPolicy = JsonNamingPolicy.CamelCase + }; + + private readonly VectorStoreRecordDefinition _multiPropsDefinition = new() + { + Properties = + [ + new VectorStoreRecordKeyProperty("Key", typeof(string)), + new VectorStoreRecordDataProperty("Data1", typeof(string)) { IsFilterable = true, StoragePropertyName = "ignored_data1_storage_name" }, + new VectorStoreRecordDataProperty("Data2", typeof(string)) { IsFilterable = true }, + new VectorStoreRecordVectorProperty("Vector1", typeof(ReadOnlyMemory)) { Dimensions = 4, StoragePropertyName = "ignored_vector1_storage_name" }, + new VectorStoreRecordVectorProperty("Vector2", typeof(ReadOnlyMemory)) { Dimensions = 4 } + ] + }; + + public sealed class MultiPropsModel + { + [VectorStoreRecordKey] + public string Key { get; set; } = string.Empty; + + [JsonPropertyName("data1_json_name")] + [VectorStoreRecordData(IsFilterable = true, StoragePropertyName = "ignored_data1_storage_name")] + public string Data1 { get; set; } = string.Empty; + + [VectorStoreRecordData(IsFilterable = true)] + public string Data2 { get; set; } = string.Empty; + + [JsonPropertyName("vector1_json_name")] + [VectorStoreRecordVector(4, StoragePropertyName = "ignored_vector1_storage_name")] + public ReadOnlyMemory? Vector1 { get; set; } + + [VectorStoreRecordVector(4)] + public ReadOnlyMemory? Vector2 { get; set; } + + public string? NotAnnotated { get; set; } + } +} diff --git a/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisJsonVectorStoreRecordMapperTests.cs b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisJsonVectorStoreRecordMapperTests.cs new file mode 100644 index 000000000000..a7ae97c06355 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisJsonVectorStoreRecordMapperTests.cs @@ -0,0 +1,134 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System; +using System.Linq; +using System.Text.Json; +using System.Text.Json.Nodes; +using Microsoft.SemanticKernel.Connectors.Redis; +using Microsoft.SemanticKernel.Data; +using Xunit; + +namespace SemanticKernel.Connectors.Redis.UnitTests; + +/// +/// Contains tests for the class. +/// +public sealed class RedisJsonVectorStoreRecordMapperTests +{ + [Fact] + public void MapsAllFieldsFromDataToStorageModel() + { + // Arrange. + var sut = new RedisJsonVectorStoreRecordMapper("Key", JsonSerializerOptions.Default); + + // Act. + var actual = sut.MapFromDataToStorageModel(CreateModel("test key")); + + // Assert. + Assert.NotNull(actual.Node); + Assert.Equal("test key", actual.Key); + var jsonObject = actual.Node.AsObject(); + Assert.Equal("data 1", jsonObject?["Data1"]?.ToString()); + Assert.Equal("data 2", jsonObject?["Data2"]?.ToString()); + Assert.Equal(new float[] { 1, 2, 3, 4 }, jsonObject?["Vector1"]?.AsArray().GetValues().ToArray()); + Assert.Equal(new float[] { 5, 6, 7, 8 }, jsonObject?["Vector2"]?.AsArray().GetValues().ToArray()); + } + + [Fact] + public void MapsAllFieldsFromDataToStorageModelWithCustomSerializerOptions() + { + // Arrange. + var sut = new RedisJsonVectorStoreRecordMapper("key", new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); + + // Act. + var actual = sut.MapFromDataToStorageModel(CreateModel("test key")); + + // Assert. + Assert.NotNull(actual.Node); + Assert.Equal("test key", actual.Key); + var jsonObject = actual.Node.AsObject(); + Assert.Equal("data 1", jsonObject?["data1"]?.ToString()); + Assert.Equal("data 2", jsonObject?["data2"]?.ToString()); + Assert.Equal(new float[] { 1, 2, 3, 4 }, jsonObject?["vector1"]?.AsArray().GetValues().ToArray()); + Assert.Equal(new float[] { 5, 6, 7, 8 }, jsonObject?["vector2"]?.AsArray().GetValues().ToArray()); + } + + [Fact] + public void MapsAllFieldsFromStorageToDataModel() + { + // Arrange. + var sut = new RedisJsonVectorStoreRecordMapper("Key", JsonSerializerOptions.Default); + + // Act. + var jsonObject = new JsonObject(); + jsonObject.Add("Data1", "data 1"); + jsonObject.Add("Data2", "data 2"); + jsonObject.Add("Vector1", new JsonArray(new[] { 1, 2, 3, 4 }.Select(x => JsonValue.Create(x)).ToArray())); + jsonObject.Add("Vector2", new JsonArray(new[] { 5, 6, 7, 8 }.Select(x => JsonValue.Create(x)).ToArray())); + var actual = sut.MapFromStorageToDataModel(("test key", jsonObject), new()); + + // Assert. + Assert.NotNull(actual); + Assert.Equal("test key", actual.Key); + Assert.Equal("data 1", actual.Data1); + Assert.Equal("data 2", actual.Data2); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector1!.Value.ToArray()); + Assert.Equal(new float[] { 5, 6, 7, 8 }, actual.Vector2!.Value.ToArray()); + } + + [Fact] + public void MapsAllFieldsFromStorageToDataModelWithCustomSerializerOptions() + { + // Arrange. + var sut = new RedisJsonVectorStoreRecordMapper("key", new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.CamelCase }); + + // Act. + var jsonObject = new JsonObject(); + jsonObject.Add("data1", "data 1"); + jsonObject.Add("data2", "data 2"); + jsonObject.Add("vector1", new JsonArray(new[] { 1, 2, 3, 4 }.Select(x => JsonValue.Create(x)).ToArray())); + jsonObject.Add("vector2", new JsonArray(new[] { 5, 6, 7, 8 }.Select(x => JsonValue.Create(x)).ToArray())); + var actual = sut.MapFromStorageToDataModel(("test key", jsonObject), new()); + + // Assert. 
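+        // Note (illustrative, not part of the original change): with JsonNamingPolicy.CamelCase the
+        // mapper is expected to resolve node names through the serializer options, so "Data1" reads
+        // back from the camel-cased "data1" node without any per-property attribute.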
+ Assert.NotNull(actual); + Assert.Equal("test key", actual.Key); + Assert.Equal("data 1", actual.Data1); + Assert.Equal("data 2", actual.Data2); + Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector1!.Value.ToArray()); + Assert.Equal(new float[] { 5, 6, 7, 8 }, actual.Vector2!.Value.ToArray()); + } + + private static MultiPropsModel CreateModel(string key) + { + return new MultiPropsModel + { + Key = key, + Data1 = "data 1", + Data2 = "data 2", + Vector1 = new float[] { 1, 2, 3, 4 }, + Vector2 = new float[] { 5, 6, 7, 8 }, + NotAnnotated = "notAnnotated", + }; + } + + private sealed class MultiPropsModel + { + [VectorStoreRecordKey] + public string Key { get; set; } = string.Empty; + + [VectorStoreRecordData] + public string Data1 { get; set; } = string.Empty; + + [VectorStoreRecordData] + public string Data2 { get; set; } = string.Empty; + + [VectorStoreRecordVector] + public ReadOnlyMemory? Vector1 { get; set; } + + [VectorStoreRecordVector] + public ReadOnlyMemory? Vector2 { get; set; } + + public string NotAnnotated { get; set; } = string.Empty; + } +} diff --git a/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisKernelBuilderExtensionsTests.cs b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisKernelBuilderExtensionsTests.cs new file mode 100644 index 000000000000..dcb8383b1525 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisKernelBuilderExtensionsTests.cs @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.Redis; +using Microsoft.SemanticKernel.Data; +using Moq; +using StackExchange.Redis; +using Xunit; + +namespace SemanticKernel.Connectors.Redis.UnitTests; + +/// +/// Tests for the class. +/// +public class RedisKernelBuilderExtensionsTests +{ + private readonly IKernelBuilder _kernelBuilder; + + public RedisKernelBuilderExtensionsTests() + { + this._kernelBuilder = Kernel.CreateBuilder(); + } + + [Fact] + public void AddVectorStoreRegistersClass() + { + // Arrange. + this._kernelBuilder.Services.AddSingleton(Mock.Of()); + + // Act. + this._kernelBuilder.AddRedisVectorStore(); + + // Assert. + this.AssertVectorStoreCreated(); + } + + private void AssertVectorStoreCreated() + { + var kernel = this._kernelBuilder.Build(); + var vectorStore = kernel.Services.GetRequiredService(); + Assert.NotNull(vectorStore); + Assert.IsType(vectorStore); + } +} diff --git a/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisServiceCollectionExtensionsTests.cs b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisServiceCollectionExtensionsTests.cs new file mode 100644 index 000000000000..9bbe566f9c66 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisServiceCollectionExtensionsTests.cs @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.Redis; +using Microsoft.SemanticKernel.Data; +using Moq; +using StackExchange.Redis; +using Xunit; + +namespace SemanticKernel.Connectors.Redis.UnitTests; + +/// +/// Tests for the class. +/// +public class RedisServiceCollectionExtensionsTests +{ + private readonly IServiceCollection _serviceCollection; + + public RedisServiceCollectionExtensionsTests() + { + this._serviceCollection = new ServiceCollection(); + } + + [Fact] + public void AddVectorStoreRegistersClass() + { + // Arrange. 
+ this._serviceCollection.AddSingleton(Mock.Of()); + + // Act. + this._serviceCollection.AddRedisVectorStore(); + + // Assert. + this.AssertVectorStoreCreated(); + } + + private void AssertVectorStoreCreated() + { + var serviceProvider = this._serviceCollection.BuildServiceProvider(); + var vectorStore = serviceProvider.GetRequiredService(); + Assert.NotNull(vectorStore); + Assert.IsType(vectorStore); + } +} diff --git a/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisVectorStoreCollectionCreateMappingTests.cs b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisVectorStoreCollectionCreateMappingTests.cs new file mode 100644 index 000000000000..c5bb3b12b2c5 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisVectorStoreCollectionCreateMappingTests.cs @@ -0,0 +1,127 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using Microsoft.SemanticKernel.Data; +using NRedisStack.Search; +using Xunit; +using static NRedisStack.Search.Schema; + +namespace Microsoft.SemanticKernel.Connectors.Redis.UnitTests; + +/// +/// Contains tests for the class. +/// +public class RedisVectorStoreCollectionCreateMappingTests +{ + [Fact] + public void MapToSchemaCreatesSchema() + { + // Arrange. + var properties = new VectorStoreRecordProperty[] + { + new VectorStoreRecordKeyProperty("Key", typeof(string)), + + new VectorStoreRecordDataProperty("FilterableString", typeof(string)) { IsFilterable = true }, + new VectorStoreRecordDataProperty("FullTextSearchableString", typeof(string)) { IsFullTextSearchable = true }, + new VectorStoreRecordDataProperty("FilterableStringEnumerable", typeof(string[])) { IsFilterable = true }, + new VectorStoreRecordDataProperty("FullTextSearchableStringEnumerable", typeof(string[])) { IsFullTextSearchable = true }, + + new VectorStoreRecordDataProperty("FilterableInt", typeof(int)) { IsFilterable = true }, + new VectorStoreRecordDataProperty("FilterableNullableInt", typeof(int)) { IsFilterable = true }, + + new VectorStoreRecordDataProperty("NonFilterableString", typeof(string)), + + new VectorStoreRecordVectorProperty("VectorDefaultIndexingOptions", typeof(ReadOnlyMemory)) { Dimensions = 10 }, + new VectorStoreRecordVectorProperty("VectorSpecificIndexingOptions", typeof(ReadOnlyMemory)) { Dimensions = 20, IndexKind = IndexKind.Flat, DistanceFunction = DistanceFunction.EuclideanDistance }, + }; + + var storagePropertyNames = new Dictionary() + { + { "FilterableString", "FilterableString" }, + { "FullTextSearchableString", "FullTextSearchableString" }, + { "FilterableStringEnumerable", "FilterableStringEnumerable" }, + { "FullTextSearchableStringEnumerable", "FullTextSearchableStringEnumerable" }, + { "FilterableInt", "FilterableInt" }, + { "FilterableNullableInt", "FilterableNullableInt" }, + { "NonFilterableString", "NonFilterableString" }, + { "VectorDefaultIndexingOptions", "VectorDefaultIndexingOptions" }, + { "VectorSpecificIndexingOptions", "vector_specific_indexing_options" }, + }; + + // Act. + var schema = RedisVectorStoreCollectionCreateMapping.MapToSchema(properties, storagePropertyNames); + + // Assert. 
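+        // Note (illustrative, not part of the original change): the expectations below encode the
+        // "$.<storage name>" AS "<storage name>" aliasing convention, with enumerable TAG fields
+        // selecting all elements via a trailing ".*" on the JSON path.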
+ Assert.NotNull(schema); + Assert.Equal(8, schema.Fields.Count); + + Assert.IsType(schema.Fields[0]); + Assert.IsType(schema.Fields[1]); + Assert.IsType(schema.Fields[2]); + Assert.IsType(schema.Fields[3]); + Assert.IsType(schema.Fields[4]); + Assert.IsType(schema.Fields[5]); + Assert.IsType(schema.Fields[6]); + Assert.IsType(schema.Fields[7]); + + VerifyFieldName(schema.Fields[0].FieldName, new List { "$.FilterableString", "AS", "FilterableString" }); + VerifyFieldName(schema.Fields[1].FieldName, new List { "$.FullTextSearchableString", "AS", "FullTextSearchableString" }); + VerifyFieldName(schema.Fields[2].FieldName, new List { "$.FilterableStringEnumerable.*", "AS", "FilterableStringEnumerable" }); + VerifyFieldName(schema.Fields[3].FieldName, new List { "$.FullTextSearchableStringEnumerable", "AS", "FullTextSearchableStringEnumerable" }); + + VerifyFieldName(schema.Fields[4].FieldName, new List { "$.FilterableInt", "AS", "FilterableInt" }); + VerifyFieldName(schema.Fields[5].FieldName, new List { "$.FilterableNullableInt", "AS", "FilterableNullableInt" }); + + VerifyFieldName(schema.Fields[6].FieldName, new List { "$.VectorDefaultIndexingOptions", "AS", "VectorDefaultIndexingOptions" }); + VerifyFieldName(schema.Fields[7].FieldName, new List { "$.vector_specific_indexing_options", "AS", "vector_specific_indexing_options" }); + + Assert.Equal("10", ((VectorField)schema.Fields[6]).Attributes!["DIM"]); + Assert.Equal("FLOAT32", ((VectorField)schema.Fields[6]).Attributes!["TYPE"]); + Assert.Equal("COSINE", ((VectorField)schema.Fields[6]).Attributes!["DISTANCE_METRIC"]); + + Assert.Equal("20", ((VectorField)schema.Fields[7]).Attributes!["DIM"]); + Assert.Equal("FLOAT32", ((VectorField)schema.Fields[7]).Attributes!["TYPE"]); + Assert.Equal("L2", ((VectorField)schema.Fields[7]).Attributes!["DISTANCE_METRIC"]); + } + + [Theory] + [InlineData(null)] + [InlineData(0)] + public void MapToSchemaThrowsOnInvalidVectorDimensions(int? dimensions) + { + // Arrange. + var properties = new VectorStoreRecordProperty[] { new VectorStoreRecordVectorProperty("VectorProperty", typeof(ReadOnlyMemory)) { Dimensions = dimensions } }; + var storagePropertyNames = new Dictionary() { { "VectorProperty", "VectorProperty" } }; + + // Act and assert. + Assert.Throws(() => RedisVectorStoreCollectionCreateMapping.MapToSchema(properties, storagePropertyNames)); + } + + [Fact] + public void GetSDKIndexKindThrowsOnUnsupportedIndexKind() + { + // Arrange. + var vectorProperty = new VectorStoreRecordVectorProperty("VectorProperty", typeof(ReadOnlyMemory)) { IndexKind = "Unsupported" }; + + // Act and assert. + Assert.Throws(() => RedisVectorStoreCollectionCreateMapping.GetSDKIndexKind(vectorProperty)); + } + + [Fact] + public void GetSDKDistanceAlgorithmThrowsOnUnsupportedDistanceFunction() + { + // Arrange. + var vectorProperty = new VectorStoreRecordVectorProperty("VectorProperty", typeof(ReadOnlyMemory)) { DistanceFunction = "Unsupported" }; + + // Act and assert. 
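+        // Note (illustrative, not part of the original change): only distance functions with a
+        // RediSearch equivalent can be mapped (the create-schema test above pins COSINE as the
+        // default and EuclideanDistance to L2), so an unrecognized name is expected to throw.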
+ Assert.Throws(() => RedisVectorStoreCollectionCreateMapping.GetSDKDistanceAlgorithm(vectorProperty)); + } + + private static void VerifyFieldName(FieldName fieldName, List expected) + { + var args = new List(); + fieldName.AddCommandArguments(args); + Assert.Equal(expected, args); + } +} diff --git a/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisVectorStoreTests.cs b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisVectorStoreTests.cs new file mode 100644 index 000000000000..28f8f6cc5bcb --- /dev/null +++ b/dotnet/src/Connectors/Connectors.Redis.UnitTests/RedisVectorStoreTests.cs @@ -0,0 +1,124 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Data; +using Moq; +using StackExchange.Redis; +using Xunit; + +namespace Microsoft.SemanticKernel.Connectors.Redis.UnitTests; + +/// +/// Contains tests for the class. +/// +public class RedisVectorStoreTests +{ + private const string TestCollectionName = "testcollection"; + + private readonly Mock _redisDatabaseMock; + + public RedisVectorStoreTests() + { + this._redisDatabaseMock = new Mock(MockBehavior.Strict); + + var batchMock = new Mock(); + this._redisDatabaseMock.Setup(x => x.CreateBatch(It.IsAny())).Returns(batchMock.Object); + } + + [Fact] + public void GetCollectionReturnsJsonCollection() + { + // Arrange. + var sut = new RedisVectorStore(this._redisDatabaseMock.Object); + + // Act. + var actual = sut.GetCollection>(TestCollectionName); + + // Assert. + Assert.NotNull(actual); + Assert.IsType>>(actual); + } + + [Fact] + public void GetCollectionReturnsHashSetCollection() + { + // Arrange. + var sut = new RedisVectorStore(this._redisDatabaseMock.Object, new() { StorageType = RedisStorageType.HashSet }); + + // Act. + var actual = sut.GetCollection>(TestCollectionName); + + // Assert. + Assert.NotNull(actual); + Assert.IsType>>(actual); + } + + [Fact] + public void GetCollectionCallsFactoryIfProvided() + { + // Arrange. + var factoryMock = new Mock(MockBehavior.Strict); + var collectionMock = new Mock>>(MockBehavior.Strict); + factoryMock + .Setup(x => x.CreateVectorStoreRecordCollection>(It.IsAny(), TestCollectionName, null)) + .Returns(collectionMock.Object); + var sut = new RedisVectorStore(this._redisDatabaseMock.Object, new() { VectorStoreCollectionFactory = factoryMock.Object }); + + // Act. + var actual = sut.GetCollection>(TestCollectionName); + + // Assert. + Assert.Equal(collectionMock.Object, actual); + factoryMock.Verify(x => x.CreateVectorStoreRecordCollection>(It.IsAny(), TestCollectionName, null), Times.Once); + } + + [Fact] + public void GetCollectionThrowsForInvalidKeyType() + { + // Arrange. + var sut = new RedisVectorStore(this._redisDatabaseMock.Object); + + // Act & Assert. + Assert.Throws(() => sut.GetCollection>(TestCollectionName)); + } + + [Fact] + public async Task ListCollectionNamesCallsSDKAsync() + { + // Arrange. + var redisResultStrings = new string[] { "collection1", "collection2" }; + var results = redisResultStrings + .Select(x => RedisResult.Create(new RedisValue(x))) + .ToArray(); + this._redisDatabaseMock + .Setup( + x => x.ExecuteAsync( + It.IsAny(), + It.IsAny())) + .ReturnsAsync(RedisResult.Create(results)); + var sut = new RedisVectorStore(this._redisDatabaseMock.Object); + + // Act. + var collectionNames = sut.ListCollectionNamesAsync(); + + // Assert. 
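+        // Note (illustrative, not part of the original change): the loose ExecuteAsync setup above
+        // accepts whichever listing command the store issues (presumably FT._LIST) and hands back
+        // the two fake index names consumed below.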
+ var collectionNamesList = await collectionNames.ToListAsync(); + Assert.Equal(new[] { "collection1", "collection2" }, collectionNamesList); + } + + public sealed class SinglePropsModel + { + [VectorStoreRecordKey] + public required TKey Key { get; set; } + + [VectorStoreRecordData] + public string Data { get; set; } = string.Empty; + + [VectorStoreRecordVector(4)] + public ReadOnlyMemory? Vector { get; set; } + + public string? NotAnnotated { get; set; } + } +} diff --git a/dotnet/src/Connectors/Connectors.UnitTests/Memory/Pinecone/PineconeKernelBuilderExtensionsTests.cs b/dotnet/src/Connectors/Connectors.UnitTests/Memory/Pinecone/PineconeKernelBuilderExtensionsTests.cs new file mode 100644 index 000000000000..67cd1588e0dd --- /dev/null +++ b/dotnet/src/Connectors/Connectors.UnitTests/Memory/Pinecone/PineconeKernelBuilderExtensionsTests.cs @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.Pinecone; +using Microsoft.SemanticKernel.Data; +using Xunit; +using Sdk = Pinecone; + +namespace SemanticKernel.Connectors.UnitTests.Pinecone; + +/// +/// Tests for the class. +/// +public class PineconeKernelBuilderExtensionsTests +{ + private readonly IKernelBuilder _kernelBuilder; + + public PineconeKernelBuilderExtensionsTests() + { + this._kernelBuilder = Kernel.CreateBuilder(); + } + + [Fact] + public void AddVectorStoreRegistersClass() + { + // Arrange. + using var client = new Sdk.PineconeClient("fake api key"); + this._kernelBuilder.Services.AddSingleton(client); + + // Act. + this._kernelBuilder.AddPineconeVectorStore(); + + // Assert. + this.AssertVectorStoreCreated(); + } + + [Fact] + public void AddVectorStoreWithApiKeyRegistersClass() + { + // Act. + this._kernelBuilder.AddPineconeVectorStore("fake api key"); + + // Assert. + this.AssertVectorStoreCreated(); + } + + private void AssertVectorStoreCreated() + { + var kernel = this._kernelBuilder.Build(); + var vectorStore = kernel.Services.GetRequiredService(); + Assert.NotNull(vectorStore); + Assert.IsType(vectorStore); + } +} diff --git a/dotnet/src/Connectors/Connectors.UnitTests/Memory/Pinecone/PineconeServiceCollectionExtensionsTests.cs b/dotnet/src/Connectors/Connectors.UnitTests/Memory/Pinecone/PineconeServiceCollectionExtensionsTests.cs new file mode 100644 index 000000000000..c58659302b46 --- /dev/null +++ b/dotnet/src/Connectors/Connectors.UnitTests/Memory/Pinecone/PineconeServiceCollectionExtensionsTests.cs @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.Pinecone; +using Microsoft.SemanticKernel.Data; +using Xunit; +using Sdk = Pinecone; + +namespace SemanticKernel.Connectors.UnitTests.Pinecone; + +/// +/// Tests for the class. +/// +public class PineconeServiceCollectionExtensionsTests +{ + private readonly IServiceCollection _serviceCollection; + + public PineconeServiceCollectionExtensionsTests() + { + this._serviceCollection = new ServiceCollection(); + } + + [Fact] + public void AddVectorStoreRegistersClass() + { + // Arrange. + using var client = new Sdk.PineconeClient("fake api key"); + this._serviceCollection.AddSingleton(client); + + // Act. + this._serviceCollection.AddPineconeVectorStore(); + + // Assert. + this.AssertVectorStoreCreated(); + } + + [Fact] + public void AddVectorStoreWithApiKeyRegistersClass() + { + // Act. 
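+        // Note (illustrative, not part of the original change): unlike the overload tested above,
+        // this one takes the API key directly, so the extension is expected to construct and
+        // register its own PineconeClient rather than resolving one from the service collection.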
+ this._serviceCollection.AddPineconeVectorStore("fake api key"); + + // Assert. + this.AssertVectorStoreCreated(); + } + + private void AssertVectorStoreCreated() + { + var serviceProvider = this._serviceCollection.BuildServiceProvider(); + var vectorStore = serviceProvider.GetRequiredService(); + Assert.NotNull(vectorStore); + Assert.IsType(vectorStore); + } +} diff --git a/dotnet/src/Connectors/Connectors.UnitTests/Memory/Pinecone/PineconeVectorStoreRecordCollectionTests.cs b/dotnet/src/Connectors/Connectors.UnitTests/Memory/Pinecone/PineconeVectorStoreRecordCollectionTests.cs new file mode 100644 index 000000000000..d8e10c71491d --- /dev/null +++ b/dotnet/src/Connectors/Connectors.UnitTests/Memory/Pinecone/PineconeVectorStoreRecordCollectionTests.cs @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using Microsoft.SemanticKernel.Connectors.Pinecone; +using Microsoft.SemanticKernel.Data; +using Moq; +using Xunit; +using Sdk = Pinecone; + +namespace SemanticKernel.Connectors.UnitTests.Pinecone; + +/// +/// Contains tests for the class. +/// +public class PineconeVectorStoreRecordCollectionTests +{ + private const string TestCollectionName = "testcollection"; + + /// + /// Tests that the collection can be created even if the definition and the type do not match. + /// In this case, the expectation is that a custom mapper will be provided to map between the + /// schema as defined by the definition and the different data model. + /// + [Fact] + public void CanCreateCollectionWithMismatchedDefinitionAndType() + { + // Arrange. + var definition = new VectorStoreRecordDefinition() + { + Properties = new List + { + new VectorStoreRecordKeyProperty("Id", typeof(string)), + new VectorStoreRecordDataProperty("Text", typeof(string)), + new VectorStoreRecordVectorProperty("Embedding", typeof(ReadOnlyMemory)) { Dimensions = 4 }, + } + }; + using var pineconeClient = new Sdk.PineconeClient("fake api key"); + + // Act. + var sut = new PineconeVectorStoreRecordCollection( + pineconeClient, + TestCollectionName, + new() { VectorStoreRecordDefinition = definition, VectorCustomMapper = Mock.Of>() }); + } + + public sealed class SinglePropsModel + { + public string Key { get; set; } = string.Empty; + + public string OriginalNameData { get; set; } = string.Empty; + + public string Data { get; set; } = string.Empty; + + public ReadOnlyMemory? Vector { get; set; } + } +} diff --git a/dotnet/src/Functions/Functions.OpenApi/Extensions/OpenApiFunctionExecutionParameters.cs b/dotnet/src/Functions/Functions.OpenApi/Extensions/OpenApiFunctionExecutionParameters.cs index 4c17f11d7518..9673411bcbbb 100644 --- a/dotnet/src/Functions/Functions.OpenApi/Extensions/OpenApiFunctionExecutionParameters.cs +++ b/dotnet/src/Functions/Functions.OpenApi/Extensions/OpenApiFunctionExecutionParameters.cs @@ -59,6 +59,17 @@ public class OpenApiFunctionExecutionParameters /// public IList OperationsToExclude { get; set; } + /// + /// A custom HTTP response content reader. It can be useful when the internal reader + /// for a specific content type is either missing, insufficient, or when custom behavior is desired. + /// For instance, the internal reader for "application/json" HTTP content reads the content as a string. + /// This may not be sufficient in cases where the JSON content is large, streamed chunk by chunk, and needs to be accessed + /// as soon as the first chunk is available. 
To handle such cases, a custom reader can be provided to read the content + /// as a stream rather than as a string. + /// If the custom reader is not provided, or the reader returns null, the internal reader is used. + /// + public HttpResponseContentReader? HttpResponseContentReader { get; set; } + /// /// Initializes a new instance of the class. /// diff --git a/dotnet/src/Functions/Functions.OpenApi/Extensions/RestApiOperationExtensions.cs b/dotnet/src/Functions/Functions.OpenApi/Extensions/RestApiOperationExtensions.cs index 09414ee0c339..ea9678b062be 100644 --- a/dotnet/src/Functions/Functions.OpenApi/Extensions/RestApiOperationExtensions.cs +++ b/dotnet/src/Functions/Functions.OpenApi/Extensions/RestApiOperationExtensions.cs @@ -172,14 +172,15 @@ private static List GetParametersFromPayloadMetadata( if (!property.Properties.Any()) { parameters.Add(new RestApiOperationParameter( - parameterName, - property.Type, - property.IsRequired, + name: parameterName, + type: property.Type, + isRequired: property.IsRequired, expand: false, - RestApiOperationParameterLocation.Body, - RestApiOperationParameterStyle.Simple, + location: RestApiOperationParameterLocation.Body, + style: RestApiOperationParameterStyle.Simple, defaultValue: property.DefaultValue, description: property.Description, + format: property.Format, schema: property.Schema)); } diff --git a/dotnet/src/Functions/Functions.OpenApi/HttpResponseContentReader.cs b/dotnet/src/Functions/Functions.OpenApi/HttpResponseContentReader.cs new file mode 100644 index 000000000000..f92b58375c8c --- /dev/null +++ b/dotnet/src/Functions/Functions.OpenApi/HttpResponseContentReader.cs @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Threading; +using System.Threading.Tasks; + +namespace Microsoft.SemanticKernel.Plugins.OpenApi; + +/// +/// Represents a delegate for reading HTTP response content. +/// +/// The context containing HTTP operation details. +/// The cancellation token. +/// The HTTP response content. +public delegate Task HttpResponseContentReader(HttpResponseContentReaderContext context, CancellationToken cancellationToken = default); diff --git a/dotnet/src/Functions/Functions.OpenApi/HttpResponseContentReaderContext.cs b/dotnet/src/Functions/Functions.OpenApi/HttpResponseContentReaderContext.cs new file mode 100644 index 000000000000..077591c4d4be --- /dev/null +++ b/dotnet/src/Functions/Functions.OpenApi/HttpResponseContentReaderContext.cs @@ -0,0 +1,32 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Net.Http; + +namespace Microsoft.SemanticKernel.Plugins.OpenApi; + +/// +/// Represents the context for HTTP response content reader. +/// +public sealed class HttpResponseContentReaderContext +{ + /// + /// Initializes a new instance of the class. + /// + /// HTTP request message. + /// HTTP response message. + internal HttpResponseContentReaderContext(HttpRequestMessage request, HttpResponseMessage response) + { + this.Request = request; + this.Response = response; + } + + /// + /// The HTTP request message. + /// + public HttpRequestMessage Request { get; } + + /// + /// The HTTP response message. 
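Since the delegate and context above are the entire public surface of this feature, a usage sketch may help. The following is a minimal, hypothetical example (the variable name and the media-type check are illustrative, not part of this change) showing how a caller could stream large JSON responses while deferring everything else to the built-in readers:

    using Microsoft.SemanticKernel.Plugins.OpenApi;

    var executionParameters = new OpenApiFunctionExecutionParameters
    {
        // Return a stream for JSON so large payloads are not buffered as strings;
        // returning null falls back to the internal reader for that response.
        HttpResponseContentReader = async (context, cancellationToken) =>
        {
            if (context.Response.Content.Headers.ContentType?.MediaType == "application/json")
            {
                return await context.Response.Content.ReadAsStreamAsync(cancellationToken);
            }

            return null;
        }
    };

As the RestApiOperationRunner changes later in this diff show, a stream returned here is wrapped in an HttpResponseStream, and the caller becomes responsible for disposing it (which also disposes the underlying response message).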
+ /// + public HttpResponseMessage Response { get; } +} diff --git a/dotnet/src/Functions/Functions.OpenApi/HttpResponseContentSerializer.cs b/dotnet/src/Functions/Functions.OpenApi/HttpResponseContentSerializer.cs deleted file mode 100644 index 8af1aae2e043..000000000000 --- a/dotnet/src/Functions/Functions.OpenApi/HttpResponseContentSerializer.cs +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. - -using System.Net.Http; -using System.Threading.Tasks; - -namespace Microsoft.SemanticKernel.Plugins.OpenApi; - -/// -/// Represents a delegate for serializing REST API operation response content. -/// -/// The operation response content. -/// The serialized HTTP response content. -internal delegate Task HttpResponseContentSerializer(HttpContent content); diff --git a/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationParameter.cs b/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationParameter.cs index c6d8f3f1c8a0..e4e2f9a1d5a4 100644 --- a/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationParameter.cs +++ b/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationParameter.cs @@ -22,6 +22,12 @@ public sealed class RestApiOperationParameter /// public string Type { get; } + /// + /// The parameter type modifier that refines the generic parameter type to a more specific one. + /// More details can be found at https://swagger.io/docs/specification/data-models/data-types + /// + public string? Format { get; } + /// /// The parameter description. /// @@ -74,6 +80,8 @@ public sealed class RestApiOperationParameter /// Type of array item for parameters of "array" type. /// The parameter default value. /// The parameter description. + /// The parameter type modifier that refines the generic parameter type to a more specific one. + /// More details can be found at https://swagger.io/docs/specification/data-models/data-types /// The parameter schema. public RestApiOperationParameter( string name, @@ -85,6 +93,7 @@ public RestApiOperationParameter( string? arrayItemType = null, object? defaultValue = null, string? description = null, + string? format = null, KernelJsonSchema? schema = null) { this.Name = name; @@ -96,6 +105,7 @@ public RestApiOperationParameter( this.ArrayItemType = arrayItemType; this.DefaultValue = defaultValue; this.Description = description; + this.Format = format; this.Schema = schema; } } diff --git a/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationPayloadProperty.cs b/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationPayloadProperty.cs index f83152ea1d0e..b1c8be08aa7c 100644 --- a/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationPayloadProperty.cs +++ b/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationPayloadProperty.cs @@ -19,6 +19,12 @@ public sealed class RestApiOperationPayloadProperty /// public string Type { get; } + /// + /// The property type modifier that refines the generic parameter type to a more specific one. + /// More details can be found at https://swagger.io/docs/specification/data-models/data-types + /// + public string? Format { get; } + /// /// The property description. /// @@ -52,6 +58,8 @@ public sealed class RestApiOperationPayloadProperty /// A flag specifying if the property is required or not. /// A list of properties for the payload property. /// A description of the property. + /// The parameter type modifier that refines the generic parameter type to a more specific one. 
+ /// More details can be found at https://swagger.io/docs/specification/data-models/data-types /// The schema of the payload property. /// The default value of the property. /// Returns a new instance of the class. @@ -61,6 +69,7 @@ public RestApiOperationPayloadProperty( bool isRequired, IList properties, string? description = null, + string? format = null, KernelJsonSchema? schema = null, object? defaultValue = null) { @@ -70,6 +79,7 @@ public RestApiOperationPayloadProperty( this.Description = description; this.Properties = properties; this.Schema = schema; + this.Format = format; this.DefaultValue = defaultValue; } } diff --git a/dotnet/src/Functions/Functions.OpenApi/OpenApi/OpenApiDocumentParser.cs b/dotnet/src/Functions/Functions.OpenApi/OpenApi/OpenApiDocumentParser.cs index 7fe7bd28e9ac..2d6b856b4700 100644 --- a/dotnet/src/Functions/Functions.OpenApi/OpenApi/OpenApiDocumentParser.cs +++ b/dotnet/src/Functions/Functions.OpenApi/OpenApi/OpenApiDocumentParser.cs @@ -291,6 +291,7 @@ private static List CreateRestApiOperationParameters( parameter.Schema.Items?.Type, GetParameterValue(parameter.Schema.Default, "parameter", parameter.Name), parameter.Description, + parameter.Schema.Format, parameter.Schema.ToJsonSchema() ); @@ -371,6 +372,7 @@ private static List GetPayloadProperties(string requiredProperties.Contains(propertyName), GetPayloadProperties(operationId, propertySchema, requiredProperties, level + 1), propertySchema.Description, + propertySchema.Format, propertySchema.ToJsonSchema(), GetParameterValue(propertySchema.Default, "payload property", propertyName)); diff --git a/dotnet/src/Functions/Functions.OpenApi/OpenApiKernelPluginFactory.cs b/dotnet/src/Functions/Functions.OpenApi/OpenApiKernelPluginFactory.cs index e6bc5f1ddddf..62db2dbe95da 100644 --- a/dotnet/src/Functions/Functions.OpenApi/OpenApiKernelPluginFactory.cs +++ b/dotnet/src/Functions/Functions.OpenApi/OpenApiKernelPluginFactory.cs @@ -151,7 +151,8 @@ internal static async Task CreateOpenApiPluginAsync( executionParameters?.AuthCallback, executionParameters?.UserAgent, executionParameters?.EnableDynamicPayload ?? true, - executionParameters?.EnablePayloadNamespacing ?? false); + executionParameters?.EnablePayloadNamespacing ?? false, + executionParameters?.HttpResponseContentReader); var functions = new List(); ILogger logger = loggerFactory.CreateLogger(typeof(OpenApiKernelExtensions)) ?? NullLogger.Instance; @@ -254,7 +255,7 @@ async Task ExecuteAsync(Kernel kernel, KernelFunction Description = $"{p.Description ?? p.Name}", DefaultValue = p.DefaultValue ?? string.Empty, IsRequired = p.IsRequired, - ParameterType = p.Type switch { "string" => typeof(string), "boolean" => typeof(bool), _ => null }, + ParameterType = ConvertParameterDataType(p), Schema = p.Schema ?? (p.Type is null ? null : KernelJsonSchema.Parse($$"""{"type":"{{p.Type}}"}""")), }) .ToList(); @@ -339,9 +340,9 @@ private static string ConvertOperationIdToValidFunctionName(string operationId, } catch (ArgumentException) { - // The exception indicates that the operationId is not a valid function name. - // To comply with the KernelFunction name requirements, it needs to be converted or sanitized. - // Therefore, it should not be re-thrown, but rather swallowed to allow the conversion below. + // The exception indicates that the operationId is not a valid function name. + // To comply with the KernelFunction name requirements, it needs to be converted or sanitized. 
+ // Therefore, it should not be re-thrown, but rather swallowed to allow the conversion below. } // Tokenize operation id on forward and back slashes @@ -360,6 +361,34 @@ private static string ConvertOperationIdToValidFunctionName(string operationId, return result; } + /// + /// Converts the parameter type to a C# type. + /// + /// The REST API operation parameter. + /// + private static Type? ConvertParameterDataType(RestApiOperationParameter parameter) + { + return parameter.Type switch + { + "string" => typeof(string), + "boolean" => typeof(bool), + "number" => parameter.Format switch + { + "float" => typeof(float), + "double" => typeof(double), + _ => typeof(double) + }, + "integer" => parameter.Format switch + { + "int32" => typeof(int), + "int64" => typeof(long), + _ => typeof(long) + }, + "object" => typeof(object), + _ => null + }; + } + /// /// Used to convert operationId to SK function names. /// @@ -372,5 +401,4 @@ private static string ConvertOperationIdToValidFunctionName(string operationId, #endif #endregion - } diff --git a/dotnet/src/Functions/Functions.OpenApi/RestApiOperationRunner.cs b/dotnet/src/Functions/Functions.OpenApi/RestApiOperationRunner.cs index 99ff2f276d15..8d4998207aec 100644 --- a/dotnet/src/Functions/Functions.OpenApi/RestApiOperationRunner.cs +++ b/dotnet/src/Functions/Functions.OpenApi/RestApiOperationRunner.cs @@ -3,6 +3,7 @@ using System; using System.Collections.Generic; using System.Globalization; +using System.IO; using System.Linq; using System.Net; using System.Net.Http; @@ -45,14 +46,17 @@ internal sealed class RestApiOperationRunner private readonly Dictionary _payloadFactoryByMediaType; /// - /// A dictionary containing the content type as the key and the corresponding content serializer as the value. + /// A dictionary containing the content type as the key and the corresponding content reader as the value. /// - private static readonly Dictionary s_serializerByContentType = new() + /// + /// TODO: Pass cancellation tokens to the content readers. + /// + private static readonly Dictionary s_contentReaderByContentType = new() { - { "image", async (content) => await content.ReadAsByteArrayAndTranslateExceptionAsync().ConfigureAwait(false) }, - { "text", async (content) => await content.ReadAsStringWithExceptionMappingAsync().ConfigureAwait(false) }, - { "application/json", async (content) => await content.ReadAsStringWithExceptionMappingAsync().ConfigureAwait(false)}, - { "application/xml", async (content) => await content.ReadAsStringWithExceptionMappingAsync().ConfigureAwait(false)} + { "image", async (context, _) => await context.Response.Content.ReadAsByteArrayAndTranslateExceptionAsync().ConfigureAwait(false) }, + { "text", async (context, _) => await context.Response.Content.ReadAsStringWithExceptionMappingAsync().ConfigureAwait(false) }, + { "application/json", async (context, _) => await context.Response.Content.ReadAsStringWithExceptionMappingAsync().ConfigureAwait(false)}, + { "application/xml", async (context, _) => await context.Response.Content.ReadAsStringWithExceptionMappingAsync().ConfigureAwait(false)} }; /// @@ -82,6 +86,11 @@ internal sealed class RestApiOperationRunner /// private readonly bool _enablePayloadNamespacing; + /// + /// Custom HTTP response content reader. + /// + private readonly HttpResponseContentReader? _httpResponseContentReader; + /// /// Creates an instance of the class.
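The new ConvertParameterDataType switch above carries the core of the format handling, so here is a standalone sketch of the same rule for quick reference (the helper name is mine; the mapping mirrors the switch in this diff):

    using System;

    // OpenAPI (type, format) -> CLR type, as implemented by ConvertParameterDataType.
    static Type? ResolveClrType(string? type, string? format) => type switch
    {
        "string" => typeof(string),
        "boolean" => typeof(bool),
        "number" => format == "float" ? typeof(float) : typeof(double), // "double" and no format both map to double
        "integer" => format == "int32" ? typeof(int) : typeof(long),    // "int64" and no format both map to long
        "object" => typeof(object),
        _ => null // unknown types leave ParameterType unset
    };

    Console.WriteLine(ResolveClrType("number", "float")); // System.Single
    Console.WriteLine(ResolveClrType("integer", null));   // System.Int64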
/// @@ -93,17 +102,20 @@ internal sealed class RestApiOperationRunner /// /// Determines whether payload parameters are resolved from the arguments by /// full name (parameter name prefixed with the parent property name). + /// Custom HTTP response content reader. public RestApiOperationRunner( HttpClient httpClient, AuthenticateRequestAsyncCallback? authCallback = null, string? userAgent = null, bool enableDynamicPayload = false, - bool enablePayloadNamespacing = false) + bool enablePayloadNamespacing = false, + HttpResponseContentReader? httpResponseContentReader = null) { this._httpClient = httpClient; this._userAgent = userAgent ?? HttpHeaderConstant.Values.UserAgent; this._enableDynamicPayload = enableDynamicPayload; this._enablePayloadNamespacing = enablePayloadNamespacing; + this._httpResponseContentReader = httpResponseContentReader; // If no auth callback provided, use empty function if (authCallback is null) @@ -197,16 +209,27 @@ private async Task SendAsync( } } + RestApiOperationResponse? response = null; + HttpResponseMessage? responseMessage = null; + try { - using var responseMessage = await this._httpClient.SendWithSuccessCheckAsync(requestMessage, cancellationToken).ConfigureAwait(false); + responseMessage = await this._httpClient.SendWithSuccessCheckAsync(requestMessage, cancellationToken).ConfigureAwait(false); - var response = await SerializeResponseContentAsync(requestMessage, payload, responseMessage).ConfigureAwait(false); + response = await this.ReadContentAndCreateOperationResponseAsync(requestMessage, responseMessage, payload, cancellationToken).ConfigureAwait(false); response.ExpectedSchema ??= GetExpectedSchema(expectedSchemas, responseMessage.StatusCode); return response; } + catch (HttpRequestException ex) + { + var exception = new HttpOperationException(message: ex.Message, innerException: ex); + exception.Data.Add(HttpRequestMethod, requestMessage.Method.Method); + exception.Data.Add(UrlFull, requestMessage.RequestUri?.ToString()); + exception.Data.Add(HttpRequestBody, payload); + throw exception; + } catch (HttpOperationException ex) { #pragma warning disable CS0618 // Type or member is obsolete @@ -237,23 +260,33 @@ private async Task SendAsync( throw; } + finally + { + // Dispose the response message if the content is not a stream. + // Otherwise, the caller is responsible for disposing of both the stream content and the response message. + if (response?.Content is not HttpResponseStream) + { + responseMessage?.Dispose(); + } + } } /// - /// Serializes the response content of an HTTP request. + /// Reads the response content of an HTTP request and creates an operation response. /// - /// The HttpRequestMessage associated with the HTTP request. + /// The HTTP request message. + /// The HTTP response message. /// The payload sent in the HTTP request. - /// The HttpResponseMessage object containing the response content to be serialized. - /// The serialized content. - private static async Task SerializeResponseContentAsync(HttpRequestMessage request, object? payload, HttpResponseMessage responseMessage) + /// The cancellation token. + /// The operation response. + private async Task ReadContentAndCreateOperationResponseAsync(HttpRequestMessage requestMessage, HttpResponseMessage responseMessage, object? 
payload, CancellationToken cancellationToken) { if (responseMessage.StatusCode == HttpStatusCode.NoContent) { return new RestApiOperationResponse(null, null) { - RequestMethod = request.Method.Method, - RequestUri = request.RequestUri, + RequestMethod = requestMessage.Method.Method, + RequestUri = requestMessage.RequestUri, RequestPayload = payload, }; } @@ -262,32 +295,12 @@ private static async Task SerializeResponseContentAsyn var mediaType = contentType?.MediaType ?? throw new KernelException("No media type available."); - // Obtain the content serializer by media type (e.g., text/plain, application/json, image/jpg) - if (!s_serializerByContentType.TryGetValue(mediaType, out var serializer)) - { - // Split the media type into a primary-type and a sub-type - var mediaTypeParts = mediaType.Split('/'); - if (mediaTypeParts.Length != 2) - { - throw new KernelException($"The string `{mediaType}` is not a valid media type."); - } - - var primaryMediaType = mediaTypeParts.First(); - - // Try to obtain the content serializer by the primary type (e.g., text, application, image) - if (!s_serializerByContentType.TryGetValue(primaryMediaType, out serializer)) - { - throw new KernelException($"The content type `{mediaType}` is not supported."); - } - } - - // Serialize response content and return it - var serializedContent = await serializer.Invoke(responseMessage.Content).ConfigureAwait(false); + var content = await this.ReadHttpContentAsync(requestMessage, responseMessage, mediaType, cancellationToken).ConfigureAwait(false); - return new RestApiOperationResponse(serializedContent, contentType!.ToString()) + return new RestApiOperationResponse(content, contentType.ToString()) { - RequestMethod = request.Method.Method, - RequestUri = request.RequestUri, + RequestMethod = requestMessage.Method.Method, + RequestUri = requestMessage.RequestUri, RequestPayload = payload, }; } @@ -464,5 +477,60 @@ private Uri BuildsOperationUrl(RestApiOperation operation, IDictionary + /// Reads the HTTP content. + /// + /// The HTTP request message. + /// The HTTP response message. + /// The media type of the content. + /// The cancellation token. + /// The HTTP content. + private async Task ReadHttpContentAsync(HttpRequestMessage requestMessage, HttpResponseMessage responseMessage, string mediaType, CancellationToken cancellationToken) + { + object? content = null; + + // Read content using the custom reader if provided. + if (this._httpResponseContentReader is not null) + { + content = await this._httpResponseContentReader.Invoke(new(requestMessage, responseMessage), cancellationToken).ConfigureAwait(false); + } + + // If no custom reader is provided or the custom reader did not return any content, read the content using the default readers. 
+ if (content is null) + { + // Obtain the content reader by media type (e.g., text/plain, application/json, image/jpg) + if (!s_contentReaderByContentType.TryGetValue(mediaType, out var reader)) + { + // Split the media type into a primary-type and a sub-type + var mediaTypeParts = mediaType.Split('/'); + if (mediaTypeParts.Length != 2) + { + throw new KernelException($"The string `{mediaType}` is not a valid media type."); + } + + var primaryMediaType = mediaTypeParts.First(); + + // Try to obtain the content reader by the primary type (e.g., text, application, image) + if (!s_contentReaderByContentType.TryGetValue(primaryMediaType, out reader)) + { + throw new KernelException($"The content type `{mediaType}` is not supported."); + } + } + + content = await reader.Invoke(new(requestMessage, responseMessage), cancellationToken).ConfigureAwait(false); + } + + // Handling the case when the content is a stream + if (content is Stream stream) + { +#pragma warning disable CA2000 // Dispose objects before losing scope. + // Wrap the stream content to capture the HTTP response message, delegating its disposal to the caller. + content = new HttpResponseStream(stream, responseMessage); +#pragma warning restore CA2000 // Dispose objects before losing scope. + } + + return content; + } + #endregion } diff --git a/dotnet/src/Functions/Functions.Prompty.UnitTests/PromptyTest.cs b/dotnet/src/Functions/Functions.Prompty.UnitTests/PromptyTest.cs index 308f87d40464..148e5a1d28b2 100644 --- a/dotnet/src/Functions/Functions.Prompty.UnitTests/PromptyTest.cs +++ b/dotnet/src/Functions/Functions.Prompty.UnitTests/PromptyTest.cs @@ -69,6 +69,39 @@ public void ChatPromptyShouldSupportCreatingOpenAIExecutionSettings() Assert.Null(executionSettings.Seed); } + [Fact] + public void ChatPromptyShouldSupportCreatingOpenAIExecutionSettingsWithJsonObject() + { + // Arrange + Kernel kernel = new(); + var chatPromptyPath = Path.Combine("TestData", "chatJsonObject.prompty"); + + // Act + var kernelFunction = kernel.CreateFunctionFromPromptyFile(chatPromptyPath); + + // Assert + // kernel function created from chat.prompty should have a single execution setting + Assert.Single(kernelFunction.ExecutionSettings!); + Assert.True(kernelFunction.ExecutionSettings!.ContainsKey("default")); + + // Arrange + var defaultExecutionSetting = kernelFunction.ExecutionSettings["default"]; + + // Act + var executionSettings = OpenAIPromptExecutionSettings.FromExecutionSettings(defaultExecutionSetting); + + // Assert + Assert.NotNull(executionSettings); + Assert.Equal("gpt-4o", executionSettings.ModelId); + Assert.Equal(0, executionSettings.Temperature); + Assert.Equal(1.0, executionSettings.TopP); + Assert.Null(executionSettings.StopSequences); + Assert.Equal("json_object", executionSettings.ResponseFormat?.ToString()); + Assert.Null(executionSettings.TokenSelectionBiases); + Assert.Equal(3000, executionSettings.MaxTokens); + Assert.Null(executionSettings.Seed); + } + [Fact] public void ItShouldCreateFunctionFromPromptYamlWithNoExecutionSettings() { diff --git a/dotnet/src/Functions/Functions.Prompty.UnitTests/TestData/chatJsonObject.prompty b/dotnet/src/Functions/Functions.Prompty.UnitTests/TestData/chatJsonObject.prompty new file mode 100644 index 000000000000..a6be798dbf1a --- /dev/null +++ b/dotnet/src/Functions/Functions.Prompty.UnitTests/TestData/chatJsonObject.prompty @@ -0,0 +1,26 @@ +--- +name: Contoso_Chat_Prompt +description: A classifier assistant +authors: + - ???? 
+model: + api: chat + configuration: + type: azure_openai + azure_deployment: gpt-4o + parameters: + temperature: 0.0 + max_tokens: 3000 + response_format: + type: json_object + +--- +system: +You are a classifier agent that should know how to classify a problem into Easy/Medium/Hard based on the problem description. +Your response should be in JSON format with the following structure: +{ + "difficulty": "Easy/Medium/Hard" +} + +user: +{{question}} \ No newline at end of file diff --git a/dotnet/src/Functions/Functions.Prompty/Core/PromptyModelParameters.cs b/dotnet/src/Functions/Functions.Prompty/Core/PromptyModelParameters.cs index 8a7e9ed3a4ef..7699037d7466 100644 --- a/dotnet/src/Functions/Functions.Prompty/Core/PromptyModelParameters.cs +++ b/dotnet/src/Functions/Functions.Prompty/Core/PromptyModelParameters.cs @@ -10,7 +10,7 @@ internal sealed class PromptyModelParameters { /// Specify the format for model output (e.g., JSON mode). [YamlMember(Alias = "response_format")] - public string? ResponseFormat { get; set; } + public PromptyResponseFormat? ResponseFormat { get; set; } /// Seed for deterministic sampling (Beta feature). [YamlMember(Alias = "seed")] diff --git a/dotnet/src/Functions/Functions.Prompty/Core/PromptyResponseFormat.cs b/dotnet/src/Functions/Functions.Prompty/Core/PromptyResponseFormat.cs new file mode 100644 index 000000000000..c3c991903bb1 --- /dev/null +++ b/dotnet/src/Functions/Functions.Prompty/Core/PromptyResponseFormat.cs @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft. All rights reserved. + +using YamlDotNet.Serialization; + +namespace Microsoft.SemanticKernel.Prompty.Core; + +/// The response format of prompty. +internal sealed class PromptyResponseFormat +{ + /// The response format type (e.g., json_object). + [YamlMember(Alias = "type")] + public string?
Type { get; set; } +} diff --git a/dotnet/src/Functions/Functions.Prompty/KernelFunctionPrompty.cs b/dotnet/src/Functions/Functions.Prompty/KernelFunctionPrompty.cs index dadcd203fc19..003811934181 100644 --- a/dotnet/src/Functions/Functions.Prompty/KernelFunctionPrompty.cs +++ b/dotnet/src/Functions/Functions.Prompty/KernelFunctionPrompty.cs @@ -171,7 +171,7 @@ public static PromptTemplateConfig ToPromptTemplateConfig(string promptyTemplate extensionData.Add("stop_sequences", stop); } - if (prompty.Model?.Parameters?.ResponseFormat == "json_object") + if (prompty.Model?.Parameters?.ResponseFormat?.Type == "json_object") { extensionData.Add("response_format", "json_object"); } diff --git a/dotnet/src/Functions/Functions.UnitTests/Functions.UnitTests.csproj b/dotnet/src/Functions/Functions.UnitTests/Functions.UnitTests.csproj index 50f58e947499..178dd4860a24 100644 --- a/dotnet/src/Functions/Functions.UnitTests/Functions.UnitTests.csproj +++ b/dotnet/src/Functions/Functions.UnitTests/Functions.UnitTests.csproj @@ -15,6 +15,7 @@ + @@ -27,6 +28,7 @@ + diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV20Tests.cs b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV20Tests.cs index 5e088eca8d06..1e3109c0c1ff 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV20Tests.cs +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV20Tests.cs @@ -225,7 +225,7 @@ public async Task ItCanExtractAllPathsAsOperationsAsync() var restApi = await this._sut.ParseAsync(this._openApiDocument); // Assert - Assert.Equal(5, restApi.Operations.Count); + Assert.Equal(6, restApi.Operations.Count); } [Fact] @@ -366,6 +366,44 @@ public async Task ItCanParseRestApiInfoAsync() Assert.NotEmpty(restApi.Info.Description); } + [Theory] + [InlineData("string-parameter", "string", null)] + [InlineData("boolean-parameter", "boolean", null)] + [InlineData("number-parameter", "number", null)] + [InlineData("float-parameter", "number", "float")] + [InlineData("double-parameter", "number", "double")] + [InlineData("integer-parameter", "integer", null)] + [InlineData("int32-parameter", "integer", "int32")] + [InlineData("int64-parameter", "integer", "int64")] + public async Task ItCanParseParametersOfPrimitiveDataTypeAsync(string name, string type, string? 
format) + { + // Arrange & Act + var restApiSpec = await this._sut.ParseAsync(this._openApiDocument); + + // Assert + var parameters = restApiSpec.Operations.Single(o => o.Id == "TestParameterDataTypes").GetParameters(); + + var parameter = parameters.FirstOrDefault(p => p.Name == name); + Assert.NotNull(parameter); + + Assert.Equal(type, parameter.Type); + Assert.Equal(format, parameter.Format); + } + + [Fact] + public async Task ItCanParsePropertiesOfObjectDataTypeAsync() + { + // Arrange & Act + var restApiSpec = await this._sut.ParseAsync(this._openApiDocument); + + // Assert + var properties = restApiSpec.Operations.Single(o => o.Id == "TestParameterDataTypes").Payload!.Properties; + + var property = properties.Single(p => p.Name == "attributes"); + Assert.Equal("object", property.Type); + Assert.Null(property.Format); + } + private static RestApiOperationParameter GetParameterMetadata(IList operations, string operationId, RestApiOperationParameterLocation location, string name) { diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV30FeatureTests.cs b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV30FeatureTests.cs new file mode 100644 index 000000000000..d16245567931 --- /dev/null +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV30FeatureTests.cs @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Plugins.OpenApi; +using SemanticKernel.Functions.UnitTests.OpenApi.TestPlugins; +using Xunit; + +namespace SemanticKernel.Functions.UnitTests.OpenApi; + +public class OpenApiDocumentParserV30FeatureTests +{ + /// + /// OpenAPI document stream. + /// + private readonly Stream _openApiDocument; + + /// + /// System under test - an instance of OpenApiDocumentParser class. 
+ /// + private readonly OpenApiDocumentParser _parser; + + public OpenApiDocumentParserV30FeatureTests() + { + this._openApiDocument = ResourcePluginsProvider.LoadFromResource("openapi_feature_testsV3_0.json"); + this._parser = new OpenApiDocumentParser(); + } + + [Fact] + public async Task ItCanParseAllOfAsync() + { + var spec = await this._parser.ParseAsync(this._openApiDocument); + + Assert.NotEmpty(spec.Operations); + var op0 = spec.Operations.Single(static x => x.Id == "allOfGet"); + Assert.NotEmpty(op0.Responses); + var res200 = op0.Responses["200"]; + Assert.NotNull(res200.Schema); + var foo = res200.Schema.RootElement.GetProperty("allOf")[0]; + Assert.Equal("object", foo.GetProperty("type").GetString()); + var bar = res200.Schema.RootElement.GetProperty("allOf")[1]; + Assert.Equal("object", bar.GetProperty("type").GetString()); + } + + [Fact] + public async Task ItCanParseAnyOfAsync() + { + var spec = await this._parser.ParseAsync(this._openApiDocument); + + Assert.NotEmpty(spec.Operations); + var op0 = spec.Operations.Single(static x => x.Id == "anyOfGet"); + Assert.NotEmpty(op0.Responses); + var res200 = op0.Responses["200"]; + Assert.NotNull(res200.Schema); + var foo = res200.Schema.RootElement.GetProperty("anyOf")[0]; + Assert.Equal("object", foo.GetProperty("type").GetString()); + var bar = res200.Schema.RootElement.GetProperty("anyOf")[1]; + Assert.Equal("string", bar.GetProperty("type").GetString()); + } + + [Fact] + public async Task ItCanParseOneOfAsync() + { + var spec = await this._parser.ParseAsync(this._openApiDocument); + + Assert.NotEmpty(spec.Operations); + var op0 = spec.Operations.Single(static x => x.Id == "oneOfGet"); + Assert.NotEmpty(op0.Responses); + var res200 = op0.Responses["200"]; + Assert.NotNull(res200.Schema); + var foo = res200.Schema.RootElement.GetProperty("oneOf")[0]; + Assert.Equal("object", foo.GetProperty("type").GetString()); + var bar = res200.Schema.RootElement.GetProperty("oneOf")[1]; + Assert.Equal("string", bar.GetProperty("type").GetString()); + } +} diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV30Tests.cs b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV30Tests.cs index d12871d192d0..cb9eec5eb508 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV30Tests.cs +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV30Tests.cs @@ -226,7 +226,7 @@ public async Task ItCanExtractAllPathsAsOperationsAsync() var restApi = await this._sut.ParseAsync(this._openApiDocument); // Assert - Assert.Equal(5, restApi.Operations.Count); + Assert.Equal(6, restApi.Operations.Count); } [Fact] @@ -439,6 +439,44 @@ public async Task ItCanParseRestApiInfoAsync() Assert.NotEmpty(restApi.Info.Description); } + [Theory] + [InlineData("string-parameter", "string", null)] + [InlineData("boolean-parameter", "boolean", null)] + [InlineData("number-parameter", "number", null)] + [InlineData("float-parameter", "number", "float")] + [InlineData("double-parameter", "number", "double")] + [InlineData("integer-parameter", "integer", null)] + [InlineData("int32-parameter", "integer", "int32")] + [InlineData("int64-parameter", "integer", "int64")] + public async Task ItCanParseParametersOfPrimitiveDataTypeAsync(string name, string type, string? 
format) + { + // Arrange & Act + var restApiSpec = await this._sut.ParseAsync(this._openApiDocument); + + // Assert + var parameters = restApiSpec.Operations.Single(o => o.Id == "TestParameterDataTypes").GetParameters(); + + var parameter = parameters.FirstOrDefault(p => p.Name == name); + Assert.NotNull(parameter); + + Assert.Equal(type, parameter.Type); + Assert.Equal(format, parameter.Format); + } + + [Fact] + public async Task ItCanParsePropertiesOfObjectDataTypeAsync() + { + // Arrange & Act + var restApiSpec = await this._sut.ParseAsync(this._openApiDocument); + + // Assert + var properties = restApiSpec.Operations.Single(o => o.Id == "TestParameterDataTypes").Payload!.Properties; + + var property = properties.Single(p => p.Name == "attributes"); + Assert.Equal("object", property.Type); + Assert.Null(property.Format); + } + private static MemoryStream ModifyOpenApiDocument(Stream openApiDocument, Action transformer) { var json = JsonSerializer.Deserialize(openApiDocument); diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV31Tests.cs b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV31Tests.cs index 65d9ca5eccd5..60e182f1bfc6 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV31Tests.cs +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiDocumentParserV31Tests.cs @@ -226,7 +226,7 @@ public async Task ItCanExtractAllPathsAsOperationsAsync() var restApi = await this._sut.ParseAsync(this._openApiDocument); // Assert - Assert.Equal(5, restApi.Operations.Count); + Assert.Equal(6, restApi.Operations.Count); } [Fact] @@ -416,6 +416,44 @@ public async Task ItCanParseRestApiInfoAsync() Assert.NotEmpty(restApi.Info.Description); } + [Theory] + [InlineData("string-parameter", "string", null)] + [InlineData("boolean-parameter", "boolean", null)] + [InlineData("number-parameter", "number", null)] + [InlineData("float-parameter", "number", "float")] + [InlineData("double-parameter", "number", "double")] + [InlineData("integer-parameter", "integer", null)] + [InlineData("int32-parameter", "integer", "int32")] + [InlineData("int64-parameter", "integer", "int64")] + public async Task ItCanParseParametersOfPrimitiveDataTypeAsync(string name, string type, string? 
format) + { + // Arrange & Act + var restApiSpec = await this._sut.ParseAsync(this._openApiDocument); + + // Assert + var parameters = restApiSpec.Operations.Single(o => o.Id == "TestParameterDataTypes").GetParameters(); + + var parameter = parameters.FirstOrDefault(p => p.Name == name); + Assert.NotNull(parameter); + + Assert.Equal(type, parameter.Type); + Assert.Equal(format, parameter.Format); + } + + [Fact] + public async Task ItCanParsePropertiesOfObjectDataTypeAsync() + { + // Arrange & Act + var restApiSpec = await this._sut.ParseAsync(this._openApiDocument); + + // Assert + var properties = restApiSpec.Operations.Single(o => o.Id == "TestParameterDataTypes").Payload!.Properties; + + var property = properties.Single(p => p.Name == "attributes"); + Assert.Equal("object", property.Type); + Assert.Null(property.Format); + } + private static MemoryStream ModifyOpenApiDocument(Stream openApiDocument, Action> transformer) { var serializer = new SharpYaml.Serialization.Serializer(); diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiKernelPluginFactoryFeatureTests.cs b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiKernelPluginFactoryFeatureTests.cs new file mode 100644 index 000000000000..396c82e310d6 --- /dev/null +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiKernelPluginFactoryFeatureTests.cs @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Linq; +using System.Text.Json; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Plugins.OpenApi; +using SemanticKernel.Functions.UnitTests.OpenApi.TestPlugins; +using Xunit; + +namespace SemanticKernel.Functions.UnitTests.OpenApi; + +public class OpenApiKernelPluginFactoryFeatureTests +{ + [Fact] + public async Task ItShouldCreatePluginWithOperationPayloadForAnyOfSchemaAsync() + { + await using var openApiDocument = ResourcePluginsProvider.LoadFromResource("openapi_feature_testsV3_0.json"); + + var plugin = await OpenApiKernelPluginFactory.CreateFromOpenApiAsync("fakePlugin", openApiDocument, executionParameters: new OpenApiFunctionExecutionParameters { EnableDynamicPayload = false }); + + var postFoobarFunction = plugin["AnyOfPost"]; + Assert.NotNull(postFoobarFunction); + + var functionView = postFoobarFunction.Metadata; + Assert.NotNull(functionView); + + var payloadParameter = functionView.Parameters.First(p => p.Name == "payload"); + Assert.NotNull(payloadParameter.Schema); + Assert.Equal(JsonValueKind.Array, payloadParameter.Schema!.RootElement.GetProperty("anyOf").ValueKind); + } + + [Fact] + public async Task ItShouldCreatePluginWithOperationPayloadForAllOfSchemaAsync() + { + await using var openApiDocument = ResourcePluginsProvider.LoadFromResource("openapi_feature_testsV3_0.json"); + var plugin = await OpenApiKernelPluginFactory.CreateFromOpenApiAsync("fakePlugin", openApiDocument, executionParameters: new OpenApiFunctionExecutionParameters { EnableDynamicPayload = false }); + + var postFoobarFunction = plugin["AllOfPost"]; + Assert.NotNull(postFoobarFunction); + + var functionView = postFoobarFunction.Metadata; + Assert.NotNull(functionView); + + var payloadParameter = functionView.Parameters.First(p => p.Name == "payload"); + Assert.NotNull(payloadParameter.Schema); + Assert.Equal(JsonValueKind.Array, payloadParameter.Schema!.RootElement.GetProperty("allOf").ValueKind); + } + + [Fact] + public async Task ItShouldCreatePluginWithOperationPayloadForOneOfSchemaAsync() + { + await using var openApiDocument = 
ResourcePluginsProvider.LoadFromResource("openapi_feature_testsV3_0.json"); + + var plugin = await OpenApiKernelPluginFactory.CreateFromOpenApiAsync("fakePlugin", openApiDocument, executionParameters: new OpenApiFunctionExecutionParameters { EnableDynamicPayload = false }); + + var postFoobarFunction = plugin["OneOfPost"]; + Assert.NotNull(postFoobarFunction); + + var functionView = postFoobarFunction.Metadata; + Assert.NotNull(functionView); + + var payloadParameter = functionView.Parameters.First(p => p.Name == "payload"); + Assert.NotNull(payloadParameter.Schema); + Assert.Equal(JsonValueKind.Array, payloadParameter.Schema!.RootElement.GetProperty("oneOf").ValueKind); + } +} diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiKernelPluginFactoryTests.cs b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiKernelPluginFactoryTests.cs index ed4f7fe077b9..3ce3f230e888 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiKernelPluginFactoryTests.cs +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/OpenApiKernelPluginFactoryTests.cs @@ -17,11 +17,6 @@ namespace SemanticKernel.Functions.UnitTests.OpenApi; public sealed class OpenApiKernelPluginFactoryTests { - /// - /// System under test - an instance of OpenApiDocumentParser class. - /// - private readonly OpenApiDocumentParser _sut; - /// /// OpenAPI function execution parameters. /// @@ -40,8 +35,6 @@ public OpenApiKernelPluginFactoryTests() this._executionParameters = new OpenApiFunctionExecutionParameters() { EnableDynamicPayload = false }; this._openApiDocument = ResourcePluginsProvider.LoadFromResource("documentV2_0.json"); - - this._sut = new OpenApiDocumentParser(); } [Fact] @@ -312,7 +305,7 @@ public async Task ItShouldHandleEmptyOperationNameAsync() var plugin = await OpenApiKernelPluginFactory.CreateFromOpenApiAsync("fakePlugin", content, this._executionParameters); // Assert - Assert.Equal(5, plugin.Count()); + Assert.Equal(6, plugin.Count()); Assert.True(plugin.TryGetFunction("GetSecretsSecretname", out var _)); } @@ -331,10 +324,74 @@ public async Task ItShouldHandleNullOperationNameAsync() var plugin = await OpenApiKernelPluginFactory.CreateFromOpenApiAsync("fakePlugin", content, this._executionParameters); // Assert - Assert.Equal(5, plugin.Count()); + Assert.Equal(6, plugin.Count()); Assert.True(plugin.TryGetFunction("GetSecretsSecretname", out var _)); } + [Theory] + [InlineData("string_parameter", typeof(string))] + [InlineData("boolean_parameter", typeof(bool))] + [InlineData("number_parameter", typeof(double))] + [InlineData("float_parameter", typeof(float))] + [InlineData("double_parameter", typeof(double))] + [InlineData("integer_parameter", typeof(long))] + [InlineData("int32_parameter", typeof(int))] + [InlineData("int64_parameter", typeof(long))] + public async Task ItShouldMapPropertiesOfPrimitiveDataTypeToKernelParameterMetadataAsync(string name, Type type) + { + // Arrange & Act + this._executionParameters.EnableDynamicPayload = true; + + var plugin = await OpenApiKernelPluginFactory.CreateFromOpenApiAsync("fakePlugin", this._openApiDocument, this._executionParameters); + + var parametersMetadata = plugin["TestParameterDataTypes"].Metadata.Parameters; + + // Assert + var parameterMetadata = parametersMetadata.First(p => p.Name == name); + + Assert.Equal(type, parameterMetadata.ParameterType); + } + + [Fact] + public async Task ItShouldMapPropertiesOfObjectDataTypeToKernelParameterMetadataAsync() + { + // Arrange & Act + var plugin = await 
OpenApiKernelPluginFactory.CreateFromOpenApiAsync("fakePlugin", this._openApiDocument, this._executionParameters); + + var parametersMetadata = plugin["TestParameterDataTypes"].Metadata.Parameters; + + // Assert + var parameterMetadata = parametersMetadata.First(p => p.Name == "payload"); + + Assert.Equal(typeof(object), parameterMetadata.ParameterType); + } + + [Fact] + public async Task ItShouldUseCustomHttpResponseContentReaderAsync() + { + // Arrange + using var messageHandlerStub = new HttpMessageHandlerStub(this._openApiDocument); + using var httpClient = new HttpClient(messageHandlerStub, false); + + this._executionParameters.HttpResponseContentReader = async (context, cancellationToken) => await context.Response.Content.ReadAsStreamAsync(cancellationToken); + this._executionParameters.HttpClient = httpClient; + + var kernel = new Kernel(); + + var plugin = await OpenApiKernelPluginFactory.CreateFromOpenApiAsync("fakePlugin", new Uri("http://localhost:3001/openapi.json"), this._executionParameters); + + messageHandlerStub.ResetResponse(); + + // Act + var result = await kernel.InvokeAsync(plugin["GetSecret"], this.GetFakeFunctionArguments()); + + // Assert + var response = result.GetValue(); + Assert.NotNull(response); + + Assert.IsAssignableFrom(response.Content); + } + [Fact] public void Dispose() { @@ -375,5 +432,4 @@ public void DoFakeAction(string parameter) } #endregion - } diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationRunnerTests.cs b/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationRunnerTests.cs index fd980398a3ac..6d72c632e6d7 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationRunnerTests.cs +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationRunnerTests.cs @@ -1238,6 +1238,114 @@ public async Task ItShouldIncludeRequestDataWhenOperationCanceledExceptionIsThro Assert.Equal("{\"value\":\"fake-value\"}", canceledException.Data["http.request.body"]); } + [Fact] + public async Task ItShouldUseCustomHttpResponseContentReaderAsync() + { + // Arrange + var operation = new RestApiOperation( + "fake-id", + new Uri("https://fake-random-test-host"), + "fake-path", + HttpMethod.Get, + "fake-description", + [], + payload: null + ); + + var expectedCancellationToken = new CancellationToken(); + + async Task ReadHttpResponseContentAsync(HttpResponseContentReaderContext context, CancellationToken cancellationToken) + { + Assert.Equal(expectedCancellationToken, cancellationToken); + + return await context.Response.Content.ReadAsStreamAsync(cancellationToken); + } + + var sut = new RestApiOperationRunner(this._httpClient, this._authenticationHandlerMock.Object, httpResponseContentReader: ReadHttpResponseContentAsync); + + // Act + var response = await sut.RunAsync(operation, [], cancellationToken: expectedCancellationToken); + + // Assert + Assert.IsAssignableFrom(response.Content); + } + + [Fact] + public async Task ItShouldUseDefaultHttpResponseContentReaderIfCustomDoesNotReturnAnyContentAsync() + { + // Arrange + this._httpMessageHandlerStub.ResponseToReturn.Content = new StringContent("fake-content", Encoding.UTF8, MediaTypeNames.Application.Json); + + var operation = new RestApiOperation( + "fake-id", + new Uri("https://fake-random-test-host"), + "fake-path", + HttpMethod.Get, + "fake-description", + [], + payload: null + ); + + var readerHasBeenCalled = false; + + Task ReadHttpResponseContentAsync(HttpResponseContentReaderContext context, CancellationToken cancellationToken) + { + 
readerHasBeenCalled = true; + return Task.FromResult(null); // Return null to indicate that no content is returned + } + + var sut = new RestApiOperationRunner(this._httpClient, this._authenticationHandlerMock.Object, httpResponseContentReader: ReadHttpResponseContentAsync); + + // Act + var response = await sut.RunAsync(operation, []); + + // Assert + Assert.True(readerHasBeenCalled); + Assert.Equal("fake-content", response.Content); + } + + [Fact] + public async Task ItShouldDisposeContentStreamAndHttpResponseContentMessageAsync() + { + // Arrange + var operation = new RestApiOperation( + "fake-id", + new Uri("https://fake-random-test-host"), + "fake-path", + HttpMethod.Get, + "fake-description", + [], + payload: null + ); + + HttpResponseMessage? responseMessage = null; + Stream? contentStream = null; + + async Task ReadHttpResponseContentAsync(HttpResponseContentReaderContext context, CancellationToken cancellationToken) + { + responseMessage = context.Response; + contentStream = await context.Response.Content.ReadAsStreamAsync(cancellationToken); + return contentStream; + } + + var sut = new RestApiOperationRunner(this._httpClient, this._authenticationHandlerMock.Object, httpResponseContentReader: ReadHttpResponseContentAsync); + + // Act + var response = await sut.RunAsync(operation, []); + + // Assert + var stream = Assert.IsAssignableFrom(response.Content); + Assert.True(stream.CanRead); + Assert.True(stream.CanSeek); + + stream.Dispose(); + + // Check that the content stream and the response message are disposed + Assert.Throws(() => responseMessage!.Version = Version.Parse("1.1.1")); + Assert.False(contentStream!.CanRead); + Assert.False(contentStream!.CanSeek); + } + public class SchemaTestData : IEnumerable { public IEnumerator GetEnumerator() diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV2_0.json b/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV2_0.json index b323f1c50f47..f01ab6609171 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV2_0.json +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV2_0.json @@ -349,6 +349,84 @@ "key2": "value2" } } + }, + "/test-parameter-data-types/{string-parameter}": { + "put": { + "description": "Operation to test parameter data types.", + "operationId": "TestParameterDataTypes", + "parameters": [ + { + "in": "path", + "name": "string-parameter", + "default": "string-value", + "required": true, + "type": "string" + }, + { + "in": "query", + "name": "boolean-parameter", + "default": true, + "type": "boolean" + }, + { + "in": "query", + "name": "number-parameter", + "default": -12.01, + "type": "number" + }, + { + "in": "header", + "name": "int32-parameter", + "type": "integer", + "format": "int32" + }, + { + "in": "header", + "name": "int64-parameter", + "type": "integer", + "format": "int64" + }, + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "properties": { + "attributes": { + "description": "attributes", + "properties": { + "double-parameter": { + "type": "number", + "format": "double", + "default": -12.01 + } + }, + "type": "object" + }, + "float-parameter": { + "type": "number", + "format": "float", + "default": 12.01 + }, + "integer-parameter": { + "type": "integer", + "default": 123 + } + }, + "type": "object" + } + } + ], + "responses": { + "200": { + "description": "The OK response", + "schema": { + "type": "string" + } + } + }, + "summary": "Get secret" + } } }, "produces": [], diff --git 
a/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV3_0.json b/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV3_0.json index 118c08dbbf6c..9b9a9ed48585 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV3_0.json +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV3_0.json @@ -331,6 +331,94 @@ "key2": "value2" } } + }, + "/test-parameter-data-types/{string-parameter}": { + "put": { + "summary": "Get secret", + "description": "Operation to test parameter data types.", + "operationId": "TestParameterDataTypes", + "parameters": [ + { + "name": "string-parameter", + "in": "path", + "required": true, + "schema": { + "type": "string", + "default": "string-value" + } + }, + { + "name": "boolean-parameter", + "in": "query", + "schema": { + "type": "boolean", + "default": true + } + }, + { + "name": "number-parameter", + "in": "query", + "schema": { + "type": "number", + "default": -12.01 + } + }, + { + "name": "int32-parameter", + "in": "header", + "schema": { + "type": "integer", + "format": "int32" + } + }, + { + "name": "int64-parameter", + "in": "header", + "schema": { + "type": "integer", + "format": "int64" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "type": "object", + "properties": { + "attributes": { + "type": "object", + "properties": { + "double-parameter": { + "type": "number", + "format": "double", + "default": -12.01 + } + }, + "description": "attributes" + }, + "float-parameter": { + "type": "number", + "format": "float", + "default": 12.01 + }, + "integer-parameter": { + "type": "integer", + "default": 123 + } + } + } + } + }, + "required": true, + "x-bodyName": "body" + }, + "responses": { + "200": { + "description": "The OK response" + } + } + } } }, "components": { diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV3_1.yaml b/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV3_1.yaml index aa0a4b0535c4..2ecbdde154a0 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV3_1.yaml +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/documentV3_1.yaml @@ -222,6 +222,64 @@ paths: x-object-extension: key1: value1 key2: value2 + '/test-parameter-data-types/{string-parameter}': + put: + summary: Get secret + description: Operation to test parameter data types. 
+ operationId: TestParameterDataTypes + parameters: + - name: string-parameter + in: path + required: true + schema: + type: string + default: string-value + - name: boolean-parameter + in: query + schema: + type: boolean + default: true + - name: number-parameter + in: query + schema: + type: number + default: -12.01 + - name: int32-parameter + in: header + schema: + type: integer + format: int32 + - name: int64-parameter + in: header + schema: + type: integer + format: int64 + requestBody: + content: + application/json: + schema: + type: object + properties: + attributes: + type: object + properties: + double-parameter: + type: number + format: double + default: -12.01 + description: attributes + float-parameter: + type: number + format: float + default: 12.01 + integer-parameter: + type: integer + default: 123 + required: true + x-bodyName: body + responses: + '200': + description: The OK response components: securitySchemes: oauth2_auth: diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/openapi_feature_testsV3_0.json b/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/openapi_feature_testsV3_0.json new file mode 100644 index 000000000000..edb1bdb4c9bc --- /dev/null +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/TestPlugins/openapi_feature_testsV3_0.json @@ -0,0 +1,164 @@ +{ + "openapi": "3.0.3", + "info": { + "title": "Test Schema", + "version": "0" + }, + "paths": { + "/fooBarAllOf": { + "get": { + "operationId": "allOfGet", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/fooBarAllOf" + } + } + }, + "description": "response" + } + } + }, + "post": { + "operationId": "allOfPost", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/fooBarAllOf" + } + } + } + }, + "responses": { + "201": { + "description": "" + } + } + } + }, + "/fooBarAnyOf": { + "get": { + "operationId": "anyOfGet", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/fooBarAnyOf" + } + } + }, + "description": "response" + } + } + }, + "post": { + "operationId": "anyOfPost", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/fooBarAnyOf" + } + } + } + }, + "responses": { + "201": { + "description": "" + } + } + } + }, + "/fooBarOneOf": { + "get": { + "operationId": "oneOfGet", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/fooBarOneOf" + } + } + }, + "description": "response" + } + } + }, + "post": { + "operationId": "oneOfPost", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/fooBarOneOf" + } + } + } + }, + "responses": { + "201": { + "description": "" + } + } + } + } + }, + "components": { + "schemas": { + "foo": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "extra": { + "type": "string" + } + } + }, + "bar": { + "type": "string" + }, + "fooBarAllOf": { + "allOf": [ + { + "$ref": "#/components/schemas/foo" + }, + { + "type": "object", + "properties": { + "extra1": { + "type": "string" + } + } + } + ] + }, + "fooBarAnyOf": { + "anyOf": [ + { + "$ref": "#/components/schemas/foo" + }, + { + "$ref": "#/components/schemas/bar" + } + ] + }, + "fooBarOneOf": { + "oneOf": [ + { + "$ref": "#/components/schemas/foo" + }, + { + "$ref": "#/components/schemas/bar" + } + ] + } + } + } +} \ No newline at end of 
file
diff --git a/dotnet/src/IntegrationTests/Agents/ChatCompletionAgentTests.cs b/dotnet/src/IntegrationTests/Agents/ChatCompletionAgentTests.cs
index 91796c1970b0..4fd99b717b5e 100644
--- a/dotnet/src/IntegrationTests/Agents/ChatCompletionAgentTests.cs
+++ b/dotnet/src/IntegrationTests/Agents/ChatCompletionAgentTests.cs
@@ -63,7 +63,7 @@ public async Task AzureChatCompletionAgentAsync(string input, string expectedAnswerContains)
         {
             Kernel = kernel,
             Instructions = "Answer questions about the menu.",
-            ExecutionSettings = new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions },
+            Arguments = new(new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions }),
         };

         AgentGroupChat chat = new();
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreCollectionFixture.cs b/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreCollectionFixture.cs
new file mode 100644
index 000000000000..6c9870cf0327
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreCollectionFixture.cs
@@ -0,0 +1,10 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using Xunit;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.AzureAISearch;
+
+[CollectionDefinition("AzureAISearchVectorStoreCollection")]
+public class AzureAISearchVectorStoreCollectionFixture : ICollectionFixture<AzureAISearchVectorStoreFixture>
+{
+}
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreFixture.cs b/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreFixture.cs
new file mode 100644
index 000000000000..19158ce56e4f
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreFixture.cs
@@ -0,0 +1,245 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text.Json.Serialization;
+using System.Text.RegularExpressions;
+using System.Threading.Tasks;
+using Azure;
+using Azure.Search.Documents;
+using Azure.Search.Documents.Indexes;
+using Azure.Search.Documents.Indexes.Models;
+using Azure.Search.Documents.Models;
+using Microsoft.Extensions.Configuration;
+using Microsoft.SemanticKernel.Data;
+using SemanticKernel.IntegrationTests.TestSettings.Memory;
+using Xunit;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.AzureAISearch;
+
+/// <summary>
+/// Helper class for setting up and tearing down Azure AI Search indexes for testing purposes.
+/// </summary>
+public class AzureAISearchVectorStoreFixture : IAsyncLifetime
+{
+    /// <summary>
+    /// Test index name which consists of "hotels-" and the machine name with any non-alphanumeric characters removed.
+    /// </summary>
+#pragma warning disable CA1308 // Normalize strings to uppercase
+    private readonly string _testIndexName = "hotels-" + new Regex("[^a-zA-Z0-9]").Replace(Environment.MachineName.ToLowerInvariant(), "");
+#pragma warning restore CA1308 // Normalize strings to uppercase
+
+    /// <summary>
+    /// Test Configuration setup.
+    /// </summary>
+    private readonly IConfigurationRoot _configuration = new ConfigurationBuilder()
+        .AddJsonFile(path: "testsettings.json", optional: false, reloadOnChange: true)
+        .AddJsonFile(path: "testsettings.development.json", optional: true, reloadOnChange: true)
+        .AddEnvironmentVariables()
+        .AddUserSecrets<AzureAISearchVectorStoreFixture>()
+        .Build();
+
+    /// <summary>
+    /// Initializes a new instance of the <see cref="AzureAISearchVectorStoreFixture"/> class.
+    /// </summary>
+    public AzureAISearchVectorStoreFixture()
+    {
+        var config = this._configuration.GetRequiredSection("AzureAISearch").Get<AzureAISearchConfiguration>();
+        Assert.NotNull(config);
+        this.Config = config;
+        this.SearchIndexClient = new SearchIndexClient(new Uri(config.ServiceUrl), new AzureKeyCredential(config.ApiKey));
+        this.VectorStoreRecordDefinition = new VectorStoreRecordDefinition
+        {
+            Properties = new List<VectorStoreRecordProperty>
+            {
+                new VectorStoreRecordKeyProperty("HotelId", typeof(string)),
+                new VectorStoreRecordDataProperty("HotelName", typeof(string)) { IsFilterable = true, IsFullTextSearchable = true },
+                new VectorStoreRecordDataProperty("Description", typeof(string)),
+                new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(ReadOnlyMemory<float>?)) { Dimensions = 4 },
+                new VectorStoreRecordDataProperty("Tags", typeof(string[])) { IsFilterable = true },
+                new VectorStoreRecordDataProperty("ParkingIncluded", typeof(bool?)) { IsFilterable = true },
+                new VectorStoreRecordDataProperty("LastRenovationDate", typeof(DateTimeOffset?)) { IsFilterable = true },
+                new VectorStoreRecordDataProperty("Rating", typeof(float?))
+            }
+        };
+    }
+
+    /// <summary>
+    /// Gets the Search Index Client to use for connecting to the Azure AI Search service.
+    /// </summary>
+    public SearchIndexClient SearchIndexClient { get; private set; }
+
+    /// <summary>
+    /// Gets the name of the index that this fixture sets up and tears down.
+    /// </summary>
+    public string TestIndexName { get => this._testIndexName; }
+
+    /// <summary>
+    /// Gets the manually created vector store record definition for our test model.
+    /// </summary>
+    public VectorStoreRecordDefinition VectorStoreRecordDefinition { get; private set; }
+
+    /// <summary>
+    /// Gets the configuration for the Azure AI Search service.
+    /// </summary>
+    public AzureAISearchConfiguration Config { get; private set; }
+
+    /// <summary>
+    /// Create / Recreate index and upload documents before test run.
+    /// </summary>
+    /// <returns>An async task.</returns>
+    public async Task InitializeAsync()
+    {
+        await AzureAISearchVectorStoreFixture.DeleteIndexIfExistsAsync(this._testIndexName, this.SearchIndexClient);
+        await AzureAISearchVectorStoreFixture.CreateIndexAsync(this._testIndexName, this.SearchIndexClient);
+        AzureAISearchVectorStoreFixture.UploadDocuments(this.SearchIndexClient.GetSearchClient(this._testIndexName));
+    }
+
+    /// <summary>
+    /// Delete the index after the test run.
+    /// </summary>
+    /// <returns>An async task.</returns>
+    public async Task DisposeAsync()
+    {
+        await AzureAISearchVectorStoreFixture.DeleteIndexIfExistsAsync(this._testIndexName, this.SearchIndexClient);
+    }
+
+    /// <summary>
+    /// Delete the index if it exists.
+    /// </summary>
+    /// <param name="indexName">The name of the index to delete.</param>
+    /// <param name="adminClient">The search index client to use for deleting the index.</param>
+    /// <returns>An async task.</returns>
+    public static async Task DeleteIndexIfExistsAsync(string indexName, SearchIndexClient adminClient)
+    {
+        if (adminClient.GetIndexNames().Any(name => name == indexName))
+        {
+            await adminClient.DeleteIndexAsync(indexName);
+        }
+    }
+
+    /// <summary>
+    /// Create an index with the given name.
+    /// </summary>
+    /// <param name="indexName">The name of the index to create.</param>
+    /// <param name="adminClient">The search index client to use for creating the index.</param>
+    /// <returns>An async task.</returns>
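+    // NOTE: a minimal sketch of how this fixture is consumed. xUnit creates one fixture
+    // instance (and therefore one shared index) for every test class tagged with the
+    // collection declared in AzureAISearchVectorStoreCollectionFixture, e.g.:
+    //
+    //     [Collection("AzureAISearchVectorStoreCollection")]
+    //     public sealed class MyAzureAISearchTests(AzureAISearchVectorStoreFixture fixture)
+    //     {
+    //         // fixture.SearchIndexClient and fixture.TestIndexName are ready here.
+    //     }
+    //
+    // The class name above is illustrative only; the real consumers appear later in this diff.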
+ public static async Task CreateIndexAsync(string indexName, SearchIndexClient adminClient) + { + FieldBuilder fieldBuilder = new(); + var searchFields = fieldBuilder.Build(typeof(Hotel)); + var embeddingfield = searchFields.First(x => x.Name == "DescriptionEmbedding"); + searchFields.Remove(embeddingfield); + searchFields.Add(new VectorSearchField("DescriptionEmbedding", 4, "my-vector-profile")); + + var definition = new SearchIndex(indexName, searchFields); + definition.VectorSearch = new VectorSearch(); + definition.VectorSearch.Algorithms.Add(new HnswAlgorithmConfiguration("my-hnsw-vector-config-1") { Parameters = new HnswParameters { Metric = VectorSearchAlgorithmMetric.Cosine } }); + definition.VectorSearch.Profiles.Add(new VectorSearchProfile("my-vector-profile", "my-hnsw-vector-config-1")); + + var suggester = new SearchSuggester("sg", new[] { "HotelName" }); + definition.Suggesters.Add(suggester); + + await adminClient.CreateOrUpdateIndexAsync(definition); + } + + /// + /// Upload test documents to the index. + /// + /// The client to use for uploading the documents. + public static void UploadDocuments(SearchClient searchClient) + { + IndexDocumentsBatch batch = IndexDocumentsBatch.Create( + IndexDocumentsAction.Upload( + new Hotel() + { + HotelId = "BaseSet-1", + HotelName = "Hotel 1", + Description = "This is a great hotel", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f }, + Tags = new[] { "pool", "air conditioning", "concierge" }, + ParkingIncluded = false, + LastRenovationDate = new DateTimeOffset(1970, 1, 18, 0, 0, 0, TimeSpan.Zero), + Rating = 3.6 + }), + IndexDocumentsAction.Upload( + new Hotel() + { + HotelId = "BaseSet-2", + HotelName = "Hotel 2", + Description = "This is a great hotel", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f }, + Tags = new[] { "pool", "free wifi", "concierge" }, + ParkingIncluded = false, + LastRenovationDate = new DateTimeOffset(1979, 2, 18, 0, 0, 0, TimeSpan.Zero), + Rating = 3.60 + }), + IndexDocumentsAction.Upload( + new Hotel() + { + HotelId = "BaseSet-3", + HotelName = "Hotel 3", + Description = "This is a great hotel", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f }, + Tags = new[] { "air conditioning", "bar", "continental breakfast" }, + ParkingIncluded = true, + LastRenovationDate = new DateTimeOffset(2015, 9, 20, 0, 0, 0, TimeSpan.Zero), + Rating = 4.80 + }), + IndexDocumentsAction.Upload( + new Hotel() + { + HotelId = "BaseSet-4", + HotelName = "Hotel 4", + Description = "This is a great hotel", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f }, + Tags = new[] { "concierge", "view", "24-hour front desk service" }, + ParkingIncluded = true, + LastRenovationDate = new DateTimeOffset(1960, 2, 06, 0, 0, 0, TimeSpan.Zero), + Rating = 4.60 + }) + ); + + searchClient.IndexDocuments(batch); + } + +#pragma warning disable CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable. + public class Hotel + { + [SimpleField(IsKey = true, IsFilterable = true)] + [VectorStoreRecordKey] + public string HotelId { get; set; } + + [SearchableField(IsSortable = true)] + [VectorStoreRecordData(IsFilterable = true, IsFullTextSearchable = true)] + public string HotelName { get; set; } + + [SearchableField(AnalyzerName = LexicalAnalyzerName.Values.EnLucene)] + [VectorStoreRecordData] + public string Description { get; set; } + + [VectorStoreRecordVector(4)] + public ReadOnlyMemory? 
DescriptionEmbedding { get; set; } + + [SearchableField(IsFilterable = true, IsFacetable = true)] + [VectorStoreRecordData(IsFilterable = true)] +#pragma warning disable CA1819 // Properties should not return arrays + public string[] Tags { get; set; } +#pragma warning restore CA1819 // Properties should not return arrays + + [JsonPropertyName("parking_is_included")] + [SimpleField(IsFilterable = true, IsSortable = true, IsFacetable = true)] + [VectorStoreRecordData(IsFilterable = true)] + public bool? ParkingIncluded { get; set; } + + [SimpleField(IsFilterable = true, IsSortable = true, IsFacetable = true)] + [VectorStoreRecordData(IsFilterable = true)] + public DateTimeOffset? LastRenovationDate { get; set; } + + [SimpleField(IsFilterable = true, IsSortable = true, IsFacetable = true)] + [VectorStoreRecordData] + public double? Rating { get; set; } + } +#pragma warning restore CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable. +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreRecordCollectionTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreRecordCollectionTests.cs new file mode 100644 index 000000000000..7f810dc87fbd --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreRecordCollectionTests.cs @@ -0,0 +1,335 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Linq; +using System.Text.Json.Nodes; +using System.Threading.Tasks; +using Azure; +using Azure.Search.Documents.Indexes; +using Microsoft.SemanticKernel.Connectors.AzureAISearch; +using Microsoft.SemanticKernel.Data; +using Xunit; +using Xunit.Abstractions; +using static SemanticKernel.IntegrationTests.Connectors.Memory.AzureAISearch.AzureAISearchVectorStoreFixture; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.AzureAISearch; + +/// +/// Integration tests for class. +/// Tests work with an Azure AI Search Instance. +/// +[Collection("AzureAISearchVectorStoreCollection")] +public sealed class AzureAISearchVectorStoreRecordCollectionTests(ITestOutputHelper output, AzureAISearchVectorStoreFixture fixture) +{ + // If null, all tests will be enabled + private const string SkipReason = "Requires Azure AI Search Service instance up and running"; + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task CollectionExistsReturnsCollectionStateAsync(bool expectedExists) + { + // Arrange. + var collectionName = expectedExists ? fixture.TestIndexName : "nonexistentcollection"; + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, collectionName); + + // Act. + var actual = await sut.CollectionExistsAsync(); + + // Assert. + Assert.Equal(expectedExists, actual); + } + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanCreateACollectionUpsertAndGetAsync(bool useRecordDefinition) + { + // Arrange + var hotel = CreateTestHotel("Upsert-1"); + var testCollectionName = $"{fixture.TestIndexName}-createtest"; + var options = new AzureAISearchVectorStoreRecordCollectionOptions + { + VectorStoreRecordDefinition = useRecordDefinition ? 
fixture.VectorStoreRecordDefinition : null + }; + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, testCollectionName, options); + + await sut.DeleteCollectionAsync(); + + // Act + await sut.CreateCollectionAsync(); + var upsertResult = await sut.UpsertAsync(hotel); + var getResult = await sut.GetAsync("Upsert-1"); + + // Assert + var collectionExistResult = await sut.CollectionExistsAsync(); + Assert.True(collectionExistResult); + await sut.DeleteCollectionAsync(); + + Assert.NotNull(upsertResult); + Assert.Equal("Upsert-1", upsertResult); + + Assert.NotNull(getResult); + Assert.Equal(hotel.HotelName, getResult.HotelName); + Assert.Equal(hotel.Description, getResult.Description); + Assert.NotNull(getResult.DescriptionEmbedding); + Assert.Equal(hotel.DescriptionEmbedding?.ToArray(), getResult.DescriptionEmbedding?.ToArray()); + Assert.Equal(hotel.Tags, getResult.Tags); + Assert.Equal(hotel.ParkingIncluded, getResult.ParkingIncluded); + Assert.Equal(hotel.LastRenovationDate, getResult.LastRenovationDate); + Assert.Equal(hotel.Rating, getResult.Rating); + + // Output + output.WriteLine(collectionExistResult.ToString()); + output.WriteLine(upsertResult); + output.WriteLine(getResult.ToString()); + } + + [Fact(Skip = SkipReason)] + public async Task ItCanDeleteCollectionAsync() + { + // Arrange + var tempCollectionName = fixture.TestIndexName + "-delete"; + await AzureAISearchVectorStoreFixture.CreateIndexAsync(tempCollectionName, fixture.SearchIndexClient); + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, tempCollectionName); + + // Act + await sut.DeleteCollectionAsync(); + + // Assert + Assert.False(await sut.CollectionExistsAsync()); + } + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanUpsertDocumentToVectorStoreAsync(bool useRecordDefinition) + { + // Arrange + var options = new AzureAISearchVectorStoreRecordCollectionOptions + { + VectorStoreRecordDefinition = useRecordDefinition ? 
fixture.VectorStoreRecordDefinition : null + }; + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, fixture.TestIndexName, options); + + // Act + var hotel = CreateTestHotel("Upsert-1"); + var upsertResult = await sut.UpsertAsync(hotel); + var getResult = await sut.GetAsync("Upsert-1"); + + // Assert + Assert.NotNull(upsertResult); + Assert.Equal("Upsert-1", upsertResult); + + Assert.NotNull(getResult); + Assert.Equal(hotel.HotelName, getResult.HotelName); + Assert.Equal(hotel.Description, getResult.Description); + Assert.NotNull(getResult.DescriptionEmbedding); + Assert.Equal(hotel.DescriptionEmbedding?.ToArray(), getResult.DescriptionEmbedding?.ToArray()); + Assert.Equal(hotel.Tags, getResult.Tags); + Assert.Equal(hotel.ParkingIncluded, getResult.ParkingIncluded); + Assert.Equal(hotel.LastRenovationDate, getResult.LastRenovationDate); + Assert.Equal(hotel.Rating, getResult.Rating); + + // Output + output.WriteLine(upsertResult); + output.WriteLine(getResult.ToString()); + } + + [Fact(Skip = SkipReason)] + public async Task ItCanUpsertManyDocumentsToVectorStoreAsync() + { + // Arrange + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, fixture.TestIndexName); + + // Act + var results = sut.UpsertBatchAsync( + [ + CreateTestHotel("UpsertMany-1"), + CreateTestHotel("UpsertMany-2"), + CreateTestHotel("UpsertMany-3"), + ]); + + // Assert + Assert.NotNull(results); + var resultsList = await results.ToListAsync(); + + Assert.Equal(3, resultsList.Count); + Assert.Contains("UpsertMany-1", resultsList); + Assert.Contains("UpsertMany-2", resultsList); + Assert.Contains("UpsertMany-3", resultsList); + + // Output + foreach (var result in resultsList) + { + output.WriteLine(result); + } + } + + [Theory(Skip = SkipReason)] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task ItCanGetDocumentFromVectorStoreAsync(bool includeVectors, bool useRecordDefinition) + { + // Arrange + var options = new AzureAISearchVectorStoreRecordCollectionOptions + { + VectorStoreRecordDefinition = useRecordDefinition ? fixture.VectorStoreRecordDefinition : null + }; + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, fixture.TestIndexName, options); + + // Act + var getResult = await sut.GetAsync("BaseSet-1", new GetRecordOptions { IncludeVectors = includeVectors }); + + // Assert + Assert.NotNull(getResult); + + Assert.Equal("Hotel 1", getResult.HotelName); + Assert.Equal("This is a great hotel", getResult.Description); + Assert.Equal(includeVectors, getResult.DescriptionEmbedding != null); + if (includeVectors) + { + Assert.Equal(new[] { 30f, 31f, 32f, 33f }, getResult.DescriptionEmbedding!.Value.ToArray()); + } + Assert.Equal(new[] { "pool", "air conditioning", "concierge" }, getResult.Tags); + Assert.False(getResult.ParkingIncluded); + Assert.Equal(new DateTimeOffset(1970, 1, 18, 0, 0, 0, TimeSpan.Zero), getResult.LastRenovationDate); + Assert.Equal(3.6, getResult.Rating); + + // Output + output.WriteLine(getResult.ToString()); + } + + [Fact(Skip = SkipReason)] + public async Task ItCanGetManyDocumentsFromVectorStoreAsync() + { + // Arrange + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, fixture.TestIndexName); + + // Act + // Also include one non-existing key to test that the operation does not fail for these and returns only the found ones. 
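+        // Note that GetBatchAsync returns a lazy async stream that is only enumerated below
+        // via ToListAsync; missing keys are skipped rather than yielding nulls or throwing,
+        // which is why the assertion expects four results from five requested keys.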
+ var hotels = sut.GetBatchAsync(["BaseSet-1", "BaseSet-2", "BaseSet-3", "BaseSet-5", "BaseSet-4"], new GetRecordOptions { IncludeVectors = true }); + + // Assert + Assert.NotNull(hotels); + var hotelsList = await hotels.ToListAsync(); + Assert.Equal(4, hotelsList.Count); + + // Output + foreach (var hotel in hotelsList) + { + output.WriteLine(hotel.ToString()); + } + } + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanRemoveDocumentFromVectorStoreAsync(bool useRecordDefinition) + { + // Arrange + var options = new AzureAISearchVectorStoreRecordCollectionOptions + { + VectorStoreRecordDefinition = useRecordDefinition ? fixture.VectorStoreRecordDefinition : null + }; + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, fixture.TestIndexName); + await sut.UpsertAsync(CreateTestHotel("Remove-1")); + + // Act + await sut.DeleteAsync("Remove-1"); + // Also delete a non-existing key to test that the operation does not fail for these. + await sut.DeleteAsync("Remove-2"); + + // Assert + Assert.Null(await sut.GetAsync("Remove-1", new GetRecordOptions { IncludeVectors = true })); + } + + [Fact(Skip = SkipReason)] + public async Task ItCanRemoveManyDocumentsFromVectorStoreAsync() + { + // Arrange + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, fixture.TestIndexName); + await sut.UpsertAsync(CreateTestHotel("RemoveMany-1")); + await sut.UpsertAsync(CreateTestHotel("RemoveMany-2")); + await sut.UpsertAsync(CreateTestHotel("RemoveMany-3")); + + // Act + // Also include a non-existing key to test that the operation does not fail for these. + await sut.DeleteBatchAsync(["RemoveMany-1", "RemoveMany-2", "RemoveMany-3", "RemoveMany-4"]); + + // Assert + Assert.Null(await sut.GetAsync("RemoveMany-1", new GetRecordOptions { IncludeVectors = true })); + Assert.Null(await sut.GetAsync("RemoveMany-2", new GetRecordOptions { IncludeVectors = true })); + Assert.Null(await sut.GetAsync("RemoveMany-3", new GetRecordOptions { IncludeVectors = true })); + } + + [Fact(Skip = SkipReason)] + public async Task ItReturnsNullWhenGettingNonExistentRecordAsync() + { + // Arrange + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, fixture.TestIndexName); + + // Act & Assert + Assert.Null(await sut.GetAsync("BaseSet-5", new GetRecordOptions { IncludeVectors = true })); + } + + [Fact(Skip = SkipReason)] + public async Task ItThrowsOperationExceptionForFailedConnectionAsync() + { + // Arrange + var searchIndexClient = new SearchIndexClient(new Uri("https://localhost:12345"), new AzureKeyCredential("12345")); + var sut = new AzureAISearchVectorStoreRecordCollection(searchIndexClient, fixture.TestIndexName); + + // Act & Assert + await Assert.ThrowsAsync(async () => await sut.GetAsync("BaseSet-1", new GetRecordOptions { IncludeVectors = true })); + } + + [Fact(Skip = SkipReason)] + public async Task ItThrowsOperationExceptionForFailedAuthenticationAsync() + { + // Arrange + var searchIndexClient = new SearchIndexClient(new Uri(fixture.Config.ServiceUrl), new AzureKeyCredential("12345")); + var sut = new AzureAISearchVectorStoreRecordCollection(searchIndexClient, fixture.TestIndexName); + + // Act & Assert + await Assert.ThrowsAsync(async () => await sut.GetAsync("BaseSet-1", new GetRecordOptions { IncludeVectors = true })); + } + + [Fact(Skip = SkipReason)] + public async Task ItThrowsMappingExceptionForFailedMapperAsync() + { + // Arrange + var options = new 
AzureAISearchVectorStoreRecordCollectionOptions { JsonObjectCustomMapper = new FailingMapper() }; + var sut = new AzureAISearchVectorStoreRecordCollection(fixture.SearchIndexClient, fixture.TestIndexName, options); + + // Act & Assert + await Assert.ThrowsAsync(async () => await sut.GetAsync("BaseSet-1", new GetRecordOptions { IncludeVectors = true })); + } + + private static Hotel CreateTestHotel(string hotelId) => new() + { + HotelId = hotelId, + HotelName = $"MyHotel {hotelId}", + Description = "My Hotel is great.", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f }, + Tags = ["pool", "air conditioning", "concierge"], + ParkingIncluded = true, + LastRenovationDate = new DateTimeOffset(1970, 1, 18, 0, 0, 0, TimeSpan.Zero), + Rating = 3.6 + }; + + private sealed class FailingMapper : IVectorStoreRecordMapper + { + public JsonObject MapFromDataToStorageModel(Hotel dataModel) + { + throw new NotImplementedException(); + } + + public Hotel MapFromStorageToDataModel(JsonObject storageModel, StorageToDataModelMapperOptions options) + { + throw new NotImplementedException(); + } + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreTests.cs new file mode 100644 index 000000000000..7bda8cb0fff9 --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/AzureAISearch/AzureAISearchVectorStoreTests.cs @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Connectors.AzureAISearch; +using Xunit; +using Xunit.Abstractions; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.AzureAISearch; + +/// +/// Contains integration tests for the class. +/// Tests work with an Azure AI Search Instance. 
+/// +[Collection("AzureAISearchVectorStoreCollection")] +public class AzureAISearchVectorStoreTests(ITestOutputHelper output, AzureAISearchVectorStoreFixture fixture) +{ + // If null, all tests will be enabled + private const string SkipReason = "Requires Azure AI Search Service instance up and running"; + + [Fact(Skip = SkipReason)] + public async Task ItCanGetAListOfExistingCollectionNamesAsync() + { + // Arrange + var additionalCollectionName = fixture.TestIndexName + "-listnames"; + await AzureAISearchVectorStoreFixture.DeleteIndexIfExistsAsync(additionalCollectionName, fixture.SearchIndexClient); + await AzureAISearchVectorStoreFixture.CreateIndexAsync(additionalCollectionName, fixture.SearchIndexClient); + var sut = new AzureAISearchVectorStore(fixture.SearchIndexClient); + + // Act + var collectionNames = await sut.ListCollectionNamesAsync().ToListAsync(); + + // Assert + Assert.Equal(2, collectionNames.Where(x => x.StartsWith(fixture.TestIndexName, StringComparison.InvariantCultureIgnoreCase)).Count()); + Assert.Contains(fixture.TestIndexName, collectionNames); + Assert.Contains(additionalCollectionName, collectionNames); + + // Output + output.WriteLine(string.Join(",", collectionNames)); + + // Cleanup + await AzureAISearchVectorStoreFixture.DeleteIndexIfExistsAsync(additionalCollectionName, fixture.SearchIndexClient); + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeAllTypes.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeAllTypes.cs new file mode 100644 index 000000000000..63216da7046f --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeAllTypes.cs @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using Microsoft.SemanticKernel.Data; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone; + +#pragma warning disable CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider adding the 'required' modifier or declaring as nullable. +public record PineconeAllTypes() +#pragma warning restore CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider adding the 'required' modifier or declaring as nullable. +{ + [VectorStoreRecordKey] + public string Id { get; init; } + + [VectorStoreRecordData] + public bool BoolProperty { get; set; } + [VectorStoreRecordData] + public bool? NullableBoolProperty { get; set; } + [VectorStoreRecordData] + public string StringProperty { get; set; } + [VectorStoreRecordData] + public string? NullableStringProperty { get; set; } + [VectorStoreRecordData] + public int IntProperty { get; set; } + [VectorStoreRecordData] + public int? NullableIntProperty { get; set; } + [VectorStoreRecordData] + public long LongProperty { get; set; } + [VectorStoreRecordData] + public long? NullableLongProperty { get; set; } + [VectorStoreRecordData] + public float FloatProperty { get; set; } + [VectorStoreRecordData] + public float? NullableFloatProperty { get; set; } + [VectorStoreRecordData] + public double DoubleProperty { get; set; } + [VectorStoreRecordData] + public double? NullableDoubleProperty { get; set; } + [VectorStoreRecordData] + public decimal DecimalProperty { get; set; } + [VectorStoreRecordData] + public decimal? 
NullableDecimalProperty { get; set; } + +#pragma warning disable CA1819 // Properties should not return arrays + [VectorStoreRecordData] + public string[] StringArray { get; set; } + [VectorStoreRecordData] + public string[]? NullableStringArray { get; set; } +#pragma warning restore CA1819 // Properties should not return arrays + + [VectorStoreRecordData] + public List StringList { get; set; } + [VectorStoreRecordData] + public List? NullableStringList { get; set; } + + [VectorStoreRecordData] + public IReadOnlyCollection Collection { get; set; } + [VectorStoreRecordData] + public IEnumerable Enumerable { get; set; } + + [VectorStoreRecordVector(Dimensions: 8, IndexKind: null, DistanceFunction: DistanceFunction.DotProductSimilarity)] + public ReadOnlyMemory? Embedding { get; set; } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeHotel.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeHotel.cs new file mode 100644 index 000000000000..c648b10f2c62 --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeHotel.cs @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Text.Json.Serialization; +using Microsoft.SemanticKernel.Data; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone; + +#pragma warning disable CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider adding the 'required' modifier or declaring as nullable. +public record PineconeHotel() +#pragma warning restore CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider adding the 'required' modifier or declaring as nullable. +{ + [VectorStoreRecordKey] + public string HotelId { get; init; } + + [VectorStoreRecordData] + public string HotelName { get; set; } + + [JsonPropertyName("code_of_the_hotel")] + [VectorStoreRecordData] + public int HotelCode { get; set; } + + [VectorStoreRecordData] + public float HotelRating { get; set; } + + [JsonPropertyName("json_parking")] + [VectorStoreRecordData(StoragePropertyName = "parking_is_included")] + public bool ParkingIncluded { get; set; } + + [VectorStoreRecordData] + public List Tags { get; set; } = []; + + [VectorStoreRecordData] + public string Description { get; set; } + + [VectorStoreRecordVector(Dimensions: 8, IndexKind: null, DistanceFunction: DistanceFunction.DotProductSimilarity)] + public ReadOnlyMemory DescriptionEmbedding { get; set; } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeUserSecretsExtensions.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeUserSecretsExtensions.cs new file mode 100644 index 000000000000..1644b7427e99 --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeUserSecretsExtensions.cs @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.IO; +using System.Reflection; +using System.Text.Json; +using Microsoft.Extensions.Configuration.UserSecrets; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone; +public static class PineconeUserSecretsExtensions +{ + public const string PineconeApiKeyUserSecretEntry = "PineconeApiKey"; + + public static string ReadPineconeApiKey() + => JsonSerializer.Deserialize>( + File.ReadAllText(PathHelper.GetSecretsPathFromSecretsId( + typeof(PineconeUserSecretsExtensions).Assembly.GetCustomAttribute()! 
+ .UserSecretsId)))![PineconeApiKeyUserSecretEntry].Trim(); + + public static bool ContainsPineconeApiKey() + { + var userSecretsIdAttribute = typeof(PineconeUserSecretsExtensions).Assembly.GetCustomAttribute(); + if (userSecretsIdAttribute == null) + { + return false; + } + + var path = PathHelper.GetSecretsPathFromSecretsId(userSecretsIdAttribute.UserSecretsId); + if (!File.Exists(path)) + { + return false; + } + + return JsonSerializer.Deserialize>( + File.ReadAllText(path))!.ContainsKey(PineconeApiKeyUserSecretEntry); + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeVectorStoreFixture.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeVectorStoreFixture.cs new file mode 100644 index 000000000000..28559cb0d19f --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeVectorStoreFixture.cs @@ -0,0 +1,345 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Net.Http; +using System.Text.RegularExpressions; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Connectors.Pinecone; +using Microsoft.SemanticKernel.Data; +using Pinecone.Grpc; +using Xunit; +using Sdk = Pinecone; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone; + +public class PineconeVectorStoreFixture : IAsyncLifetime +{ + private const int MaxAttemptCount = 100; + private const int DelayInterval = 300; + + public string IndexName { get; } = "sk-index" +#pragma warning disable CA1308 // Normalize strings to uppercase + + new Regex("[^a-zA-Z0-9]", RegexOptions.None, matchTimeout: new TimeSpan(0, 0, 10)).Replace(Environment.MachineName.ToLowerInvariant(), ""); +#pragma warning restore CA1308 // Normalize strings to uppercase + + public Sdk.PineconeClient Client { get; private set; } = null!; + public PineconeVectorStore VectorStore { get; private set; } = null!; + public PineconeVectorStoreRecordCollection HotelRecordCollection { get; set; } = null!; + public PineconeVectorStoreRecordCollection AllTypesRecordCollection { get; set; } = null!; + public PineconeVectorStoreRecordCollection HotelRecordCollectionWithCustomNamespace { get; set; } = null!; + public IVectorStoreRecordCollection HotelRecordCollectionFromVectorStore { get; set; } = null!; + + public virtual Sdk.Index Index { get; set; } = null!; + + public virtual async Task InitializeAsync() + { + this.Client = new Sdk.PineconeClient(PineconeUserSecretsExtensions.ReadPineconeApiKey()); + this.VectorStore = new PineconeVectorStore(this.Client); + + var hotelRecordDefinition = new VectorStoreRecordDefinition + { + Properties = + [ + new VectorStoreRecordKeyProperty(nameof(PineconeHotel.HotelId), typeof(string)), + new VectorStoreRecordDataProperty(nameof(PineconeHotel.HotelName), typeof(string)), + new VectorStoreRecordDataProperty(nameof(PineconeHotel.HotelCode), typeof(int)), + new VectorStoreRecordDataProperty(nameof(PineconeHotel.ParkingIncluded), typeof(bool)) { StoragePropertyName = "parking_is_included" }, + new VectorStoreRecordDataProperty(nameof(PineconeHotel.HotelRating), typeof(float)), + new VectorStoreRecordDataProperty(nameof(PineconeHotel.Tags), typeof(List)), + new VectorStoreRecordDataProperty(nameof(PineconeHotel.Description), typeof(string)), + new VectorStoreRecordVectorProperty(nameof(PineconeHotel.DescriptionEmbedding), typeof(ReadOnlyMemory)) { Dimensions = 8, DistanceFunction = DistanceFunction.DotProductSimilarity } + ] + }; + + var 
allTypesRecordDefinition = new VectorStoreRecordDefinition + { + Properties = + [ + new VectorStoreRecordKeyProperty(nameof(PineconeAllTypes.Id), typeof(string)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.BoolProperty), typeof(bool)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.NullableBoolProperty), typeof(bool?)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.StringProperty), typeof(string)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.NullableStringProperty), typeof(string)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.IntProperty), typeof(int)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.NullableIntProperty), typeof(int?)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.LongProperty), typeof(long)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.NullableLongProperty), typeof(long?)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.FloatProperty), typeof(float)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.NullableFloatProperty), typeof(float?)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.DoubleProperty), typeof(double)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.NullableDoubleProperty), typeof(double?)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.DecimalProperty), typeof(decimal)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.NullableDecimalProperty), typeof(decimal?)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.StringArray), typeof(string[])), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.NullableStringArray), typeof(string[])), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.StringList), typeof(List)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.NullableStringList), typeof(List)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.Collection), typeof(IReadOnlyCollection)), + new VectorStoreRecordDataProperty(nameof(PineconeAllTypes.Enumerable), typeof(IEnumerable)), + new VectorStoreRecordVectorProperty(nameof(PineconeAllTypes.Embedding), typeof(ReadOnlyMemory?)) { Dimensions = 8, DistanceFunction = DistanceFunction.DotProductSimilarity } + ] + }; + + this.HotelRecordCollection = new PineconeVectorStoreRecordCollection( + this.Client, + this.IndexName, + new PineconeVectorStoreRecordCollectionOptions + { + VectorStoreRecordDefinition = hotelRecordDefinition + }); + + this.AllTypesRecordCollection = new PineconeVectorStoreRecordCollection( + this.Client, + this.IndexName, + new PineconeVectorStoreRecordCollectionOptions + { + VectorStoreRecordDefinition = allTypesRecordDefinition + }); + + this.HotelRecordCollectionWithCustomNamespace = new PineconeVectorStoreRecordCollection( + this.Client, + this.IndexName, + new PineconeVectorStoreRecordCollectionOptions + { + VectorStoreRecordDefinition = hotelRecordDefinition, + IndexNamespace = "my-namespace" + }); + + this.HotelRecordCollectionFromVectorStore = this.VectorStore.GetCollection( + this.IndexName, + hotelRecordDefinition); + + await this.ClearIndexesAsync(); + await this.CreateIndexAndWaitAsync(); + await this.AddSampleDataAsync(); + } + + private async Task CreateIndexAndWaitAsync() + { + var attemptCount = 0; + + await this.HotelRecordCollection.CreateCollectionAsync(); + + do + { + await Task.Delay(DelayInterval); + attemptCount++; + this.Index = await this.Client.GetIndex(this.IndexName); + } while (!this.Index.Status.IsReady && attemptCount <= 
MaxAttemptCount); + + if (!this.Index.Status.IsReady) + { + throw new InvalidOperationException("'Create index' operation didn't complete in time. Index name: " + this.IndexName); + } + } + + public async Task DisposeAsync() + { + if (this.Client is not null) + { + await this.ClearIndexesAsync(); + this.Client.Dispose(); + } + } + + private async Task AddSampleDataAsync() + { + var fiveSeasons = new PineconeHotel + { + HotelId = "five-seasons", + HotelName = "Five Seasons Hotel", + Description = "Great service any season.", + HotelCode = 7, + HotelRating = 4.5f, + ParkingIncluded = true, + DescriptionEmbedding = new ReadOnlyMemory([7.5f, 71.0f, 71.5f, 72.0f, 72.5f, 73.0f, 73.5f, 74.0f]), + Tags = ["wi-fi", "sauna", "gym", "pool"] + }; + + var vacationInn = new PineconeHotel + { + HotelId = "vacation-inn", + HotelName = "Vacation Inn Hotel", + Description = "On vacation? Stay with us.", + HotelCode = 11, + HotelRating = 4.3f, + ParkingIncluded = true, + DescriptionEmbedding = new ReadOnlyMemory([17.5f, 721.0f, 731.5f, 742.0f, 762.5f, 783.0f, 793.5f, 704.0f]), + Tags = ["wi-fi", "breakfast", "gym"] + }; + + var bestEastern = new PineconeHotel + { + HotelId = "best-eastern", + HotelName = "Best Eastern Hotel", + Description = "Best hotel east of New York.", + HotelCode = 42, + HotelRating = 4.7f, + ParkingIncluded = true, + DescriptionEmbedding = new ReadOnlyMemory([47.5f, 421.0f, 741.5f, 744.0f, 742.5f, 483.0f, 743.5f, 744.0f]), + Tags = ["wi-fi", "breakfast", "gym"] + }; + + var stats = await this.Index.DescribeStats(); + var vectorCountBefore = stats.TotalVectorCount; + + // use both Upsert and BatchUpsert methods and also use record collections created directly and using vector store + await this.HotelRecordCollection.UpsertAsync(fiveSeasons); + vectorCountBefore = await this.VerifyVectorCountModifiedAsync(vectorCountBefore, delta: 1); + + await this.HotelRecordCollectionFromVectorStore.UpsertBatchAsync([vacationInn, bestEastern]).ToListAsync(); + vectorCountBefore = await this.VerifyVectorCountModifiedAsync(vectorCountBefore, delta: 2); + + var allTypes1 = new PineconeAllTypes + { + Id = "all-types-1", + BoolProperty = true, + NullableBoolProperty = false, + StringProperty = "string prop 1", + NullableStringProperty = "nullable prop 1", + IntProperty = 1, + NullableIntProperty = 10, + LongProperty = 100L, + NullableLongProperty = 1000L, + FloatProperty = 10.5f, + NullableFloatProperty = 100.5f, + DoubleProperty = 23.75d, + NullableDoubleProperty = 233.75d, + DecimalProperty = 50.75m, + NullableDecimalProperty = 500.75m, + StringArray = ["one", "two"], + NullableStringArray = ["five", "six"], + StringList = ["eleven", "twelve"], + NullableStringList = ["fifteen", "sixteen"], + Collection = ["Foo", "Bar"], + Enumerable = ["another", "and another"], + Embedding = new ReadOnlyMemory([1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f, 8.5f]) + }; + + var allTypes2 = new PineconeAllTypes + { + Id = "all-types-2", + BoolProperty = false, + NullableBoolProperty = null, + StringProperty = "string prop 2", + NullableStringProperty = null, + IntProperty = 2, + NullableIntProperty = null, + LongProperty = 200L, + NullableLongProperty = null, + FloatProperty = 20.5f, + NullableFloatProperty = null, + DoubleProperty = 43.75, + NullableDoubleProperty = null, + DecimalProperty = 250.75M, + NullableDecimalProperty = null, + StringArray = [], + NullableStringArray = null, + StringList = [], + NullableStringList = null, + Collection = [], + Enumerable = [], + Embedding = new ReadOnlyMemory([10.5f, 20.5f, 30.5f, 
40.5f, 50.5f, 60.5f, 70.5f, 80.5f]) + }; + + await this.AllTypesRecordCollection.UpsertBatchAsync([allTypes1, allTypes2]).ToListAsync(); + vectorCountBefore = await this.VerifyVectorCountModifiedAsync(vectorCountBefore, delta: 2); + + var custom = new PineconeHotel + { + HotelId = "custom-hotel", + HotelName = "Custom Hotel", + Description = "Everything customizable!", + HotelCode = 17, + HotelRating = 4.25f, + ParkingIncluded = true, + DescriptionEmbedding = new ReadOnlyMemory([147.5f, 1421.0f, 1741.5f, 1744.0f, 1742.5f, 1483.0f, 1743.5f, 1744.0f]), + }; + + await this.HotelRecordCollectionWithCustomNamespace.UpsertAsync(custom); + vectorCountBefore = await this.VerifyVectorCountModifiedAsync(vectorCountBefore, delta: 1); + } + + public async Task VerifyVectorCountModifiedAsync(uint vectorCountBefore, int delta) + { + var attemptCount = 0; + Sdk.IndexStats stats; + + do + { + await Task.Delay(DelayInterval); + attemptCount++; + stats = await this.Index.DescribeStats(); + } while (stats.TotalVectorCount != vectorCountBefore + delta && attemptCount <= MaxAttemptCount); + + if (stats.TotalVectorCount != vectorCountBefore + delta) + { + throw new InvalidOperationException("'Upsert'/'Delete' operation didn't complete in time."); + } + + return stats.TotalVectorCount; + } + + public async Task DeleteAndWaitAsync(IEnumerable ids, string? indexNamespace = null) + { + var stats = await this.Index.DescribeStats(); + var vectorCountBefore = stats.Namespaces.Single(x => x.Name == (indexNamespace ?? "")).VectorCount; + var idCount = ids.Count(); + + var attemptCount = 0; + await this.Index.Delete(ids, indexNamespace); + long vectorCount; + do + { + await Task.Delay(DelayInterval); + attemptCount++; + stats = await this.Index.DescribeStats(); + vectorCount = stats.Namespaces.Single(x => x.Name == (indexNamespace ?? "")).VectorCount; + } while (vectorCount > vectorCountBefore - idCount && attemptCount <= MaxAttemptCount); + + if (vectorCount > vectorCountBefore - idCount) + { + throw new InvalidOperationException("'Delete' operation didn't complete in time."); + } + } + + private async Task ClearIndexesAsync() + { + var indexes = await this.Client.ListIndexes(); + var deletions = indexes.Select(x => this.DeleteExistingIndexAndWaitAsync(x.Name)); + + await Task.WhenAll(deletions); + } + + private async Task DeleteExistingIndexAndWaitAsync(string indexName) + { + var exists = true; + try + { + var attemptCount = 0; + await this.Client.DeleteIndex(indexName); + + do + { + await Task.Delay(DelayInterval); + var indexes = (await this.Client.ListIndexes()).Select(x => x.Name).ToArray(); + if (indexes.Length == 0 || !indexes.Contains(indexName)) + { + exists = false; + } + } while (exists && attemptCount <= MaxAttemptCount); + } + catch (HttpRequestException ex) when (ex.Message.Contains("NOT_FOUND")) + { + // index was already deleted + exists = false; + } + + if (exists) + { + throw new InvalidOperationException("'Delete index' operation didn't complete in time. Index name: " + indexName); + } + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeVectorStoreRecordCollectionTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeVectorStoreRecordCollectionTests.cs new file mode 100644 index 000000000000..411225101ffc --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeVectorStoreRecordCollectionTests.cs @@ -0,0 +1,564 @@ +// Copyright (c) Microsoft. All rights reserved. 
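+
+// NOTE: the fixture above never asserts immediately after a write. Pinecone indexes are
+// eventually consistent, so VerifyVectorCountModifiedAsync, DeleteAndWaitAsync and
+// DeleteExistingIndexAndWaitAsync all follow the same shape: poll DescribeStats (or
+// ListIndexes) every DelayInterval milliseconds, give up after MaxAttemptCount attempts,
+// and throw InvalidOperationException if the expected state never materializes. The tests
+// below rely on this to avoid flaky reads against half-applied upserts and deletes.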
+ +using System; +using System.Linq; +using System.Net; +using System.Net.Http; +using System.Threading.Tasks; +using Grpc.Core; +using Microsoft.SemanticKernel.Connectors.Pinecone; +using Microsoft.SemanticKernel.Data; +using Pinecone; +using SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit; +using Xunit; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone; + +[Collection("PineconeVectorStoreTests")] +[PineconeApiKeySetCondition] +public class PineconeVectorStoreRecordCollectionTests(PineconeVectorStoreFixture fixture) : IClassFixture +{ + private PineconeVectorStoreFixture Fixture { get; } = fixture; + + [PineconeFact] + public async Task TryCreateExistingIndexIsNoopAsync() + { + await this.Fixture.HotelRecordCollection.CreateCollectionIfNotExistsAsync(); + } + + [PineconeFact] + public async Task CollectionExistsReturnsTrueForExistingCollectionAsync() + { + var result = await this.Fixture.HotelRecordCollection.CollectionExistsAsync(); + + Assert.True(result); + } + + [PineconeTheory] + [InlineData(true)] + [InlineData(false)] + public async Task BasicGetAsync(bool includeVectors) + { + var fiveSeasons = await this.Fixture.HotelRecordCollection.GetAsync("five-seasons", new GetRecordOptions { IncludeVectors = includeVectors }); + + Assert.NotNull(fiveSeasons); + Assert.Equal("five-seasons", fiveSeasons.HotelId); + Assert.Equal("Five Seasons Hotel", fiveSeasons.HotelName); + Assert.Equal("Great service any season.", fiveSeasons.Description); + Assert.Equal(7, fiveSeasons.HotelCode); + Assert.Equal(4.5f, fiveSeasons.HotelRating); + Assert.True(fiveSeasons.ParkingIncluded); + Assert.Contains("wi-fi", fiveSeasons.Tags); + Assert.Contains("sauna", fiveSeasons.Tags); + Assert.Contains("gym", fiveSeasons.Tags); + Assert.Contains("pool", fiveSeasons.Tags); + + if (includeVectors) + { + Assert.Equal(new ReadOnlyMemory([7.5f, 71.0f, 71.5f, 72.0f, 72.5f, 73.0f, 73.5f, 74.0f]), fiveSeasons.DescriptionEmbedding); + } + else + { + Assert.Equal(new ReadOnlyMemory([]), fiveSeasons.DescriptionEmbedding); + } + } + + [PineconeTheory] + [InlineData(true)] + [InlineData(false)] + public async Task BatchGetAsync(bool collectionFromVectorStore) + { + var hotelsCollection = collectionFromVectorStore + ? this.Fixture.HotelRecordCollection + : this.Fixture.HotelRecordCollectionFromVectorStore; + + var hotels = await hotelsCollection.GetBatchAsync(["five-seasons", "vacation-inn", "best-eastern"]).ToListAsync(); + + var fiveSeasons = hotels.Single(x => x.HotelId == "five-seasons"); + var vacationInn = hotels.Single(x => x.HotelId == "vacation-inn"); + var bestEastern = hotels.Single(x => x.HotelId == "best-eastern"); + + Assert.Equal("Five Seasons Hotel", fiveSeasons.HotelName); + Assert.Equal("Great service any season.", fiveSeasons.Description); + Assert.Equal(7, fiveSeasons.HotelCode); + Assert.Equal(4.5f, fiveSeasons.HotelRating); + Assert.True(fiveSeasons.ParkingIncluded); + Assert.Contains("wi-fi", fiveSeasons.Tags); + Assert.Contains("sauna", fiveSeasons.Tags); + Assert.Contains("gym", fiveSeasons.Tags); + Assert.Contains("pool", fiveSeasons.Tags); + + Assert.Equal("Vacation Inn Hotel", vacationInn.HotelName); + Assert.Equal("On vacation? 
Stay with us.", vacationInn.Description); + Assert.Equal(11, vacationInn.HotelCode); + Assert.Equal(4.3f, vacationInn.HotelRating); + Assert.True(vacationInn.ParkingIncluded); + Assert.Contains("wi-fi", vacationInn.Tags); + Assert.Contains("breakfast", vacationInn.Tags); + Assert.Contains("gym", vacationInn.Tags); + + Assert.Equal("Best Eastern Hotel", bestEastern.HotelName); + Assert.Equal("Best hotel east of New York.", bestEastern.Description); + Assert.Equal(42, bestEastern.HotelCode); + Assert.Equal(4.7f, bestEastern.HotelRating); + Assert.True(bestEastern.ParkingIncluded); + Assert.Contains("wi-fi", bestEastern.Tags); + Assert.Contains("breakfast", bestEastern.Tags); + Assert.Contains("gym", bestEastern.Tags); + } + + [PineconeTheory] + [InlineData(true)] + [InlineData(false)] + public async Task AllTypesBatchGetAsync(bool includeVectors) + { + var allTypes = await this.Fixture.AllTypesRecordCollection.GetBatchAsync(["all-types-1", "all-types-2"], new GetRecordOptions { IncludeVectors = includeVectors }).ToListAsync(); + + var allTypes1 = allTypes.Single(x => x.Id == "all-types-1"); + var allTypes2 = allTypes.Single(x => x.Id == "all-types-2"); + + Assert.True(allTypes1.BoolProperty); + Assert.Equal("string prop 1", allTypes1.StringProperty); + Assert.Equal(1, allTypes1.IntProperty); + Assert.Equal(100L, allTypes1.LongProperty); + Assert.Equal(10.5f, allTypes1.FloatProperty); + Assert.Equal(23.75d, allTypes1.DoubleProperty); + Assert.Equal(50.75m, allTypes1.DecimalProperty); + Assert.Contains("one", allTypes1.StringArray); + Assert.Contains("two", allTypes1.StringArray); + Assert.Contains("eleven", allTypes1.StringList); + Assert.Contains("twelve", allTypes1.StringList); + Assert.Contains("Foo", allTypes1.Collection); + Assert.Contains("Bar", allTypes1.Collection); + Assert.Contains("another", allTypes1.Enumerable); + Assert.Contains("and another", allTypes1.Enumerable); + + Assert.False(allTypes2.BoolProperty); + Assert.Equal("string prop 2", allTypes2.StringProperty); + Assert.Equal(2, allTypes2.IntProperty); + Assert.Equal(200L, allTypes2.LongProperty); + Assert.Equal(20.5f, allTypes2.FloatProperty); + Assert.Equal(43.75d, allTypes2.DoubleProperty); + Assert.Equal(250.75m, allTypes2.DecimalProperty); + Assert.Empty(allTypes2.StringArray); + Assert.Empty(allTypes2.StringList); + Assert.Empty(allTypes2.Collection); + Assert.Empty(allTypes2.Enumerable); + + if (includeVectors) + { + Assert.True(allTypes1.Embedding.HasValue); + Assert.Equal(new ReadOnlyMemory([1.5f, 2.5f, 3.5f, 4.5f, 5.5f, 6.5f, 7.5f, 8.5f]), allTypes1.Embedding.Value); + + Assert.True(allTypes2.Embedding.HasValue); + Assert.Equal(new ReadOnlyMemory([10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f, 70.5f, 80.5f]), allTypes2.Embedding.Value); + } + else + { + Assert.Null(allTypes1.Embedding); + Assert.Null(allTypes2.Embedding); + } + } + + [PineconeFact] + public async Task BatchGetIncludingNonExistingRecordAsync() + { + var hotels = await this.Fixture.HotelRecordCollection.GetBatchAsync(["vacation-inn", "non-existing"]).ToListAsync(); + + Assert.Single(hotels); + var vacationInn = hotels.Single(x => x.HotelId == "vacation-inn"); + + Assert.Equal("Vacation Inn Hotel", vacationInn.HotelName); + Assert.Equal("On vacation? 
Stay with us.", vacationInn.Description); + Assert.Equal(11, vacationInn.HotelCode); + Assert.Equal(4.3f, vacationInn.HotelRating); + Assert.True(vacationInn.ParkingIncluded); + Assert.Contains("wi-fi", vacationInn.Tags); + Assert.Contains("breakfast", vacationInn.Tags); + Assert.Contains("gym", vacationInn.Tags); + } + + [PineconeFact] + public async Task GetNonExistingRecordAsync() + { + var result = await this.Fixture.HotelRecordCollection.GetAsync("non-existing"); + Assert.Null(result); + } + + [PineconeTheory] + [InlineData(true)] + [InlineData(false)] + public async Task GetFromCustomNamespaceAsync(bool includeVectors) + { + var custom = await this.Fixture.HotelRecordCollectionWithCustomNamespace.GetAsync("custom-hotel", new GetRecordOptions { IncludeVectors = includeVectors }); + + Assert.NotNull(custom); + Assert.Equal("custom-hotel", custom.HotelId); + Assert.Equal("Custom Hotel", custom.HotelName); + if (includeVectors) + { + Assert.Equal(new ReadOnlyMemory([147.5f, 1421.0f, 1741.5f, 1744.0f, 1742.5f, 1483.0f, 1743.5f, 1744.0f]), custom.DescriptionEmbedding); + } + else + { + Assert.Equal(new ReadOnlyMemory([]), custom.DescriptionEmbedding); + } + } + + [PineconeFact] + public async Task TryGetVectorLocatedInDefaultNamespaceButLookInCustomNamespaceAsync() + { + var badFiveSeasons = await this.Fixture.HotelRecordCollectionWithCustomNamespace.GetAsync("five-seasons"); + + Assert.Null(badFiveSeasons); + } + + [PineconeFact] + public async Task TryGetVectorLocatedInCustomNamespaceButLookInDefaultNamespaceAsync() + { + var badCustomHotel = await this.Fixture.HotelRecordCollection.GetAsync("custom-hotel"); + + Assert.Null(badCustomHotel); + } + + [PineconeFact] + public async Task DeleteNonExistingRecordAsync() + { + await this.Fixture.HotelRecordCollection.DeleteAsync("non-existing"); + } + + [PineconeFact] + public async Task TryDeleteExistingVectorLocatedInDefaultNamespaceButUseCustomNamespaceDoesNotDoAnythingAsync() + { + await this.Fixture.HotelRecordCollectionWithCustomNamespace.DeleteAsync("five-seasons"); + + var stillThere = await this.Fixture.HotelRecordCollection.GetAsync("five-seasons"); + Assert.NotNull(stillThere); + Assert.Equal("five-seasons", stillThere.HotelId); + } + + [PineconeFact] + public async Task TryDeleteExistingVectorLocatedInCustomNamespaceButUseDefaultNamespaceDoesNotDoAnythingAsync() + { + await this.Fixture.HotelRecordCollection.DeleteAsync("custom-hotel"); + + var stillThere = await this.Fixture.HotelRecordCollectionWithCustomNamespace.GetAsync("custom-hotel"); + Assert.NotNull(stillThere); + Assert.Equal("custom-hotel", stillThere.HotelId); + } + + [PineconeTheory] + [InlineData(true)] + [InlineData(false)] + public async Task InsertGetModifyDeleteVectorAsync(bool collectionFromVectorStore) + { + var langriSha = new PineconeHotel + { + HotelId = "langri-sha", + HotelName = "Langri-Sha Hotel", + Description = "Lorem ipsum", + HotelCode = 100, + HotelRating = 4.2f, + ParkingIncluded = false, + DescriptionEmbedding = new ReadOnlyMemory([1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f]) + }; + + var stats = await this.Fixture.Index.DescribeStats(); + var vectorCountBefore = stats.TotalVectorCount; + + var hotelRecordCollection = collectionFromVectorStore + ? 
this.Fixture.HotelRecordCollectionFromVectorStore + : this.Fixture.HotelRecordCollection; + + // insert + await hotelRecordCollection.UpsertAsync(langriSha); + + vectorCountBefore = await this.Fixture.VerifyVectorCountModifiedAsync(vectorCountBefore, delta: 1); + + var inserted = await hotelRecordCollection.GetAsync("langri-sha", new GetRecordOptions { IncludeVectors = true }); + + Assert.NotNull(inserted); + Assert.Equal(langriSha.HotelName, inserted.HotelName); + Assert.Equal(langriSha.Description, inserted.Description); + Assert.Equal(langriSha.HotelCode, inserted.HotelCode); + Assert.Equal(langriSha.HotelRating, inserted.HotelRating); + Assert.Equal(langriSha.ParkingIncluded, inserted.ParkingIncluded); + Assert.Equal(langriSha.DescriptionEmbedding, inserted.DescriptionEmbedding); + + langriSha.Description += " dolor sit amet"; + langriSha.ParkingIncluded = true; + langriSha.DescriptionEmbedding = new ReadOnlyMemory([11f, 12f, 13f, 14f, 15f, 16f, 17f, 18f]); + + // update + await hotelRecordCollection.UpsertAsync(langriSha); + + // this is not great but no vectors are added so we can't query status for number of vectors like we do for insert/delete + await Task.Delay(2000); + + var updated = await hotelRecordCollection.GetAsync("langri-sha", new GetRecordOptions { IncludeVectors = true }); + + Assert.NotNull(updated); + Assert.Equal(langriSha.HotelName, updated.HotelName); + Assert.Equal(langriSha.Description, updated.Description); + Assert.Equal(langriSha.HotelCode, updated.HotelCode); + Assert.Equal(langriSha.HotelRating, updated.HotelRating); + Assert.Equal(langriSha.ParkingIncluded, updated.ParkingIncluded); + Assert.Equal(langriSha.DescriptionEmbedding, updated.DescriptionEmbedding); + + // delete + await hotelRecordCollection.DeleteAsync("langri-sha"); + + await this.Fixture.VerifyVectorCountModifiedAsync(vectorCountBefore, delta: -1); + } + + [PineconeFact] + public async Task UseCollectionExistsOnNonExistingStoreReturnsFalseAsync() + { + var incorrectRecordStore = new PineconeVectorStoreRecordCollection( + this.Fixture.Client, + "incorrect"); + + var result = await incorrectRecordStore.CollectionExistsAsync(); + + Assert.False(result); + } + + [PineconeFact] + public async Task UseNonExistingIndexThrowsAsync() + { + var incorrectRecordStore = new PineconeVectorStoreRecordCollection( + this.Fixture.Client, + "incorrect"); + + var statusCode = (await Assert.ThrowsAsync( + () => incorrectRecordStore.GetAsync("best-eastern"))).StatusCode; + + Assert.Equal(HttpStatusCode.NotFound, statusCode); + } + + [PineconeFact] + public async Task UseRecordStoreWithCustomMapperAsync() + { + var recordStore = new PineconeVectorStoreRecordCollection( + this.Fixture.Client, + this.Fixture.IndexName, + new PineconeVectorStoreRecordCollectionOptions { VectorCustomMapper = new CustomHotelRecordMapper() }); + + var vacationInn = await recordStore.GetAsync("vacation-inn", new GetRecordOptions { IncludeVectors = true }); + + Assert.NotNull(vacationInn); + Assert.Equal("Custom Vacation Inn Hotel", vacationInn.HotelName); + Assert.Equal("On vacation? 
Stay with us.", vacationInn.Description); + Assert.Equal(11, vacationInn.HotelCode); + Assert.Equal(4.3f, vacationInn.HotelRating); + Assert.True(vacationInn.ParkingIncluded); + Assert.Contains("wi-fi", vacationInn.Tags); + Assert.Contains("breakfast", vacationInn.Tags); + Assert.Contains("gym", vacationInn.Tags); + } + + private sealed class CustomHotelRecordMapper : IVectorStoreRecordMapper + { + public Vector MapFromDataToStorageModel(PineconeHotel dataModel) + { + var metadata = new MetadataMap + { + [nameof(PineconeHotel.HotelName)] = dataModel.HotelName, + [nameof(PineconeHotel.Description)] = dataModel.Description, + [nameof(PineconeHotel.HotelCode)] = dataModel.HotelCode, + [nameof(PineconeHotel.HotelRating)] = dataModel.HotelRating, + ["parking_is_included"] = dataModel.ParkingIncluded, + [nameof(PineconeHotel.Tags)] = dataModel.Tags.ToArray(), + }; + + return new Vector + { + Id = dataModel.HotelId, + Values = dataModel.DescriptionEmbedding.ToArray(), + Metadata = metadata, + }; + } + + public PineconeHotel MapFromStorageToDataModel(Vector storageModel, StorageToDataModelMapperOptions options) + { + if (storageModel.Metadata == null) + { + throw new InvalidOperationException("Missing metadata."); + } + + return new PineconeHotel + { + HotelId = storageModel.Id, + HotelName = "Custom " + (string)storageModel.Metadata[nameof(PineconeHotel.HotelName)].Inner!, + Description = (string)storageModel.Metadata[nameof(PineconeHotel.Description)].Inner!, + HotelCode = (int)(double)storageModel.Metadata[nameof(PineconeHotel.HotelCode)].Inner!, + HotelRating = (float)(double)storageModel.Metadata[nameof(PineconeHotel.HotelRating)].Inner!, + ParkingIncluded = (bool)storageModel.Metadata["parking_is_included"].Inner!, + Tags = ((MetadataValue[])storageModel.Metadata[nameof(PineconeHotel.Tags)].Inner!)!.Select(x => (string)x.Inner!).ToList(), + }; + } + } + + #region Negative + + [PineconeFact] + public void UseRecordWithNoEmbeddingThrows() + { + var exception = Assert.Throws( + () => new PineconeVectorStoreRecordCollection( + this.Fixture.Client, + "Whatever")); + + Assert.Equal( + $"No vector property found on type {typeof(PineconeRecordNoEmbedding).FullName}.", + exception.Message); + } + +#pragma warning disable CA1812 + private sealed record PineconeRecordNoEmbedding + { + [VectorStoreRecordKey] + public int Id { get; set; } + + [VectorStoreRecordData] + public string? Name { get; set; } + } +#pragma warning restore CA1812 + + [PineconeFact] + public void UseRecordWithMultipleEmbeddingsThrows() + { + var exception = Assert.Throws( + () => new PineconeVectorStoreRecordCollection( + this.Fixture.Client, + "Whatever")); + + Assert.Equal( + $"Multiple vector properties found on type {typeof(PineconeRecordMultipleEmbeddings).FullName} while only one is supported.", + exception.Message); + } + +#pragma warning disable CA1812 + private sealed record PineconeRecordMultipleEmbeddings + { + [VectorStoreRecordKey] + public string Id { get; set; } = null!; + + [VectorStoreRecordVector] + public ReadOnlyMemory Embedding1 { get; set; } + + [VectorStoreRecordVector] + public ReadOnlyMemory Embedding2 { get; set; } + } +#pragma warning restore CA1812 + + [PineconeFact] + public void UseRecordWithUnsupportedKeyTypeThrows() + { + var message = Assert.Throws( + () => new PineconeVectorStoreRecordCollection( + this.Fixture.Client, + "Whatever")).Message; + + Assert.Equal( + $"Key properties must be one of the supported types: {typeof(string).FullName}. 
Type of the property '{nameof(PineconeRecordUnsupportedKeyType.Id)}' is {typeof(int).FullName}.", + message); + } + +#pragma warning disable CA1812 + private sealed record PineconeRecordUnsupportedKeyType + { + [VectorStoreRecordKey] + public int Id { get; set; } + + [VectorStoreRecordData] + public string? Name { get; set; } + + [VectorStoreRecordVector] + public ReadOnlyMemory Embedding { get; set; } + } +#pragma warning restore CA1812 + + [PineconeFact] + public async Task TryAddingVectorWithUnsupportedValuesAsync() + { + var badAllTypes = new PineconeAllTypes + { + Id = "bad", + BoolProperty = true, + DecimalProperty = 1m, + DoubleProperty = 1.5d, + FloatProperty = 2.5f, + IntProperty = 1, + LongProperty = 11L, + NullableStringArray = ["foo", null!, "bar",], + Embedding = new ReadOnlyMemory([1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f]) + }; + + var exception = await Assert.ThrowsAsync( + () => this.Fixture.AllTypesRecordCollection.UpsertAsync(badAllTypes)); + + Assert.Equal("Microsoft.SemanticKernel.Connectors.Pinecone", exception.Source); + Assert.Equal("Pinecone", exception.VectorStoreType); + Assert.Equal("Upsert", exception.OperationName); + Assert.Equal(this.Fixture.IndexName, exception.CollectionName); + + var inner = exception.InnerException as RpcException; + Assert.NotNull(inner); + Assert.Equal(StatusCode.InvalidArgument, inner.StatusCode); + } + + [PineconeFact] + public async Task TryCreateIndexWithIncorrectDimensionFailsAsync() + { + var recordCollection = new PineconeVectorStoreRecordCollection( + this.Fixture.Client, + "negative-dimension"); + + var message = (await Assert.ThrowsAsync(() => recordCollection.CreateCollectionAsync())).Message; + + Assert.Equal("Property Dimensions on VectorStoreRecordVectorProperty 'Embedding' must be set to a positive integer to create a collection.", message); + } + +#pragma warning disable CA1812 + private sealed record PineconeRecordWithIncorrectDimension + { + [VectorStoreRecordKey] + public string Id { get; set; } = null!; + + [VectorStoreRecordData] + public string? Name { get; set; } + + [VectorStoreRecordVector(Dimensions: -7)] + public ReadOnlyMemory Embedding { get; set; } + } +#pragma warning restore CA1812 + + [PineconeFact] + public async Task TryCreateIndexWithUnsSupportedMetricFailsAsync() + { + var recordCollection = new PineconeVectorStoreRecordCollection( + this.Fixture.Client, + "bad-metric"); + + var message = (await Assert.ThrowsAsync(() => recordCollection.CreateCollectionAsync())).Message; + + Assert.Equal("Distance function 'just eyeball it' for VectorStoreRecordVectorProperty 'Embedding' is not supported by the Pinecone VectorStore.", message); + } + +#pragma warning disable CA1812 + private sealed record PineconeRecordWithUnsupportedMetric + { + [VectorStoreRecordKey] + public string Id { get; set; } = null!; + + [VectorStoreRecordData] + public string? Name { get; set; } + + [VectorStoreRecordVector(Dimensions: 5, IndexKind: null, DistanceFunction: "just eyeball it")] + public ReadOnlyMemory Embedding { get; set; } + } +#pragma warning restore CA1812 + + #endregion +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeVectorStoreTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeVectorStoreTests.cs new file mode 100644 index 000000000000..8aa50e6fa2fa --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/PineconeVectorStoreTests.cs @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft. All rights reserved. 
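+
+// Store-level tests: collection enumeration plus the collection-factory hook. A minimal
+// usage sketch of the API under test (assuming a preconfigured Sdk.PineconeClient named
+// `client`; the sketch is illustrative and not part of the suite):
+//
+//   var store = new PineconeVectorStore(client);
+//   await foreach (var name in store.ListCollectionNamesAsync())
+//   {
+//       Console.WriteLine(name); // one entry per Pinecone index
+//   }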
+
+using System;
+using System.Linq;
+using System.Threading.Tasks;
+using Microsoft.SemanticKernel.Connectors.Pinecone;
+using Microsoft.SemanticKernel.Data;
+using SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit;
+using Xunit;
+using Sdk = Pinecone;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone;
+
+[Collection("PineconeVectorStoreTests")]
+[PineconeApiKeySetCondition]
+public class PineconeVectorStoreTests(PineconeVectorStoreFixture fixture) : IClassFixture<PineconeVectorStoreFixture>
+{
+    private PineconeVectorStoreFixture Fixture { get; } = fixture;
+
+    [PineconeFact]
+    public async Task ListCollectionNamesAsync()
+    {
+        var collectionNames = await this.Fixture.VectorStore.ListCollectionNamesAsync().ToListAsync();
+
+        Assert.Equal([this.Fixture.IndexName], collectionNames);
+    }
+
+    [PineconeFact]
+    public void CreateCollectionUsingFactory()
+    {
+        var vectorStore = new PineconeVectorStore(
+            this.Fixture.Client,
+            new PineconeVectorStoreOptions
+            {
+                VectorStoreCollectionFactory = new MyVectorStoreRecordCollectionFactory()
+            });
+
+        var factoryCollection = vectorStore.GetCollection<string, PineconeHotel>(this.Fixture.IndexName);
+
+        Assert.NotNull(factoryCollection);
+        Assert.Equal("factory" + this.Fixture.IndexName, factoryCollection.CollectionName);
+    }
+
+    private sealed class MyVectorStoreRecordCollectionFactory : IPineconeVectorStoreRecordCollectionFactory
+    {
+        public IVectorStoreRecordCollection<TKey, TRecord> CreateVectorStoreRecordCollection<TKey, TRecord>(
+            Sdk.PineconeClient pineconeClient,
+            string name,
+            VectorStoreRecordDefinition? vectorStoreRecordDefinition)
+            where TKey : notnull
+            where TRecord : class
+        {
+            if (typeof(TKey) != typeof(string))
+            {
+                throw new InvalidOperationException("Only string keys are supported.");
+            }
+
+            return (new PineconeVectorStoreRecordCollection<TRecord>(pineconeClient, "factory" + name) as IVectorStoreRecordCollection<TKey, TRecord>)!;
+        }
+    }
+}
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/ITestCondition.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/ITestCondition.cs
new file mode 100644
index 000000000000..361e13d60cd0
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/ITestCondition.cs
@@ -0,0 +1,12 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System.Threading.Tasks;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit;
+
+public interface ITestCondition
+{
+    ValueTask<bool> IsMetAsync();
+
+    string SkipReason { get; }
+}
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeApiKeySetConditionAttribute.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeApiKeySetConditionAttribute.cs
new file mode 100644
index 000000000000..ef144699fb7c
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeApiKeySetConditionAttribute.cs
@@ -0,0 +1,20 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Threading.Tasks;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit;
+
+[AttributeUsage(AttributeTargets.Method | AttributeTargets.Class)]
+public sealed class PineconeApiKeySetConditionAttribute : Attribute, ITestCondition
+{
+    public ValueTask<bool> IsMetAsync()
+    {
+        var isMet = PineconeUserSecretsExtensions.ContainsPineconeApiKey();
+
+        return ValueTask.FromResult(isMet);
+    }
+
+    public string SkipReason
+        => $"Pinecone API key was not specified in user secrets. Use the following command to set it: dotnet user-secrets set \"{PineconeUserSecretsExtensions.PineconeApiKeyUserSecretEntry}\" \"your_Pinecone_API_key\"";
+}
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeFactAttribute.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeFactAttribute.cs
new file mode 100644
index 000000000000..d4ebff8869e0
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeFactAttribute.cs
@@ -0,0 +1,11 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using Xunit;
+using Xunit.Sdk;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit;
+
+[AttributeUsage(AttributeTargets.Method)]
+[XunitTestCaseDiscoverer("SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit.PineconeFactDiscoverer", "IntegrationTests")]
+public sealed class PineconeFactAttribute : FactAttribute;
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeFactDiscoverer.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeFactDiscoverer.cs
new file mode 100644
index 000000000000..c1923ad72a2e
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeFactDiscoverer.cs
@@ -0,0 +1,19 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using Xunit.Abstractions;
+using Xunit.Sdk;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit;
+
+public class PineconeFactDiscoverer(IMessageSink messageSink) : FactDiscoverer(messageSink)
+{
+    protected override IXunitTestCase CreateTestCase(
+        ITestFrameworkDiscoveryOptions discoveryOptions,
+        ITestMethod testMethod,
+        IAttributeInfo factAttribute)
+        => new PineconeFactTestCase(
+            this.DiagnosticMessageSink,
+            discoveryOptions.MethodDisplayOrDefault(),
+            discoveryOptions.MethodDisplayOptionsOrDefault(),
+            testMethod);
+}
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeFactTestCase.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeFactTestCase.cs
new file mode 100644
index 000000000000..4a27031ff45b
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeFactTestCase.cs
@@ -0,0 +1,42 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Threading;
+using System.Threading.Tasks;
+using Xunit.Abstractions;
+using Xunit.Sdk;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit;
+
+public sealed class PineconeFactTestCase : XunitTestCase
+{
+    [Obsolete("Called by the de-serializer; should only be called by deriving classes for de-serialization purposes")]
+    public PineconeFactTestCase()
+    {
+    }
+
+    public PineconeFactTestCase(
+        IMessageSink diagnosticMessageSink,
+        TestMethodDisplay defaultMethodDisplay,
+        TestMethodDisplayOptions defaultMethodDisplayOptions,
+        ITestMethod testMethod,
+        object[]? testMethodArguments = null)
+        : base(diagnosticMessageSink, defaultMethodDisplay, defaultMethodDisplayOptions, testMethod, testMethodArguments)
+    {
+    }
+
+    public override async Task<RunSummary> RunAsync(
+        IMessageSink diagnosticMessageSink,
+        IMessageBus messageBus,
+        object[] constructorArguments,
+        ExceptionAggregator aggregator,
+        CancellationTokenSource cancellationTokenSource)
+        => await XunitTestCaseExtensions.TrySkipAsync(this, messageBus)
+            ? new RunSummary { Total = 1, Skipped = 1 }
+            : await base.RunAsync(
+                diagnosticMessageSink,
+                messageBus,
+                constructorArguments,
+                aggregator,
+                cancellationTokenSource);
+}
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeTheoryAttribute.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeTheoryAttribute.cs
new file mode 100644
index 000000000000..bff77c952c24
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeTheoryAttribute.cs
@@ -0,0 +1,11 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using Xunit;
+using Xunit.Sdk;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit;
+
+[AttributeUsage(AttributeTargets.Method)]
+[XunitTestCaseDiscoverer("SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit.PineconeTheoryDiscoverer", "IntegrationTests")]
+public sealed class PineconeTheoryAttribute : TheoryAttribute;
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeTheoryDiscoverer.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeTheoryDiscoverer.cs
new file mode 100644
index 000000000000..79a60afd69b8
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeTheoryDiscoverer.cs
@@ -0,0 +1,36 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System.Collections.Generic;
+using Xunit.Abstractions;
+using Xunit.Sdk;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit;
+
+public class PineconeTheoryDiscoverer(IMessageSink messageSink) : TheoryDiscoverer(messageSink)
+{
+    protected override IEnumerable<IXunitTestCase> CreateTestCasesForTheory(
+        ITestFrameworkDiscoveryOptions discoveryOptions,
+        ITestMethod testMethod,
+        IAttributeInfo theoryAttribute)
+    {
+        yield return new PineconeTheoryTestCase(
+            this.DiagnosticMessageSink,
+            discoveryOptions.MethodDisplayOrDefault(),
+            discoveryOptions.MethodDisplayOptionsOrDefault(),
+            testMethod);
+    }
+
+    protected override IEnumerable<IXunitTestCase> CreateTestCasesForDataRow(
+        ITestFrameworkDiscoveryOptions discoveryOptions,
+        ITestMethod testMethod,
+        IAttributeInfo theoryAttribute,
+        object[] dataRow)
+    {
+        yield return new PineconeFactTestCase(
+            this.DiagnosticMessageSink,
+            discoveryOptions.MethodDisplayOrDefault(),
+            discoveryOptions.MethodDisplayOptionsOrDefault(),
+            testMethod,
+            dataRow);
+    }
+}
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeTheoryTestCase.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeTheoryTestCase.cs
new file mode 100644
index 000000000000..1a9ebff92e1f
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/PineconeTheoryTestCase.cs
@@ -0,0 +1,41 @@
+// Copyright (c) Microsoft. All rights reserved.
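+
+// Mirrors PineconeFactTestCase: ITestCondition attributes (assembly, class, then method
+// level) are evaluated before the theory runs, reporting a skip rather than a failure
+// when no Pinecone API key is configured. Sketch of a consuming theory (the class is
+// assumed to carry [PineconeApiKeySetCondition], as the test classes above do):
+//
+//   [PineconeTheory]
+//   [InlineData(true)]
+//   [InlineData(false)]
+//   public async Task GetWithOptionalVectorsAsync(bool includeVectors) { /* ... */ }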
+
+using System;
+using System.Threading;
+using System.Threading.Tasks;
+using Xunit.Abstractions;
+using Xunit.Sdk;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit;
+
+public sealed class PineconeTheoryTestCase : XunitTheoryTestCase
+{
+    [Obsolete("Called by the de-serializer; should only be called by deriving classes for de-serialization purposes")]
+    public PineconeTheoryTestCase()
+    {
+    }
+
+    public PineconeTheoryTestCase(
+        IMessageSink diagnosticMessageSink,
+        TestMethodDisplay defaultMethodDisplay,
+        TestMethodDisplayOptions defaultMethodDisplayOptions,
+        ITestMethod testMethod)
+        : base(diagnosticMessageSink, defaultMethodDisplay, defaultMethodDisplayOptions, testMethod)
+    {
+    }
+
+    public override async Task<RunSummary> RunAsync(
+        IMessageSink diagnosticMessageSink,
+        IMessageBus messageBus,
+        object[] constructorArguments,
+        ExceptionAggregator aggregator,
+        CancellationTokenSource cancellationTokenSource)
+        => await XunitTestCaseExtensions.TrySkipAsync(this, messageBus)
+            ? new RunSummary { Total = 1, Skipped = 1 }
+            : await base.RunAsync(
+                diagnosticMessageSink,
+                messageBus,
+                constructorArguments,
+                aggregator,
+                cancellationTokenSource);
+}
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/XunitTestCaseExtensions.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/XunitTestCaseExtensions.cs
new file mode 100644
index 000000000000..75d22e4e5ae9
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Pinecone/Xunit/XunitTestCaseExtensions.cs
@@ -0,0 +1,55 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Collections.Concurrent;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading.Tasks;
+using Xunit.Abstractions;
+using Xunit.Sdk;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Pinecone.Xunit;
+
+public static class XunitTestCaseExtensions
+{
+    private static readonly ConcurrentDictionary<string, List<IAttributeInfo>> s_typeAttributes = new();
+    private static readonly ConcurrentDictionary<string, List<IAttributeInfo>> s_assemblyAttributes = new();
+
+    public static async ValueTask<bool> TrySkipAsync(XunitTestCase testCase, IMessageBus messageBus)
+    {
+        var method = testCase.Method;
+        var type = testCase.TestMethod.TestClass.Class;
+        var assembly = type.Assembly;
+
+        var skipReasons = new List<string>();
+        var attributes =
+            s_assemblyAttributes.GetOrAdd(
+                assembly.Name,
+                a => assembly.GetCustomAttributes(typeof(ITestCondition)).ToList())
+            .Concat(
+                s_typeAttributes.GetOrAdd(
+                    type.Name,
+                    t => type.GetCustomAttributes(typeof(ITestCondition)).ToList()))
+            .Concat(method.GetCustomAttributes(typeof(ITestCondition)))
+            .OfType<IReflectionAttributeInfo>()
+            .Select(attributeInfo => (ITestCondition)attributeInfo.Attribute);
+
+        foreach (var attribute in attributes)
+        {
+            if (!await attribute.IsMetAsync())
+            {
+                skipReasons.Add(attribute.SkipReason);
+            }
+        }
+
+        if (skipReasons.Count > 0)
+        {
+            messageBus.QueueMessage(
+                new TestSkipped(new XunitTest(testCase, testCase.DisplayName), string.Join(Environment.NewLine, skipReasons)));
+
+            return true;
+        }
+
+        return false;
+    }
+}
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreCollectionFixture.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreCollectionFixture.cs
new file mode 100644
index 000000000000..a7b565d71c2d
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreCollectionFixture.cs
@@ -0,0 +1,10 @@
+// Copyright (c) Microsoft. All rights reserved.
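+
+// xUnit collection-fixture glue: every test class tagged [Collection("QdrantVectorStoreCollection")]
+// shares one QdrantVectorStoreFixture instance, so a single qdrant docker container serves
+// the whole collection. A consuming class only needs the attribute plus a constructor
+// parameter, e.g. (hypothetical class name, for illustration only):
+//
+//   [Collection("QdrantVectorStoreCollection")]
+//   public sealed class MyQdrantTests(QdrantVectorStoreFixture fixture) { /* ... */ }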
+
+using Xunit;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Qdrant;
+
+[CollectionDefinition("QdrantVectorStoreCollection")]
+public class QdrantVectorStoreCollectionFixture : ICollectionFixture<QdrantVectorStoreFixture>
+{
+}
diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreFixture.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreFixture.cs
new file mode 100644
index 000000000000..d1a314829547
--- /dev/null
+++ b/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreFixture.cs
@@ -0,0 +1,325 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Collections.Generic;
+using System.Threading.Tasks;
+using Docker.DotNet;
+using Docker.DotNet.Models;
+using Grpc.Core;
+using Microsoft.SemanticKernel.Data;
+using Qdrant.Client;
+using Qdrant.Client.Grpc;
+using Xunit;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Qdrant;
+
+public class QdrantVectorStoreFixture : IAsyncLifetime
+{
+    /// <summary>The docker client we are using to create a qdrant container with.</summary>
+    private readonly DockerClient _client;
+
+    /// <summary>The id of the qdrant container that we are testing with.</summary>
+    private string? _containerId = null;
+
+#pragma warning disable CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable.
+
+    /// <summary>
+    /// Initializes a new instance of the <see cref="QdrantVectorStoreFixture"/> class.
+    /// </summary>
+    public QdrantVectorStoreFixture()
+    {
+        using var dockerClientConfiguration = new DockerClientConfiguration();
+        this._client = dockerClientConfiguration.CreateClient();
+        this.HotelVectorStoreRecordDefinition = new VectorStoreRecordDefinition
+        {
+            Properties = new List<VectorStoreRecordProperty>
+            {
+                new VectorStoreRecordKeyProperty("HotelId", typeof(ulong)),
+                new VectorStoreRecordDataProperty("HotelName", typeof(string)) { IsFilterable = true, IsFullTextSearchable = true },
+                new VectorStoreRecordDataProperty("HotelCode", typeof(int)) { IsFilterable = true },
+                new VectorStoreRecordDataProperty("ParkingIncluded", typeof(bool)) { IsFilterable = true, StoragePropertyName = "parking_is_included" },
+                new VectorStoreRecordDataProperty("HotelRating", typeof(float)) { IsFilterable = true },
+                new VectorStoreRecordDataProperty("Tags", typeof(List<string>)),
+                new VectorStoreRecordDataProperty("Description", typeof(string)),
+                new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(ReadOnlyMemory<float>?)) { Dimensions = 4, DistanceFunction = DistanceFunction.ManhattanDistance }
+            }
+        };
+        this.HotelWithGuidIdVectorStoreRecordDefinition = new VectorStoreRecordDefinition
+        {
+            Properties = new List<VectorStoreRecordProperty>
+            {
+                new VectorStoreRecordKeyProperty("HotelId", typeof(Guid)),
+                new VectorStoreRecordDataProperty("HotelName", typeof(string)) { IsFilterable = true, IsFullTextSearchable = true },
+                new VectorStoreRecordDataProperty("Description", typeof(string)),
+                new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(ReadOnlyMemory<float>?)) { Dimensions = 4, DistanceFunction = DistanceFunction.ManhattanDistance }
+            }
+        };
+    }
+
+#pragma warning restore CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable.
+
+    /// <summary>Gets the qdrant client connection to use for tests.</summary>
+    public QdrantClient QdrantClient { get; private set; }
+
+    /// <summary>Gets the manually created vector store record definition for our test model.</summary>
+ public VectorStoreRecordDefinition HotelVectorStoreRecordDefinition { get; private set; } + + /// Gets the manually created vector store record definition for our test model. + public VectorStoreRecordDefinition HotelWithGuidIdVectorStoreRecordDefinition { get; private set; } + + /// + /// Create / Recreate qdrant docker container and run it. + /// + /// An async task. + public async Task InitializeAsync() + { + this._containerId = await SetupQdrantContainerAsync(this._client); + + // Connect to qdrant. + this.QdrantClient = new QdrantClient("localhost"); + + // Create schemas for the vector store. + var vectorParamsMap = new VectorParamsMap(); + vectorParamsMap.Map.Add("DescriptionEmbedding", new VectorParams { Size = 4, Distance = Distance.Cosine }); + + // Wait for the qdrant container to be ready. + var retryCount = 0; + while (retryCount++ < 5) + { + try + { + await this.QdrantClient.ListCollectionsAsync(); + } + catch (RpcException e) + { + if (e.StatusCode != Grpc.Core.StatusCode.Unavailable) + { + throw; + } + + await Task.Delay(1000); + } + } + + await this.QdrantClient.CreateCollectionAsync( + "namedVectorsHotels", + vectorParamsMap); + + await this.QdrantClient.CreateCollectionAsync( + "singleVectorHotels", + new VectorParams { Size = 4, Distance = Distance.Cosine }); + + await this.QdrantClient.CreateCollectionAsync( + "singleVectorGuidIdHotels", + new VectorParams { Size = 4, Distance = Distance.Cosine }); + + // Create test data common to both named and unnamed vectors. + var tags = new ListValue(); + tags.Values.Add("t1"); + tags.Values.Add("t2"); + var tagsValue = new Value(); + tagsValue.ListValue = tags; + + // Create some test data using named vectors. + var embedding = new[] { 30f, 31f, 32f, 33f }; + + var namedVectors1 = new NamedVectors(); + var namedVectors2 = new NamedVectors(); + var namedVectors3 = new NamedVectors(); + + namedVectors1.Vectors.Add("DescriptionEmbedding", embedding); + namedVectors2.Vectors.Add("DescriptionEmbedding", embedding); + namedVectors3.Vectors.Add("DescriptionEmbedding", embedding); + + List namedVectorPoints = + [ + new PointStruct + { + Id = 11, + Vectors = new Vectors { Vectors_ = namedVectors1 }, + Payload = { ["HotelName"] = "My Hotel 11", ["HotelCode"] = 11, ["parking_is_included"] = true, ["Tags"] = tagsValue, ["HotelRating"] = 4.5f, ["Description"] = "This is a great hotel." } + }, + new PointStruct + { + Id = 12, + Vectors = new Vectors { Vectors_ = namedVectors2 }, + Payload = { ["HotelName"] = "My Hotel 12", ["HotelCode"] = 12, ["parking_is_included"] = false, ["Description"] = "This is a great hotel." } + }, + new PointStruct + { + Id = 13, + Vectors = new Vectors { Vectors_ = namedVectors3 }, + Payload = { ["HotelName"] = "My Hotel 13", ["HotelCode"] = 13, ["parking_is_included"] = false, ["Description"] = "This is a great hotel." } + }, + ]; + + await this.QdrantClient.UpsertAsync("namedVectorsHotels", namedVectorPoints); + + // Create some test data using a single unnamed vector. + List unnamedVectorPoints = + [ + new PointStruct + { + Id = 11, + Vectors = embedding, + Payload = { ["HotelName"] = "My Hotel 11", ["HotelCode"] = 11, ["parking_is_included"] = true, ["Tags"] = tagsValue, ["HotelRating"] = 4.5f, ["Description"] = "This is a great hotel." } + }, + new PointStruct + { + Id = 12, + Vectors = embedding, + Payload = { ["HotelName"] = "My Hotel 12", ["HotelCode"] = 12, ["parking_is_included"] = false, ["Description"] = "This is a great hotel." 
} + }, + new PointStruct + { + Id = 13, + Vectors = embedding, + Payload = { ["HotelName"] = "My Hotel 13", ["HotelCode"] = 13, ["parking_is_included"] = false, ["Description"] = "This is a great hotel." } + }, + ]; + + await this.QdrantClient.UpsertAsync("singleVectorHotels", unnamedVectorPoints); + + // Create some test data using a single unnamed vector and a guid id. + List unnamedVectorGuidIdPoints = + [ + new PointStruct + { + Id = Guid.Parse("11111111-1111-1111-1111-111111111111"), + Vectors = embedding, + Payload = { ["HotelName"] = "My Hotel 11", ["Description"] = "This is a great hotel." } + }, + new PointStruct + { + Id = Guid.Parse("22222222-2222-2222-2222-222222222222"), + Vectors = embedding, + Payload = { ["HotelName"] = "My Hotel 12", ["Description"] = "This is a great hotel." } + }, + new PointStruct + { + Id = Guid.Parse("33333333-3333-3333-3333-333333333333"), + Vectors = embedding, + Payload = { ["HotelName"] = "My Hotel 13", ["Description"] = "This is a great hotel." } + }, + ]; + + await this.QdrantClient.UpsertAsync("singleVectorGuidIdHotels", unnamedVectorGuidIdPoints); + } + + /// + /// Delete the docker container after the test run. + /// + /// An async task. + public async Task DisposeAsync() + { + if (this._containerId != null) + { + await this._client.Containers.StopContainerAsync(this._containerId, new ContainerStopParameters()); + await this._client.Containers.RemoveContainerAsync(this._containerId, new ContainerRemoveParameters()); + } + } + + /// + /// Setup the qdrant container by pulling the image and running it. + /// + /// The docker client to create the container with. + /// The id of the container. + private static async Task SetupQdrantContainerAsync(DockerClient client) + { + await client.Images.CreateImageAsync( + new ImagesCreateParameters + { + FromImage = "qdrant/qdrant", + Tag = "latest", + }, + null, + new Progress()); + + var container = await client.Containers.CreateContainerAsync(new CreateContainerParameters() + { + Image = "qdrant/qdrant", + HostConfig = new HostConfig() + { + PortBindings = new Dictionary> + { + {"6333", new List {new() {HostPort = "6333" } }}, + {"6334", new List {new() {HostPort = "6334" } }} + }, + PublishAllPorts = true + }, + ExposedPorts = new Dictionary + { + { "6333", default }, + { "6334", default } + }, + }); + + await client.Containers.StartContainerAsync( + container.ID, + new ContainerStartParameters()); + + return container.ID; + } + + /// + /// A test model for the qdrant vector store. + /// +#pragma warning disable CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable. + public record HotelInfo() + { + /// The key of the record. + [VectorStoreRecordKey] + public ulong HotelId { get; init; } + + /// A string metadata field. + [VectorStoreRecordData(IsFilterable = true, IsFullTextSearchable = true)] + public string? HotelName { get; set; } + + /// An int metadata field. + [VectorStoreRecordData(IsFilterable = true)] + public int HotelCode { get; set; } + + /// A float metadata field. + [VectorStoreRecordData(IsFilterable = true)] + public float? HotelRating { get; set; } + + /// A bool metadata field. + [VectorStoreRecordData(IsFilterable = true, StoragePropertyName = "parking_is_included")] + public bool ParkingIncluded { get; set; } + + [VectorStoreRecordData] + public List Tags { get; set; } = new List(); + + /// A data field. + [VectorStoreRecordData] + public string Description { get; set; } + + /// A vector field. 
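+        /// The attribute below pins this vector to 4 dimensions with an HNSW index and
+        /// Manhattan distance, matching the Dimensions and DistanceFunction declared in
+        /// the fixture's manually built record definitions.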
+ [VectorStoreRecordVector(4, IndexKind.Hnsw, DistanceFunction.ManhattanDistance)] + public ReadOnlyMemory? DescriptionEmbedding { get; set; } + } + + /// + /// A test model for the qdrant vector store. + /// +#pragma warning disable CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable. + public record HotelInfoWithGuidId() + { + /// The key of the record. + [VectorStoreRecordKey] + public Guid HotelId { get; init; } + + /// A string metadata field. + [VectorStoreRecordData(IsFilterable = true, IsFullTextSearchable = true)] + public string? HotelName { get; set; } + + /// A data field. + [VectorStoreRecordData] + public string Description { get; set; } + + /// A vector field. + [VectorStoreRecordVector(4, IndexKind.Hnsw, DistanceFunction.ManhattanDistance)] + public ReadOnlyMemory? DescriptionEmbedding { get; set; } + } +} +#pragma warning restore CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable. diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreRecordCollectionTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreRecordCollectionTests.cs new file mode 100644 index 000000000000..7e2e9b1f7d78 --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreRecordCollectionTests.cs @@ -0,0 +1,381 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Globalization; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Connectors.Qdrant; +using Microsoft.SemanticKernel.Data; +using Qdrant.Client.Grpc; +using Xunit; +using Xunit.Abstractions; +using static SemanticKernel.IntegrationTests.Connectors.Memory.Qdrant.QdrantVectorStoreFixture; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Qdrant; + +/// +/// Contains tests for the class. +/// +/// Used for logging. +/// Qdrant setup and teardown. +[Collection("QdrantVectorStoreCollection")] +public sealed class QdrantVectorStoreRecordCollectionTests(ITestOutputHelper output, QdrantVectorStoreFixture fixture) +{ + [Theory] + [InlineData("singleVectorHotels", true)] + [InlineData("nonexistentcollection", false)] + public async Task CollectionExistsReturnsCollectionStateAsync(string collectionName, bool expectedExists) + { + // Arrange. + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, collectionName); + + // Act. + var actual = await sut.CollectionExistsAsync(); + + // Assert. + Assert.Equal(expectedExists, actual); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task ItCanCreateACollectionUpsertAndGetAsync(bool hasNamedVectors, bool useRecordDefinition) + { + // Arrange + var collectionNamePostfix1 = useRecordDefinition ? "WithDefinition" : "WithType"; + var collectionNamePostfix2 = hasNamedVectors ? "HasNamedVectors" : "SingleUnnamedVector"; + var testCollectionName = $"createtest{collectionNamePostfix1}{collectionNamePostfix2}"; + + var options = new QdrantVectorStoreRecordCollectionOptions + { + HasNamedVectors = hasNamedVectors, + VectorStoreRecordDefinition = useRecordDefinition ? 
fixture.HotelVectorStoreRecordDefinition : null + }; + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, testCollectionName, options); + + var record = this.CreateTestHotel(30); + + // Act + await sut.CreateCollectionAsync(); + var upsertResult = await sut.UpsertAsync(record); + var getResult = await sut.GetAsync(30, new GetRecordOptions { IncludeVectors = true }); + + // Assert + var collectionExistResult = await sut.CollectionExistsAsync(); + Assert.True(collectionExistResult); + await sut.DeleteCollectionAsync(); + + Assert.Equal(30ul, upsertResult); + Assert.Equal(record.HotelId, getResult?.HotelId); + Assert.Equal(record.HotelName, getResult?.HotelName); + Assert.Equal(record.HotelCode, getResult?.HotelCode); + Assert.Equal(record.HotelRating, getResult?.HotelRating); + Assert.Equal(record.ParkingIncluded, getResult?.ParkingIncluded); + Assert.Equal(record.Tags.ToArray(), getResult?.Tags.ToArray()); + Assert.Equal(record.Description, getResult?.Description); + + // Output + output.WriteLine(collectionExistResult.ToString()); + output.WriteLine(upsertResult.ToString(CultureInfo.InvariantCulture)); + output.WriteLine(getResult?.ToString()); + } + + [Fact] + public async Task ItCanDeleteCollectionAsync() + { + // Arrange + var tempCollectionName = "temp-test"; + await fixture.QdrantClient.CreateCollectionAsync( + tempCollectionName, + new VectorParams { Size = 4, Distance = Distance.Cosine }); + + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, tempCollectionName); + + // Act + await sut.DeleteCollectionAsync(); + + // Assert + Assert.False(await sut.CollectionExistsAsync()); + } + + [Theory] + [InlineData(true, "singleVectorHotels", false)] + [InlineData(false, "singleVectorHotels", false)] + [InlineData(true, "namedVectorsHotels", true)] + [InlineData(false, "namedVectorsHotels", true)] + public async Task ItCanUpsertDocumentToVectorStoreAsync(bool useRecordDefinition, string collectionName, bool hasNamedVectors) + { + // Arrange. + var options = new QdrantVectorStoreRecordCollectionOptions + { + HasNamedVectors = hasNamedVectors, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.HotelVectorStoreRecordDefinition : null + }; + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, collectionName, options); + + var record = this.CreateTestHotel(20); + + // Act. + var upsertResult = await sut.UpsertAsync(record); + + // Assert. + var getResult = await sut.GetAsync(20, new GetRecordOptions { IncludeVectors = true }); + Assert.Equal(20ul, upsertResult); + Assert.Equal(record.HotelId, getResult?.HotelId); + Assert.Equal(record.HotelName, getResult?.HotelName); + Assert.Equal(record.HotelCode, getResult?.HotelCode); + Assert.Equal(record.HotelRating, getResult?.HotelRating); + Assert.Equal(record.ParkingIncluded, getResult?.ParkingIncluded); + Assert.Equal(record.Tags.ToArray(), getResult?.Tags.ToArray()); + Assert.Equal(record.Description, getResult?.Description); + + // TODO: figure out why original array is different from the one we get back. + //Assert.Equal(record.DescriptionEmbedding?.ToArray(), getResult?.DescriptionEmbedding?.ToArray()); + + // Output. + output.WriteLine(upsertResult.ToString(CultureInfo.InvariantCulture)); + output.WriteLine(getResult?.ToString()); + } + + [Fact] + public async Task ItCanUpsertAndRemoveDocumentWithGuidIdToVectorStoreAsync() + { + // Arrange. 
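+        // Qdrant point ids are either unsigned integers or UUIDs, so this test drives the
+        // Guid-keyed variant of the collection end to end (upsert, get, then delete).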
+ var options = new QdrantVectorStoreRecordCollectionOptions { HasNamedVectors = false }; + IVectorStoreRecordCollection sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, "singleVectorGuidIdHotels", options); + + var record = new HotelInfoWithGuidId + { + HotelId = Guid.Parse("55555555-5555-5555-5555-555555555555"), + HotelName = "My Hotel 5", + Description = "This is a great hotel.", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f }, + }; + + // Act. + var upsertResult = await sut.UpsertAsync(record); + + // Assert. + var getResult = await sut.GetAsync(Guid.Parse("55555555-5555-5555-5555-555555555555"), new GetRecordOptions { IncludeVectors = true }); + Assert.Equal(Guid.Parse("55555555-5555-5555-5555-555555555555"), upsertResult); + Assert.Equal(record.HotelId, getResult?.HotelId); + Assert.Equal(record.HotelName, getResult?.HotelName); + Assert.Equal(record.Description, getResult?.Description); + + // Act. + await sut.DeleteAsync(Guid.Parse("55555555-5555-5555-5555-555555555555")); + + // Assert. + Assert.Null(await sut.GetAsync(Guid.Parse("55555555-5555-5555-5555-555555555555"))); + + // Output. + output.WriteLine(upsertResult.ToString("D")); + output.WriteLine(getResult?.ToString()); + } + + [Theory] + [InlineData(true, true, "singleVectorHotels", false)] + [InlineData(true, false, "singleVectorHotels", false)] + [InlineData(false, true, "singleVectorHotels", false)] + [InlineData(false, false, "singleVectorHotels", false)] + [InlineData(true, true, "namedVectorsHotels", true)] + [InlineData(true, false, "namedVectorsHotels", true)] + [InlineData(false, true, "namedVectorsHotels", true)] + [InlineData(false, false, "namedVectorsHotels", true)] + public async Task ItCanGetDocumentFromVectorStoreAsync(bool useRecordDefinition, bool withEmbeddings, string collectionName, bool hasNamedVectors) + { + // Arrange. + var options = new QdrantVectorStoreRecordCollectionOptions + { + HasNamedVectors = hasNamedVectors, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.HotelVectorStoreRecordDefinition : null + }; + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, collectionName, options); + + // Act. + var getResult = await sut.GetAsync(11, new GetRecordOptions { IncludeVectors = withEmbeddings }); + + // Assert. + Assert.Equal(11ul, getResult?.HotelId); + Assert.Equal("My Hotel 11", getResult?.HotelName); + Assert.Equal(11, getResult?.HotelCode); + Assert.True(getResult?.ParkingIncluded); + Assert.Equal(4.5f, getResult?.HotelRating); + Assert.Equal(2, getResult?.Tags.Count); + Assert.Equal("t1", getResult?.Tags[0]); + Assert.Equal("t2", getResult?.Tags[1]); + Assert.Equal("This is a great hotel.", getResult?.Description); + if (withEmbeddings) + { + Assert.NotNull(getResult?.DescriptionEmbedding); + } + else + { + Assert.Null(getResult?.DescriptionEmbedding); + } + + // Output. + output.WriteLine(getResult?.ToString()); + } + + [Theory] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task ItCanGetDocumentWithGuidIdFromVectorStoreAsync(bool useRecordDefinition, bool withEmbeddings) + { + // Arrange. + var options = new QdrantVectorStoreRecordCollectionOptions + { + HasNamedVectors = false, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.HotelWithGuidIdVectorStoreRecordDefinition : null + }; + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, "singleVectorGuidIdHotels", options); + + // Act. 
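+        // The theory toggles withEmbeddings so both the payload-only and the
+        // payload-plus-vector retrieval paths are exercised for Guid-keyed points.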
+ var getResult = await sut.GetAsync(Guid.Parse("11111111-1111-1111-1111-111111111111"), new GetRecordOptions { IncludeVectors = withEmbeddings }); + + // Assert. + Assert.Equal(Guid.Parse("11111111-1111-1111-1111-111111111111"), getResult?.HotelId); + Assert.Equal("My Hotel 11", getResult?.HotelName); + Assert.Equal("This is a great hotel.", getResult?.Description); + if (withEmbeddings) + { + Assert.NotNull(getResult?.DescriptionEmbedding); + } + else + { + Assert.Null(getResult?.DescriptionEmbedding); + } + + // Output. + output.WriteLine(getResult?.ToString()); + } + + [Fact] + public async Task ItCanGetManyDocumentsFromVectorStoreAsync() + { + // Arrange + var options = new QdrantVectorStoreRecordCollectionOptions { HasNamedVectors = true }; + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, "namedVectorsHotels", options); + + // Act + // Also include one non-existing key to test that the operation does not fail for these and returns only the found ones. + var hotels = sut.GetBatchAsync([11, 15, 12], new GetRecordOptions { IncludeVectors = true }); + + // Assert + Assert.NotNull(hotels); + var hotelsList = await hotels.ToListAsync(); + Assert.Equal(2, hotelsList.Count); + + // Output + foreach (var hotel in hotelsList) + { + output.WriteLine(hotel?.ToString() ?? "Null"); + } + } + + [Theory] + [InlineData(true, "singleVectorHotels", false)] + [InlineData(false, "singleVectorHotels", false)] + [InlineData(true, "namedVectorsHotels", true)] + [InlineData(false, "namedVectorsHotels", true)] + public async Task ItCanRemoveDocumentFromVectorStoreAsync(bool useRecordDefinition, string collectionName, bool hasNamedVectors) + { + // Arrange. + var options = new QdrantVectorStoreRecordCollectionOptions + { + HasNamedVectors = hasNamedVectors, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.HotelVectorStoreRecordDefinition : null + }; + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, collectionName, options); + + await sut.UpsertAsync(this.CreateTestHotel(20)); + + // Act. + await sut.DeleteAsync(20); + // Also delete a non-existing key to test that the operation does not fail for these. + await sut.DeleteAsync(21); + + // Assert. + Assert.Null(await sut.GetAsync(20)); + } + + [Theory] + [InlineData(true, "singleVectorHotels", false)] + [InlineData(false, "singleVectorHotels", false)] + [InlineData(true, "namedVectorsHotels", true)] + [InlineData(false, "namedVectorsHotels", true)] + public async Task ItCanRemoveManyDocumentsFromVectorStoreAsync(bool useRecordDefinition, string collectionName, bool hasNamedVectors) + { + // Arrange. + var options = new QdrantVectorStoreRecordCollectionOptions + { + HasNamedVectors = hasNamedVectors, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.HotelVectorStoreRecordDefinition : null + }; + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, collectionName, options); + + await sut.UpsertAsync(this.CreateTestHotel(20)); + + // Act. + // Also delete a non-existing key to test that the operation does not fail for these. + await sut.DeleteBatchAsync([20, 21]); + + // Assert. 
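+        // Only key 20 was upserted above; key 21 in the batch never existed, so this
+        // asserts that deleting a mix of known and unknown keys still removes the known one.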
+ Assert.Null(await sut.GetAsync(20)); + } + + [Fact] + public async Task ItReturnsNullWhenGettingNonExistentRecordAsync() + { + // Arrange + var options = new QdrantVectorStoreRecordCollectionOptions { HasNamedVectors = false }; + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, "singleVectorHotels", options); + + // Act & Assert + Assert.Null(await sut.GetAsync(15, new GetRecordOptions { IncludeVectors = true })); + } + + [Fact] + public async Task ItThrowsMappingExceptionForFailedMapperAsync() + { + // Arrange + var options = new QdrantVectorStoreRecordCollectionOptions { PointStructCustomMapper = new FailingMapper() }; + var sut = new QdrantVectorStoreRecordCollection(fixture.QdrantClient, "singleVectorHotels", options); + + // Act & Assert + await Assert.ThrowsAsync(async () => await sut.GetAsync(11, new GetRecordOptions { IncludeVectors = true })); + } + + private HotelInfo CreateTestHotel(uint hotelId) + { + return new HotelInfo + { + HotelId = hotelId, + HotelName = $"My Hotel {hotelId}", + HotelCode = (int)hotelId, + HotelRating = 4.5f, + ParkingIncluded = true, + Tags = { "t1", "t2" }, + Description = "This is a great hotel.", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f }, + }; + } + + private sealed class FailingMapper : IVectorStoreRecordMapper + { + public PointStruct MapFromDataToStorageModel(HotelInfo dataModel) + { + throw new NotImplementedException(); + } + + public HotelInfo MapFromStorageToDataModel(PointStruct storageModel, StorageToDataModelMapperOptions options) + { + throw new NotImplementedException(); + } + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreTests.cs new file mode 100644 index 000000000000..0da44530f5c0 --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Qdrant/QdrantVectorStoreTests.cs @@ -0,0 +1,32 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Linq; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Connectors.Qdrant; +using Xunit; +using Xunit.Abstractions; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Qdrant; + +[Collection("QdrantVectorStoreCollection")] +public class QdrantVectorStoreTests(ITestOutputHelper output, QdrantVectorStoreFixture fixture) +{ + [Fact] + public async Task ItCanGetAListOfExistingCollectionNamesAsync() + { + // Arrange + var sut = new QdrantVectorStore(fixture.QdrantClient); + + // Act + var collectionNames = await sut.ListCollectionNamesAsync().ToListAsync(); + + // Assert + Assert.Equal(3, collectionNames.Count); + Assert.Contains("namedVectorsHotels", collectionNames); + Assert.Contains("singleVectorHotels", collectionNames); + Assert.Contains("singleVectorGuidIdHotels", collectionNames); + + // Output + output.WriteLine(string.Join(",", collectionNames)); + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisHashSetVectorStoreRecordCollectionTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisHashSetVectorStoreRecordCollectionTests.cs new file mode 100644 index 000000000000..6c980693c4bc --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisHashSetVectorStoreRecordCollectionTests.cs @@ -0,0 +1,343 @@ +// Copyright (c) Microsoft. All rights reserved. 
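+
+// These tests cover the hash-set flavour of the Redis connector, where each record is
+// stored as a Redis hash. A minimal usage sketch (assuming an IDatabase named `database`
+// obtained from a connected ConnectionMultiplexer, as the fixture used below provides):
+//
+//   var collection = new RedisHashSetVectorStoreRecordCollection<BasicHotel>(
+//       database,
+//       "hashhotels",
+//       new RedisHashSetVectorStoreRecordCollectionOptions<BasicHotel> { PrefixCollectionNameToKeyNames = true });
+//   var hotel = await collection.GetAsync("BaseSet-1");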
+ +using System; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Connectors.Redis; +using Microsoft.SemanticKernel.Data; +using NRedisStack.RedisStackCommands; +using NRedisStack.Search; +using StackExchange.Redis; +using Xunit; +using Xunit.Abstractions; +using static SemanticKernel.IntegrationTests.Connectors.Memory.Redis.RedisVectorStoreFixture; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Redis; + +/// +/// Contains tests for the class. +/// +/// Used for logging. +/// Redis setup and teardown. +[Collection("RedisVectorStoreCollection")] +public sealed class RedisHashSetVectorStoreRecordCollectionTests(ITestOutputHelper output, RedisVectorStoreFixture fixture) +{ + // If null, all tests will be enabled + private const string SkipReason = "Requires Redis docker container up and running"; + + private const string TestCollectionName = "hashhotels"; + + [Theory(Skip = SkipReason)] + [InlineData(TestCollectionName, true)] + [InlineData("nonexistentcollection", false)] + public async Task CollectionExistsReturnsCollectionStateAsync(string collectionName, bool expectedExists) + { + // Arrange. + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, collectionName); + + // Act. + var actual = await sut.CollectionExistsAsync(); + + // Assert. + Assert.Equal(expectedExists, actual); + } + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanCreateACollectionUpsertAndGetAsync(bool useRecordDefinition) + { + // Arrange + var record = CreateTestHotel("Upsert-1", 1); + var collectionNamePostfix = useRecordDefinition ? "WithDefinition" : "WithType"; + var testCollectionName = $"hashsetcreatetest{collectionNamePostfix}"; + + var options = new RedisHashSetVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + VectorStoreRecordDefinition = useRecordDefinition ? 
fixture.BasicVectorStoreRecordDefinition : null + }; + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, testCollectionName, options); + + // Act + await sut.CreateCollectionAsync(); + var upsertResult = await sut.UpsertAsync(record); + var getResult = await sut.GetAsync("Upsert-1", new GetRecordOptions { IncludeVectors = true }); + + // Assert + var collectionExistResult = await sut.CollectionExistsAsync(); + Assert.True(collectionExistResult); + await sut.DeleteCollectionAsync(); + + Assert.Equal("Upsert-1", upsertResult); + Assert.Equal(record.HotelId, getResult?.HotelId); + Assert.Equal(record.HotelName, getResult?.HotelName); + Assert.Equal(record.HotelCode, getResult?.HotelCode); + Assert.Equal(record.ParkingIncluded, getResult?.ParkingIncluded); + Assert.Equal(record.Rating, getResult?.Rating); + Assert.Equal(record.Description, getResult?.Description); + Assert.Equal(record.DescriptionEmbedding?.ToArray(), getResult?.DescriptionEmbedding?.ToArray()); + + // Output + output.WriteLine(collectionExistResult.ToString()); + output.WriteLine(upsertResult); + output.WriteLine(getResult?.ToString()); + } + + [Fact(Skip = SkipReason)] + public async Task ItCanDeleteCollectionAsync() + { + // Arrange + var tempCollectionName = "temp-test"; + var schema = new Schema(); + schema.AddTextField("HotelName"); + var createParams = new FTCreateParams(); + createParams.AddPrefix(tempCollectionName); + await fixture.Database.FT().CreateAsync(tempCollectionName, createParams, schema); + + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, tempCollectionName); + + // Act + await sut.DeleteCollectionAsync(); + + // Assert + Assert.False(await sut.CollectionExistsAsync()); + } + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanUpsertDocumentToVectorStoreAsync(bool useRecordDefinition) + { + // Arrange. + var options = new RedisHashSetVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.BasicVectorStoreRecordDefinition : null + }; + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + var record = CreateTestHotel("Upsert-2", 2); + + // Act. + var upsertResult = await sut.UpsertAsync(record); + + // Assert. + var getResult = await sut.GetAsync("Upsert-2", new GetRecordOptions { IncludeVectors = true }); + Assert.Equal("Upsert-2", upsertResult); + Assert.Equal(record.HotelId, getResult?.HotelId); + Assert.Equal(record.HotelName, getResult?.HotelName); + Assert.Equal(record.HotelCode, getResult?.HotelCode); + Assert.Equal(record.ParkingIncluded, getResult?.ParkingIncluded); + Assert.Equal(record.Rating, getResult?.Rating); + Assert.Equal(record.Description, getResult?.Description); + Assert.Equal(record.DescriptionEmbedding?.ToArray(), getResult?.DescriptionEmbedding?.ToArray()); + + // Output. + output.WriteLine(upsertResult); + output.WriteLine(getResult?.ToString()); + } + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanUpsertManyDocumentsToVectorStoreAsync(bool useRecordDefinition) + { + // Arrange. + var options = new RedisHashSetVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.BasicVectorStoreRecordDefinition : null + }; + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act. 
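+        // UpsertBatchAsync yields the stored keys as an async stream; the asserts below
+        // materialize it with ToListAsync before checking that all three keys came back.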
+ var results = sut.UpsertBatchAsync( + [ + CreateTestHotel("UpsertMany-1", 1), + CreateTestHotel("UpsertMany-2", 2), + CreateTestHotel("UpsertMany-3", 3), + ]); + + // Assert. + Assert.NotNull(results); + var resultsList = await results.ToListAsync(); + + Assert.Equal(3, resultsList.Count); + Assert.Contains("UpsertMany-1", resultsList); + Assert.Contains("UpsertMany-2", resultsList); + Assert.Contains("UpsertMany-3", resultsList); + + // Output + foreach (var result in resultsList) + { + output.WriteLine(result); + } + } + + [Theory(Skip = SkipReason)] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task ItCanGetDocumentFromVectorStoreAsync(bool includeVectors, bool useRecordDefinition) + { + // Arrange. + var options = new RedisHashSetVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.BasicVectorStoreRecordDefinition : null + }; + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act. + var getResult = await sut.GetAsync("BaseSet-1", new GetRecordOptions { IncludeVectors = includeVectors }); + + // Assert. + Assert.Equal("BaseSet-1", getResult?.HotelId); + Assert.Equal("My Hotel 1", getResult?.HotelName); + Assert.Equal(1, getResult?.HotelCode); + Assert.True(getResult?.ParkingIncluded); + Assert.Equal(3.6, getResult?.Rating); + Assert.Equal("This is a great hotel.", getResult?.Description); + if (includeVectors) + { + Assert.Equal(new[] { 30f, 31f, 32f, 33f }, getResult?.DescriptionEmbedding?.ToArray()); + } + else + { + Assert.Null(getResult?.DescriptionEmbedding); + } + + // Output. + output.WriteLine(getResult?.ToString()); + } + + [Fact(Skip = SkipReason)] + public async Task ItCanGetManyDocumentsFromVectorStoreAsync() + { + // Arrange + var options = new RedisHashSetVectorStoreRecordCollectionOptions { PrefixCollectionNameToKeyNames = true }; + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act + // Also include one non-existing key to test that the operation does not fail for these and returns only the found ones. + var hotels = sut.GetBatchAsync(["BaseSet-1", "BaseSet-5", "BaseSet-2"], new GetRecordOptions { IncludeVectors = true }); + + // Assert + Assert.NotNull(hotels); + var hotelsList = await hotels.ToListAsync(); + Assert.Equal(2, hotelsList.Count); + + // Output + foreach (var hotel in hotelsList) + { + output.WriteLine(hotel?.ToString() ?? "Null"); + } + } + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanRemoveDocumentFromVectorStoreAsync(bool useRecordDefinition) + { + // Arrange. + var options = new RedisHashSetVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.BasicVectorStoreRecordDefinition : null + }; + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + var record = new BasicHotel + { + HotelId = "Remove-1", + HotelName = "Remove Test Hotel", + HotelCode = 20, + Description = "This is a great hotel.", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f } + }; + + await sut.UpsertAsync(record); + + // Act. + await sut.DeleteAsync("Remove-1"); + // Also delete a non-existing key to test that the operation does not fail for these. + await sut.DeleteAsync("Remove-2"); + + // Assert. 
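+        // "Remove-2" never existed, so the second delete above must be a silent no-op;
+        // only "Remove-1" is verified as gone.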
+ Assert.Null(await sut.GetAsync("Remove-1")); + } + + [Fact(Skip = SkipReason)] + public async Task ItCanRemoveManyDocumentsFromVectorStoreAsync() + { + // Arrange + var options = new RedisHashSetVectorStoreRecordCollectionOptions { PrefixCollectionNameToKeyNames = true }; + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + await sut.UpsertAsync(CreateTestHotel("RemoveMany-1", 1)); + await sut.UpsertAsync(CreateTestHotel("RemoveMany-2", 2)); + await sut.UpsertAsync(CreateTestHotel("RemoveMany-3", 3)); + + // Act + // Also include a non-existing key to test that the operation does not fail for these. + await sut.DeleteBatchAsync(["RemoveMany-1", "RemoveMany-2", "RemoveMany-3", "RemoveMany-4"]); + + // Assert + Assert.Null(await sut.GetAsync("RemoveMany-1", new GetRecordOptions { IncludeVectors = true })); + Assert.Null(await sut.GetAsync("RemoveMany-2", new GetRecordOptions { IncludeVectors = true })); + Assert.Null(await sut.GetAsync("RemoveMany-3", new GetRecordOptions { IncludeVectors = true })); + } + + [Fact(Skip = SkipReason)] + public async Task ItReturnsNullWhenGettingNonExistentRecordAsync() + { + // Arrange + var options = new RedisHashSetVectorStoreRecordCollectionOptions { PrefixCollectionNameToKeyNames = true }; + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act & Assert + Assert.Null(await sut.GetAsync("BaseSet-5", new GetRecordOptions { IncludeVectors = true })); + } + + [Fact(Skip = SkipReason)] + public async Task ItThrowsMappingExceptionForFailedMapperAsync() + { + // Arrange + var options = new RedisHashSetVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + HashEntriesCustomMapper = new FailingMapper() + }; + var sut = new RedisHashSetVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act & Assert + await Assert.ThrowsAsync(async () => await sut.GetAsync("BaseSet-1", new GetRecordOptions { IncludeVectors = true })); + } + + private static BasicHotel CreateTestHotel(string hotelId, int hotelCode) + { + var record = new BasicHotel + { + HotelId = hotelId, + HotelName = $"My Hotel {hotelCode}", + HotelCode = 1, + ParkingIncluded = true, + Rating = 3.6, + Description = "This is a great hotel.", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f } + }; + return record; + } + + private sealed class FailingMapper : IVectorStoreRecordMapper + { + public (string Key, HashEntry[] HashEntries) MapFromDataToStorageModel(BasicHotel dataModel) + { + throw new NotImplementedException(); + } + + public BasicHotel MapFromStorageToDataModel((string Key, HashEntry[] HashEntries) storageModel, StorageToDataModelMapperOptions options) + { + throw new NotImplementedException(); + } + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisJsonVectorStoreRecordCollectionTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisJsonVectorStoreRecordCollectionTests.cs new file mode 100644 index 000000000000..4fbd7bc5d647 --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisJsonVectorStoreRecordCollectionTests.cs @@ -0,0 +1,374 @@ +// Copyright (c) Microsoft. All rights reserved. 
+
+using System;
+using System.Linq;
+using System.Text.Json.Nodes;
+using System.Threading.Tasks;
+using Microsoft.SemanticKernel.Connectors.Redis;
+using Microsoft.SemanticKernel.Data;
+using NRedisStack.RedisStackCommands;
+using NRedisStack.Search;
+using Xunit;
+using Xunit.Abstractions;
+using static SemanticKernel.IntegrationTests.Connectors.Memory.Redis.RedisVectorStoreFixture;
+
+namespace SemanticKernel.IntegrationTests.Connectors.Memory.Redis;
+
+/// <summary>
+/// Contains tests for the <see cref="RedisJsonVectorStoreRecordCollection{TRecord}"/> class.
+/// </summary>
+/// <param name="output">Used for logging.</param>
+/// <param name="fixture">Redis setup and teardown.</param>
+[Collection("RedisVectorStoreCollection")]
+public sealed class RedisJsonVectorStoreRecordCollectionTests(ITestOutputHelper output, RedisVectorStoreFixture fixture)
+{
+    // If null, all tests will be enabled
+    private const string SkipReason = "Requires Redis docker container up and running";
+
+    private const string TestCollectionName = "jsonhotels";
+
+    [Theory(Skip = SkipReason)]
+    [InlineData(TestCollectionName, true)]
+    [InlineData("nonexistentcollection", false)]
+    public async Task CollectionExistsReturnsCollectionStateAsync(string collectionName, bool expectedExists)
+    {
+        // Arrange.
+        var sut = new RedisJsonVectorStoreRecordCollection<Hotel>(fixture.Database, collectionName);
+
+        // Act.
+        var actual = await sut.CollectionExistsAsync();
+
+        // Assert.
+        Assert.Equal(expectedExists, actual);
+    }
+
+    [Theory(Skip = SkipReason)]
+    [InlineData(true)]
+    [InlineData(false)]
+    public async Task ItCanCreateACollectionUpsertAndGetAsync(bool useRecordDefinition)
+    {
+        // Arrange
+        var record = CreateTestHotel("Upsert-1", 1);
+        var collectionNamePostfix = useRecordDefinition ? "WithDefinition" : "WithType";
+        var testCollectionName = $"jsoncreatetest{collectionNamePostfix}";
+
+        var options = new RedisJsonVectorStoreRecordCollectionOptions<Hotel>
+        {
+            PrefixCollectionNameToKeyNames = true,
+            VectorStoreRecordDefinition = useRecordDefinition ?
fixture.VectorStoreRecordDefinition : null + }; + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, testCollectionName, options); + + // Act + await sut.CreateCollectionAsync(); + var upsertResult = await sut.UpsertAsync(record); + var getResult = await sut.GetAsync("Upsert-1", new GetRecordOptions { IncludeVectors = true }); + + // Assert + var collectionExistResult = await sut.CollectionExistsAsync(); + Assert.True(collectionExistResult); + await sut.DeleteCollectionAsync(); + + Assert.Equal("Upsert-1", upsertResult); + Assert.Equal(record.HotelId, getResult?.HotelId); + Assert.Equal(record.HotelName, getResult?.HotelName); + Assert.Equal(record.HotelCode, getResult?.HotelCode); + Assert.Equal(record.Tags, getResult?.Tags); + Assert.Equal(record.FTSTags, getResult?.FTSTags); + Assert.Equal(record.ParkingIncluded, getResult?.ParkingIncluded); + Assert.Equal(record.LastRenovationDate, getResult?.LastRenovationDate); + Assert.Equal(record.Rating, getResult?.Rating); + Assert.Equal(record.Address.Country, getResult?.Address.Country); + Assert.Equal(record.Address.City, getResult?.Address.City); + Assert.Equal(record.Description, getResult?.Description); + Assert.Equal(record.DescriptionEmbedding?.ToArray(), getResult?.DescriptionEmbedding?.ToArray()); + + // Output + output.WriteLine(collectionExistResult.ToString()); + output.WriteLine(upsertResult); + output.WriteLine(getResult?.ToString()); + } + + [Fact(Skip = SkipReason)] + public async Task ItCanDeleteCollectionAsync() + { + // Arrange + var tempCollectionName = "temp-test"; + var schema = new Schema(); + schema.AddTextField("HotelName"); + var createParams = new FTCreateParams(); + createParams.AddPrefix(tempCollectionName); + await fixture.Database.FT().CreateAsync(tempCollectionName, createParams, schema); + + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, tempCollectionName); + + // Act + await sut.DeleteCollectionAsync(); + + // Assert + Assert.False(await sut.CollectionExistsAsync()); + } + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanUpsertDocumentToVectorStoreAsync(bool useRecordDefinition) + { + // Arrange. + var options = new RedisJsonVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.VectorStoreRecordDefinition : null + }; + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + Hotel record = CreateTestHotel("Upsert-2", 2); + + // Act. + var upsertResult = await sut.UpsertAsync(record); + + // Assert. + var getResult = await sut.GetAsync("Upsert-2", new GetRecordOptions { IncludeVectors = true }); + Assert.Equal("Upsert-2", upsertResult); + Assert.Equal(record.HotelId, getResult?.HotelId); + Assert.Equal(record.HotelName, getResult?.HotelName); + Assert.Equal(record.HotelCode, getResult?.HotelCode); + Assert.Equal(record.Tags, getResult?.Tags); + Assert.Equal(record.FTSTags, getResult?.FTSTags); + Assert.Equal(record.ParkingIncluded, getResult?.ParkingIncluded); + Assert.Equal(record.LastRenovationDate, getResult?.LastRenovationDate); + Assert.Equal(record.Rating, getResult?.Rating); + Assert.Equal(record.Address.Country, getResult?.Address.Country); + Assert.Equal(record.Address.City, getResult?.Address.City); + Assert.Equal(record.Description, getResult?.Description); + Assert.Equal(record.DescriptionEmbedding?.ToArray(), getResult?.DescriptionEmbedding?.ToArray()); + + // Output. 
+ output.WriteLine(upsertResult); + output.WriteLine(getResult?.ToString()); + } + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanUpsertManyDocumentsToVectorStoreAsync(bool useRecordDefinition) + { + // Arrange. + var options = new RedisJsonVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.VectorStoreRecordDefinition : null + }; + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act. + var results = sut.UpsertBatchAsync( + [ + CreateTestHotel("UpsertMany-1", 1), + CreateTestHotel("UpsertMany-2", 2), + CreateTestHotel("UpsertMany-3", 3), + ]); + + // Assert. + Assert.NotNull(results); + var resultsList = await results.ToListAsync(); + + Assert.Equal(3, resultsList.Count); + Assert.Contains("UpsertMany-1", resultsList); + Assert.Contains("UpsertMany-2", resultsList); + Assert.Contains("UpsertMany-3", resultsList); + + // Output + foreach (var result in resultsList) + { + output.WriteLine(result); + } + } + + [Theory(Skip = SkipReason)] + [InlineData(true, true)] + [InlineData(true, false)] + [InlineData(false, true)] + [InlineData(false, false)] + public async Task ItCanGetDocumentFromVectorStoreAsync(bool includeVectors, bool useRecordDefinition) + { + // Arrange. + var options = new RedisJsonVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.VectorStoreRecordDefinition : null + }; + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act. + var getResult = await sut.GetAsync("BaseSet-1", new GetRecordOptions { IncludeVectors = includeVectors }); + + // Assert. + Assert.Equal("BaseSet-1", getResult?.HotelId); + Assert.Equal("My Hotel 1", getResult?.HotelName); + Assert.Equal(1, getResult?.HotelCode); + Assert.Equal(new[] { "pool", "air conditioning", "concierge" }, getResult?.Tags); + Assert.Equal(new[] { "pool", "air conditioning", "concierge" }, getResult?.FTSTags); + Assert.True(getResult?.ParkingIncluded); + Assert.Equal(new DateTimeOffset(1970, 1, 18, 0, 0, 0, TimeSpan.Zero), getResult?.LastRenovationDate); + Assert.Equal(3.6, getResult?.Rating); + Assert.Equal("Seattle", getResult?.Address.City); + Assert.Equal("This is a great hotel.", getResult?.Description); + if (includeVectors) + { + Assert.Equal(new[] { 30f, 31f, 32f, 33f }, getResult?.DescriptionEmbedding?.ToArray()); + } + else + { + Assert.Null(getResult?.DescriptionEmbedding); + } + + // Output. + output.WriteLine(getResult?.ToString()); + } + + [Fact(Skip = SkipReason)] + public async Task ItCanGetManyDocumentsFromVectorStoreAsync() + { + // Arrange + var options = new RedisJsonVectorStoreRecordCollectionOptions { PrefixCollectionNameToKeyNames = true }; + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act + // Also include one non-existing key to test that the operation does not fail for these and returns only the found ones. + var hotels = sut.GetBatchAsync(["BaseSet-1", "BaseSet-5", "BaseSet-2"], new GetRecordOptions { IncludeVectors = true }); + + // Assert + Assert.NotNull(hotels); + var hotelsList = await hotels.ToListAsync(); + Assert.Equal(2, hotelsList.Count); + + // Output + foreach (var hotel in hotelsList) + { + output.WriteLine(hotel?.ToString() ?? 
"Null"); + } + } + + [Fact(Skip = SkipReason)] + public async Task ItFailsToGetDocumentsWithInvalidSchemaAsync() + { + // Arrange. + var options = new RedisJsonVectorStoreRecordCollectionOptions { PrefixCollectionNameToKeyNames = true }; + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act & Assert. + await Assert.ThrowsAsync(async () => await sut.GetAsync("BaseSet-4-Invalid", new GetRecordOptions { IncludeVectors = true })); + } + + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanRemoveDocumentFromVectorStoreAsync(bool useRecordDefinition) + { + // Arrange. + var options = new RedisJsonVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + VectorStoreRecordDefinition = useRecordDefinition ? fixture.VectorStoreRecordDefinition : null + }; + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + var address = new HotelAddress { City = "Seattle", Country = "USA" }; + var record = new Hotel + { + HotelId = "Remove-1", + HotelName = "Remove Test Hotel", + HotelCode = 20, + Description = "This is a great hotel.", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f } + }; + + await sut.UpsertAsync(record); + + // Act. + await sut.DeleteAsync("Remove-1"); + // Also delete a non-existing key to test that the operation does not fail for these. + await sut.DeleteAsync("Remove-2"); + + // Assert. + Assert.Null(await sut.GetAsync("Remove-1")); + } + + [Fact(Skip = SkipReason)] + public async Task ItCanRemoveManyDocumentsFromVectorStoreAsync() + { + // Arrange + var options = new RedisJsonVectorStoreRecordCollectionOptions { PrefixCollectionNameToKeyNames = true }; + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + await sut.UpsertAsync(CreateTestHotel("RemoveMany-1", 1)); + await sut.UpsertAsync(CreateTestHotel("RemoveMany-2", 2)); + await sut.UpsertAsync(CreateTestHotel("RemoveMany-3", 3)); + + // Act + // Also include a non-existing key to test that the operation does not fail for these. 
+ await sut.DeleteBatchAsync(["RemoveMany-1", "RemoveMany-2", "RemoveMany-3", "RemoveMany-4"]); + + // Assert + Assert.Null(await sut.GetAsync("RemoveMany-1", new GetRecordOptions { IncludeVectors = true })); + Assert.Null(await sut.GetAsync("RemoveMany-2", new GetRecordOptions { IncludeVectors = true })); + Assert.Null(await sut.GetAsync("RemoveMany-3", new GetRecordOptions { IncludeVectors = true })); + } + + [Fact(Skip = SkipReason)] + public async Task ItReturnsNullWhenGettingNonExistentRecordAsync() + { + // Arrange + var options = new RedisJsonVectorStoreRecordCollectionOptions { PrefixCollectionNameToKeyNames = true }; + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act & Assert + Assert.Null(await sut.GetAsync("BaseSet-5", new GetRecordOptions { IncludeVectors = true })); + } + + [Fact(Skip = SkipReason)] + public async Task ItThrowsMappingExceptionForFailedMapperAsync() + { + // Arrange + var options = new RedisJsonVectorStoreRecordCollectionOptions + { + PrefixCollectionNameToKeyNames = true, + JsonNodeCustomMapper = new FailingMapper() + }; + var sut = new RedisJsonVectorStoreRecordCollection(fixture.Database, TestCollectionName, options); + + // Act & Assert + await Assert.ThrowsAsync(async () => await sut.GetAsync("BaseSet-1", new GetRecordOptions { IncludeVectors = true })); + } + + private static Hotel CreateTestHotel(string hotelId, int hotelCode) + { + var address = new HotelAddress { City = "Seattle", Country = "USA" }; + var record = new Hotel + { + HotelId = hotelId, + HotelName = $"My Hotel {hotelCode}", + HotelCode = 1, + Tags = ["pool", "air conditioning", "concierge"], + FTSTags = ["pool", "air conditioning", "concierge"], + ParkingIncluded = true, + LastRenovationDate = new DateTimeOffset(1970, 1, 18, 0, 0, 0, TimeSpan.Zero), + Rating = 3.6, + Address = address, + Description = "This is a great hotel.", + DescriptionEmbedding = new[] { 30f, 31f, 32f, 33f } + }; + return record; + } + + private sealed class FailingMapper : IVectorStoreRecordMapper + { + public (string Key, JsonNode Node) MapFromDataToStorageModel(Hotel dataModel) + { + throw new NotImplementedException(); + } + + public Hotel MapFromStorageToDataModel((string Key, JsonNode Node) storageModel, StorageToDataModelMapperOptions options) + { + throw new NotImplementedException(); + } + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisVectorStoreCollectionFixture.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisVectorStoreCollectionFixture.cs new file mode 100644 index 000000000000..1bebd51d8f5f --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisVectorStoreCollectionFixture.cs @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Xunit; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Redis; + +[CollectionDefinition("RedisVectorStoreCollection")] +public class RedisVectorStoreCollectionFixture : ICollectionFixture +{ +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisVectorStoreFixture.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisVectorStoreFixture.cs new file mode 100644 index 000000000000..3256cae3e79e --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisVectorStoreFixture.cs @@ -0,0 +1,300 @@ +// Copyright (c) Microsoft. All rights reserved. 
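+// Note: this fixture spins up a redis/redis-stack container via Docker.DotNet and seeds a
+// JSON index ("jsonhotels") and a HASH index ("hashhotels") with matching hotel records.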
+ +using System; +using System.Collections.Generic; +using System.Runtime.InteropServices; +using System.Text.Json.Serialization; +using System.Threading.Tasks; +using Docker.DotNet; +using Docker.DotNet.Models; +using Microsoft.SemanticKernel.Data; +using NRedisStack.RedisStackCommands; +using NRedisStack.Search; +using NRedisStack.Search.Literals.Enums; +using StackExchange.Redis; +using Xunit; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Redis; + +#pragma warning disable CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable. +/// +/// Does setup and teardown of redis docker container and associated test data. +/// +public class RedisVectorStoreFixture : IAsyncLifetime +{ + /// The docker client we are using to create a redis container with. + private readonly DockerClient _client; + + /// The id of the redis container that we are testing with. + private string? _containerId = null; + + /// + /// Initializes a new instance of the class. + /// + public RedisVectorStoreFixture() + { + using var dockerClientConfiguration = new DockerClientConfiguration(); + this._client = dockerClientConfiguration.CreateClient(); + this.VectorStoreRecordDefinition = new VectorStoreRecordDefinition + { + Properties = new List + { + new VectorStoreRecordKeyProperty("HotelId", typeof(string)), + new VectorStoreRecordDataProperty("HotelName", typeof(string)) { IsFilterable = true }, + new VectorStoreRecordDataProperty("HotelCode", typeof(int)) { IsFilterable = true }, + new VectorStoreRecordDataProperty("Description", typeof(string)) { IsFullTextSearchable = true }, + new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(ReadOnlyMemory?)) { Dimensions = 4 }, + new VectorStoreRecordDataProperty("Tags", typeof(string[])) { IsFilterable = true }, + new VectorStoreRecordDataProperty("FTSTags", typeof(string[])) { IsFullTextSearchable = true }, + new VectorStoreRecordDataProperty("ParkingIncluded", typeof(bool)) { StoragePropertyName = "parking_is_included" }, + new VectorStoreRecordDataProperty("LastRenovationDate", typeof(DateTimeOffset)), + new VectorStoreRecordDataProperty("Rating", typeof(double)), + new VectorStoreRecordDataProperty("Address", typeof(HotelAddress)) + } + }; + this.BasicVectorStoreRecordDefinition = new VectorStoreRecordDefinition + { + Properties = new List + { + new VectorStoreRecordKeyProperty("HotelId", typeof(string)), + new VectorStoreRecordDataProperty("HotelName", typeof(string)) { IsFilterable = true }, + new VectorStoreRecordDataProperty("HotelCode", typeof(int)) { IsFilterable = true }, + new VectorStoreRecordDataProperty("Description", typeof(string)) { IsFullTextSearchable = true }, + new VectorStoreRecordVectorProperty("DescriptionEmbedding", typeof(ReadOnlyMemory?)) { Dimensions = 4 }, + new VectorStoreRecordDataProperty("ParkingIncluded", typeof(bool)) { StoragePropertyName = "parking_is_included" }, + new VectorStoreRecordDataProperty("Rating", typeof(double)), + } + }; + } + + /// Gets the redis database connection to use for tests. + public IDatabase Database { get; private set; } + + /// Gets the manually created vector store record definition for our test model. + public VectorStoreRecordDefinition VectorStoreRecordDefinition { get; private set; } + + /// Gets the manually created vector store record definition for our basic test model. 
+    public VectorStoreRecordDefinition BasicVectorStoreRecordDefinition { get; private set; }
+
+    /// <summary>
+    /// Create / Recreate redis docker container, create an index and add test data.
+    /// </summary>
+    /// <returns>An async task.</returns>
+    public async Task InitializeAsync()
+    {
+        this._containerId = await SetupRedisContainerAsync(this._client);
+
+        // Connect to redis.
+        ConnectionMultiplexer redis = ConnectionMultiplexer.Connect("localhost:6379,connectTimeout=60000,connectRetry=5");
+        this.Database = redis.GetDatabase();
+
+        // Create a schema for the vector store.
+        var schema = new Schema();
+        schema.AddTextField(new FieldName("$.HotelName", "HotelName"));
+        schema.AddNumericField(new FieldName("$.HotelCode", "HotelCode"));
+        schema.AddTextField(new FieldName("$.Description", "Description"));
+        schema.AddVectorField(new FieldName("$.DescriptionEmbedding", "DescriptionEmbedding"), Schema.VectorField.VectorAlgo.HNSW, new Dictionary<string, object>()
+        {
+            ["TYPE"] = "FLOAT32",
+            ["DIM"] = "4",
+            ["DISTANCE_METRIC"] = "L2"
+        });
+        var jsonCreateParams = new FTCreateParams().AddPrefix("jsonhotels:").On(IndexDataType.JSON);
+        await this.Database.FT().CreateAsync("jsonhotels", jsonCreateParams, schema);
+
+        // Create a hashset index.
+        var hashsetCreateParams = new FTCreateParams().AddPrefix("hashhotels:").On(IndexDataType.HASH);
+        await this.Database.FT().CreateAsync("hashhotels", hashsetCreateParams, schema);
+
+        // Create some test data.
+        var address = new HotelAddress { City = "Seattle", Country = "USA" };
+        var embedding = new[] { 30f, 31f, 32f, 33f };
+
+        // Add JSON test data.
+        await this.Database.JSON().SetAsync("jsonhotels:BaseSet-1", "$", new
+        {
+            HotelName = "My Hotel 1",
+            HotelCode = 1,
+            Description = "This is a great hotel.",
+            DescriptionEmbedding = embedding,
+            Tags = new[] { "pool", "air conditioning", "concierge" },
+            FTSTags = new[] { "pool", "air conditioning", "concierge" },
+            parking_is_included = true,
+            LastRenovationDate = new DateTimeOffset(1970, 1, 18, 0, 0, 0, TimeSpan.Zero),
+            Rating = 3.6,
+            Address = address
+        });
+        await this.Database.JSON().SetAsync("jsonhotels:BaseSet-2", "$", new { HotelName = "My Hotel 2", HotelCode = 2, Description = "This is a great hotel.", DescriptionEmbedding = embedding, parking_is_included = false });
+        await this.Database.JSON().SetAsync("jsonhotels:BaseSet-3", "$", new { HotelName = "My Hotel 3", HotelCode = 3, Description = "This is a great hotel.", DescriptionEmbedding = embedding, parking_is_included = false });
+        await this.Database.JSON().SetAsync("jsonhotels:BaseSet-4-Invalid", "$", new { HotelId = "AnotherId", HotelName = "My Invalid Hotel", HotelCode = 4, Description = "This is an invalid hotel.", DescriptionEmbedding = embedding, parking_is_included = false });
+
+        // Add hashset test data.
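+        // Redis hash fields can only hold strings and byte arrays, so each embedding is
+        // written as its raw little-endian float32 bytes, e.g. (illustrative):
+        //   byte[] bytes = MemoryMarshal.AsBytes(new ReadOnlySpan<float>(embedding)).ToArray();
+        //   // 4 floats -> 16 bytes, matching TYPE=FLOAT32 and DIM=4 in the index schema.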
+ await this.Database.HashSetAsync("hashhotels:BaseSet-1", new HashEntry[] + { + new("HotelName", "My Hotel 1"), + new("HotelCode", 1), + new("Description", "This is a great hotel."), + new("DescriptionEmbedding", MemoryMarshal.AsBytes(new ReadOnlySpan(embedding)).ToArray()), + new("parking_is_included", true), + new("Rating", 3.6) + }); + await this.Database.HashSetAsync("hashhotels:BaseSet-2", new HashEntry[] + { + new("HotelName", "My Hotel 2"), + new("HotelCode", 2), + new("Description", "This is a great hotel."), + new("DescriptionEmbedding", MemoryMarshal.AsBytes(new ReadOnlySpan(embedding)).ToArray()), + new("parking_is_included", false), + }); + await this.Database.HashSetAsync("hashhotels:BaseSet-3", new HashEntry[] + { + new("HotelName", "My Hotel 3"), + new("HotelCode", 3), + new("Description", "This is a great hotel."), + new("DescriptionEmbedding", MemoryMarshal.AsBytes(new ReadOnlySpan(embedding)).ToArray()), + new("parking_is_included", false), + }); + await this.Database.HashSetAsync("hashhotels:BaseSet-4-Invalid", new HashEntry[] + { + new("HotelId", "AnotherId"), + new("HotelName", "My Invalid Hotel"), + new("HotelCode", 4), + new("Description", "This is an invalid hotel."), + new("DescriptionEmbedding", MemoryMarshal.AsBytes(new ReadOnlySpan(embedding)).ToArray()), + new("parking_is_included", false), + }); + } + + /// + /// Delete the docker container after the test run. + /// + /// An async task. + public async Task DisposeAsync() + { + if (this._containerId != null) + { + await this._client.Containers.StopContainerAsync(this._containerId, new ContainerStopParameters()); + await this._client.Containers.RemoveContainerAsync(this._containerId, new ContainerRemoveParameters()); + } + } + + /// + /// Setup the redis container by pulling the image and running it. + /// + /// The docker client to create the container with. + /// The id of the container. + private static async Task SetupRedisContainerAsync(DockerClient client) + { + await client.Images.CreateImageAsync( + new ImagesCreateParameters + { + FromImage = "redis/redis-stack", + Tag = "latest", + }, + null, + new Progress()); + + var container = await client.Containers.CreateContainerAsync(new CreateContainerParameters() + { + Image = "redis/redis-stack", + HostConfig = new HostConfig() + { + PortBindings = new Dictionary> + { + {"6379", new List {new() {HostPort = "6379"}}} + }, + PublishAllPorts = true + }, + ExposedPorts = new Dictionary + { + { "6379", default } + }, + }); + + await client.Containers.StartContainerAsync( + container.ID, + new ContainerStartParameters()); + + return container.ID; + } + + /// + /// A test model for the vector store that has complex properties as supported by JSON redis mode. + /// + public class Hotel + { + [VectorStoreRecordKey] + public string HotelId { get; init; } + + [VectorStoreRecordData(IsFilterable = true)] + public string HotelName { get; init; } + + [VectorStoreRecordData(IsFilterable = true)] + public int HotelCode { get; init; } + + [VectorStoreRecordData(IsFullTextSearchable = true)] + public string Description { get; init; } + + [VectorStoreRecordVector(4)] + public ReadOnlyMemory? 
DescriptionEmbedding { get; init; } + +#pragma warning disable CA1819 // Properties should not return arrays + [VectorStoreRecordData(IsFilterable = true)] + public string[] Tags { get; init; } + + [VectorStoreRecordData(IsFullTextSearchable = true)] + public string[] FTSTags { get; init; } +#pragma warning restore CA1819 // Properties should not return arrays + + [JsonPropertyName("parking_is_included")] + [VectorStoreRecordData(StoragePropertyName = "parking_is_included")] + public bool ParkingIncluded { get; init; } + + [VectorStoreRecordData] + public DateTimeOffset LastRenovationDate { get; init; } + + [VectorStoreRecordData] + public double Rating { get; init; } + + [VectorStoreRecordData] + public HotelAddress Address { get; init; } + } + + /// + /// A test model for the vector store to simulate a complex type. + /// + public class HotelAddress + { + public string City { get; init; } + public string Country { get; init; } + } + + /// + /// A test model for the vector store that only uses basic types as supported by HashSets Redis mode. + /// + public class BasicHotel + { + [VectorStoreRecordKey] + public string HotelId { get; init; } + + [VectorStoreRecordData(IsFilterable = true)] + public string HotelName { get; init; } + + [VectorStoreRecordData(IsFilterable = true)] + public int HotelCode { get; init; } + + [VectorStoreRecordData(IsFullTextSearchable = true)] + public string Description { get; init; } + + [VectorStoreRecordVector(4)] + public ReadOnlyMemory? DescriptionEmbedding { get; init; } + + [JsonPropertyName("parking_is_included")] + [VectorStoreRecordData(StoragePropertyName = "parking_is_included")] + public bool ParkingIncluded { get; init; } + + [VectorStoreRecordData] + public double Rating { get; init; } + } +} +#pragma warning restore CS8618 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable. diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisVectorStoreTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisVectorStoreTests.cs new file mode 100644 index 000000000000..8e18522928eb --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/Memory/Redis/RedisVectorStoreTests.cs @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Linq; +using System.Threading.Tasks; +using Microsoft.SemanticKernel.Connectors.Redis; +using Xunit; +using Xunit.Abstractions; + +namespace SemanticKernel.IntegrationTests.Connectors.Memory.Redis; + +/// +/// Contains tests for the class. +/// +/// Used to write to the test output stream. +/// The test fixture. 
+[Collection("RedisVectorStoreCollection")] +public class RedisVectorStoreTests(ITestOutputHelper output, RedisVectorStoreFixture fixture) +{ + // If null, all tests will be enabled + private const string SkipReason = "Requires Redis docker container up and running"; + + [Fact(Skip = SkipReason)] + public async Task ItCanGetAListOfExistingCollectionNamesAsync() + { + // Arrange + var sut = new RedisVectorStore(fixture.Database); + + // Act + var collectionNames = await sut.ListCollectionNamesAsync().ToListAsync(); + + // Assert + Assert.Equal(2, collectionNames.Count); + Assert.Contains("jsonhotels", collectionNames); + Assert.Contains("hashhotels", collectionNames); + + // Output + output.WriteLine(string.Join(",", collectionNames)); + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/MistralAI/ChatCompletion/MistralAIChatCompletionTests.cs b/dotnet/src/IntegrationTests/Connectors/MistralAI/ChatCompletion/MistralAIChatCompletionTests.cs index 67053cb68eaa..def078a53799 100644 --- a/dotnet/src/IntegrationTests/Connectors/MistralAI/ChatCompletion/MistralAIChatCompletionTests.cs +++ b/dotnet/src/IntegrationTests/Connectors/MistralAI/ChatCompletion/MistralAIChatCompletionTests.cs @@ -300,6 +300,48 @@ public async Task ValidateGetChatMessageContentsWithAutoInvokeAndFunctionFilterA Assert.Contains("GetWeather", invokedFunctions); } + [Fact(Skip = "This test is for manual verification.")] + public async Task ValidateGetStreamingChatMessageContentsWithAutoInvokeAndFunctionFilterAsync() + { + // Arrange + var model = this._configuration["MistralAI:ChatModel"]; + var apiKey = this._configuration["MistralAI:ApiKey"]; + var service = new MistralAIChatCompletionService(model!, apiKey!); + + var kernel = new Kernel(); + kernel.Plugins.AddFromType(); + + var invokedFunctions = new List(); + var filter = new FakeFunctionFilter(async (context, next) => + { + invokedFunctions.Add(context.Function.Name); + await next(context); + }); + kernel.FunctionInvocationFilters.Add(filter); + + // Act + var chatHistory = new ChatHistory + { + new ChatMessageContent(AuthorRole.User, "What is the weather like in Paris?") + }; + var executionSettings = new MistralAIPromptExecutionSettings { ToolCallBehavior = MistralAIToolCallBehavior.AutoInvokeKernelFunctions }; + + StringBuilder content = new(); + + await foreach (var update in service.GetStreamingChatMessageContentsAsync(chatHistory, executionSettings, kernel)) + { + if (!string.IsNullOrEmpty(update.Content)) + { + content.Append(update.Content); + } + } + + // Assert + Assert.NotNull(content); + Assert.Contains("sunny", content.ToString()); + Assert.Contains("GetWeather", invokedFunctions); + } + [Fact(Skip = "This test is for manual verification.")] public async Task ValidateGetChatMessageContentsWithAutoInvokeAndFunctionInvocationFilterAsync() { diff --git a/dotnet/src/IntegrationTests/IntegrationTests.csproj b/dotnet/src/IntegrationTests/IntegrationTests.csproj index df5afa473ce7..55a6ac6d1006 100644 --- a/dotnet/src/IntegrationTests/IntegrationTests.csproj +++ b/dotnet/src/IntegrationTests/IntegrationTests.csproj @@ -44,6 +44,7 @@ + @@ -59,13 +60,17 @@ + + + + diff --git a/dotnet/src/IntegrationTests/TestSettings/Memory/AzureAISearchConfiguration.cs b/dotnet/src/IntegrationTests/TestSettings/Memory/AzureAISearchConfiguration.cs new file mode 100644 index 000000000000..fd4043ef9b83 --- /dev/null +++ b/dotnet/src/IntegrationTests/TestSettings/Memory/AzureAISearchConfiguration.cs @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft. All rights reserved. 
+
+using System.Diagnostics.CodeAnalysis;
+
+namespace SemanticKernel.IntegrationTests.TestSettings.Memory;
+
+[SuppressMessage("Design", "CA1054:URI-like parameters should not be strings", Justification = "This is just for test configuration")]
+public sealed class AzureAISearchConfiguration(string serviceUrl, string apiKey)
+{
+    [SuppressMessage("Design", "CA1056:URI-like properties should not be strings", Justification = "This is just for test configuration")]
+    public string ServiceUrl { get; set; } = serviceUrl;
+
+    public string ApiKey { get; set; } = apiKey;
+}
diff --git a/dotnet/src/IntegrationTests/TestSettings/Memory/AzureAISearchSetup.psm1 b/dotnet/src/IntegrationTests/TestSettings/Memory/AzureAISearchSetup.psm1
new file mode 100644
index 000000000000..64563abdeeb0
--- /dev/null
+++ b/dotnet/src/IntegrationTests/TestSettings/Memory/AzureAISearchSetup.psm1
@@ -0,0 +1,74 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+# This module requires powershell 7 and the Az and Az.Search modules. You may need to import Az and install Az.Search.
+# Import-Module -Name Az
+# Install-Module -Name Az.Search
+
+# Before running any of the functions you will need to connect to your azure account and pick the appropriate subscription.
+# Connect-AzAccount
+# Select-AzSubscription -SubscriptionName "My Dev Subscription"
+
+$resourceGroup = "sk-integration-test-infra"
+$aiSearchResourceName = "aisearch-integration-test-basic"
+
+<#
+.SYNOPSIS
+    Set up the infra required for Azure AI Search Integration tests,
+    retrieve the connection information for it, and update the secrets
+    store with these settings.
+
+.Parameter OverrideResourceGroup
+    Optional override resource group name if the default doesn't work.
+
+.Parameter OverrideAISearchResourceName
+    Optional override ai search resource name if the default doesn't work.
+#>
+function New-AzureAISearchIntegrationInfra($overrideResourceGroup = $resourceGroup, $overrideAISearchResourceName = $aiSearchResourceName) {
+    # Create the resource group if it doesn't exist.
+    Get-AzResourceGroup -Name $overrideResourceGroup -ErrorVariable notPresent -ErrorAction SilentlyContinue
+    if ($notPresent) {
+        Write-Host "Resource Group does not exist, creating '$overrideResourceGroup' ..."
+        New-AzResourceGroup -Name $overrideResourceGroup -Location "North Europe"
+    }
+
+    # Create the ai search service if it doesn't exist.
+    $service = Get-AzSearchService -ResourceGroupName $overrideResourceGroup -Name $overrideAISearchResourceName
+    if (-not $service) {
+        Write-Host "Service does not exist, creating '$overrideAISearchResourceName' ..."
+        New-AzSearchService -ResourceGroupName $overrideResourceGroup -Name $overrideAISearchResourceName -Sku "Basic" -Location "North Europe" -PartitionCount 1 -ReplicaCount 1 -HostingMode Default
+    }
+
+    # Set the required local secrets.
+    Set-AzureAISearchIntegrationInfraUserSecrets -OverrideResourceGroup $overrideResourceGroup -OverrideAISearchResourceName $overrideAISearchResourceName
+}
+
+<#
+.SYNOPSIS
+    Set the user secrets required to run the Azure AI Search integration tests.
+
+.Parameter OverrideResourceGroup
+    Optional override resource group name if the default doesn't work.
+
+.Parameter OverrideAISearchResourceName
+    Optional override ai search resource name if the default doesn't work.
+#>
+function Set-AzureAISearchIntegrationInfraUserSecrets($overrideResourceGroup = $resourceGroup, $overrideAISearchResourceName = $aiSearchResourceName) {
+    # Set the required local secrets.
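+    # Get-AzSearchAdminKeyPair returns the service's primary/secondary admin keys; the
+    # primary key and the derived "https://<name>.search.windows.net" endpoint are stored
+    # as dotnet user-secrets so the integration tests can read them at run time.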
+ $keys = Get-AzSearchAdminKeyPair -ResourceGroupName $overrideResourceGroup -ServiceName $overrideAISearchResourceName + dotnet user-secrets set "AzureAISearch:ServiceUrl" "https://$overrideAISearchResourceName.search.windows.net" --project ../../IntegrationTests.csproj + dotnet user-secrets set "AzureAISearch:ApiKey" $keys.Primary --project ../../IntegrationTests.csproj +} + +<# +.SYNOPSIS + Tear down the infra required for Azure AI Search Integration tests. + +.Parameter OverrideResourceGroup + Optional override resource group name if the default doesn't work. + +.Parameter OverrideAISearchResourceName + Optional override ai search resource name if the default doesn't work. +#> +function Remove-AzureAISearchIntegrationInfra($overrideResourceGroup = $resourceGroup, $overrideAISearchResourceName = $aiSearchResourceName) { + Remove-AzSearchService -ResourceGroupName $overrideResourceGroup -Name $overrideAISearchResourceName +} \ No newline at end of file diff --git a/dotnet/src/InternalUtilities/src/Data/VectorStoreErrorHandler.cs b/dotnet/src/InternalUtilities/src/Data/VectorStoreErrorHandler.cs new file mode 100644 index 000000000000..1aa2e6f479ad --- /dev/null +++ b/dotnet/src/InternalUtilities/src/Data/VectorStoreErrorHandler.cs @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Contains helpers for reading vector store model properties and their attributes. +/// +[ExcludeFromCodeCoverage] +internal static class VectorStoreErrorHandler +{ + /// + /// Run the given model conversion and wrap any exceptions with . + /// + /// The response type of the operation. + /// The name of the database system the operation is being run on. + /// The name of the collection the operation is being run on. + /// The type of database operation being run. + /// The operation to run. + /// The result of the operation. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static T RunModelConversion(string databaseSystemName, string collectionName, string operationName, Func operation) + { + try + { + return operation.Invoke(); + } + catch (Exception ex) + { + throw new VectorStoreRecordMappingException("Failed to convert vector store record.", ex) + { + VectorStoreType = databaseSystemName, + CollectionName = collectionName, + OperationName = operationName + }; + } + } +} diff --git a/dotnet/src/InternalUtilities/src/Data/VectorStoreRecordPropertyReader.cs b/dotnet/src/InternalUtilities/src/Data/VectorStoreRecordPropertyReader.cs new file mode 100644 index 000000000000..d4f06071f66b --- /dev/null +++ b/dotnet/src/InternalUtilities/src/Data/VectorStoreRecordPropertyReader.cs @@ -0,0 +1,532 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Reflection; +using System.Text.Json; +using System.Text.Json.Serialization; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Contains helpers for reading vector store model properties and their attributes. +/// +[ExcludeFromCodeCoverage] +internal static class VectorStoreRecordPropertyReader +{ + /// Cache of property enumerations so that we don't incur reflection costs with each invocation. 
+ private static readonly ConcurrentDictionary dataProperties, List vectorProperties)> s_singleVectorPropertiesCache = new(); + + /// Cache of property enumerations so that we don't incur reflection costs with each invocation. + private static readonly ConcurrentDictionary dataProperties, List vectorProperties)> s_multipleVectorsPropertiesCache = new(); + + /// + /// Split the given into key, data and vector properties and verify that we have the expected numbers of each type. + /// + /// The name of the type that the definition relates to. + /// The to split. + /// A value indicating whether multiple vectors are supported. + /// A value indicating whether we need at least one vector. + /// The properties on the split into key, data and vector groupings. + /// Thrown if there are any validation failures with the provided . + public static (VectorStoreRecordKeyProperty KeyProperty, List DataProperties, List VectorProperties) SplitDefinitionAndVerify( + string typeName, + VectorStoreRecordDefinition definition, + bool supportsMultipleVectors, + bool requiresAtLeastOneVector) + { + var keyProperties = definition.Properties.OfType().ToList(); + + if (keyProperties.Count > 1) + { + throw new ArgumentException($"Multiple key properties found on type {typeName} or the provided {nameof(VectorStoreRecordDefinition)}."); + } + + var keyProperty = keyProperties.FirstOrDefault(); + var dataProperties = definition.Properties.OfType().ToList(); + var vectorProperties = definition.Properties.OfType().ToList(); + + if (keyProperty is null) + { + throw new ArgumentException($"No key property found on type {typeName} or the provided {nameof(VectorStoreRecordDefinition)}."); + } + + if (requiresAtLeastOneVector && vectorProperties.Count == 0) + { + throw new ArgumentException($"No vector property found on type {typeName} or the provided {nameof(VectorStoreRecordDefinition)}."); + } + + if (!supportsMultipleVectors && vectorProperties.Count > 1) + { + throw new ArgumentException($"Multiple vector properties found on type {typeName} or the provided {nameof(VectorStoreRecordDefinition)} while only one is supported."); + } + + return (keyProperty, dataProperties, vectorProperties); + } + + /// + /// Find the properties with , and attributes + /// and verify that they exist and that we have the expected numbers of each type. + /// Return those properties in separate categories. + /// + /// The data model to find the properties on. + /// A value indicating whether multiple vector properties are supported instead of just one. + /// The categorized properties. + public static (PropertyInfo KeyProperty, List DataProperties, List VectorProperties) FindProperties(Type type, bool supportsMultipleVectors) + { + var cache = supportsMultipleVectors ? s_multipleVectorsPropertiesCache : s_singleVectorPropertiesCache; + + // First check the cache. + if (cache.TryGetValue(type, out var cachedProperties)) + { + return cachedProperties; + } + + PropertyInfo? keyProperty = null; + List dataProperties = new(); + List vectorProperties = new(); + bool singleVectorPropertyFound = false; + + foreach (var property in type.GetProperties()) + { + // Get Key property. + if (property.GetCustomAttribute() is not null) + { + if (keyProperty is not null) + { + throw new ArgumentException($"Multiple key properties found on type {type.FullName}."); + } + + keyProperty = property; + } + + // Get data properties. + if (property.GetCustomAttribute() is not null) + { + dataProperties.Add(property); + } + + // Get Vector properties. 
+ if (property.GetCustomAttribute() is not null) + { + // Add all vector properties if we support multiple vectors. + if (supportsMultipleVectors) + { + vectorProperties.Add(property); + } + // Add only one vector property if we don't support multiple vectors. + else if (!singleVectorPropertyFound) + { + vectorProperties.Add(property); + singleVectorPropertyFound = true; + } + else + { + throw new ArgumentException($"Multiple vector properties found on type {type.FullName} while only one is supported."); + } + } + } + + // Check that we have a key property. + if (keyProperty is null) + { + throw new ArgumentException($"No key property found on type {type.FullName}."); + } + + // Check that we have one vector property if we don't have named vectors. + if (!supportsMultipleVectors && !singleVectorPropertyFound) + { + throw new ArgumentException($"No vector property found on type {type.FullName}."); + } + + // Update the cache. + cache[type] = (keyProperty, dataProperties, vectorProperties); + + return (keyProperty, dataProperties, vectorProperties); + } + + /// + /// Find the properties listed in the on the and verify + /// that they exist and that we have the expected numbers of each type. + /// Return those properties in separate categories. + /// + /// The data model to find the properties on. + /// The property configuration. + /// A value indicating whether multiple vector properties are supported instead of just one. + /// The categorized properties. + public static (PropertyInfo KeyProperty, List DataProperties, List VectorProperties) FindProperties(Type type, VectorStoreRecordDefinition vectorStoreRecordDefinition, bool supportsMultipleVectors) + { + PropertyInfo? keyProperty = null; + List dataProperties = new(); + List vectorProperties = new(); + bool singleVectorPropertyFound = false; + + foreach (VectorStoreRecordProperty property in vectorStoreRecordDefinition.Properties) + { + // Key. + if (property is VectorStoreRecordKeyProperty keyPropertyInfo) + { + if (keyProperty is not null) + { + throw new ArgumentException($"Multiple key properties configured for type {type.FullName}."); + } + + keyProperty = type.GetProperty(keyPropertyInfo.DataModelPropertyName); + if (keyProperty == null) + { + throw new ArgumentException($"Key property '{keyPropertyInfo.DataModelPropertyName}' not found on type {type.FullName}."); + } + } + // Data. + else if (property is VectorStoreRecordDataProperty dataPropertyInfo) + { + var dataProperty = type.GetProperty(dataPropertyInfo.DataModelPropertyName); + if (dataProperty == null) + { + throw new ArgumentException($"Data property '{dataPropertyInfo.DataModelPropertyName}' not found on type {type.FullName}."); + } + + dataProperties.Add(dataProperty); + } + // Vector. + else if (property is VectorStoreRecordVectorProperty vectorPropertyInfo) + { + var vectorProperty = type.GetProperty(vectorPropertyInfo.DataModelPropertyName); + if (vectorProperty == null) + { + throw new ArgumentException($"Vector property '{vectorPropertyInfo.DataModelPropertyName}' not found on type {type.FullName}."); + } + + // Add all vector properties if we support multiple vectors. + if (supportsMultipleVectors) + { + vectorProperties.Add(vectorProperty); + } + // Add only one vector property if we don't support multiple vectors. 
+ else if (!singleVectorPropertyFound) + { + vectorProperties.Add(vectorProperty); + singleVectorPropertyFound = true; + } + else + { + throw new ArgumentException($"Multiple vector properties configured for type {type.FullName} while only one is supported."); + } + } + else + { + throw new ArgumentException($"Unknown property type '{property.GetType().FullName}' in vector store record definition."); + } + } + + // Check that we have a key property. + if (keyProperty is null) + { + throw new ArgumentException($"No key property configured for type {type.FullName}."); + } + + // Check that we have one vector property if we don't have named vectors. + if (!supportsMultipleVectors && !singleVectorPropertyFound) + { + throw new ArgumentException($"No vector property configured for type {type.FullName}."); + } + + return (keyProperty!, dataProperties, vectorProperties); + } + + /// + /// Create a by reading the attributes on the properties of the given type. + /// + /// The type to create the definition for. + /// if the store supports multiple vectors, otherwise. + /// The based on the given type. + public static VectorStoreRecordDefinition CreateVectorStoreRecordDefinitionFromType(Type type, bool supportsMultipleVectors) + { + var properties = FindProperties(type, supportsMultipleVectors); + var definitionProperties = new List(); + + // Key property. + var keyAttribute = properties.KeyProperty.GetCustomAttribute(); + definitionProperties.Add(new VectorStoreRecordKeyProperty(properties.KeyProperty.Name, properties.KeyProperty.PropertyType) { StoragePropertyName = keyAttribute!.StoragePropertyName }); + + // Data properties. + foreach (var dataProperty in properties.DataProperties) + { + var dataAttribute = dataProperty.GetCustomAttribute(); + if (dataAttribute is not null) + { + definitionProperties.Add(new VectorStoreRecordDataProperty(dataProperty.Name, dataProperty.PropertyType) + { + IsFilterable = dataAttribute.IsFilterable, + IsFullTextSearchable = dataAttribute.IsFullTextSearchable, + StoragePropertyName = dataAttribute.StoragePropertyName + }); + } + } + + // Vector properties. + foreach (var vectorProperty in properties.VectorProperties) + { + var vectorAttribute = vectorProperty.GetCustomAttribute(); + if (vectorAttribute is not null) + { + definitionProperties.Add(new VectorStoreRecordVectorProperty(vectorProperty.Name, vectorProperty.PropertyType) + { + Dimensions = vectorAttribute.Dimensions, + IndexKind = vectorAttribute.IndexKind, + DistanceFunction = vectorAttribute.DistanceFunction, + StoragePropertyName = vectorAttribute.StoragePropertyName + }); + } + } + + return new VectorStoreRecordDefinition { Properties = definitionProperties }; + } + + /// + /// Verify that the given properties are of the supported types. + /// + /// The properties to check. + /// A set of supported types that the provided properties may have. + /// A description of the category of properties being checked. Used for error messaging. + /// A value indicating whether versions of all the types should also be supported. + /// Thrown if any of the properties are not in the given set of types. + public static void VerifyPropertyTypes(List properties, HashSet supportedTypes, string propertyCategoryDescription, bool? supportEnumerable = false) + { + var supportedEnumerableTypes = supportEnumerable == true + ? 
supportedTypes + : []; + + VerifyPropertyTypes(properties, supportedTypes, supportedEnumerableTypes, propertyCategoryDescription); + } + + /// + /// Verify that the given properties are of the supported types. + /// + /// The properties to check. + /// A set of supported types that the provided properties may have. + /// A set of supported types that the provided enumerable properties may use as their element type. + /// A description of the category of properties being checked. Used for error messaging. + /// Thrown if any of the properties are not in the given set of types. + public static void VerifyPropertyTypes(List properties, HashSet supportedTypes, HashSet supportedEnumerableTypes, string propertyCategoryDescription) + { + foreach (var property in properties) + { + VerifyPropertyType(property.Name, property.PropertyType, supportedTypes, supportedEnumerableTypes, propertyCategoryDescription); + } + } + + /// + /// Verify that the given properties are of the supported types. + /// + /// The properties to check. + /// A set of supported types that the provided properties may have. + /// A description of the category of properties being checked. Used for error messaging. + /// A value indicating whether versions of all the types should also be supported. + /// Thrown if any of the properties are not in the given set of types. + public static void VerifyPropertyTypes(IEnumerable properties, HashSet supportedTypes, string propertyCategoryDescription, bool? supportEnumerable = false) + { + var supportedEnumerableTypes = supportEnumerable == true + ? supportedTypes + : []; + + VerifyPropertyTypes(properties, supportedTypes, supportedEnumerableTypes, propertyCategoryDescription); + } + + /// + /// Verify that the given properties are of the supported types. + /// + /// The properties to check. + /// A set of supported types that the provided properties may have. + /// A set of supported types that the provided enumerable properties may use as their element type. + /// A description of the category of properties being checked. Used for error messaging. + /// Thrown if any of the properties are not in the given set of types. + public static void VerifyPropertyTypes(IEnumerable properties, HashSet supportedTypes, HashSet supportedEnumerableTypes, string propertyCategoryDescription) + { + foreach (var property in properties) + { + VerifyPropertyType(property.DataModelPropertyName, property.PropertyType, supportedTypes, supportedEnumerableTypes, propertyCategoryDescription); + } + } + + /// + /// Verify that the given property is of the supported types. + /// + /// The name of the property being checked. Used for error messaging. + /// The type of the property being checked. + /// A set of supported types that the provided property may have. + /// A set of supported types that the provided property may use as its element type if it's enumerable. + /// A description of the category of property being checked. Used for error messaging. + /// Thrown if the property is not in the given set of types. + public static void VerifyPropertyType(string propertyName, Type propertyType, HashSet supportedTypes, HashSet supportedEnumerableTypes, string propertyCategoryDescription) + { + // Add shortcut before testing all the more expensive scenarios. + if (supportedTypes.Contains(propertyType)) + { + return; + } + + // Check all collection scenarios and get stored type. 
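+        // For example (illustrative): a string[] property resolves its element type via
+        // GetElementType(), while a List<int> property resolves to int through the
+        // IEnumerable<int> interface it implements; anything unrecognised falls through
+        // to the property type itself.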
+ if (supportedEnumerableTypes.Count > 0 && typeof(IEnumerable).IsAssignableFrom(propertyType)) + { + var typeToCheck = propertyType switch + { + IEnumerable => typeof(object), + var enumerableType when enumerableType.IsGenericType && enumerableType.GetGenericTypeDefinition() == typeof(IEnumerable<>) => enumerableType.GetGenericArguments()[0], + var arrayType when arrayType.IsArray => arrayType.GetElementType()!, + var interfaceType when interfaceType.GetInterfaces().FirstOrDefault(i => i.IsGenericType && i.GetGenericTypeDefinition() == typeof(IEnumerable<>)) is Type enumerableInterface => + enumerableInterface.GetGenericArguments()[0], + _ => propertyType + }; + + if (!supportedEnumerableTypes.Contains(typeToCheck)) + { + var supportedEnumerableElementTypesString = string.Join(", ", supportedEnumerableTypes!.Select(t => t.FullName)); + throw new ArgumentException($"Enumerable {propertyCategoryDescription} properties must have one of the supported element types: {supportedEnumerableElementTypesString}. Element type of the property '{propertyName}' is {typeToCheck.FullName}."); + } + } + else + { + // if we got here, we know the type is not supported + var supportedTypesString = string.Join(", ", supportedTypes.Select(t => t.FullName)); + throw new ArgumentException($"{propertyCategoryDescription} properties must be one of the supported types: {supportedTypesString}. Type of the property '{propertyName}' is {propertyType.FullName}."); + } + } + + /// + /// Get the JSON property name of a property by using the if available, otherwise + /// using the if available, otherwise falling back to the property name. + /// The provided may not actually contain the property, e.g. when the user has a data model that + /// doesn't resemble the stored data and where they are using a custom mapper. + /// + /// The property to retrieve a storage name for. + /// The data model type that the property belongs to. + /// The options used for JSON serialization. + /// The JSON storage property name. + public static string GetJsonPropertyName(VectorStoreRecordProperty property, Type dataModel, JsonSerializerOptions options) + { + var propertyInfo = dataModel.GetProperty(property.DataModelPropertyName); + + if (propertyInfo != null) + { + var jsonPropertyNameAttribute = propertyInfo.GetCustomAttribute(); + if (jsonPropertyNameAttribute is not null) + { + return jsonPropertyNameAttribute.Name; + } + } + + if (options.PropertyNamingPolicy is not null) + { + return options.PropertyNamingPolicy.ConvertName(property.DataModelPropertyName); + } + + return property.DataModelPropertyName; + } + + /// + /// Get the JSON property name of a property by using the if available, otherwise + /// using the if available, otherwise falling back to the property name. + /// + /// The options used for JSON serialization. + /// The property to retrieve a storage name for. + /// The JSON storage property name. + public static string GetJsonPropertyName(JsonSerializerOptions options, PropertyInfo property) + { + var jsonPropertyNameAttribute = property.GetCustomAttribute(); + if (jsonPropertyNameAttribute is not null) + { + return jsonPropertyNameAttribute.Name; + } + + if (options.PropertyNamingPolicy is not null) + { + return options.PropertyNamingPolicy.ConvertName(property.Name); + } + + return property.Name; + } + + /// + /// Build a map of property names to the names under which they should be saved in storage if using JSON serialization. + /// + /// The properties to build the map for. 
+ /// The data model type that the property belongs to. + /// The options used for JSON serialization. + /// The map from property names to the names under which they should be saved in storage if using JSON serialization. + public static Dictionary BuildPropertyNameToJsonPropertyNameMap( + (VectorStoreRecordKeyProperty keyProperty, List dataProperties, List vectorProperties) properties, + Type dataModel, + JsonSerializerOptions options) + { + var jsonPropertyNameMap = new Dictionary(); + jsonPropertyNameMap.Add(properties.keyProperty.DataModelPropertyName, GetJsonPropertyName(properties.keyProperty, dataModel, options)); + + foreach (var dataProperty in properties.dataProperties) + { + jsonPropertyNameMap.Add(dataProperty.DataModelPropertyName, GetJsonPropertyName(dataProperty, dataModel, options)); + } + + foreach (var vectorProperty in properties.vectorProperties) + { + jsonPropertyNameMap.Add(vectorProperty.DataModelPropertyName, GetJsonPropertyName(vectorProperty, dataModel, options)); + } + + return jsonPropertyNameMap; + } + + /// + /// Build a map of property names to the names under which they should be saved in storage if using JSON serialization. + /// + /// The properties to build the map for. + /// The data model type that the property belongs to. + /// The options used for JSON serialization. + /// The map from property names to the names under which they should be saved in storage if using JSON serialization. + public static Dictionary BuildPropertyNameToJsonPropertyNameMap( + (PropertyInfo keyProperty, List dataProperties, List vectorProperties) properties, + Type dataModel, + JsonSerializerOptions options) + { + var jsonPropertyNameMap = new Dictionary(); + jsonPropertyNameMap.Add(properties.keyProperty.Name, GetJsonPropertyName(options, properties.keyProperty)); + + foreach (var dataProperty in properties.dataProperties) + { + jsonPropertyNameMap.Add(dataProperty.Name, GetJsonPropertyName(options, dataProperty)); + } + + foreach (var vectorProperty in properties.vectorProperties) + { + jsonPropertyNameMap.Add(vectorProperty.Name, GetJsonPropertyName(options, vectorProperty)); + } + + return jsonPropertyNameMap; + } + + /// + /// Build a map of property names to the names under which they should be saved in storage, for the given properties. + /// + /// The properties to build the map for. + /// The map from property names to the names under which they should be saved in storage. + public static Dictionary BuildPropertyNameToStorageNameMap((VectorStoreRecordKeyProperty keyProperty, List dataProperties, List vectorProperties) properties) + { + var storagePropertyNameMap = new Dictionary(); + storagePropertyNameMap.Add(properties.keyProperty.DataModelPropertyName, properties.keyProperty.StoragePropertyName ?? properties.keyProperty.DataModelPropertyName); + + foreach (var dataProperty in properties.dataProperties) + { + storagePropertyNameMap.Add(dataProperty.DataModelPropertyName, dataProperty.StoragePropertyName ?? dataProperty.DataModelPropertyName); + } + + foreach (var vectorProperty in properties.vectorProperties) + { + storagePropertyNameMap.Add(vectorProperty.DataModelPropertyName, vectorProperty.StoragePropertyName ?? 
vectorProperty.DataModelPropertyName); + } + + return storagePropertyNameMap; + } +} diff --git a/dotnet/src/InternalUtilities/src/Schema/JsonSchemaMapper.ReflectionHelpers.cs b/dotnet/src/InternalUtilities/src/Schema/JsonSchemaMapper.ReflectionHelpers.cs index 31c582756e66..11dc0c6d85b7 100644 --- a/dotnet/src/InternalUtilities/src/Schema/JsonSchemaMapper.ReflectionHelpers.cs +++ b/dotnet/src/InternalUtilities/src/Schema/JsonSchemaMapper.ReflectionHelpers.cs @@ -25,7 +25,7 @@ static partial class JsonSchemaMapper private static Type GetElementType(JsonTypeInfo typeInfo) { Debug.Assert(typeInfo.Kind is JsonTypeInfoKind.Enumerable or JsonTypeInfoKind.Dictionary); - return (Type)typeof(JsonTypeInfo).GetProperty("ElementType", BindingFlags.Instance | BindingFlags.NonPublic)?.GetValue(typeInfo)!; + return (Type)typeof(JsonTypeInfo).GetProperty("ElementType", BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic)?.GetValue(typeInfo)!; } // The source generator currently doesn't populate attribute providers for properties diff --git a/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsPythonPlugin.cs b/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsPythonPlugin.cs index 6c92763f3fe4..c8094de65201 100644 --- a/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsPythonPlugin.cs +++ b/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsPythonPlugin.cs @@ -155,7 +155,7 @@ public async Task UploadFileAsync( await this.AddHeadersAsync(httpClient).ConfigureAwait(false); using var fileContent = new ByteArrayContent(File.ReadAllBytes(localFilePath)); - using var request = new HttpRequestMessage(HttpMethod.Post, $"{this._poolManagementEndpoint}python/uploadFile?identifier={this._settings.SessionId}&api-version={ApiVersion}") + using var request = new HttpRequestMessage(HttpMethod.Post, $"{this._poolManagementEndpoint}files/upload?identifier={this._settings.SessionId}&api-version={ApiVersion}") { Content = new MultipartFormDataContent { @@ -173,7 +173,7 @@ public async Task UploadFileAsync( var JsonElementResult = JsonSerializer.Deserialize(await response.Content.ReadAsStringAsync().ConfigureAwait(false)); - return JsonSerializer.Deserialize(JsonElementResult.GetProperty("$values")[0].GetRawText())!; + return JsonSerializer.Deserialize(JsonElementResult.GetProperty("value")[0].GetProperty("properties").GetRawText())!; } /// @@ -230,7 +230,7 @@ public async Task> ListFilesAsync() using var httpClient = this._httpClientFactory.CreateClient(); await this.AddHeadersAsync(httpClient).ConfigureAwait(false); - var response = await httpClient.GetAsync(new Uri($"{this._poolManagementEndpoint}python/files?identifier={this._settings.SessionId}&api-version={ApiVersion}")).ConfigureAwait(false); + var response = await httpClient.GetAsync(new Uri($"{this._poolManagementEndpoint}/files?identifier={this._settings.SessionId}&api-version={ApiVersion}")).ConfigureAwait(false); if (!response.IsSuccessStatusCode) { @@ -239,13 +239,13 @@ public async Task> ListFilesAsync() var jsonElementResult = JsonSerializer.Deserialize(await response.Content.ReadAsStringAsync().ConfigureAwait(false)); - var files = jsonElementResult.GetProperty("$values"); + var files = jsonElementResult.GetProperty("value"); var result = new SessionsRemoteFileMetadata[files.GetArrayLength()]; for (var i = 0; i < result.Length; i++) { - result[i] = JsonSerializer.Deserialize(files[i].GetRawText())!; + result[i] = JsonSerializer.Deserialize(files[i].GetProperty("properties").GetRawText())!; } return result; diff --git 
a/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsRemoteFileMetadata.cs b/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsRemoteFileMetadata.cs index 6f7f10ec9c5c..526194618a98 100644 --- a/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsRemoteFileMetadata.cs +++ b/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsRemoteFileMetadata.cs @@ -39,7 +39,7 @@ public SessionsRemoteFileMetadata(string filename, int size) /// The last modified time. /// [Description("Last modified time.")] - [JsonPropertyName("last_modified_time")] + [JsonPropertyName("lastModifiedTime")] public DateTime? LastModifiedTime { get; set; } /// diff --git a/dotnet/src/Plugins/Plugins.UnitTests/Core/SessionsPythonPluginTests.cs b/dotnet/src/Plugins/Plugins.UnitTests/Core/SessionsPythonPluginTests.cs index 37bb2aa4a029..789cd85fc353 100644 --- a/dotnet/src/Plugins/Plugins.UnitTests/Core/SessionsPythonPluginTests.cs +++ b/dotnet/src/Plugins/Plugins.UnitTests/Core/SessionsPythonPluginTests.cs @@ -193,14 +193,14 @@ public async Task ItShouldListFilesAsync() // Assert Assert.Contains(result, (item) => - item.Filename == "test.txt" && - item.Size == 680 && - item.LastModifiedTime!.Value.Ticks == 638508470494918207); + item.Filename == "test-file.txt" && + item.Size == 516 && + item.LastModifiedTime!.Value.Ticks == 638585580822423944); Assert.Contains(result, (item) => - item.Filename == "test2.txt" && - item.Size == 1074 && - item.LastModifiedTime!.Value.Ticks == 638508471084916062); + item.Filename == "test-file2.txt" && + item.Size == 211 && + item.LastModifiedTime!.Value.Ticks == 638585580822423944); } [Fact] @@ -210,9 +210,9 @@ public async Task ItShouldUploadFileAsync() var responseContent = await File.ReadAllTextAsync(UpdaloadFileTestDataFilePath); var requestPayload = await File.ReadAllBytesAsync(FileTestDataFilePath); - var expectedResponse = new SessionsRemoteFileMetadata("test.txt", 680) + var expectedResponse = new SessionsRemoteFileMetadata("test-file.txt", 516) { - LastModifiedTime = new DateTime(638508470494918207), + LastModifiedTime = new DateTime(638585526384228269) }; this._messageHandlerStub.ResponseToReturn = new HttpResponseMessage(HttpStatusCode.OK) @@ -223,7 +223,7 @@ public async Task ItShouldUploadFileAsync() var plugin = new SessionsPythonPlugin(this._defaultSettings, this._httpClientFactory); // Act - var result = await plugin.UploadFileAsync(".test.txt", FileTestDataFilePath); + var result = await plugin.UploadFileAsync("test-file.txt", FileTestDataFilePath); // Assert Assert.Equal(result.Filename, expectedResponse.Filename); diff --git a/dotnet/src/Plugins/Plugins.UnitTests/TestData/sessions_python_plugin_file_list.json b/dotnet/src/Plugins/Plugins.UnitTests/TestData/sessions_python_plugin_file_list.json index 57378d5ca1c6..98e5e1c86282 100644 --- a/dotnet/src/Plugins/Plugins.UnitTests/TestData/sessions_python_plugin_file_list.json +++ b/dotnet/src/Plugins/Plugins.UnitTests/TestData/sessions_python_plugin_file_list.json @@ -1,17 +1,23 @@ { "$id": "1", - "$values": [ + "value": [ { "$id": "2", - "filename": "test2.txt", - "size": 1074, - "last_modified_time": "2024-05-09T10:25:08.4916062Z" + "properties": { + "$id": "3", + "filename": "test-file.txt", + "size": 516, + "lastModifiedTime": "2024-08-06T16:21:22.2423944Z" + } }, { - "$id": "3", - "filename": "test.txt", - "size": 680, - "last_modified_time": "2024-05-09T10:24:09.4918207Z" + "$id": "4", + "properties": { + "$id": "5", + "filename": "test-file2.txt", + "size": 211, + "lastModifiedTime": 
"2024-08-06T16:21:22.2423944Z" + } } ] } \ No newline at end of file diff --git a/dotnet/src/Plugins/Plugins.UnitTests/TestData/sessions_python_plugin_file_upload.json b/dotnet/src/Plugins/Plugins.UnitTests/TestData/sessions_python_plugin_file_upload.json index 22eaaa5f4f72..b052d5f22e9f 100644 --- a/dotnet/src/Plugins/Plugins.UnitTests/TestData/sessions_python_plugin_file_upload.json +++ b/dotnet/src/Plugins/Plugins.UnitTests/TestData/sessions_python_plugin_file_upload.json @@ -1,11 +1,14 @@ { "$id": "1", - "$values": [ + "value": [ { "$id": "2", - "filename": "test.txt", - "size": 680, - "last_modified_time": "2024-05-09T10:24:09.4918207Z" + "properties": { + "$id": "3", + "filename": "test-file.txt", + "size": 516, + "lastModifiedTime": "2024-08-06T14:50:38.4228269Z" + } } ] } \ No newline at end of file diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/IVectorStore.cs b/dotnet/src/SemanticKernel.Abstractions/Data/IVectorStore.cs new file mode 100644 index 000000000000..31246a3138d6 --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/IVectorStore.cs @@ -0,0 +1,43 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Threading; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Interface for accessing the list of collections in a vector store. +/// +/// +/// This interface can be used with collections of any schema type, but requires you to provide schema information when getting a collection. +/// +[Experimental("SKEXP0001")] +public interface IVectorStore +{ + /// + /// Get a collection from the vector store. + /// + /// The data type of the record key. + /// The record data model to use for adding, updating and retrieving data from the collection. + /// The name of the collection. + /// Defines the schema of the record type. + /// A new instance for managing the records in the collection. + /// + /// To successfully request a collection, either must be annotated with attributes that define the schema of + /// the record type, or must be provided. + /// + /// + /// + /// + IVectorStoreRecordCollection GetCollection(string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition = null) + where TKey : notnull + where TRecord : class; + + /// + /// Retrieve the names of all the collections in the vector store. + /// + /// The to monitor for cancellation requests. The default is . + /// The list of names of all the collections in the vector store. + IAsyncEnumerable ListCollectionNamesAsync(CancellationToken cancellationToken = default); +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/IVectorStoreRecordCollection.cs b/dotnet/src/SemanticKernel.Abstractions/Data/IVectorStoreRecordCollection.cs new file mode 100644 index 000000000000..5071412014a8 --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/IVectorStoreRecordCollection.cs @@ -0,0 +1,130 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Threading; +using System.Threading.Tasks; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// A schema aware interface for managing a named collection of records in a vector store and for creating or deleting the collection itself. +/// +/// The data type of the record key. +/// The record data model to use for adding, updating and retrieving data from the store. 
+[Experimental("SKEXP0001")] +#pragma warning disable CA1711 // Identifiers should not have incorrect suffix +public interface IVectorStoreRecordCollection +#pragma warning restore CA1711 // Identifiers should not have incorrect suffix + where TKey : notnull + where TRecord : class +{ + /// + /// Gets the name of the collection. + /// + public string CollectionName { get; } + + /// + /// Check if the collection exists in the vector store. + /// + /// The to monitor for cancellation requests. The default is . + /// if the collection exists, otherwise. + Task CollectionExistsAsync(CancellationToken cancellationToken = default); + + /// + /// Create this collection in the vector store. + /// + /// The to monitor for cancellation requests. The default is . + /// A that completes when the collection has been created. + Task CreateCollectionAsync(CancellationToken cancellationToken = default); + + /// + /// Create this collection in the vector store if it does not already exist. + /// + /// The to monitor for cancellation requests. The default is . + /// A that completes when the collection has been created. + Task CreateCollectionIfNotExistsAsync(CancellationToken cancellationToken = default); + + /// + /// Delete the collection from the vector store. + /// + /// The to monitor for cancellation requests. The default is . + /// A that completes when the collection has been deleted. + Task DeleteCollectionAsync(CancellationToken cancellationToken = default); + + /// + /// Gets a record from the vector store. Does not guarantee that the collection exists. + /// Returns null if the record is not found. + /// + /// The unique id associated with the record to get. + /// Optional options for retrieving the record. + /// The to monitor for cancellation requests. The default is . + /// The record if found, otherwise null. + /// Throw when the command fails to execute for any reason. + /// Throw when mapping between the storage model and record data model fails. + Task GetAsync(TKey key, GetRecordOptions? options = default, CancellationToken cancellationToken = default); + + /// + /// Gets a batch of records from the vector store. Does not guarantee that the collection exists. + /// Gets will be made in a single request or in a single parallel batch depending on the available store functionality. + /// Only found records will be returned, so the resultset may be smaller than the requested keys. + /// Throws for any issues other than records not being found. + /// + /// The unique ids associated with the record to get. + /// Optional options for retrieving the records. + /// The to monitor for cancellation requests. The default is . + /// The records associated with the unique keys provided. + /// Throw when the command fails to execute for any reason. + /// Throw when mapping between the storage model and record data model fails. + IAsyncEnumerable GetBatchAsync(IEnumerable keys, GetRecordOptions? options = default, CancellationToken cancellationToken = default); + + /// + /// Deletes a record from the vector store. Does not guarantee that the collection exists. + /// + /// The unique id associated with the record to remove. + /// Optional options for removing the record. + /// The to monitor for cancellation requests. The default is . + /// The unique identifier for the record. + /// Throw when the command fails to execute for any reason other than that the record does not exit. + Task DeleteAsync(TKey key, DeleteRecordOptions? 
diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/IVectorStoreRecordMapper.cs b/dotnet/src/SemanticKernel.Abstractions/Data/IVectorStoreRecordMapper.cs
new file mode 100644
index 000000000000..4125c4a1b3ad
--- /dev/null
+++ b/dotnet/src/SemanticKernel.Abstractions/Data/IVectorStoreRecordMapper.cs
@@ -0,0 +1,30 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.SemanticKernel.Data;
+
+///
+/// Interface for mapping between a storage model and the consumer record data model.
+///
+/// The consumer record data model to map to or from.
+/// The storage model to map to or from.
+[Experimental("SKEXP0001")]
+public interface IVectorStoreRecordMapper<TRecordDataModel, TStorageModel>
+ where TRecordDataModel : class
+{
+ ///
+ /// Map from the consumer record data model to the storage model.
+ ///
+ /// The consumer record data model record to map.
+ /// The mapped result.
+ TStorageModel MapFromDataToStorageModel(TRecordDataModel dataModel); + + /// + /// Map from the storage model to the consumer record data model. + /// + /// The storage data model record to map. + /// Options to control the mapping behavior. + /// The mapped result. + TRecordDataModel MapFromStorageToDataModel(TStorageModel storageModel, StorageToDataModelMapperOptions options); +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordAttributes/VectorStoreRecordDataAttribute.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordAttributes/VectorStoreRecordDataAttribute.cs new file mode 100644 index 000000000000..f31b5c38352e --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordAttributes/VectorStoreRecordDataAttribute.cs @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Attribute to mark a property on a record class as 'data'. +/// +/// +/// Marking a property as 'data' means that the property is not a key, and not a vector, but optionally +/// this property may have an associated vector field containing an embedding for this data. +/// The characteristics defined here will influence how the property is treated by the vector store. +/// +[Experimental("SKEXP0001")] +[AttributeUsage(AttributeTargets.Property, AllowMultiple = false)] +public sealed class VectorStoreRecordDataAttribute : Attribute +{ + /// + /// Gets or sets a value indicating whether this data property is filterable. + /// + /// + /// Default is . + /// + public bool IsFilterable { get; init; } + + /// + /// Gets or sets a value indicating whether this data property is full text searchable. + /// + /// + /// Default is . + /// + public bool IsFullTextSearchable { get; init; } + + /// + /// Gets or sets an optional name to use for the property in storage, if different from the property name. + /// E.g. the property name might be "MyProperty" but the storage name might be "my_property". + /// + public string? StoragePropertyName { get; set; } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordAttributes/VectorStoreRecordKeyAttribute.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordAttributes/VectorStoreRecordKeyAttribute.cs new file mode 100644 index 000000000000..32376956b853 --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordAttributes/VectorStoreRecordKeyAttribute.cs @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Attribute to mark a property on a record class as the key under which the record is stored in a vector store. +/// +/// +/// The characteristics defined here will influence how the property is treated by the vector store. +/// +[Experimental("SKEXP0001")] +[AttributeUsage(AttributeTargets.Property, AllowMultiple = false)] +public sealed class VectorStoreRecordKeyAttribute : Attribute +{ + /// + /// Gets or sets an optional name to use for the property in storage, if different from the property name. + /// E.g. the property name might be "MyProperty" but the storage name might be "my_property". + /// + public string? 
StoragePropertyName { get; set; } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordAttributes/VectorStoreRecordVectorAttribute.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordAttributes/VectorStoreRecordVectorAttribute.cs new file mode 100644 index 000000000000..74a2a0796811 --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordAttributes/VectorStoreRecordVectorAttribute.cs @@ -0,0 +1,79 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Attribute to mark a property on a record class as a vector. +/// +/// +/// The characteristics defined here will influence how the property is treated by the vector store. +/// +[Experimental("SKEXP0001")] +[AttributeUsage(AttributeTargets.Property, AllowMultiple = false)] +public sealed class VectorStoreRecordVectorAttribute : Attribute +{ + /// + /// Initializes a new instance of the class. + /// + public VectorStoreRecordVectorAttribute() + { + } + + /// + /// Initializes a new instance of the class. + /// + /// The number of dimensions that the vector has. + public VectorStoreRecordVectorAttribute(int Dimensions) + { + this.Dimensions = Dimensions; + } + + /// + /// Initializes a new instance of the class. + /// + /// The number of dimensions that the vector has. + /// The kind of index to use. + /// The distance function to use when comparing vectors. + public VectorStoreRecordVectorAttribute(int Dimensions, string? IndexKind, string? DistanceFunction) + { + this.Dimensions = Dimensions; + this.IndexKind = IndexKind; + this.DistanceFunction = DistanceFunction; + } + + /// + /// Gets or sets the number of dimensions that the vector has. + /// + /// + /// This property is required when creating collections, but may be omitted if not using that functionality. + /// If not provided when trying to create a collection, create will fail. + /// + public int? Dimensions { get; private set; } + + /// + /// Gets the kind of index to use. + /// + /// + /// + /// Default varies by database type. See the documentation of your chosen database connector for more information. + /// + public string? IndexKind { get; private set; } + + /// + /// Gets the distance function to use when comparing vectors. + /// + /// + /// + /// Default varies by database type. See the documentation of your chosen database connector for more information. + /// + public string? DistanceFunction { get; private set; } + + /// + /// Gets or sets an optional name to use for the property in storage, if different from the property name. + /// E.g. the property name might be "MyProperty" but the storage name might be "my_property". + /// + public string? StoragePropertyName { get; set; } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/DistanceFunction.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/DistanceFunction.cs new file mode 100644 index 000000000000..32601243966b --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/DistanceFunction.cs @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Defines a list of well known distance functions that can be used to compare vectors. +/// +/// +/// Not all Vector Store connectors support all distance functions and some connectors may +/// support additional distance functions that are not defined here. 
See the documentation
+/// for each connector for more information on what is supported.
+///
+[Experimental("SKEXP0001")]
+public static class DistanceFunction
+{
+ ///
+ /// The cosine (angular) similarity between two vectors.
+ ///
+ ///
+ /// Measures only the angle between the two vectors, without taking into account the length of the vectors.
+ /// CosineSimilarity = 1 - CosineDistance.
+ /// -1 means vectors are opposite.
+ /// 0 means vectors are orthogonal.
+ /// 1 means vectors are identical.
+ ///
+ public const string CosineSimilarity = nameof(CosineSimilarity);
+
+ ///
+ /// The cosine (angular) distance between two vectors.
+ ///
+ ///
+ /// CosineDistance = 1 - CosineSimilarity.
+ /// 2 means vectors are opposite.
+ /// 1 means vectors are orthogonal.
+ /// 0 means vectors are identical.
+ ///
+ public const string CosineDistance = nameof(CosineDistance);
+
+ ///
+ /// Measures both the length and angle between two vectors.
+ ///
+ ///
+ /// Same as cosine similarity if the vectors are the same length, but more performant.
+ ///
+ public const string DotProductSimilarity = nameof(DotProductSimilarity);
+
+ ///
+ /// Measures the Euclidean distance between two vectors.
+ ///
+ ///
+ /// Also known as the l2-norm.
+ ///
+ public const string EuclideanDistance = nameof(EuclideanDistance);
+
+ ///
+ /// Measures the Manhattan distance between two vectors.
+ ///
+ public const string ManhattanDistance = nameof(ManhattanDistance);
+}
diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/IndexKind.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/IndexKind.cs
new file mode 100644
index 000000000000..364baaa8e727
--- /dev/null
+++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/IndexKind.cs
@@ -0,0 +1,36 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.SemanticKernel.Data;
+
+///
+/// Defines a list of well known index types that can be used to index vectors.
+///
+///
+/// Not all Vector Store connectors support all index types and some connectors may
+/// support additional index types that are not defined here. See the documentation
+/// for each connector for more information on what is supported.
+///
+[Experimental("SKEXP0001")]
+public static class IndexKind
+{
+ ///
+ /// Hierarchical Navigable Small World, which performs an approximate nearest neighbour (ANN) search.
+ ///
+ ///
+ /// Lower accuracy than exhaustive k nearest neighbor, but faster and more efficient.
+ ///
+ public const string Hnsw = nameof(Hnsw);
+
+ ///
+ /// Does a brute force search to find the nearest neighbors.
+ /// Calculates the distances between all pairs of data points, so has a linear time complexity that grows directly proportional to the number of points.
+ /// Also referred to as exhaustive k nearest neighbor in some databases.
+ ///
+ ///
+ /// High recall accuracy, but slower and more expensive than HNSW.
+ /// Better with smaller datasets.
+ ///
+ public const string Flat = nameof(Flat);
+}
diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordDataProperty.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordDataProperty.cs
new file mode 100644
index 000000000000..9dec25aa4ce1
--- /dev/null
+++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordDataProperty.cs
@@ -0,0 +1,53 @@
+// Copyright (c) Microsoft. All rights reserved.
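The CosineSimilarity/CosineDistance relationship documented above is easy to sanity-check numerically. A small self-contained sketch, not part of the change itself; the vector values are arbitrary:

using System;

public static class CosineDemo
{
    public static void Main()
    {
        float[] a = { 1f, 2f, 3f };
        float[] b = { 2f, 4f, 6f }; // parallel to a, so similarity should be 1

        double dot = 0, normA = 0, normB = 0;
        for (int i = 0; i < a.Length; i++)
        {
            dot += a[i] * b[i];
            normA += a[i] * a[i];
            normB += b[i] * b[i];
        }

        // Cosine similarity ignores vector length: only the angle matters.
        double similarity = dot / (Math.Sqrt(normA) * Math.Sqrt(normB));
        double distance = 1 - similarity; // CosineDistance = 1 - CosineSimilarity

        Console.WriteLine($"similarity: {similarity}, distance: {distance}"); // 1, 0
    }
}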
+ +using System; +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Defines a data property on a vector store record. +/// +/// +/// The characteristics defined here will influence how the property is treated by the vector store. +/// +[Experimental("SKEXP0001")] +public sealed class VectorStoreRecordDataProperty : VectorStoreRecordProperty +{ + /// + /// Initializes a new instance of the class. + /// + /// The name of the property. + /// The type of the property. + public VectorStoreRecordDataProperty(string propertyName, Type propertyType) + : base(propertyName, propertyType) + { + } + + /// + /// Initializes a new instance of the class by cloning the given source. + /// + /// The source to clone + public VectorStoreRecordDataProperty(VectorStoreRecordDataProperty source) + : base(source) + { + this.IsFilterable = source.IsFilterable; + this.IsFullTextSearchable = source.IsFullTextSearchable; + } + + /// + /// Gets or sets a value indicating whether this data property is filterable. + /// + /// + /// Default is . + /// + public bool IsFilterable { get; init; } + + /// + /// Gets or sets a value indicating whether this data property is full text searchable. + /// + /// + /// Default is . + /// + public bool IsFullTextSearchable { get; init; } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordDefinition.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordDefinition.cs new file mode 100644 index 000000000000..455bd5842c47 --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordDefinition.cs @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// A description of the properties of a record stored in a vector store. +/// +/// +/// Each property contains additional information about how the property will be treated by the vector store. +/// +[Experimental("SKEXP0001")] +public sealed class VectorStoreRecordDefinition +{ + /// Empty static list for initialization purposes. + private static readonly List s_emptyFields = new(); + + /// + /// The list of properties that are stored in the record. + /// + public IReadOnlyList Properties { get; init; } = s_emptyFields; +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordKeyProperty.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordKeyProperty.cs new file mode 100644 index 000000000000..6ba9725e2da4 --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordKeyProperty.cs @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Defines a key property on a vector store record. +/// +/// +/// The characteristics defined here will influence how the property is treated by the vector store. +/// +[Experimental("SKEXP0001")] +public sealed class VectorStoreRecordKeyProperty : VectorStoreRecordProperty +{ + /// + /// Initializes a new instance of the class. + /// + /// The name of the property. + /// The type of the property. 
+ public VectorStoreRecordKeyProperty(string propertyName, Type propertyType)
+ : base(propertyName, propertyType)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the class by cloning the given source.
+ ///
+ /// The source to clone.
+ public VectorStoreRecordKeyProperty(VectorStoreRecordKeyProperty source)
+ : base(source)
+ {
+ }
+}
diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordProperty.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordProperty.cs
new file mode 100644
index 000000000000..400ae7065355
--- /dev/null
+++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordProperty.cs
@@ -0,0 +1,56 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.SemanticKernel.Data;
+
+///
+/// Defines a base property class for properties on a vector store record.
+///
+///
+/// The characteristics defined here will influence how the property is treated by the vector store.
+///
+[Experimental("SKEXP0001")]
+public abstract class VectorStoreRecordProperty
+{
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ /// The name of the property on the data model.
+ /// The type of the property.
+ private protected VectorStoreRecordProperty(string dataModelPropertyName, Type propertyType)
+ {
+ Verify.NotNullOrWhiteSpace(dataModelPropertyName);
+ Verify.NotNull(propertyType);
+
+ this.DataModelPropertyName = dataModelPropertyName;
+ this.PropertyType = propertyType;
+ }
+
+ private protected VectorStoreRecordProperty(VectorStoreRecordProperty source)
+ {
+ this.DataModelPropertyName = source.DataModelPropertyName;
+ this.StoragePropertyName = source.StoragePropertyName;
+ this.PropertyType = source.PropertyType;
+ }
+
+ ///
+ /// Gets or sets the name of the property on the data model.
+ ///
+ public string DataModelPropertyName { get; private set; }
+
+ ///
+ /// Gets or sets an optional name to use for the property in storage, if different from the property name.
+ /// E.g. the property name might be "MyProperty" but the storage name might be "my_property".
+ /// This property will only be respected by implementations that do not support a well known
+ /// serialization mechanism like JSON, in which case the attributes used by that serialization system will
+ /// be used.
+ ///
+ public string? StoragePropertyName { get; init; }
+
+ ///
+ /// Gets or sets the type of the property.
+ ///
+ public Type PropertyType { get; private set; }
+}
diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordVectorProperty.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordVectorProperty.cs
new file mode 100644
index 000000000000..4f4b3a1bce0a
--- /dev/null
+++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordDefinition/VectorStoreRecordVectorProperty.cs
@@ -0,0 +1,65 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.SemanticKernel.Data;
+
+///
+/// Defines a vector property on a vector store record.
+///
+///
+/// The characteristics defined here will influence how the property is treated by the vector store.
+///
+[Experimental("SKEXP0001")]
+public sealed class VectorStoreRecordVectorProperty : VectorStoreRecordProperty
+{
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ /// The name of the property.
+ /// The type of the property.
+ public VectorStoreRecordVectorProperty(string propertyName, Type propertyType) + : base(propertyName, propertyType) + { + } + + /// + /// Initializes a new instance of the class by cloning the given source. + /// + /// The source to clone + public VectorStoreRecordVectorProperty(VectorStoreRecordVectorProperty source) + : base(source) + { + this.Dimensions = source.Dimensions; + this.IndexKind = source.IndexKind; + this.DistanceFunction = source.DistanceFunction; + } + + /// + /// Gets or sets the number of dimensions that the vector has. + /// + /// + /// This property is required when creating collections, but may be omitted if not using that functionality. + /// If not provided when trying to create a collection, create will fail. + /// + public int? Dimensions { get; init; } + + /// + /// Gets the kind of index to use. + /// + /// + /// + /// Default varies by database type. See the documentation of your chosen database connector for more information. + /// + public string? IndexKind { get; init; } + + /// + /// Gets the distance function to use when comparing vectors. + /// + /// + /// + /// Default varies by database type. See the documentation of your chosen database connector for more information. + /// + public string? DistanceFunction { get; init; } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordOptions/DeleteRecordOptions.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordOptions/DeleteRecordOptions.cs new file mode 100644 index 000000000000..4f034d125a6d --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordOptions/DeleteRecordOptions.cs @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Options when calling . +/// +/// +/// This class does not currently include any options, but is added for future extensibility of the API. +/// +[Experimental("SKEXP0001")] +public class DeleteRecordOptions +{ + /// + /// Initializes a new instance of the class. + /// + public DeleteRecordOptions() + { + } + + /// + /// Initializes a new instance of the class by cloning the given options. + /// + /// The options to clone + public DeleteRecordOptions(DeleteRecordOptions source) + { + } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordOptions/GetRecordOptions.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordOptions/GetRecordOptions.cs new file mode 100644 index 000000000000..5330e076acea --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordOptions/GetRecordOptions.cs @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Options when calling . +/// +[Experimental("SKEXP0001")] +public class GetRecordOptions +{ + /// + /// Initializes a new instance of the class. + /// + public GetRecordOptions() + { + } + + /// + /// Initializes a new instance of the class by cloning the given options. + /// + /// The options to clone + public GetRecordOptions(GetRecordOptions source) + { + this.IncludeVectors = source.IncludeVectors; + } + + /// + /// Gets or sets a value indicating whether to include vectors in the retrieval result. 
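Before the remaining options and exception types, a hedged sketch of how the property classes above compose into a VectorStoreRecordDefinition for models that cannot be annotated with the record attributes; the property names and dimension count are illustrative:

using System;
using Microsoft.SemanticKernel.Data;

public static class RecordDefinitionDemo
{
    // Equivalent to annotating the model with the record attributes, but defined externally.
    public static VectorStoreRecordDefinition Build() => new()
    {
        Properties = new VectorStoreRecordProperty[]
        {
            new VectorStoreRecordKeyProperty("Key", typeof(ulong)),
            new VectorStoreRecordDataProperty("Term", typeof(string)) { IsFilterable = true },
            new VectorStoreRecordVectorProperty("Embedding", typeof(ReadOnlyMemory<float>))
            {
                Dimensions = 1536,
                IndexKind = IndexKind.Hnsw,
                DistanceFunction = DistanceFunction.CosineSimilarity,
            },
        },
    };
}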
+ ///
+ public bool IncludeVectors { get; init; } = false;
+}
diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/RecordOptions/UpsertRecordOptions.cs b/dotnet/src/SemanticKernel.Abstractions/Data/RecordOptions/UpsertRecordOptions.cs
new file mode 100644
index 000000000000..c1d9cba35b5d
--- /dev/null
+++ b/dotnet/src/SemanticKernel.Abstractions/Data/RecordOptions/UpsertRecordOptions.cs
@@ -0,0 +1,31 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.SemanticKernel.Data;
+
+///
+/// Options when calling UpsertAsync or UpsertBatchAsync.
+/// Reserved for future use.
+///
+///
+/// This class does not currently include any options, but is added for future extensibility of the API.
+///
+[Experimental("SKEXP0001")]
+public class UpsertRecordOptions
+{
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ public UpsertRecordOptions()
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the class by cloning the given options.
+ ///
+ /// The options to clone.
+ public UpsertRecordOptions(UpsertRecordOptions source)
+ {
+ }
+}
diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/StorageToDataModelMapperOptions.cs b/dotnet/src/SemanticKernel.Abstractions/Data/StorageToDataModelMapperOptions.cs
new file mode 100644
index 000000000000..bdee284b0f14
--- /dev/null
+++ b/dotnet/src/SemanticKernel.Abstractions/Data/StorageToDataModelMapperOptions.cs
@@ -0,0 +1,17 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.SemanticKernel.Data;
+
+///
+/// Options to use with the MapFromStorageToDataModel method.
+///
+[Experimental("SKEXP0001")]
+public class StorageToDataModelMapperOptions
+{
+ ///
+ /// Gets or sets a value indicating whether to include vectors in the retrieval result.
+ ///
+ public bool IncludeVectors { get; init; } = false;
+}
diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/VectorStoreException.cs b/dotnet/src/SemanticKernel.Abstractions/Data/VectorStoreException.cs
new file mode 100644
index 000000000000..5a0183e85d83
--- /dev/null
+++ b/dotnet/src/SemanticKernel.Abstractions/Data/VectorStoreException.cs
@@ -0,0 +1,52 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.SemanticKernel.Data;
+
+///
+/// Base exception type thrown for any type of failure when using vector stores.
+///
+[Experimental("SKEXP0001")]
+public abstract class VectorStoreException : KernelException
+{
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ protected VectorStoreException()
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the class with a specified error message.
+ ///
+ /// The error message that explains the reason for the exception.
+ protected VectorStoreException(string? message) : base(message)
+ {
+ }
+
+ ///
+ /// Initializes a new instance of the class with a specified error message and a reference to the inner exception that is the cause of this exception.
+ ///
+ /// The error message that explains the reason for the exception.
+ /// The exception that is the cause of the current exception, or a null reference if no inner exception is specified.
+ protected VectorStoreException(string? message, Exception? innerException) : base(message, innerException)
+ {
+ }
+
+ ///
+ /// Gets or sets the type of vector store that the failing operation was performed on.
+ ///
+ public string?
VectorStoreType { get; init; } + + /// + /// Gets or sets the name of the vector store collection that the failing operation was performed on. + /// + public string? CollectionName { get; init; } + + /// + /// Gets or sets the name of the vector store operation that failed. + /// + public string? OperationName { get; init; } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/VectorStoreOperationException.cs b/dotnet/src/SemanticKernel.Abstractions/Data/VectorStoreOperationException.cs new file mode 100644 index 000000000000..2830c1b22646 --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/VectorStoreOperationException.cs @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Exception thrown when a vector store command fails, such as upserting a record or deleting a collection. +/// +[Experimental("SKEXP0001")] +public class VectorStoreOperationException : VectorStoreException +{ + /// + /// Initializes a new instance of the class. + /// + public VectorStoreOperationException() + { + } + + /// + /// Initializes a new instance of the class with a specified error message. + /// + /// The error message that explains the reason for the exception. + public VectorStoreOperationException(string? message) : base(message) + { + } + + /// + /// Initializes a new instance of the class with a specified error message and a reference to the inner exception that is the cause of this exception. + /// + /// The error message that explains the reason for the exception. + /// The exception that is the cause of the current exception, or a null reference if no inner exception is specified. + public VectorStoreOperationException(string? message, Exception? innerException) : base(message, innerException) + { + } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Data/VectorStoreRecordMappingException.cs b/dotnet/src/SemanticKernel.Abstractions/Data/VectorStoreRecordMappingException.cs new file mode 100644 index 000000000000..6b912b233ceb --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Data/VectorStoreRecordMappingException.cs @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Exception thrown when a failure occurs while trying to convert models for storage or retrieval. +/// +[Experimental("SKEXP0001")] +public class VectorStoreRecordMappingException : VectorStoreException +{ + /// + /// Initializes a new instance of the class. + /// + public VectorStoreRecordMappingException() + { + } + + /// + /// Initializes a new instance of the class with a specified error message. + /// + /// The error message that explains the reason for the exception. + public VectorStoreRecordMappingException(string? message) : base(message) + { + } + + /// + /// Initializes a new instance of the class with a specified error message and a reference to the inner exception that is the cause of this exception. + /// + /// The error message that explains the reason for the exception. + /// The exception that is the cause of the current exception, or a null reference if no inner exception is specified. + public VectorStoreRecordMappingException(string? message, Exception? 
innerException) : base(message, innerException) + { + } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Functions/KernelFunction.cs b/dotnet/src/SemanticKernel.Abstractions/Functions/KernelFunction.cs index 11ad6e109084..b838d7b30261 100644 --- a/dotnet/src/SemanticKernel.Abstractions/Functions/KernelFunction.cs +++ b/dotnet/src/SemanticKernel.Abstractions/Functions/KernelFunction.cs @@ -158,7 +158,7 @@ public async Task InvokeAsync( Verify.NotNull(kernel); using var activity = s_activitySource.StartActivity(this.Name); - ILogger logger = kernel.LoggerFactory.CreateLogger(this.Name) ?? NullLogger.Instance; + ILogger logger = kernel.LoggerFactory.CreateLogger(typeof(KernelFunction)) ?? NullLogger.Instance; // Ensure arguments are initialized. arguments ??= []; diff --git a/dotnet/src/SemanticKernel.Abstractions/Services/AIServiceExtensions.cs b/dotnet/src/SemanticKernel.Abstractions/Services/AIServiceExtensions.cs index a218031f9673..30a3ee7794e5 100644 --- a/dotnet/src/SemanticKernel.Abstractions/Services/AIServiceExtensions.cs +++ b/dotnet/src/SemanticKernel.Abstractions/Services/AIServiceExtensions.cs @@ -73,7 +73,7 @@ public static class AIServiceExtensions /// The function arguments. /// A tuple of the selected service and the settings associated with the service (the settings may be null). /// An appropriate service could not be found. - public static (T?, PromptExecutionSettings?) SelectAIService( + public static (T, PromptExecutionSettings?) SelectAIService( this IAIServiceSelector selector, Kernel kernel, KernelFunction function, diff --git a/dotnet/src/SemanticKernel.Core/Data/KernelBuilderExtensions.cs b/dotnet/src/SemanticKernel.Core/Data/KernelBuilderExtensions.cs new file mode 100644 index 000000000000..7f738b080b42 --- /dev/null +++ b/dotnet/src/SemanticKernel.Core/Data/KernelBuilderExtensions.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Diagnostics.CodeAnalysis; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel; + +/// +/// Extension methods to register Data services on the . +/// +[Experimental("SKEXP0001")] +public static class KernelBuilderExtensions +{ + /// + /// Register a Volatile with the specified service ID. + /// + /// The builder to register the on. + /// An optional service id to use as the service key. + /// The kernel builder. + public static IKernelBuilder AddVolatileVectorStore(this IKernelBuilder builder, string? serviceId = default) + { + builder.Services.AddVolatileVectorStore(serviceId); + return builder; + } +} diff --git a/dotnet/src/SemanticKernel.Core/Data/ServiceCollectionExtensions.cs b/dotnet/src/SemanticKernel.Core/Data/ServiceCollectionExtensions.cs new file mode 100644 index 000000000000..857bef29f01b --- /dev/null +++ b/dotnet/src/SemanticKernel.Core/Data/ServiceCollectionExtensions.cs @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Diagnostics.CodeAnalysis; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.SemanticKernel.Data; + +namespace Microsoft.SemanticKernel; + +/// +/// Extension methods to register Data services on an . +/// +[Experimental("SKEXP0001")] +public static class ServiceCollectionExtensions +{ + /// + /// Register a Volatile with the specified service ID. + /// + /// The to register the on. + /// An optional service id to use as the service key. + /// The service collection. + public static IServiceCollection AddVolatileVectorStore(this IServiceCollection services, string? 
serviceId = default) + { + services.AddKeyedSingleton(serviceId); + return services; + } +} diff --git a/dotnet/src/SemanticKernel.Core/Data/VolatileVectorStore.cs b/dotnet/src/SemanticKernel.Core/Data/VolatileVectorStore.cs new file mode 100644 index 000000000000..7175e2896978 --- /dev/null +++ b/dotnet/src/SemanticKernel.Core/Data/VolatileVectorStore.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Threading; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Service for storing and retrieving vector records, and managing vector record collections, that uses an in memory dictionary as the underlying storage. +/// +[Experimental("SKEXP0001")] +public sealed class VolatileVectorStore : IVectorStore +{ + /// Internal storage for the record collection. + private readonly ConcurrentDictionary> _internalCollection; + + /// + /// Initializes a new instance of the class. + /// + public VolatileVectorStore() + { + this._internalCollection = new(); + } + + /// + /// Initializes a new instance of the class. + /// + /// Allows passing in the dictionary used for storage, for testing purposes. + internal VolatileVectorStore(ConcurrentDictionary> internalCollection) + { + this._internalCollection = internalCollection; + } + + /// + public IVectorStoreRecordCollection GetCollection(string name, VectorStoreRecordDefinition? vectorStoreRecordDefinition = null) + where TKey : notnull + where TRecord : class + { + var collection = new VolatileVectorStoreRecordCollection(this._internalCollection, name, new() { VectorStoreRecordDefinition = vectorStoreRecordDefinition }) as IVectorStoreRecordCollection; + return collection!; + } + + /// + public IAsyncEnumerable ListCollectionNamesAsync(CancellationToken cancellationToken = default) + { + return this._internalCollection.Keys.ToAsyncEnumerable(); + } +} diff --git a/dotnet/src/SemanticKernel.Core/Data/VolatileVectorStoreRecordCollection.cs b/dotnet/src/SemanticKernel.Core/Data/VolatileVectorStoreRecordCollection.cs new file mode 100644 index 000000000000..decfa8ef20ea --- /dev/null +++ b/dotnet/src/SemanticKernel.Core/Data/VolatileVectorStoreRecordCollection.cs @@ -0,0 +1,191 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Threading.Tasks; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Service for storing and retrieving vector records, that uses an in memory dictionary as the underlying storage. +/// +/// The data type of the record key. +/// The data model to use for adding, updating and retrieving data from storage. +[Experimental("SKEXP0001")] +#pragma warning disable CA1711 // Identifiers should not have incorrect suffix +public sealed class VolatileVectorStoreRecordCollection : IVectorStoreRecordCollection +#pragma warning restore CA1711 // Identifiers should not have incorrect suffix + where TKey : notnull + where TRecord : class +{ + /// Internal storage for the record collection. + private readonly ConcurrentDictionary> _internalCollection; + + /// Optional configuration options for this class. 
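A hedged sketch of how the registration extensions and the volatile store compose; resolving without a service id works because the store is registered under a null key, which is also what the unit tests further down rely on:

using Microsoft.Extensions.DependencyInjection;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Data;

public static class RegistrationDemo
{
    public static IVectorStore Resolve()
    {
        var builder = Kernel.CreateBuilder();
        builder.AddVolatileVectorStore(); // registers VolatileVectorStore as a keyed singleton

        var kernel = builder.Build();
        return kernel.Services.GetRequiredService<IVectorStore>();
    }
}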
+ private readonly VolatileVectorStoreRecordCollectionOptions _options; + + /// The name of the collection that this will access. + private readonly string _collectionName; + + /// A property info object that points at the key property for the current model, allowing easy reading and writing of this property. + private readonly PropertyInfo _keyPropertyInfo; + + /// + /// Initializes a new instance of the class. + /// + /// The name of the collection that this will access. + /// Optional configuration options for this class. + public VolatileVectorStoreRecordCollection(string collectionName, VolatileVectorStoreRecordCollectionOptions? options = default) + { + // Verify. + Verify.NotNullOrWhiteSpace(collectionName); + + // Assign. + this._collectionName = collectionName; + this._internalCollection = new(); + this._options = options ?? new VolatileVectorStoreRecordCollectionOptions(); + var vectorStoreRecordDefinition = this._options.VectorStoreRecordDefinition ?? VectorStoreRecordPropertyReader.CreateVectorStoreRecordDefinitionFromType(typeof(TRecord), true); + + // Get the key property info. + var keyProperty = vectorStoreRecordDefinition.Properties.OfType().FirstOrDefault(); + if (keyProperty is null) + { + throw new ArgumentException($"No Key property found on {typeof(TRecord).Name} or provided via {nameof(VectorStoreRecordDefinition)}"); + } + + this._keyPropertyInfo = typeof(TRecord).GetProperty(keyProperty.DataModelPropertyName) ?? throw new ArgumentException($"Key property {keyProperty.DataModelPropertyName} not found on {typeof(TRecord).Name}"); + } + + /// + /// Initializes a new instance of the class. + /// + /// Allows passing in the dictionary used for storage, for testing purposes. + /// The name of the collection that this will access. + /// Optional configuration options for this class. + internal VolatileVectorStoreRecordCollection(ConcurrentDictionary> internalCollection, string collectionName, VolatileVectorStoreRecordCollectionOptions? options = default) + : this(collectionName, options) + { + this._internalCollection = internalCollection; + } + + /// + public string CollectionName => this._collectionName; + + /// + public Task CollectionExistsAsync(CancellationToken cancellationToken = default) + { + return this._internalCollection.ContainsKey(this._collectionName) ? Task.FromResult(true) : Task.FromResult(false); + } + + /// + public Task CreateCollectionAsync(CancellationToken cancellationToken = default) + { + this._internalCollection.TryAdd(this._collectionName, new ConcurrentDictionary()); + return Task.CompletedTask; + } + + /// + public async Task CreateCollectionIfNotExistsAsync(CancellationToken cancellationToken = default) + { + if (!await this.CollectionExistsAsync(cancellationToken).ConfigureAwait(false)) + { + await this.CreateCollectionAsync(cancellationToken).ConfigureAwait(false); + } + } + + /// + public Task DeleteCollectionAsync(CancellationToken cancellationToken = default) + { + this._internalCollection.TryRemove(this._collectionName, out _); + return Task.CompletedTask; + } + + /// + public Task GetAsync(TKey key, GetRecordOptions? options = null, CancellationToken cancellationToken = default) + { + var collectionDictionary = this.GetCollectionDictionary(); + + if (collectionDictionary.TryGetValue(key, out var record)) + { + return Task.FromResult(record as TRecord); + } + + return Task.FromResult(null); + } + + /// + public async IAsyncEnumerable GetBatchAsync(IEnumerable keys, GetRecordOptions? 
options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + foreach (var key in keys) + { + var record = await this.GetAsync(key, options, cancellationToken).ConfigureAwait(false); + + if (record is not null) + { + yield return record; + } + } + } + + /// + public Task DeleteAsync(TKey key, DeleteRecordOptions? options = null, CancellationToken cancellationToken = default) + { + var collectionDictionary = this.GetCollectionDictionary(); + + collectionDictionary.TryRemove(key, out _); + return Task.CompletedTask; + } + + /// + public Task DeleteBatchAsync(IEnumerable keys, DeleteRecordOptions? options = null, CancellationToken cancellationToken = default) + { + var collectionDictionary = this.GetCollectionDictionary(); + + foreach (var key in keys) + { + collectionDictionary.TryRemove(key, out _); + } + + return Task.CompletedTask; + } + + /// + public Task UpsertAsync(TRecord record, UpsertRecordOptions? options = null, CancellationToken cancellationToken = default) + { + var collectionDictionary = this.GetCollectionDictionary(); + + var key = (TKey)this._keyPropertyInfo.GetValue(record)!; + collectionDictionary.AddOrUpdate(key!, record, (key, currentValue) => record); + + return Task.FromResult(key!); + } + + /// + public async IAsyncEnumerable UpsertBatchAsync(IEnumerable records, UpsertRecordOptions? options = null, [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + foreach (var record in records) + { + yield return await this.UpsertAsync(record, options, cancellationToken).ConfigureAwait(false); + } + } + + /// + /// Get the collection dictionary from the internal storage, throws if it does not exist. + /// + /// The retrieved collection dictionary. + private ConcurrentDictionary GetCollectionDictionary() + { + if (!this._internalCollection.TryGetValue(this._collectionName, out var collectionDictionary)) + { + throw new VectorStoreOperationException($"Call to vector store failed. Collection '{this._collectionName}' does not exist."); + } + + return collectionDictionary; + } +} diff --git a/dotnet/src/SemanticKernel.Core/Data/VolatileVectorStoreRecordCollectionOptions.cs b/dotnet/src/SemanticKernel.Core/Data/VolatileVectorStoreRecordCollectionOptions.cs new file mode 100644 index 000000000000..8732e7efa486 --- /dev/null +++ b/dotnet/src/SemanticKernel.Core/Data/VolatileVectorStoreRecordCollectionOptions.cs @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.Data; + +/// +/// Options when creating a . +/// +[Experimental("SKEXP0001")] +public sealed class VolatileVectorStoreRecordCollectionOptions +{ + /// + /// Gets or sets an optional record definition that defines the schema of the record type. + /// + /// + /// If not provided, the schema will be inferred from the record model class using reflection. + /// In this case, the record model properties must be annotated with the appropriate attributes to indicate their usage. + /// See , and . + /// + public VectorStoreRecordDefinition? VectorStoreRecordDefinition { get; init; } = null; +} diff --git a/dotnet/src/SemanticKernel.UnitTests/Data/KernelBuilderExtensionsTests.cs b/dotnet/src/SemanticKernel.UnitTests/Data/KernelBuilderExtensionsTests.cs new file mode 100644 index 000000000000..2f1f3923c3c4 --- /dev/null +++ b/dotnet/src/SemanticKernel.UnitTests/Data/KernelBuilderExtensionsTests.cs @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft. All rights reserved. 
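To complement the registration tests below, a hedged sketch that exercises the collection directly; per GetCollectionDictionary above, record operations throw VectorStoreOperationException until the collection has been created. The Entry model is illustrative and assumes the attribute-based schema inference requires a key and at least one vector:

using System;
using System.Threading.Tasks;
using Microsoft.SemanticKernel.Data;

public static class VolatileCollectionDemo
{
    public sealed class Entry
    {
        [VectorStoreRecordKey]
        public string Key { get; set; } = string.Empty;

        [VectorStoreRecordData]
        public string Text { get; set; } = string.Empty;

        [VectorStoreRecordVector(4)]
        public ReadOnlyMemory<float> Embedding { get; set; }
    }

    public static async Task Main()
    {
        var collection = new VolatileVectorStoreRecordCollection<string, Entry>("demo");

        try
        {
            await collection.GetAsync("missing"); // collection not created yet
        }
        catch (VectorStoreOperationException ex)
        {
            Console.WriteLine(ex.Message); // "... Collection 'demo' does not exist."
        }

        await collection.CreateCollectionIfNotExistsAsync();
        await collection.UpsertAsync(new Entry { Key = "a", Text = "hello" });
        Console.WriteLine((await collection.GetAsync("a"))?.Text); // "hello"
        await collection.DeleteCollectionAsync();
    }
}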
diff --git a/dotnet/src/SemanticKernel.UnitTests/Data/KernelBuilderExtensionsTests.cs b/dotnet/src/SemanticKernel.UnitTests/Data/KernelBuilderExtensionsTests.cs
new file mode 100644
index 000000000000..2f1f3923c3c4
--- /dev/null
+++ b/dotnet/src/SemanticKernel.UnitTests/Data/KernelBuilderExtensionsTests.cs
@@ -0,0 +1,34 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.SemanticKernel;
+using Microsoft.SemanticKernel.Data;
+using Xunit;
+
+namespace SemanticKernel.UnitTests.Data;
+
+/// <summary>
+/// Contains tests for <see cref="KernelBuilderExtensions"/>.
+/// </summary>
+public class KernelBuilderExtensionsTests
+{
+    private readonly IKernelBuilder _kernelBuilder;
+
+    public KernelBuilderExtensionsTests()
+    {
+        this._kernelBuilder = Kernel.CreateBuilder();
+    }
+
+    [Fact]
+    public void AddVectorStoreRegistersClass()
+    {
+        // Act.
+        this._kernelBuilder.AddVolatileVectorStore();
+
+        // Assert.
+        var kernel = this._kernelBuilder.Build();
+        var vectorStore = kernel.Services.GetRequiredService<IVectorStore>();
+        Assert.NotNull(vectorStore);
+        Assert.IsType<VolatileVectorStore>(vectorStore);
+    }
+}
diff --git a/dotnet/src/SemanticKernel.UnitTests/Data/ServiceCollectionExtensionsTests.cs b/dotnet/src/SemanticKernel.UnitTests/Data/ServiceCollectionExtensionsTests.cs
new file mode 100644
index 000000000000..0898b439c067
--- /dev/null
+++ b/dotnet/src/SemanticKernel.UnitTests/Data/ServiceCollectionExtensionsTests.cs
@@ -0,0 +1,34 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.SemanticKernel;
+using Microsoft.SemanticKernel.Data;
+using Xunit;
+
+namespace SemanticKernel.UnitTests.Data;
+
+/// <summary>
+/// Contains tests for the <see cref="ServiceCollectionExtensions"/> class.
+/// </summary>
+public class ServiceCollectionExtensionsTests
+{
+    private readonly IServiceCollection _serviceCollection;
+
+    public ServiceCollectionExtensionsTests()
+    {
+        this._serviceCollection = new ServiceCollection();
+    }
+
+    [Fact]
+    public void AddVectorStoreRegistersClass()
+    {
+        // Act.
+        this._serviceCollection.AddVolatileVectorStore();
+
+        // Assert.
+        var serviceProvider = this._serviceCollection.BuildServiceProvider();
+        var vectorStore = serviceProvider.GetRequiredService<IVectorStore>();
+        Assert.NotNull(vectorStore);
+        Assert.IsType<VolatileVectorStore>(vectorStore);
+    }
+}
diff --git a/dotnet/src/SemanticKernel.UnitTests/Data/VectorStoreRecordPropertyReaderTests.cs b/dotnet/src/SemanticKernel.UnitTests/Data/VectorStoreRecordPropertyReaderTests.cs
new file mode 100644
index 000000000000..cfddd8437425
--- /dev/null
+++ b/dotnet/src/SemanticKernel.UnitTests/Data/VectorStoreRecordPropertyReaderTests.cs
@@ -0,0 +1,468 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Reflection;
+using System.Text.Json;
+using System.Text.Json.Serialization;
+using Microsoft.SemanticKernel.Data;
+using Xunit;
+
+namespace SemanticKernel.UnitTests.Data;
+
+public class VectorStoreRecordPropertyReaderTests
+{
+    [Fact]
+    public void SplitDefinitionsAndVerifyReturnsProperties()
+    {
+        // Act.
+        var properties = VectorStoreRecordPropertyReader.SplitDefinitionAndVerify("testType", this._multiPropsDefinition, true, true);
+
+        // Assert.
+        Assert.Equal("Key", properties.KeyProperty.DataModelPropertyName);
+        Assert.Equal(2, properties.DataProperties.Count);
+        Assert.Equal(2, properties.VectorProperties.Count);
+        Assert.Equal("Data1", properties.DataProperties[0].DataModelPropertyName);
+        Assert.Equal("Data2", properties.DataProperties[1].DataModelPropertyName);
+        Assert.Equal("Vector1", properties.VectorProperties[0].DataModelPropertyName);
+        Assert.Equal("Vector2", properties.VectorProperties[1].DataModelPropertyName);
+    }
+
+    [Theory]
+    [InlineData(false, true, "MultiProps")]
+    [InlineData(true, true, "NoKey")]
+    [InlineData(true, true, "MultiKeys")]
+    [InlineData(false, true, "NoVector")]
+    [InlineData(true, true, "NoVector")]
+    public void SplitDefinitionsAndVerifyThrowsForInvalidModel(bool supportsMultipleVectors, bool requiresAtLeastOneVector, string definitionName)
+    {
+        // Arrange.
+        var definition = definitionName switch
+        {
+            "MultiProps" => this._multiPropsDefinition,
+            "NoKey" => this._noKeyDefinition,
+            "MultiKeys" => this._multiKeysDefinition,
+            "NoVector" => this._noVectorDefinition,
+            _ => throw new ArgumentException("Invalid definition.")
+        };
+
+        // Act & Assert.
+        Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.SplitDefinitionAndVerify("testType", definition, supportsMultipleVectors, requiresAtLeastOneVector));
+    }
+
+    [Theory]
+    [InlineData(true, false)]
+    [InlineData(false, false)]
+    [InlineData(true, true)]
+    [InlineData(false, true)]
+    public void FindPropertiesCanFindAllPropertiesOnSinglePropsModel(bool supportsMultipleVectors, bool useConfig)
+    {
+        // Act.
+        var properties = useConfig ?
+            VectorStoreRecordPropertyReader.FindProperties(typeof(SinglePropsModel), this._singlePropsDefinition, supportsMultipleVectors) :
+            VectorStoreRecordPropertyReader.FindProperties(typeof(SinglePropsModel), supportsMultipleVectors);
+
+        // Assert.
+        Assert.Equal("Key", properties.KeyProperty.Name);
+        Assert.Single(properties.DataProperties);
+        Assert.Single(properties.VectorProperties);
+        Assert.Equal("Data", properties.DataProperties[0].Name);
+        Assert.Equal("Vector", properties.VectorProperties[0].Name);
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public void FindPropertiesCanFindAllPropertiesOnMultiPropsModel(bool useConfig)
+    {
+        // Act.
+        var properties = useConfig ?
+            VectorStoreRecordPropertyReader.FindProperties(typeof(MultiPropsModel), this._multiPropsDefinition, true) :
+            VectorStoreRecordPropertyReader.FindProperties(typeof(MultiPropsModel), true);
+
+        // Assert.
+        Assert.Equal("Key", properties.KeyProperty.Name);
+        Assert.Equal(2, properties.DataProperties.Count);
+        Assert.Equal(2, properties.VectorProperties.Count);
+        Assert.Equal("Data1", properties.DataProperties[0].Name);
+        Assert.Equal("Data2", properties.DataProperties[1].Name);
+        Assert.Equal("Vector1", properties.VectorProperties[0].Name);
+        Assert.Equal("Vector2", properties.VectorProperties[1].Name);
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public void FindPropertiesThrowsForMultipleVectorsWithSingleVectorSupport(bool useConfig)
+    {
+        // Act.
+        var ex = useConfig ?
+            Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.FindProperties(typeof(MultiPropsModel), this._multiPropsDefinition, false)) :
+            Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.FindProperties(typeof(MultiPropsModel), false));
+
+        // Assert.
+        var expectedMessage = useConfig ?
+            "Multiple vector properties configured for type SemanticKernel.UnitTests.Data.VectorStoreRecordPropertyReaderTests+MultiPropsModel while only one is supported." :
+            "Multiple vector properties found on type SemanticKernel.UnitTests.Data.VectorStoreRecordPropertyReaderTests+MultiPropsModel while only one is supported.";
+        Assert.Equal(expectedMessage, ex.Message);
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public void FindPropertiesThrowsOnMultipleKeyProperties(bool useConfig)
+    {
+        // Act.
+        var ex = useConfig ?
+            Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.FindProperties(typeof(MultiKeysModel), this._multiKeysDefinition, true)) :
+            Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.FindProperties(typeof(MultiKeysModel), true));
+
+        // Assert.
+        var expectedMessage = useConfig ?
+            "Multiple key properties configured for type SemanticKernel.UnitTests.Data.VectorStoreRecordPropertyReaderTests+MultiKeysModel." :
+            "Multiple key properties found on type SemanticKernel.UnitTests.Data.VectorStoreRecordPropertyReaderTests+MultiKeysModel.";
+        Assert.Equal(expectedMessage, ex.Message);
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public void FindPropertiesThrowsOnNoKeyProperty(bool useConfig)
+    {
+        // Act.
+        var ex = useConfig ?
+            Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.FindProperties(typeof(NoKeyModel), this._noKeyDefinition, true)) :
+            Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.FindProperties(typeof(NoKeyModel), true));
+
+        // Assert.
+        var expectedMessage = useConfig ?
+            "No key property configured for type SemanticKernel.UnitTests.Data.VectorStoreRecordPropertyReaderTests+NoKeyModel." :
+            "No key property found on type SemanticKernel.UnitTests.Data.VectorStoreRecordPropertyReaderTests+NoKeyModel.";
+        Assert.Equal(expectedMessage, ex.Message);
+    }
+
+    [Theory]
+    [InlineData(true)]
+    [InlineData(false)]
+    public void FindPropertiesThrowsOnNoVectorPropertyWithSingleVectorSupport(bool useConfig)
+    {
+        // Act.
+        var ex = useConfig ?
+            Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.FindProperties(typeof(NoVectorModel), this._noVectorDefinition, false)) :
+            Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.FindProperties(typeof(NoVectorModel), false));
+
+        // Assert.
+        var expectedMessage = useConfig ?
+            "No vector property configured for type SemanticKernel.UnitTests.Data.VectorStoreRecordPropertyReaderTests+NoVectorModel." :
+            "No vector property found on type SemanticKernel.UnitTests.Data.VectorStoreRecordPropertyReaderTests+NoVectorModel.";
+        Assert.Equal(expectedMessage, ex.Message);
+    }
+
+    [Theory]
+    [InlineData("Key", "MissingKey")]
+    [InlineData("Data", "MissingData")]
+    [InlineData("Vector", "MissingVector")]
+    public void FindPropertiesUsingConfigThrowsForNotFoundProperties(string propertyType, string propertyName)
+    {
+        var missingKeyDefinition = new VectorStoreRecordDefinition { Properties = [new VectorStoreRecordKeyProperty(propertyName, typeof(string))] };
+        var missingDataDefinition = new VectorStoreRecordDefinition { Properties = [new VectorStoreRecordDataProperty(propertyName, typeof(string))] };
+        var missingVectorDefinition = new VectorStoreRecordDefinition { Properties = [new VectorStoreRecordVectorProperty(propertyName, typeof(ReadOnlyMemory<float>))] };
+
+        var definition = propertyType switch
+        {
+            "Key" => missingKeyDefinition,
+            "Data" => missingDataDefinition,
+            "Vector" => missingVectorDefinition,
+            _ => throw new ArgumentException("Invalid property type.")
+        };
+
+        Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.FindProperties(typeof(NoKeyModel), definition, false));
+    }
+
+    [Fact]
+    public void CreateVectorStoreRecordDefinitionFromTypeConvertsAllProps()
+    {
+        // Act.
+        var definition = VectorStoreRecordPropertyReader.CreateVectorStoreRecordDefinitionFromType(typeof(MultiPropsModel), true);
+
+        // Assert.
+        Assert.Equal(5, definition.Properties.Count);
+        Assert.Equal("Key", definition.Properties[0].DataModelPropertyName);
+        Assert.Equal("Data1", definition.Properties[1].DataModelPropertyName);
+        Assert.Equal("Data2", definition.Properties[2].DataModelPropertyName);
+        Assert.Equal("Vector1", definition.Properties[3].DataModelPropertyName);
+        Assert.Equal("Vector2", definition.Properties[4].DataModelPropertyName);
+
+        Assert.IsType<VectorStoreRecordKeyProperty>(definition.Properties[0]);
+        Assert.IsType<VectorStoreRecordDataProperty>(definition.Properties[1]);
+        Assert.IsType<VectorStoreRecordDataProperty>(definition.Properties[2]);
+        Assert.IsType<VectorStoreRecordVectorProperty>(definition.Properties[3]);
+        Assert.IsType<VectorStoreRecordVectorProperty>(definition.Properties[4]);
+
+        var data1 = (VectorStoreRecordDataProperty)definition.Properties[1];
+        var data2 = (VectorStoreRecordDataProperty)definition.Properties[2];
+
+        Assert.True(data1.IsFilterable);
+        Assert.False(data2.IsFilterable);
+
+        Assert.True(data1.IsFullTextSearchable);
+        Assert.False(data2.IsFullTextSearchable);
+
+        Assert.Equal(typeof(string), data1.PropertyType);
+        Assert.Equal(typeof(string), data2.PropertyType);
+
+        var vector1 = (VectorStoreRecordVectorProperty)definition.Properties[3];
+
+        Assert.Equal(4, vector1.Dimensions);
+    }
+
+    [Fact]
+    public void VerifyPropertyTypesPassForAllowedTypes()
+    {
+        // Arrange.
+        var properties = VectorStoreRecordPropertyReader.FindProperties(typeof(SinglePropsModel), true);
+
+        // Act.
+        VectorStoreRecordPropertyReader.VerifyPropertyTypes(properties.DataProperties, [typeof(string)], "Data");
+        VectorStoreRecordPropertyReader.VerifyPropertyTypes(this._singlePropsDefinition.Properties.OfType<VectorStoreRecordDataProperty>(), [typeof(string)], "Data");
+    }
+
+    [Fact]
+    public void VerifyPropertyTypesPassForAllowedEnumerableTypes()
+    {
+        // Arrange.
+        var properties = VectorStoreRecordPropertyReader.FindProperties(typeof(EnumerablePropsModel), true);
+
+        // Act.
+        VectorStoreRecordPropertyReader.VerifyPropertyTypes(properties.DataProperties, [typeof(string)], "Data", supportEnumerable: true);
+        VectorStoreRecordPropertyReader.VerifyPropertyTypes(this._enumerablePropsDefinition.Properties.OfType<VectorStoreRecordDataProperty>(), [typeof(string)], "Data", supportEnumerable: true);
+    }
+
+    [Fact]
+    public void VerifyPropertyTypesFailsForDisallowedTypes()
+    {
+        // Arrange.
+        var properties = VectorStoreRecordPropertyReader.FindProperties(typeof(SinglePropsModel), true);
+
+        // Act.
+        var ex1 = Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.VerifyPropertyTypes(properties.DataProperties, [typeof(int), typeof(float)], "Data"));
+        var ex2 = Assert.Throws<ArgumentException>(() => VectorStoreRecordPropertyReader.VerifyPropertyTypes(this._singlePropsDefinition.Properties.OfType<VectorStoreRecordDataProperty>(), [typeof(int), typeof(float)], "Data"));
+
+        // Assert.
+        Assert.Equal("Data properties must be one of the supported types: System.Int32, System.Single. Type of the property 'Data' is System.String.", ex1.Message);
+        Assert.Equal("Data properties must be one of the supported types: System.Int32, System.Single. Type of the property 'Data' is System.String.", ex2.Message);
+    }
+
+    [Fact]
+    public void VerifyStoragePropertyNameMapChecksStorageNameAndFallsBackToPropertyName()
+    {
+        // Arrange.
+        var properties = VectorStoreRecordPropertyReader.SplitDefinitionAndVerify("testType", this._multiPropsDefinition, true, true);
+
+        // Act.
+        var storageNameMap = VectorStoreRecordPropertyReader.BuildPropertyNameToStorageNameMap(properties);
+
+        // Assert.
+        Assert.Equal(5, storageNameMap.Count);
+
+        // From Property Names.
+        Assert.Equal("Key", storageNameMap["Key"]);
+        Assert.Equal("Data1", storageNameMap["Data1"]);
+        Assert.Equal("Vector1", storageNameMap["Vector1"]);
+        Assert.Equal("Vector2", storageNameMap["Vector2"]);
+
+        // From storage property name on vector store record data property.
+        Assert.Equal("data_2", storageNameMap["Data2"]);
+    }
+
+    [Fact]
+    public void VerifyGetJsonPropertyNameChecksJsonOptionsAndJsonAttributesAndFallsBackToPropertyName()
+    {
+        // Arrange.
+        var options = new JsonSerializerOptions() { PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseUpper };
+        var properties = VectorStoreRecordPropertyReader.FindProperties(typeof(MultiPropsModel), true);
+        var allProperties = (new PropertyInfo[] { properties.KeyProperty })
+            .Concat(properties.DataProperties)
+            .Concat(properties.VectorProperties);
+
+        // Act.
+        var jsonNameMap = allProperties
+            .Select(p => new { PropertyName = p.Name, JsonName = VectorStoreRecordPropertyReader.GetJsonPropertyName(options, p) })
+            .ToDictionary(p => p.PropertyName, p => p.JsonName);
+
+        // Assert.
+        Assert.Equal(5, jsonNameMap.Count);
+
+        // From JsonNamingPolicy.
+        Assert.Equal("KEY", jsonNameMap["Key"]);
+        Assert.Equal("DATA1", jsonNameMap["Data1"]);
+        Assert.Equal("DATA2", jsonNameMap["Data2"]);
+        Assert.Equal("VECTOR1", jsonNameMap["Vector1"]);
+
+        // From JsonPropertyName attribute.
+        Assert.Equal("vector-2", jsonNameMap["Vector2"]);
+    }
+
+    [Fact]
+    public void VerifyBuildPropertyNameToJsonPropertyNameMapChecksJsonAttributesAndJsonOptionsAndFallsbackToPropertyNames()
+    {
+        // Arrange.
+        var options = new JsonSerializerOptions() { PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseUpper };
+        var properties = VectorStoreRecordPropertyReader.SplitDefinitionAndVerify("testType", this._multiPropsDefinition, true, true);
+        var propertiesInfo = VectorStoreRecordPropertyReader.FindProperties(typeof(MultiPropsModel), true);
+
+        // Act.
+        var jsonNameMap1 = VectorStoreRecordPropertyReader.BuildPropertyNameToJsonPropertyNameMap(properties, typeof(MultiPropsModel), options);
+        var jsonNameMap2 = VectorStoreRecordPropertyReader.BuildPropertyNameToJsonPropertyNameMap(propertiesInfo, typeof(MultiPropsModel), options);
+
+        void assertJsonNameMap(Dictionary<string, string> jsonNameMap)
+        {
+            Assert.Equal(5, jsonNameMap.Count);
+
+            // From JsonNamingPolicy.
+            Assert.Equal("KEY", jsonNameMap["Key"]);
+            Assert.Equal("DATA1", jsonNameMap["Data1"]);
+            Assert.Equal("DATA2", jsonNameMap["Data2"]);
+            Assert.Equal("VECTOR1", jsonNameMap["Vector1"]);
+
+            // From JsonPropertyName attribute.
+            Assert.Equal("vector-2", jsonNameMap["Vector2"]);
+        };
+
+        // Assert.
+        assertJsonNameMap(jsonNameMap1);
+        assertJsonNameMap(jsonNameMap2);
+    }
+
+#pragma warning disable CA1812 // Invalid unused classes error, since I am using these for testing purposes above.
+
+    private sealed class NoKeyModel
+    {
+    }
+
+    private readonly VectorStoreRecordDefinition _noKeyDefinition = new();
+
+    private sealed class NoVectorModel
+    {
+        [VectorStoreRecordKey]
+        public string Key { get; set; } = string.Empty;
+    }
+
+    private readonly VectorStoreRecordDefinition _noVectorDefinition = new()
+    {
+        Properties =
+        [
+            new VectorStoreRecordKeyProperty("Key", typeof(string))
+        ]
+    };
+
+    private sealed class MultiKeysModel
+    {
+        [VectorStoreRecordKey]
+        public string Key1 { get; set; } = string.Empty;
+
+        [VectorStoreRecordKey]
+        public string Key2 { get; set; } = string.Empty;
+    }
+
+    private readonly VectorStoreRecordDefinition _multiKeysDefinition = new()
+    {
+        Properties =
+        [
+            new VectorStoreRecordKeyProperty("Key1", typeof(string)),
+            new VectorStoreRecordKeyProperty("Key2", typeof(string))
+        ]
+    };
+
+    private sealed class SinglePropsModel
+    {
+        [VectorStoreRecordKey]
+        public string Key { get; set; } = string.Empty;
+
+        [VectorStoreRecordData]
+        public string Data { get; set; } = string.Empty;
+
+        [VectorStoreRecordVector]
+        public ReadOnlyMemory<float> Vector { get; set; }
+
+        public string NotAnnotated { get; set; } = string.Empty;
+    }
+
+    private readonly VectorStoreRecordDefinition _singlePropsDefinition = new()
+    {
+        Properties =
+        [
+            new VectorStoreRecordKeyProperty("Key", typeof(string)),
+            new VectorStoreRecordDataProperty("Data", typeof(string)),
+            new VectorStoreRecordVectorProperty("Vector", typeof(ReadOnlyMemory<float>))
+        ]
+    };
+
+    private sealed class MultiPropsModel
+    {
+        [VectorStoreRecordKey]
+        public string Key { get; set; } = string.Empty;
+
+        [VectorStoreRecordData(IsFilterable = true, IsFullTextSearchable = true)]
+        public string Data1 { get; set; } = string.Empty;
+
+        [VectorStoreRecordData]
+        public string Data2 { get; set; } = string.Empty;
+
+        [VectorStoreRecordVector(4, IndexKind.Flat, DistanceFunction.DotProductSimilarity)]
+        public ReadOnlyMemory<float> Vector1 { get; set; }
+
+        [VectorStoreRecordVector]
+        [JsonPropertyName("vector-2")]
+        public ReadOnlyMemory<float> Vector2 { get; set; }
+
+        public string NotAnnotated { get; set; } = string.Empty;
+    }
+
+    private readonly VectorStoreRecordDefinition _multiPropsDefinition = new()
+    {
+        Properties =
+        [
+            new VectorStoreRecordKeyProperty("Key", typeof(string)),
+            new VectorStoreRecordDataProperty("Data1", typeof(string)) { IsFilterable = true, IsFullTextSearchable = true },
+            new VectorStoreRecordDataProperty("Data2", typeof(string)) { StoragePropertyName = "data_2" },
+            new VectorStoreRecordVectorProperty("Vector1", typeof(ReadOnlyMemory<float>)) { Dimensions = 4, IndexKind = IndexKind.Flat, DistanceFunction = DistanceFunction.DotProductSimilarity },
+            new VectorStoreRecordVectorProperty("Vector2", typeof(ReadOnlyMemory<float>))
+        ]
+    };
+
+    private sealed class EnumerablePropsModel
+    {
+        [VectorStoreRecordKey]
+        public string Key { get; set; } = string.Empty;
+
+        [VectorStoreRecordData]
+        public IEnumerable<string> EnumerableData { get; set; } = new List<string>();
+
+        [VectorStoreRecordData]
+        public string[] ArrayData { get; set; } = Array.Empty<string>();
+
+        [VectorStoreRecordData]
+        public List<string> ListData { get; set; } = new List<string>();
+
+        [VectorStoreRecordVector]
+        public ReadOnlyMemory<float> Vector { get; set; }
+
+        public string NotAnnotated { get; set; } = string.Empty;
+    }
+
+    private readonly VectorStoreRecordDefinition _enumerablePropsDefinition = new()
+    {
+        Properties =
+        [
+            new VectorStoreRecordKeyProperty("Key", typeof(string)),
+            new VectorStoreRecordDataProperty("EnumerableData", typeof(IEnumerable<string>)),
+            new VectorStoreRecordDataProperty("ArrayData", typeof(string[])),
+            new VectorStoreRecordDataProperty("ListData", typeof(List<string>)),
+            new VectorStoreRecordVectorProperty("Vector", typeof(ReadOnlyMemory<float>))
+        ]
+    };
+
+#pragma warning restore CA1812 // Non-nullable field must contain a non-null value when exiting constructor. Consider declaring as nullable.
+}
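The property reader tests above exercise two equivalent schema sources: attributes discovered via reflection, and an explicit VectorStoreRecordDefinition. A compact side-by-side sketch (illustrative only, not part of this patch; the Product type and its property names are hypothetical):

using System;
using Microsoft.SemanticKernel.Data;

// Schema declared with attributes; inferred via reflection when no definition is supplied.
public sealed class Product
{
    [VectorStoreRecordKey]
    public string Key { get; set; } = string.Empty;

    [VectorStoreRecordData(IsFilterable = true)]
    public string Name { get; set; } = string.Empty;

    [VectorStoreRecordVector(4)]
    public ReadOnlyMemory<float> Embedding { get; set; }
}

public static class ProductSchema
{
    // The same schema expressed as an explicit VectorStoreRecordDefinition;
    // when supplied via options it takes precedence over reflection-based inference.
    public static readonly VectorStoreRecordDefinition Definition = new()
    {
        Properties =
        [
            new VectorStoreRecordKeyProperty("Key", typeof(string)),
            new VectorStoreRecordDataProperty("Name", typeof(string)) { IsFilterable = true },
            new VectorStoreRecordVectorProperty("Embedding", typeof(ReadOnlyMemory<float>)) { Dimensions = 4 },
        ]
    };
}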
diff --git a/dotnet/src/SemanticKernel.UnitTests/Data/VolatileVectorStoreRecordCollectionTests.cs b/dotnet/src/SemanticKernel.UnitTests/Data/VolatileVectorStoreRecordCollectionTests.cs
new file mode 100644
index 000000000000..c70382481fbc
--- /dev/null
+++ b/dotnet/src/SemanticKernel.UnitTests/Data/VolatileVectorStoreRecordCollectionTests.cs
@@ -0,0 +1,314 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Collections.Concurrent;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using Microsoft.SemanticKernel.Data;
+using Xunit;
+
+namespace SemanticKernel.UnitTests.Data;
+
+/// <summary>
+/// Contains tests for the <see cref="VolatileVectorStoreRecordCollection{TKey, TRecord}"/> class.
+/// </summary>
+public class VolatileVectorStoreRecordCollectionTests
+{
+    private const string TestCollectionName = "testcollection";
+    private const string TestRecordKey1 = "testid1";
+    private const string TestRecordKey2 = "testid2";
+    private const int TestRecordIntKey1 = 1;
+    private const int TestRecordIntKey2 = 2;
+
+    private readonly CancellationToken _testCancellationToken = new(false);
+
+    private readonly ConcurrentDictionary<string, ConcurrentDictionary<object, object>> _collectionStore;
+
+    public VolatileVectorStoreRecordCollectionTests()
+    {
+        this._collectionStore = new();
+    }
+
+    [Theory]
+    [InlineData(TestCollectionName, true)]
+    [InlineData("nonexistentcollection", false)]
+    public async Task CollectionExistsReturnsCollectionStateAsync(string collectionName, bool expectedExists)
+    {
+        // Arrange
+        var collection = new ConcurrentDictionary<object, object>();
+        this._collectionStore.TryAdd(TestCollectionName, collection);
+
+        var sut = new VolatileVectorStoreRecordCollection<string, SinglePropsModel<string>>(
+            this._collectionStore,
+            collectionName);
+
+        // Act
+        var actual = await sut.CollectionExistsAsync(this._testCancellationToken);
+
+        // Assert
+        Assert.Equal(expectedExists, actual);
+    }
+
+    [Fact]
+    public async Task CanCreateCollectionAsync()
+    {
+        // Arrange
+        var sut = this.CreateRecordCollection<string>(false);
+
+        // Act
+        await sut.CreateCollectionAsync(this._testCancellationToken);
+
+        // Assert
+        Assert.True(this._collectionStore.ContainsKey(TestCollectionName));
+    }
+
+    [Fact]
+    public async Task DeleteCollectionRemovesCollectionFromDictionaryAsync()
+    {
+        // Arrange
+        var collection = new ConcurrentDictionary<object, object>();
+        this._collectionStore.TryAdd(TestCollectionName, collection);
+
+        var sut = this.CreateRecordCollection<string>(false);
+
+        // Act
+        await sut.DeleteCollectionAsync(this._testCancellationToken);
+
+        // Assert
+        Assert.Empty(this._collectionStore);
+    }
+
+    [Theory]
+    [InlineData(true, TestRecordKey1)]
+    [InlineData(true, TestRecordIntKey1)]
+    [InlineData(false, TestRecordKey1)]
+    [InlineData(false, TestRecordIntKey1)]
+    public async Task CanGetRecordWithVectorsAsync<TKey>(bool useDefinition, TKey testKey)
+        where TKey : notnull
+    {
+        // Arrange
+        var record = CreateModel(testKey, withVectors: true);
+        var collection = new ConcurrentDictionary<object, object>();
+        collection.TryAdd(testKey!, record);
+        this._collectionStore.TryAdd(TestCollectionName, collection);
+
+        var sut = this.CreateRecordCollection<TKey>(useDefinition);
+
+        // Act
+        var actual = await sut.GetAsync(
+            testKey,
+            new()
+            {
+                IncludeVectors = true
+            },
+            this._testCancellationToken);
+
+        // Assert
+        var expectedArgs = new object[] { TestRecordKey1 };
+
+        Assert.NotNull(actual);
+        Assert.Equal(testKey, actual.Key);
+        Assert.Equal($"data {testKey}", actual.Data);
+        Assert.Equal(new float[] { 1, 2, 3, 4 }, actual.Vector!.Value.ToArray());
+    }
+
+    [Theory]
+    [InlineData(true, TestRecordKey1, TestRecordKey2)]
+    [InlineData(true, TestRecordIntKey1, TestRecordIntKey2)]
+    [InlineData(false, TestRecordKey1, TestRecordKey2)]
+    [InlineData(false, TestRecordIntKey1, TestRecordIntKey2)]
+    public async Task CanGetManyRecordsWithVectorsAsync<TKey>(bool useDefinition, TKey testKey1, TKey testKey2)
+        where TKey : notnull
+    {
+        // Arrange
+        var record1 = CreateModel(testKey1, withVectors: true);
+        var record2 = CreateModel(testKey2, withVectors: true);
+        var collection = new ConcurrentDictionary<object, object>();
+        collection.TryAdd(testKey1!, record1);
+        collection.TryAdd(testKey2!, record2);
+        this._collectionStore.TryAdd(TestCollectionName, collection);
+
+        var sut = this.CreateRecordCollection<TKey>(useDefinition);
+
+        // Act
+        var actual = await sut.GetBatchAsync(
+            [testKey1, testKey2],
+            new()
+            {
+                IncludeVectors = true
+            },
+            this._testCancellationToken).ToListAsync();
+
+        // Assert
+        Assert.NotNull(actual);
+        Assert.Equal(2, actual.Count);
+        Assert.Equal(testKey1, actual[0].Key);
+        Assert.Equal($"data {testKey1}", actual[0].Data);
+        Assert.Equal(testKey2, actual[1].Key);
+        Assert.Equal($"data {testKey2}", actual[1].Data);
+    }
+
+    [Theory]
+    [InlineData(true, TestRecordKey1, TestRecordKey2)]
+    [InlineData(true, TestRecordIntKey1, TestRecordIntKey2)]
+    [InlineData(false, TestRecordKey1, TestRecordKey2)]
+    [InlineData(false, TestRecordIntKey1, TestRecordIntKey2)]
+    public async Task CanDeleteRecordAsync<TKey>(bool useDefinition, TKey testKey1, TKey testKey2)
+        where TKey : notnull
+    {
+        // Arrange
+        var record1 = CreateModel(testKey1, withVectors: true);
+        var record2 = CreateModel(testKey2, withVectors: true);
+        var collection = new ConcurrentDictionary<object, object>();
+        collection.TryAdd(testKey1, record1);
+        collection.TryAdd(testKey2, record2);
+        this._collectionStore.TryAdd(TestCollectionName, collection);
+
+        var sut = this.CreateRecordCollection<TKey>(useDefinition);
+
+        // Act
+        await sut.DeleteAsync(
+            testKey1,
+            cancellationToken: this._testCancellationToken);
+
+        // Assert
+        Assert.False(collection.ContainsKey(testKey1));
+        Assert.True(collection.ContainsKey(testKey2));
+    }
+
+    [Theory]
+    [InlineData(true, TestRecordKey1, TestRecordKey2)]
+    [InlineData(true, TestRecordIntKey1, TestRecordIntKey2)]
+    [InlineData(false, TestRecordKey1, TestRecordKey2)]
+    [InlineData(false, TestRecordIntKey1, TestRecordIntKey2)]
+    public async Task CanDeleteManyRecordsWithVectorsAsync<TKey>(bool useDefinition, TKey testKey1, TKey testKey2)
+        where TKey : notnull
+    {
+        // Arrange
+        var record1 = CreateModel(testKey1, withVectors: true);
+        var record2 = CreateModel(testKey2, withVectors: true);
+        var collection = new ConcurrentDictionary<object, object>();
+        collection.TryAdd(testKey1, record1);
+        collection.TryAdd(testKey2, record2);
+        this._collectionStore.TryAdd(TestCollectionName, collection);
+
+        var sut = this.CreateRecordCollection<TKey>(useDefinition);
+
+        // Act
+        await sut.DeleteBatchAsync(
+            [testKey1, testKey2],
+            cancellationToken: this._testCancellationToken);
+
+        // Assert
+        Assert.False(collection.ContainsKey(testKey1));
+        Assert.False(collection.ContainsKey(testKey2));
+    }
+
+    [Theory]
+    [InlineData(true, TestRecordKey1)]
+    [InlineData(true, TestRecordIntKey1)]
+    [InlineData(false, TestRecordKey1)]
+    [InlineData(false, TestRecordIntKey1)]
+    public async Task CanUpsertRecordAsync<TKey>(bool useDefinition, TKey testKey1)
+        where TKey : notnull
+    {
+        // Arrange
+        var record1 = CreateModel(testKey1, withVectors: true);
+        var collection = new ConcurrentDictionary<object, object>();
+        this._collectionStore.TryAdd(TestCollectionName, collection);
+
+        var sut = this.CreateRecordCollection<TKey>(useDefinition);
+
+        // Act
+        var upsertResult = await sut.UpsertAsync(
+            record1,
+            cancellationToken: this._testCancellationToken);
+
+        // Assert
+        Assert.Equal(testKey1, upsertResult);
+        Assert.True(collection.ContainsKey(testKey1));
+        Assert.IsType<SinglePropsModel<TKey>>(collection[testKey1]);
+        Assert.Equal($"data {testKey1}", (collection[testKey1] as SinglePropsModel<TKey>)!.Data);
+    }
+
+    [Theory]
+    [InlineData(true, TestRecordKey1, TestRecordKey2)]
+    [InlineData(true, TestRecordIntKey1, TestRecordIntKey2)]
+    [InlineData(false, TestRecordKey1, TestRecordKey2)]
+    [InlineData(false, TestRecordIntKey1, TestRecordIntKey2)]
+    public async Task CanUpsertManyRecordsAsync<TKey>(bool useDefinition, TKey testKey1, TKey testKey2)
+        where TKey : notnull
+    {
+        // Arrange
+        var record1 = CreateModel(testKey1, withVectors: true);
+        var record2 = CreateModel(testKey2, withVectors: true);
+
+        var collection = new ConcurrentDictionary<object, object>();
+        this._collectionStore.TryAdd(TestCollectionName, collection);
+
+        var sut = this.CreateRecordCollection<TKey>(useDefinition);
+
+        // Act
+        var actual = await sut.UpsertBatchAsync(
+            [record1, record2],
+            cancellationToken: this._testCancellationToken).ToListAsync();
+
+        // Assert
+        Assert.NotNull(actual);
+        Assert.Equal(2, actual.Count);
+        Assert.Equal(testKey1, actual[0]);
+        Assert.Equal(testKey2, actual[1]);
+
+        Assert.True(collection.ContainsKey(testKey1));
+        Assert.IsType<SinglePropsModel<TKey>>(collection[testKey1]);
+        Assert.Equal($"data {testKey1}", (collection[testKey1] as SinglePropsModel<TKey>)!.Data);
+    }
+
+    private static SinglePropsModel<TKey> CreateModel<TKey>(TKey key, bool withVectors)
+    {
+        return new SinglePropsModel<TKey>
+        {
+            Key = key,
+            Data = "data " + key,
+            Vector = withVectors ? new float[] { 1, 2, 3, 4 } : null,
+            NotAnnotated = null,
+        };
+    }
+
+    private VolatileVectorStoreRecordCollection<TKey, SinglePropsModel<TKey>> CreateRecordCollection<TKey>(bool useDefinition)
+        where TKey : notnull
+    {
+        return new VolatileVectorStoreRecordCollection<TKey, SinglePropsModel<TKey>>(
+            this._collectionStore,
+            TestCollectionName,
+            new()
+            {
+                VectorStoreRecordDefinition = useDefinition ? this._singlePropsDefinition : null
+            });
+    }
+
+    private readonly VectorStoreRecordDefinition _singlePropsDefinition = new()
+    {
+        Properties =
+        [
+            new VectorStoreRecordKeyProperty("Key", typeof(string)),
+            new VectorStoreRecordDataProperty("Data", typeof(string)),
+            new VectorStoreRecordVectorProperty("Vector", typeof(ReadOnlyMemory<float>))
+        ]
+    };
+
+    public sealed class SinglePropsModel<TKey>
+    {
+        [VectorStoreRecordKey]
+        public TKey? Key { get; set; }
+
+        [VectorStoreRecordData]
+        public string Data { get; set; } = string.Empty;
+
+        [VectorStoreRecordVector]
+        public ReadOnlyMemory<float>? Vector { get; set; }
+
+        public string? NotAnnotated { get; set; }
+    }
+}
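A short sketch of the seeding pattern the collection tests above rely on: inject a pre-populated backing dictionary through the internal constructor so asserts can inspect storage directly (illustrative only, not part of this patch; assumes the test assembly can see the internal constructor, e.g. via InternalsVisibleTo, and the Doc model is hypothetical):

using System;
using System.Collections.Concurrent;
using Microsoft.SemanticKernel.Data;

public sealed class Doc
{
    [VectorStoreRecordKey]
    public string Key { get; set; } = string.Empty;

    [VectorStoreRecordData]
    public string Data { get; set; } = string.Empty;

    [VectorStoreRecordVector(4)]
    public ReadOnlyMemory<float>? Vector { get; set; }
}

public static class SeedingSketch
{
    public static VolatileVectorStoreRecordCollection<string, Doc> CreateSeeded()
    {
        // Pre-populate the outer (collection name) and inner (record) dictionaries.
        var store = new ConcurrentDictionary<string, ConcurrentDictionary<object, object>>();
        var records = new ConcurrentDictionary<object, object>();
        records.TryAdd("testid1", new Doc { Key = "testid1", Data = "data testid1" });
        store.TryAdd("testcollection", records);

        // The collection reads and writes the injected dictionary, so a test can
        // assert against `records` directly after calling the method under test.
        return new VolatileVectorStoreRecordCollection<string, Doc>(store, "testcollection");
    }
}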
diff --git a/dotnet/src/SemanticKernel.UnitTests/Data/VolatileVectorStoreTests.cs b/dotnet/src/SemanticKernel.UnitTests/Data/VolatileVectorStoreTests.cs
new file mode 100644
index 000000000000..694d2239b224
--- /dev/null
+++ b/dotnet/src/SemanticKernel.UnitTests/Data/VolatileVectorStoreTests.cs
@@ -0,0 +1,77 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System;
+using System.Collections.Concurrent;
+using System.Linq;
+using System.Threading.Tasks;
+using Microsoft.SemanticKernel.Data;
+using Xunit;
+
+namespace SemanticKernel.UnitTests.Data;
+
+/// <summary>
+/// Contains tests for the <see cref="VolatileVectorStore"/> class.
+/// </summary>
+public class VolatileVectorStoreTests
+{
+    private const string TestCollectionName = "testcollection";
+
+    [Fact]
+    public void GetCollectionReturnsCollection()
+    {
+        // Arrange.
+        var sut = new VolatileVectorStore();
+
+        // Act.
+        var actual = sut.GetCollection<string, SinglePropsModel<string>>(TestCollectionName);
+
+        // Assert.
+        Assert.NotNull(actual);
+        Assert.IsType<VolatileVectorStoreRecordCollection<string, SinglePropsModel<string>>>(actual);
+    }
+
+    [Fact]
+    public void GetCollectionReturnsCollectionWithNonStringKey()
+    {
+        // Arrange.
+        var sut = new VolatileVectorStore();
+
+        // Act.
+        var actual = sut.GetCollection<int, SinglePropsModel<int>>(TestCollectionName);
+
+        // Assert.
+        Assert.NotNull(actual);
+        Assert.IsType<VolatileVectorStoreRecordCollection<int, SinglePropsModel<int>>>(actual);
+    }
+
+    [Fact]
+    public async Task ListCollectionNamesReadsDictionaryAsync()
+    {
+        // Arrange.
+        var collectionStore = new ConcurrentDictionary<string, ConcurrentDictionary<object, object>>();
+        collectionStore.TryAdd("collection1", new ConcurrentDictionary<object, object>());
+        collectionStore.TryAdd("collection2", new ConcurrentDictionary<object, object>());
+        var sut = new VolatileVectorStore(collectionStore);
+
+        // Act.
+        var collectionNames = sut.ListCollectionNamesAsync();
+
+        // Assert.
+        var collectionNamesList = await collectionNames.ToListAsync();
+        Assert.Equal(new[] { "collection1", "collection2" }, collectionNamesList);
+    }
+
+    public sealed class SinglePropsModel<TKey>
+    {
+        [VectorStoreRecordKey]
+        public required TKey Key { get; set; }
+
+        [VectorStoreRecordData]
+        public string Data { get; set; } = string.Empty;
+
+        [VectorStoreRecordVector(4)]
+        public ReadOnlyMemory<float>? Vector { get; set; }
+
+        public string? NotAnnotated { get; set; }
+    }
+}
diff --git a/dotnet/src/SemanticKernel.UnitTests/Text/TextChunkerInternationalTests.cs b/dotnet/src/SemanticKernel.UnitTests/Text/TextChunkerInternationalTests.cs
index ce3be9193191..47d61e74b164 100644
--- a/dotnet/src/SemanticKernel.UnitTests/Text/TextChunkerInternationalTests.cs
+++ b/dotnet/src/SemanticKernel.UnitTests/Text/TextChunkerInternationalTests.cs
@@ -15,7 +15,7 @@ public sealed class TextChunkerInternationalTests
     public sealed class StatefulTokenCounter
     {
         private readonly Dictionary<string, int> _callStats = [];
-        private readonly Tokenizer _tokenizer = Tokenizer.CreateTiktokenForModel("gpt-4");
+        private readonly Tokenizer _tokenizer = TiktokenTokenizer.CreateForModel("gpt-4");
 
         public int Count(string input)
         {
@@ -29,7 +29,7 @@ public int Count(string input)
     private static TokenCounter StatelessTokenCounter => (string input) =>
     {
-        var tokenizer = Tokenizer.CreateTiktokenForModel("gpt-4");
+        var tokenizer = TiktokenTokenizer.CreateForModel("gpt-4");
         return tokenizer.CountTokens(input);
     };
diff --git a/python/.coveragerc b/python/.coveragerc
index 521b2ffe70c9..263f05cd0111 100644
--- a/python/.coveragerc
+++ b/python/.coveragerc
@@ -10,8 +10,8 @@ omit =
     semantic_kernel/connectors/memory/mongodb_atlas/*
     semantic_kernel/connectors/memory/pinecone/*
     semantic_kernel/connectors/memory/postgres/*
-    semantic_kernel/connectors/memory/qdrant/*
-    semantic_kernel/connectors/memory/redis/*
+    semantic_kernel/connectors/memory/qdrant/qdrant_memory_store.py
+    semantic_kernel/connectors/memory/redis/redis_memory_store.py
     semantic_kernel/connectors/memory/usearch/*
     semantic_kernel/connectors/memory/weaviate/*
     semantic_kernel/reliability/*
@@ -33,4 +33,4 @@ exclude_lines =
     # TYPE_CHECKING and @overload blocks are never executed during pytest run
     if TYPE_CHECKING:
     @overload
-    @abstractmethod
\ No newline at end of file
+    @abstractmethod
diff --git a/python/.cspell.json b/python/.cspell.json
index 00961beae80c..79602e7ac6ac 100644
--- a/python/.cspell.json
+++ b/python/.cspell.json
@@ -45,6 +45,15 @@
         "generativeai",
         "genai",
         "protos",
-        "endregion"
+        "endregion",
+        "vertexai",
+        "aiplatform",
+        "serde",
+        "datamodel",
+        "vectorstoremodel",
+        "qdrant",
+        "huggingface",
+        "pytestmark",
+        "contoso"
     ]
 }
\ No newline at end of file
diff --git a/python/.env.example b/python/.env.example
index 58602848434d..d63e29eb17c3 100644
--- a/python/.env.example
+++ b/python/.env.example
@@ -1,7 +1,7 @@
 OPENAI_API_KEY=""
-OPEN_AI_CHAT_MODEL_ID=""
-OPEN_AI_TEXT_MODEL_ID=""
-OPEN_AI_EMBEDDING_MODEL_ID=""
+OPENAI_CHAT_MODEL_ID=""
+OPENAI_TEXT_MODEL_ID=""
+OPENAI_EMBEDDING_MODEL_ID=""
 OPENAI_ORG_ID=""
 AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=""
 AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=""
diff --git a/python/.vscode/tasks.json b/python/.vscode/tasks.json
index 3d7c72c4036e..fa9736a6fd5e 100644
--- a/python/.vscode/tasks.json
+++ b/python/.vscode/tasks.json
@@ -117,6 +117,24 @@
             },
             "problemMatcher": []
         },
+        {
+            "label": "Python: Tests - Unit - Failed Only",
+            "type": "shell",
+            "command": "poetry",
+            "args": [
"run", + "pytest", + "tests/unit/", + "--last-failed", + "-v" + ], + "group": "test", + "presentation": { + "reveal": "always", + "panel": "shared" + }, + "problemMatcher": [] + }, { "label": "Python: Tests - Code Coverage", "type": "shell", @@ -135,7 +153,13 @@ "args": [ "run", "pytest", - "tests/" + "tests/", + "-n", + "logical", + "--dist", + "loadfile", + "--dist", + "worksteal" ], "group": "test", "presentation": { diff --git a/python/mypy.ini b/python/mypy.ini index 6a30f83bd145..9f392f90a3ab 100644 --- a/python/mypy.ini +++ b/python/mypy.ini @@ -26,6 +26,8 @@ ignore_errors = true [mypy-semantic_kernel.connectors.memory.astradb.*] ignore_errors = true +[mypy-semantic_kernel.connectors.memory.azure_ai_search.*] +ignore_errors = false [mypy-semantic_kernel.connectors.memory.azure_cognitive_search.*] ignore_errors = true @@ -50,9 +52,13 @@ ignore_errors = true [mypy-semantic_kernel.connectors.memory.postgres.*] ignore_errors = true +[mypy-semantic_kernel.connectors.memory.qdrant.qdrant_vector_record_store.*] +ignore_errors = true [mypy-semantic_kernel.connectors.memory.qdrant.*] ignore_errors = true +[mypy-semantic_kernel.connectors.memory.redis.redis_vector_record_store.*] +ignore_errors = true [mypy-semantic_kernel.connectors.memory.redis.*] ignore_errors = true diff --git a/python/poetry.lock b/python/poetry.lock index 979b5a2cb51f..3781e85f22e4 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -2,18 +2,18 @@ [[package]] name = "accelerate" -version = "0.31.0" +version = "0.33.0" description = "Accelerate" optional = false python-versions = ">=3.8.0" files = [ - {file = "accelerate-0.31.0-py3-none-any.whl", hash = "sha256:0fc608dc49584f64d04711a39711d73cb0ad4ef3d21cddee7ef2216e29471144"}, - {file = "accelerate-0.31.0.tar.gz", hash = "sha256:b5199865b26106ccf9205acacbe8e4b3b428ad585e7c472d6a46f6fb75b6c176"}, + {file = "accelerate-0.33.0-py3-none-any.whl", hash = "sha256:0a7f33d60ba09afabd028d4f0856dd19c5a734b7a596d637d9dd6e3d0eadbaf3"}, + {file = "accelerate-0.33.0.tar.gz", hash = "sha256:11ba481ed6ea09191775df55ce464aeeba67a024bd0261a44b77b30fb439e26a"}, ] [package.dependencies] -huggingface-hub = "*" -numpy = ">=1.17" +huggingface-hub = ">=0.21.0" +numpy = ">=1.17,<2.0.0" packaging = ">=20.0" psutil = "*" pyyaml = "*" @@ -32,91 +32,118 @@ test-trackers = ["comet-ml", "dvclive", "tensorboard", "wandb"] testing = ["bitsandbytes", "datasets", "diffusers", "evaluate", "parameterized", "pytest (>=7.2.0,<=8.0.0)", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", "timm", "torchpippy (>=0.2.0)", "tqdm", "transformers"] [[package]] -name = "aiohttp" -version = "3.9.5" -description = "Async http client/server framework (asyncio)" +name = "aiohappyeyeballs" +version = "2.3.5" +description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" files = [ - {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7"}, - {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c"}, - {file = "aiohttp-3.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a"}, - {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430"}, - {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3"}, - {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b"}, - {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72"}, - {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0"}, - {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558"}, - {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db"}, - {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f"}, - {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832"}, - {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10"}, - {file = "aiohttp-3.9.5-cp310-cp310-win32.whl", hash = "sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb"}, - {file = "aiohttp-3.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb"}, - {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342"}, - {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d"}, - {file = "aiohttp-3.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424"}, - {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee"}, - {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2"}, - {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233"}, - {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595"}, - {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6"}, - {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d"}, - {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323"}, - {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9"}, - {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771"}, - {file = 
"aiohttp-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75"}, - {file = "aiohttp-3.9.5-cp311-cp311-win32.whl", hash = "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6"}, - {file = "aiohttp-3.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a"}, - {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678"}, - {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c"}, - {file = "aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f"}, - {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4"}, - {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c"}, - {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa"}, - {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58"}, - {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf"}, - {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f"}, - {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81"}, - {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a"}, - {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a"}, - {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da"}, - {file = "aiohttp-3.9.5-cp312-cp312-win32.whl", hash = "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59"}, - {file = "aiohttp-3.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888"}, - {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8"}, - {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8"}, - {file = "aiohttp-3.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78"}, - {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b"}, - {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de"}, - {file = 
"aiohttp-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac"}, - {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11"}, - {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd"}, - {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1"}, - {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e"}, - {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156"}, - {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031"}, - {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe"}, - {file = "aiohttp-3.9.5-cp38-cp38-win32.whl", hash = "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da"}, - {file = "aiohttp-3.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a"}, - {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed"}, - {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a"}, - {file = "aiohttp-3.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372"}, - {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb"}, - {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24"}, - {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2"}, - {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106"}, - {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b"}, - {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475"}, - {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed"}, - {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c"}, - {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46"}, - {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"}, - {file = 
"aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"}, - {file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"}, - {file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"}, + {file = "aiohappyeyeballs-2.3.5-py3-none-any.whl", hash = "sha256:4d6dea59215537dbc746e93e779caea8178c866856a721c9c660d7a5a7b8be03"}, + {file = "aiohappyeyeballs-2.3.5.tar.gz", hash = "sha256:6fa48b9f1317254f122a07a131a86b71ca6946ca989ce6326fff54a99a920105"}, ] -[package.dependencies] +[[package]] +name = "aiohttp" +version = "3.10.5" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:18a01eba2574fb9edd5f6e5fb25f66e6ce061da5dab5db75e13fe1558142e0a3"}, + {file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:94fac7c6e77ccb1ca91e9eb4cb0ac0270b9fb9b289738654120ba8cebb1189c6"}, + {file = "aiohttp-3.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2f1f1c75c395991ce9c94d3e4aa96e5c59c8356a15b1c9231e783865e2772699"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f7acae3cf1a2a2361ec4c8e787eaaa86a94171d2417aae53c0cca6ca3118ff6"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:94c4381ffba9cc508b37d2e536b418d5ea9cfdc2848b9a7fea6aebad4ec6aac1"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c31ad0c0c507894e3eaa843415841995bf8de4d6b2d24c6e33099f4bc9fc0d4f"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0912b8a8fadeb32ff67a3ed44249448c20148397c1ed905d5dac185b4ca547bb"}, + {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d93400c18596b7dc4794d48a63fb361b01a0d8eb39f28800dc900c8fbdaca91"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d00f3c5e0d764a5c9aa5a62d99728c56d455310bcc288a79cab10157b3af426f"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d742c36ed44f2798c8d3f4bc511f479b9ceef2b93f348671184139e7d708042c"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:814375093edae5f1cb31e3407997cf3eacefb9010f96df10d64829362ae2df69"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8224f98be68a84b19f48e0bdc14224b5a71339aff3a27df69989fa47d01296f3"}, + {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d9a487ef090aea982d748b1b0d74fe7c3950b109df967630a20584f9a99c0683"}, + {file = "aiohttp-3.10.5-cp310-cp310-win32.whl", hash = "sha256:d9ef084e3dc690ad50137cc05831c52b6ca428096e6deb3c43e95827f531d5ef"}, + {file = "aiohttp-3.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:66bf9234e08fe561dccd62083bf67400bdbf1c67ba9efdc3dac03650e97c6088"}, + {file = "aiohttp-3.10.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8c6a4e5e40156d72a40241a25cc226051c0a8d816610097a8e8f517aeacd59a2"}, + {file = "aiohttp-3.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c634a3207a5445be65536d38c13791904fda0748b9eabf908d3fe86a52941cf"}, + {file = "aiohttp-3.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:4aff049b5e629ef9b3e9e617fa6e2dfeda1bf87e01bcfecaf3949af9e210105e"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1942244f00baaacaa8155eca94dbd9e8cc7017deb69b75ef67c78e89fdad3c77"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04a1f2a65ad2f93aa20f9ff9f1b672bf912413e5547f60749fa2ef8a644e061"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f2bfc0032a00405d4af2ba27f3c429e851d04fad1e5ceee4080a1c570476697"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:424ae21498790e12eb759040bbb504e5e280cab64693d14775c54269fd1d2bb7"}, + {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:975218eee0e6d24eb336d0328c768ebc5d617609affaca5dbbd6dd1984f16ed0"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4120d7fefa1e2d8fb6f650b11489710091788de554e2b6f8347c7a20ceb003f5"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b90078989ef3fc45cf9221d3859acd1108af7560c52397ff4ace8ad7052a132e"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ba5a8b74c2a8af7d862399cdedce1533642fa727def0b8c3e3e02fcb52dca1b1"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:02594361128f780eecc2a29939d9dfc870e17b45178a867bf61a11b2a4367277"}, + {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8fb4fc029e135859f533025bc82047334e24b0d489e75513144f25408ecaf058"}, + {file = "aiohttp-3.10.5-cp311-cp311-win32.whl", hash = "sha256:e1ca1ef5ba129718a8fc827b0867f6aa4e893c56eb00003b7367f8a733a9b072"}, + {file = "aiohttp-3.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:349ef8a73a7c5665cca65c88ab24abe75447e28aa3bc4c93ea5093474dfdf0ff"}, + {file = "aiohttp-3.10.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:305be5ff2081fa1d283a76113b8df7a14c10d75602a38d9f012935df20731487"}, + {file = "aiohttp-3.10.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3a1c32a19ee6bbde02f1cb189e13a71b321256cc1d431196a9f824050b160d5a"}, + {file = "aiohttp-3.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:61645818edd40cc6f455b851277a21bf420ce347baa0b86eaa41d51ef58ba23d"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c225286f2b13bab5987425558baa5cbdb2bc925b2998038fa028245ef421e75"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ba01ebc6175e1e6b7275c907a3a36be48a2d487549b656aa90c8a910d9f3178"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8eaf44ccbc4e35762683078b72bf293f476561d8b68ec8a64f98cf32811c323e"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c43eb1ab7cbf411b8e387dc169acb31f0ca0d8c09ba63f9eac67829585b44f"}, + {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de7a5299827253023c55ea549444e058c0eb496931fa05d693b95140a947cb73"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4790f0e15f00058f7599dab2b206d3049d7ac464dc2e5eae0e93fa18aee9e7bf"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:44b324a6b8376a23e6ba25d368726ee3bc281e6ab306db80b5819999c737d820"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0d277cfb304118079e7044aad0b76685d30ecb86f83a0711fc5fb257ffe832ca"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:54d9ddea424cd19d3ff6128601a4a4d23d54a421f9b4c0fff740505813739a91"}, + {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4f1c9866ccf48a6df2b06823e6ae80573529f2af3a0992ec4fe75b1a510df8a6"}, + {file = "aiohttp-3.10.5-cp312-cp312-win32.whl", hash = "sha256:dc4826823121783dccc0871e3f405417ac116055bf184ac04c36f98b75aacd12"}, + {file = "aiohttp-3.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:22c0a23a3b3138a6bf76fc553789cb1a703836da86b0f306b6f0dc1617398abc"}, + {file = "aiohttp-3.10.5-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7f6b639c36734eaa80a6c152a238242bedcee9b953f23bb887e9102976343092"}, + {file = "aiohttp-3.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f29930bc2921cef955ba39a3ff87d2c4398a0394ae217f41cb02d5c26c8b1b77"}, + {file = "aiohttp-3.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f489a2c9e6455d87eabf907ac0b7d230a9786be43fbe884ad184ddf9e9c1e385"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:123dd5b16b75b2962d0fff566effb7a065e33cd4538c1692fb31c3bda2bfb972"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b98e698dc34966e5976e10bbca6d26d6724e6bdea853c7c10162a3235aba6e16"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3b9162bab7e42f21243effc822652dc5bb5e8ff42a4eb62fe7782bcbcdfacf6"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1923a5c44061bffd5eebeef58cecf68096e35003907d8201a4d0d6f6e387ccaa"}, + {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d55f011da0a843c3d3df2c2cf4e537b8070a419f891c930245f05d329c4b0689"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:afe16a84498441d05e9189a15900640a2d2b5e76cf4efe8cbb088ab4f112ee57"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8112fb501b1e0567a1251a2fd0747baae60a4ab325a871e975b7bb67e59221f"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1e72589da4c90337837fdfe2026ae1952c0f4a6e793adbbfbdd40efed7c63599"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4d46c7b4173415d8e583045fbc4daa48b40e31b19ce595b8d92cf639396c15d5"}, + {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33e6bc4bab477c772a541f76cd91e11ccb6d2efa2b8d7d7883591dfb523e5987"}, + {file = "aiohttp-3.10.5-cp313-cp313-win32.whl", hash = "sha256:c58c6837a2c2a7cf3133983e64173aec11f9c2cd8e87ec2fdc16ce727bcf1a04"}, + {file = "aiohttp-3.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:38172a70005252b6893088c0f5e8a47d173df7cc2b2bd88650957eb84fcf5022"}, + {file = "aiohttp-3.10.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f6f18898ace4bcd2d41a122916475344a87f1dfdec626ecde9ee802a711bc569"}, + {file = "aiohttp-3.10.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5ede29d91a40ba22ac1b922ef510aab871652f6c88ef60b9dcdf773c6d32ad7a"}, + {file = "aiohttp-3.10.5-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:673f988370f5954df96cc31fd99c7312a3af0a97f09e407399f61583f30da9bc"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58718e181c56a3c02d25b09d4115eb02aafe1a732ce5714ab70326d9776457c3"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b38b1570242fbab8d86a84128fb5b5234a2f70c2e32f3070143a6d94bc854cf"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:074d1bff0163e107e97bd48cad9f928fa5a3eb4b9d33366137ffce08a63e37fe"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd31f176429cecbc1ba499d4aba31aaccfea488f418d60376b911269d3b883c5"}, + {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7384d0b87d4635ec38db9263e6a3f1eb609e2e06087f0aa7f63b76833737b471"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8989f46f3d7ef79585e98fa991e6ded55d2f48ae56d2c9fa5e491a6e4effb589"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c83f7a107abb89a227d6c454c613e7606c12a42b9a4ca9c5d7dad25d47c776ae"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cde98f323d6bf161041e7627a5fd763f9fd829bcfcd089804a5fdce7bb6e1b7d"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:676f94c5480d8eefd97c0c7e3953315e4d8c2b71f3b49539beb2aa676c58272f"}, + {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2d21ac12dc943c68135ff858c3a989f2194a709e6e10b4c8977d7fcd67dfd511"}, + {file = "aiohttp-3.10.5-cp38-cp38-win32.whl", hash = "sha256:17e997105bd1a260850272bfb50e2a328e029c941c2708170d9d978d5a30ad9a"}, + {file = "aiohttp-3.10.5-cp38-cp38-win_amd64.whl", hash = "sha256:1c19de68896747a2aa6257ae4cf6ef59d73917a36a35ee9d0a6f48cff0f94db8"}, + {file = "aiohttp-3.10.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7e2fe37ac654032db1f3499fe56e77190282534810e2a8e833141a021faaab0e"}, + {file = "aiohttp-3.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5bf3ead3cb66ab990ee2561373b009db5bc0e857549b6c9ba84b20bc462e172"}, + {file = "aiohttp-3.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b2c16a919d936ca87a3c5f0e43af12a89a3ce7ccbce59a2d6784caba945b68b"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad146dae5977c4dd435eb31373b3fe9b0b1bf26858c6fc452bf6af394067e10b"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c5c6fa16412b35999320f5c9690c0f554392dc222c04e559217e0f9ae244b92"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95c4dc6f61d610bc0ee1edc6f29d993f10febfe5b76bb470b486d90bbece6b22"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da452c2c322e9ce0cfef392e469a26d63d42860f829026a63374fde6b5c5876f"}, + {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:898715cf566ec2869d5cb4d5fb4be408964704c46c96b4be267442d265390f32"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:391cc3a9c1527e424c6865e087897e766a917f15dddb360174a70467572ac6ce"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:380f926b51b92d02a34119d072f178d80bbda334d1a7e10fa22d467a66e494db"}, + {file = 
"aiohttp-3.10.5-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce91db90dbf37bb6fa0997f26574107e1b9d5ff939315247b7e615baa8ec313b"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9093a81e18c45227eebe4c16124ebf3e0d893830c6aca7cc310bfca8fe59d857"}, + {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ee40b40aa753d844162dcc80d0fe256b87cba48ca0054f64e68000453caead11"}, + {file = "aiohttp-3.10.5-cp39-cp39-win32.whl", hash = "sha256:03f2645adbe17f274444953bdea69f8327e9d278d961d85657cb0d06864814c1"}, + {file = "aiohttp-3.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:d17920f18e6ee090bdd3d0bfffd769d9f2cb4c8ffde3eb203777a3895c128862"}, + {file = "aiohttp-3.10.5.tar.gz", hash = "sha256:f071854b47d39591ce9a17981c46790acb30518e2f83dfca8db2dfa091178691"}, +] + +[package.dependencies] +aiohappyeyeballs = ">=2.3.0" aiosignal = ">=1.1.2" async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} attrs = ">=17.3.0" @@ -125,7 +152,7 @@ multidict = ">=4.5,<7.0" yarl = ">=1.0,<2.0" [package.extras] -speedups = ["Brotli", "aiodns", "brotlicffi"] +speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] [[package]] name = "aiosignal" @@ -152,6 +179,31 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +[[package]] +name = "anthropic" +version = "0.32.0" +description = "The official Python library for the anthropic API" +optional = false +python-versions = ">=3.7" +files = [ + {file = "anthropic-0.32.0-py3-none-any.whl", hash = "sha256:302c7c652b05a26c418f70697b585d7b47daac36210d097a0daa45ecda89f258"}, + {file = "anthropic-0.32.0.tar.gz", hash = "sha256:1027bddeb7c3cbcb5e16d5e3b4d4a8d17b6258ca2fb4298bf91cc69adb148452"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tokenizers = ">=0.13.0" +typing-extensions = ">=4.7,<5" + +[package.extras] +bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"] +vertex = ["google-auth (>=2,<3)"] + [[package]] name = "anyio" version = "4.4.0" @@ -233,22 +285,22 @@ files = [ [[package]] name = "attrs" -version = "23.2.0" +version = "24.2.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.7" files = [ - {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, - {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, + {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, + {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] [package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] -tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", 
"pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "authlib" @@ -266,13 +318,13 @@ cryptography = "*" [[package]] name = "azure-ai-inference" -version = "1.0.0b2" +version = "1.0.0b3" description = "Microsoft Azure Ai Inference Client Library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "azure-ai-inference-1.0.0b2.tar.gz", hash = "sha256:efe2ad0c79ab80d3323edb919cb769a5e21d8ed2c56fe17f9b737893432e0a99"}, - {file = "azure_ai_inference-1.0.0b2-py3-none-any.whl", hash = "sha256:fb35a30867419590d2c7a52e287c456c9bd264690bdfcdec54930c3e73897a8f"}, + {file = "azure-ai-inference-1.0.0b3.tar.gz", hash = "sha256:1e99dc74c3b335a457500311bbbadb348f54dc4c12252a93cb8ab78d6d217ff0"}, + {file = "azure_ai_inference-1.0.0b3-py3-none-any.whl", hash = "sha256:6734ca7334c809a170beb767f1f1455724ab3f006cb60045e42a833c0e764403"}, ] [package.dependencies] @@ -327,13 +379,13 @@ typing-extensions = ">=4.6.0" [[package]] name = "azure-identity" -version = "1.16.1" +version = "1.17.1" description = "Microsoft Azure Identity Library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "azure-identity-1.16.1.tar.gz", hash = "sha256:6d93f04468f240d59246d8afde3091494a5040d4f141cad0f49fc0c399d0d91e"}, - {file = "azure_identity-1.16.1-py3-none-any.whl", hash = "sha256:8fb07c25642cd4ac422559a8b50d3e77f73dcc2bbfaba419d06d6c9d7cff6726"}, + {file = "azure-identity-1.17.1.tar.gz", hash = "sha256:32ecc67cc73f4bd0595e4f64b1ca65cd05186f4fe6f98ed2ae9f1aa32646efea"}, + {file = "azure_identity-1.17.1-py3-none-any.whl", hash = "sha256:db8d59c183b680e763722bfe8ebc45930e6c57df510620985939f7f3191e0382"}, ] [package.dependencies] @@ -341,6 +393,7 @@ azure-core = ">=1.23.0" cryptography = ">=2.5" msal = ">=1.24.0" msal-extensions = ">=0.3.0" +typing-extensions = ">=4.0.0" [[package]] name = "azure-search-documents" @@ -371,38 +424,38 @@ files = [ [[package]] name = "bcrypt" -version = "4.1.3" +version = "4.2.0" description = "Modern password hashing for your software and your servers" optional = false python-versions = ">=3.7" files = [ - {file = "bcrypt-4.1.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:48429c83292b57bf4af6ab75809f8f4daf52aa5d480632e53707805cc1ce9b74"}, - {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8bea4c152b91fd8319fef4c6a790da5c07840421c2b785084989bf8bbb7455"}, - {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d3b317050a9a711a5c7214bf04e28333cf528e0ed0ec9a4e55ba628d0f07c1a"}, - {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:094fd31e08c2b102a14880ee5b3d09913ecf334cd604af27e1013c76831f7b05"}, - {file = "bcrypt-4.1.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:4fb253d65da30d9269e0a6f4b0de32bd657a0208a6f4e43d3e645774fb5457f3"}, - {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:193bb49eeeb9c1e2db9ba65d09dc6384edd5608d9d672b4125e9320af9153a15"}, - {file = 
"bcrypt-4.1.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8cbb119267068c2581ae38790e0d1fbae65d0725247a930fc9900c285d95725d"}, - {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6cac78a8d42f9d120b3987f82252bdbeb7e6e900a5e1ba37f6be6fe4e3848286"}, - {file = "bcrypt-4.1.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:01746eb2c4299dd0ae1670234bf77704f581dd72cc180f444bfe74eb80495b64"}, - {file = "bcrypt-4.1.3-cp37-abi3-win32.whl", hash = "sha256:037c5bf7c196a63dcce75545c8874610c600809d5d82c305dd327cd4969995bf"}, - {file = "bcrypt-4.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:8a893d192dfb7c8e883c4576813bf18bb9d59e2cfd88b68b725990f033f1b978"}, - {file = "bcrypt-4.1.3-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d4cf6ef1525f79255ef048b3489602868c47aea61f375377f0d00514fe4a78c"}, - {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5698ce5292a4e4b9e5861f7e53b1d89242ad39d54c3da451a93cac17b61921a"}, - {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec3c2e1ca3e5c4b9edb94290b356d082b721f3f50758bce7cce11d8a7c89ce84"}, - {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3a5be252fef513363fe281bafc596c31b552cf81d04c5085bc5dac29670faa08"}, - {file = "bcrypt-4.1.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5f7cd3399fbc4ec290378b541b0cf3d4398e4737a65d0f938c7c0f9d5e686611"}, - {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:c4c8d9b3e97209dd7111bf726e79f638ad9224b4691d1c7cfefa571a09b1b2d6"}, - {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:31adb9cbb8737a581a843e13df22ffb7c84638342de3708a98d5c986770f2834"}, - {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:551b320396e1d05e49cc18dd77d970accd52b322441628aca04801bbd1d52a73"}, - {file = "bcrypt-4.1.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6717543d2c110a155e6821ce5670c1f512f602eabb77dba95717ca76af79867d"}, - {file = "bcrypt-4.1.3-cp39-abi3-win32.whl", hash = "sha256:6004f5229b50f8493c49232b8e75726b568535fd300e5039e255d919fc3a07f2"}, - {file = "bcrypt-4.1.3-cp39-abi3-win_amd64.whl", hash = "sha256:2505b54afb074627111b5a8dc9b6ae69d0f01fea65c2fcaea403448c503d3991"}, - {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:cb9c707c10bddaf9e5ba7cdb769f3e889e60b7d4fea22834b261f51ca2b89fed"}, - {file = "bcrypt-4.1.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9f8ea645eb94fb6e7bea0cf4ba121c07a3a182ac52876493870033141aa687bc"}, - {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f44a97780677e7ac0ca393bd7982b19dbbd8d7228c1afe10b128fd9550eef5f1"}, - {file = "bcrypt-4.1.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d84702adb8f2798d813b17d8187d27076cca3cd52fe3686bb07a9083930ce650"}, - {file = "bcrypt-4.1.3.tar.gz", hash = "sha256:2ee15dd749f5952fe3f0430d0ff6b74082e159c50332a1413d51b5689cf06623"}, + {file = "bcrypt-4.2.0-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d84cf6d877918620b687b8fd1bf7781d11e8a0998f576c7aa939776b512b98d"}, + {file = 
"bcrypt-4.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:1bb429fedbe0249465cdd85a58e8376f31bb315e484f16e68ca4c786dcc04291"}, + {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:655ea221910bcac76ea08aaa76df427ef8625f92e55a8ee44fbf7753dbabb328"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:1ee38e858bf5d0287c39b7a1fc59eec64bbf880c7d504d3a06a96c16e14058e7"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0da52759f7f30e83f1e30a888d9163a81353ef224d82dc58eb5bb52efcabc399"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3698393a1b1f1fd5714524193849d0c6d524d33523acca37cd28f02899285060"}, + {file = "bcrypt-4.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:762a2c5fb35f89606a9fde5e51392dad0cd1ab7ae64149a8b935fe8d79dd5ed7"}, + {file = "bcrypt-4.2.0-cp37-abi3-win32.whl", hash = "sha256:5a1e8aa9b28ae28020a3ac4b053117fb51c57a010b9f969603ed885f23841458"}, + {file = "bcrypt-4.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:8f6ede91359e5df88d1f5c1ef47428a4420136f3ce97763e31b86dd8280fbdf5"}, + {file = "bcrypt-4.2.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:c52aac18ea1f4a4f65963ea4f9530c306b56ccd0c6f8c8da0c06976e34a6e841"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bbbfb2734f0e4f37c5136130405332640a1e46e6b23e000eeff2ba8d005da68"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3413bd60460f76097ee2e0a493ccebe4a7601918219c02f503984f0a7ee0aebe"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8d7bb9c42801035e61c109c345a28ed7e84426ae4865511eb82e913df18f58c2"}, + {file = "bcrypt-4.2.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3d3a6d28cb2305b43feac298774b997e372e56c7c7afd90a12b3dc49b189151c"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:9c1c4ad86351339c5f320ca372dfba6cb6beb25e8efc659bedd918d921956bae"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:27fe0f57bb5573104b5a6de5e4153c60814c711b29364c10a75a54bb6d7ff48d"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:8ac68872c82f1add6a20bd489870c71b00ebacd2e9134a8aa3f98a0052ab4b0e"}, + {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8"}, + {file = "bcrypt-4.2.0-cp39-abi3-win32.whl", hash = "sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34"}, + {file = "bcrypt-4.2.0-cp39-abi3-win_amd64.whl", hash = "sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9"}, + {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a"}, + {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db"}, + {file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:1ff39b78a52cf03fdf902635e4c81e544714861ba3f0efc56558979dd4f09170"}, + {file = "bcrypt-4.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:373db9abe198e8e2c70d12b479464e0d5092cc122b20ec504097b5f2297ed184"}, + {file = "bcrypt-4.2.0.tar.gz", hash = "sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221"}, ] [package.extras] @@ -475,13 +528,13 @@ virtualenv = ["virtualenv (>=20.0.35)"] 

[[package]]
name = "cachetools"
-version = "5.3.3"
+version = "5.4.0"
description = "Extensible memoizing collections and decorators"
optional = false
python-versions = ">=3.7"
files = [
- {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"},
- {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"},
+ {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"},
+ {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"},
]

[[package]]
@@ -497,63 +550,78 @@ files = [
[[package]]
name = "cffi"
-version = "1.16.0"
+version = "1.17.0"
description = "Foreign Function Interface for Python calling C code."
optional = false
python-versions = ">=3.8"
files = [
- {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"},
- {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"},
- {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"},
- {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"},
- {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"},
- {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"},
- {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"},
- {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"},
- {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"},
- {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"},
- {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"},
- {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"},
- {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"},
- {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"},
- {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"},
- {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"},
- {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"},
- {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"},
- {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"},
- {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"},
- {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"},
- {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"},
- {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"},
- {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"},
- {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"},
- {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"},
- {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"},
- {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"},
- {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"},
- {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"},
- {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"},
- {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"},
- {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"},
- {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"},
- {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"},
- {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"},
- {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"},
- {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"},
- {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"},
- {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"},
- {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"},
- {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"},
- {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"},
- {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"},
- {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"},
- {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"},
- {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"},
- {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"},
- {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"},
- {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"},
- {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"},
- {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"},
+ {file = "cffi-1.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb"},
+ {file = "cffi-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a"},
+ {file = "cffi-1.17.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c815270206f983309915a6844fe994b2fa47e5d05c4c4cef267c3b30e34dbe42"},
+ {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6bdcd415ba87846fd317bee0774e412e8792832e7805938987e4ede1d13046d"},
+ {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a98748ed1a1df4ee1d6f927e151ed6c1a09d5ec21684de879c7ea6aa96f58f2"},
+ {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a048d4f6630113e54bb4b77e315e1ba32a5a31512c31a273807d0027a7e69ab"},
+ {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24aa705a5f5bd3a8bcfa4d123f03413de5d86e497435693b638cbffb7d5d8a1b"},
+ {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:856bf0924d24e7f93b8aee12a3a1095c34085600aa805693fb7f5d1962393206"},
+ {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4304d4416ff032ed50ad6bb87416d802e67139e31c0bde4628f36a47a3164bfa"},
+ {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:331ad15c39c9fe9186ceaf87203a9ecf5ae0ba2538c9e898e3a6967e8ad3db6f"},
+ {file = "cffi-1.17.0-cp310-cp310-win32.whl", hash = "sha256:669b29a9eca6146465cc574659058ed949748f0809a2582d1f1a324eb91054dc"},
+ {file = "cffi-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:48b389b1fd5144603d61d752afd7167dfd205973a43151ae5045b35793232aa2"},
+ {file = "cffi-1.17.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720"},
+ {file = "cffi-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9"},
+ {file = "cffi-1.17.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb"},
+ {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424"},
+ {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d"},
+ {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8"},
+ {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6"},
+ {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91"},
+ {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8"},
+ {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb"},
+ {file = "cffi-1.17.0-cp311-cp311-win32.whl", hash = "sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9"},
+ {file = "cffi-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0"},
+ {file = "cffi-1.17.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc"},
+ {file = "cffi-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59"},
+ {file = "cffi-1.17.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb"},
+ {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195"},
+ {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e"},
+ {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828"},
+ {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150"},
+ {file = "cffi-1.17.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a"},
+ {file = "cffi-1.17.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885"},
+ {file = "cffi-1.17.0-cp312-cp312-win32.whl", hash = "sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492"},
+ {file = "cffi-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2"},
+ {file = "cffi-1.17.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118"},
+ {file = "cffi-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7"},
+ {file = "cffi-1.17.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377"},
+ {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb"},
+ {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555"},
+ {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204"},
+ {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f"},
+ {file = "cffi-1.17.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0"},
+ {file = "cffi-1.17.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4"},
+ {file = "cffi-1.17.0-cp313-cp313-win32.whl", hash = "sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a"},
+ {file = "cffi-1.17.0-cp313-cp313-win_amd64.whl", hash = "sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7"},
+ {file = "cffi-1.17.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:964823b2fc77b55355999ade496c54dde161c621cb1f6eac61dc30ed1b63cd4c"},
+ {file = "cffi-1.17.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:516a405f174fd3b88829eabfe4bb296ac602d6a0f68e0d64d5ac9456194a5b7e"},
+ {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dec6b307ce928e8e112a6bb9921a1cb00a0e14979bf28b98e084a4b8a742bd9b"},
+ {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4094c7b464cf0a858e75cd14b03509e84789abf7b79f8537e6a72152109c76e"},
+ {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2404f3de742f47cb62d023f0ba7c5a916c9c653d5b368cc966382ae4e57da401"},
+ {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa9d43b02a0c681f0bfbc12d476d47b2b2b6a3f9287f11ee42989a268a1833c"},
+ {file = "cffi-1.17.0-cp38-cp38-win32.whl", hash = "sha256:0bb15e7acf8ab35ca8b24b90af52c8b391690ef5c4aec3d31f38f0d37d2cc499"},
+ {file = "cffi-1.17.0-cp38-cp38-win_amd64.whl", hash = "sha256:93a7350f6706b31f457c1457d3a3259ff9071a66f312ae64dc024f049055f72c"},
+ {file = "cffi-1.17.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1a2ddbac59dc3716bc79f27906c010406155031a1c801410f1bafff17ea304d2"},
+ {file = "cffi-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6327b572f5770293fc062a7ec04160e89741e8552bf1c358d1a23eba68166759"},
+ {file = "cffi-1.17.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbc183e7bef690c9abe5ea67b7b60fdbca81aa8da43468287dae7b5c046107d4"},
+ {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bdc0f1f610d067c70aa3737ed06e2726fd9d6f7bfee4a351f4c40b6831f4e82"},
+ {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d872186c1617d143969defeadac5a904e6e374183e07977eedef9c07c8953bf"},
+ {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d46ee4764b88b91f16661a8befc6bfb24806d885e27436fdc292ed7e6f6d058"},
+ {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f76a90c345796c01d85e6332e81cab6d70de83b829cf1d9762d0a3da59c7932"},
+ {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e60821d312f99d3e1569202518dddf10ae547e799d75aef3bca3a2d9e8ee693"},
+ {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:eb09b82377233b902d4c3fbeeb7ad731cdab579c6c6fda1f763cd779139e47c3"},
+ {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:24658baf6224d8f280e827f0a50c46ad819ec8ba380a42448e24459daf809cf4"},
+ {file = "cffi-1.17.0-cp39-cp39-win32.whl", hash = "sha256:0fdacad9e0d9fc23e519efd5ea24a70348305e8d7d85ecbb1a5fa66dc834e7fb"},
+ {file = "cffi-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:7cbc78dc018596315d4e7841c8c3a7ae31cc4d638c9b627f87d52e8abaaf2d29"},
+ {file = "cffi-1.17.0.tar.gz", hash = "sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76"},
]

[package.dependencies]
@@ -682,55 +750,54 @@ files = [
[[package]]
name = "cheap-repr"
-version = "0.5.1"
+version = "0.5.2"
description = "Better version of repr/reprlib for short, cheap string representations."
optional = false
python-versions = "*"
files = [
- {file = "cheap_repr-0.5.1-py2.py3-none-any.whl", hash = "sha256:30096998aeb49367a4a153988d7a99dce9dc59bbdd4b19740da6b4f3f97cf2ff"},
- {file = "cheap_repr-0.5.1.tar.gz", hash = "sha256:31ec63b9d8394aa23d746c8376c8307f75f9fca0b983566b8bcf13cc661fe6dd"},
+ {file = "cheap_repr-0.5.2-py2.py3-none-any.whl", hash = "sha256:537ec1991bfee885c13c6d473afd110a408e039cde26882e95bf92761556ab6e"},
+ {file = "cheap_repr-0.5.2.tar.gz", hash = "sha256:001a5cf8adb0305c7ad3152c5f776040ac2a559d97f85770cebcb28c6ca5a30f"},
]

[package.extras]
-tests = ["Django", "Django (<2)", "Django (<3)", "chainmap", "numpy (>=1.16.3)", "numpy (>=1.16.3,<1.17)", "numpy (>=1.16.3,<1.19)", "pandas (>=0.24.2)", "pandas (>=0.24.2,<0.25)", "pandas (>=0.24.2,<0.26)", "pytest"]
+tests = ["Django", "numpy (>=1.16.3)", "pandas (>=0.24.2)", "pytest"]

[[package]]
name = "chroma-hnswlib"
-version = "0.7.5"
+version = "0.7.6"
description = "Chromas fork of hnswlib"
optional = false
python-versions = "*"
files = [
- {file = "chroma_hnswlib-0.7.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:624ab09491a9bf2523ad54d8fdf8e868a706814373534eeb1f0d8195db03be6c"},
- {file = "chroma_hnswlib-0.7.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b838b4f231bbf0e62b89552f2814eebbc3e17173abe50cb547c4c4aadbf62f6c"},
- {file = "chroma_hnswlib-0.7.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5f234e67632a33f75d8f6cb39f457fb7d10d16fdb48ba52f55a8983ac59cb4f"},
- {file = "chroma_hnswlib-0.7.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf54240944a0487d7aefb1338b6ea74f784aa888c41c6f1e96b66e24a40d37cd"},
- {file = "chroma_hnswlib-0.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:49fec3f01adc829104ed428b7d4362382092d481e01456f3f46040af5aa044ee"},
- {file = "chroma_hnswlib-0.7.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:18cdbbf93c5b4855f2a050808976f5d261b36055b392a2df632a574410ffefaa"},
- {file = "chroma_hnswlib-0.7.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:35b17814041d9512e99832327f49138da470df6478c9ad1e9ca334d9c18720dd"},
- {file = "chroma_hnswlib-0.7.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70893cfde9ac979ec17bb7389d6aa47e422930ccc20353778524d668473ee702"},
- {file = "chroma_hnswlib-0.7.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1453494aea293ce2d8d38e7713b2b459e4245c59a744f9b3f85488165eb52b74"},
- {file = "chroma_hnswlib-0.7.5-cp311-cp311-win_amd64.whl", hash = "sha256:109ee141a2f1241f7d7729443d96ea1450564c6c0923722a8139d70d738d1ad8"},
- {file = "chroma_hnswlib-0.7.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bf721938246f3c95340313cd7ac2564a38142e137d7fce9b098a0fd774bbc7ea"},
- {file = "chroma_hnswlib-0.7.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dda8ff4609b41dab4cca14d146e4dda0d365be6264877de5dd17d8dee314740"},
- {file = "chroma_hnswlib-0.7.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af3a84ceebea8f750467304fabbeb77d5fa2b75ccf55fd2e0494c2ad84527df5"},
- {file = "chroma_hnswlib-0.7.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e178ace2050e4e46c4f899d3cb15ad11a8dc6452eb9e365918c65f171bbf01e0"},
- {file = "chroma_hnswlib-0.7.5-cp312-cp312-win_amd64.whl", hash = "sha256:ddf183960bd5081e23310377153b5bdd3ed1540b74271f3ff8aa741de165e39d"},
- {file = "chroma_hnswlib-0.7.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:86ddae071363ba0f0c03325b6f022cfb14a969b9c989e188cfcaa81d8e0df177"},
- {file = "chroma_hnswlib-0.7.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dbff5658955df5983d32439b698b83044e8f612590716cd499e81376672edbd"},
- {file = "chroma_hnswlib-0.7.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b50209cced093f286a76c619cc2b404a5da29df6792f5852fdf514a24b8acb4c"},
- {file = "chroma_hnswlib-0.7.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4c1535106ad3c6a727b8b2cdb01e02abf8a603b83489cb50bde3a6811731dee2"},
- {file = "chroma_hnswlib-0.7.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:36e35f7773313480c27d0b63b26863408c9c6ab47f44d581783d4693bc52f54c"},
- {file = "chroma_hnswlib-0.7.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:932220cc42a30abc9fdfa230be7ebdd25c1c4967a5ea514119b65ba0844fe193"},
- {file = "chroma_hnswlib-0.7.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85d12ada7a55271fbd2839fb1fdae9e2f485db7dad871f23fa359ec68d16a5e0"},
- {file = "chroma_hnswlib-0.7.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1657dff26eedb24f55d0a3f371bbe02c15c1fef447c8e57fba3e99c42f7683b"},
- {file = "chroma_hnswlib-0.7.5-cp38-cp38-win_amd64.whl", hash = "sha256:2336483218ff5329743d8fcad3d7f870e220503f01ee9420c9469d857783015f"},
- {file = "chroma_hnswlib-0.7.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a4920e0b3045feb4bfe3cf891233024839d5e8384fee505b91a70ca540eef0ef"},
- {file = "chroma_hnswlib-0.7.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9f64134544c5c38d1075b314fcdc364bab6ba68f2e96e223b14559b7bef6f5ac"},
- {file = "chroma_hnswlib-0.7.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:561ae3cb8dbf28251f8980666b608d9800c5f17d6eec53a27c7bd9b4d1093b34"},
- {file = "chroma_hnswlib-0.7.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:645c6309761aadcf378eb88d7a9b598574a8033274d8931b9ca7079eb2206425"},
- {file = "chroma_hnswlib-0.7.5-cp39-cp39-win_amd64.whl", hash = "sha256:89a80901995c68c4e019edf1375f8b2aaeb9e42e12faab6f999fdfd91aee53ee"},
- {file = "chroma_hnswlib-0.7.5.tar.gz", hash = "sha256:45537d0142ad0fadf712092b8be55de211613d0322627a1a2de05b6e800c2954"},
+ {file = "chroma_hnswlib-0.7.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f35192fbbeadc8c0633f0a69c3d3e9f1a4eab3a46b65458bbcbcabdd9e895c36"},
+ {file = "chroma_hnswlib-0.7.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f007b608c96362b8f0c8b6b2ac94f67f83fcbabd857c378ae82007ec92f4d82"},
+ {file = "chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:456fd88fa0d14e6b385358515aef69fc89b3c2191706fd9aee62087b62aad09c"},
+ {file = "chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dfaae825499c2beaa3b75a12d7ec713b64226df72a5c4097203e3ed532680da"},
+ {file = "chroma_hnswlib-0.7.6-cp310-cp310-win_amd64.whl", hash = "sha256:2487201982241fb1581be26524145092c95902cb09fc2646ccfbc407de3328ec"},
+ {file = "chroma_hnswlib-0.7.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:81181d54a2b1e4727369486a631f977ffc53c5533d26e3d366dda243fb0998ca"},
+ {file = "chroma_hnswlib-0.7.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4b4ab4e11f1083dd0a11ee4f0e0b183ca9f0f2ed63ededba1935b13ce2b3606f"},
+ {file = "chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53db45cd9173d95b4b0bdccb4dbff4c54a42b51420599c32267f3abbeb795170"},
+ {file = "chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c093f07a010b499c00a15bc9376036ee4800d335360570b14f7fe92badcdcf9"}, + {file = "chroma_hnswlib-0.7.6-cp311-cp311-win_amd64.whl", hash = "sha256:0540b0ac96e47d0aa39e88ea4714358ae05d64bbe6bf33c52f316c664190a6a3"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e87e9b616c281bfbe748d01705817c71211613c3b063021f7ed5e47173556cb7"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ec5ca25bc7b66d2ecbf14502b5729cde25f70945d22f2aaf523c2d747ea68912"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:305ae491de9d5f3c51e8bd52d84fdf2545a4a2bc7af49765cda286b7bb30b1d4"}, + {file = "chroma_hnswlib-0.7.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:822ede968d25a2c88823ca078a58f92c9b5c4142e38c7c8b4c48178894a0a3c5"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2fe6ea949047beed19a94b33f41fe882a691e58b70c55fdaa90274ae78be046f"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feceff971e2a2728c9ddd862a9dd6eb9f638377ad98438876c9aeac96c9482f5"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb0633b60e00a2b92314d0bf5bbc0da3d3320be72c7e3f4a9b19f4609dc2b2ab"}, + {file = "chroma_hnswlib-0.7.6-cp37-cp37m-win_amd64.whl", hash = "sha256:a566abe32fab42291f766d667bdbfa234a7f457dcbd2ba19948b7a978c8ca624"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6be47853d9a58dedcfa90fc846af202b071f028bbafe1d8711bf64fe5a7f6111"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a7af35bdd39a88bffa49f9bb4bf4f9040b684514a024435a1ef5cdff980579d"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a53b1f1551f2b5ad94eb610207bde1bb476245fc5097a2bec2b476c653c58bde"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3085402958dbdc9ff5626ae58d696948e715aef88c86d1e3f9285a88f1afd3bc"}, + {file = "chroma_hnswlib-0.7.6-cp38-cp38-win_amd64.whl", hash = "sha256:77326f658a15adfb806a16543f7db7c45f06fd787d699e643642d6bde8ed49c4"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:93b056ab4e25adab861dfef21e1d2a2756b18be5bc9c292aa252fa12bb44e6ae"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fe91f018b30452c16c811fd6c8ede01f84e5a9f3c23e0758775e57f1c3778871"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6c0e627476f0f4d9e153420d36042dd9c6c3671cfd1fe511c0253e38c2a1039"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e9796a4536b7de6c6d76a792ba03e08f5aaa53e97e052709568e50b4d20c04f"}, + {file = "chroma_hnswlib-0.7.6-cp39-cp39-win_amd64.whl", hash = "sha256:d30e2db08e7ffdcc415bd072883a322de5995eb6ec28a8f8c054103bbd3ec1e0"}, + {file = "chroma_hnswlib-0.7.6.tar.gz", hash = "sha256:4dce282543039681160259d29fcde6151cc9106c6461e0485f57cdccd83059b7"}, ] [package.dependencies] @@ -738,19 +805,19 @@ numpy = "*" [[package]] name = "chromadb" -version = "0.5.4" +version = "0.5.5" description = "Chroma." 
optional = false
python-versions = ">=3.8"
files = [
- {file = "chromadb-0.5.4-py3-none-any.whl", hash = "sha256:60f468d44527bea8f88bd39ee26744d0d59c386dacbe544ff715e534691df0e8"},
- {file = "chromadb-0.5.4.tar.gz", hash = "sha256:02878f527befa8f80ab350023241dbb29313a168f93010208f071e3db9572bb4"},
+ {file = "chromadb-0.5.5-py3-none-any.whl", hash = "sha256:2a5a4b84cb0fc32b380e193be68cdbadf3d9f77dbbf141649be9886e42910ddd"},
+ {file = "chromadb-0.5.5.tar.gz", hash = "sha256:84f4bfee320fb4912cbeb4d738f01690891e9894f0ba81f39ee02867102a1c4d"},
]

[package.dependencies]
bcrypt = ">=4.0.1"
build = ">=1.0.3"
-chroma-hnswlib = "0.7.5"
+chroma-hnswlib = "0.7.6"
fastapi = ">=0.95.2"
grpcio = ">=1.58.0"
httpx = ">=0.27.0"
@@ -837,63 +904,83 @@ test = ["pytest"]
[[package]]
name = "coverage"
-version = "7.5.1"
+version = "7.6.1"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "coverage-7.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0884920835a033b78d1c73b6d3bbcda8161a900f38a488829a83982925f6c2e"},
- {file = "coverage-7.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:39afcd3d4339329c5f58de48a52f6e4e50f6578dd6099961cf22228feb25f38f"},
- {file = "coverage-7.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7b0ceee8147444347da6a66be737c9d78f3353b0681715b668b72e79203e4a"},
- {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a9ca3f2fae0088c3c71d743d85404cec8df9be818a005ea065495bedc33da35"},
- {file = "coverage-7.5.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd215c0c7d7aab005221608a3c2b46f58c0285a819565887ee0b718c052aa4e"},
- {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4bf0655ab60d754491004a5efd7f9cccefcc1081a74c9ef2da4735d6ee4a6223"},
- {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:61c4bf1ba021817de12b813338c9be9f0ad5b1e781b9b340a6d29fc13e7c1b5e"},
- {file = "coverage-7.5.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db66fc317a046556a96b453a58eced5024af4582a8dbdc0c23ca4dbc0d5b3146"},
- {file = "coverage-7.5.1-cp310-cp310-win32.whl", hash = "sha256:b016ea6b959d3b9556cb401c55a37547135a587db0115635a443b2ce8f1c7228"},
- {file = "coverage-7.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:df4e745a81c110e7446b1cc8131bf986157770fa405fe90e15e850aaf7619bc8"},
- {file = "coverage-7.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:796a79f63eca8814ca3317a1ea443645c9ff0d18b188de470ed7ccd45ae79428"},
- {file = "coverage-7.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fc84a37bfd98db31beae3c2748811a3fa72bf2007ff7902f68746d9757f3746"},
- {file = "coverage-7.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6175d1a0559986c6ee3f7fccfc4a90ecd12ba0a383dcc2da30c2b9918d67d8a3"},
- {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fc81d5878cd6274ce971e0a3a18a8803c3fe25457165314271cf78e3aae3aa2"},
- {file = "coverage-7.5.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:556cf1a7cbc8028cb60e1ff0be806be2eded2daf8129b8811c63e2b9a6c43bca"},
- {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9981706d300c18d8b220995ad22627647be11a4276721c10911e0e9fa44c83e8"},
- {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d7fed867ee50edf1a0b4a11e8e5d0895150e572af1cd6d315d557758bfa9c057"},
- {file = "coverage-7.5.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef48e2707fb320c8f139424a596f5b69955a85b178f15af261bab871873bb987"},
- {file = "coverage-7.5.1-cp311-cp311-win32.whl", hash = "sha256:9314d5678dcc665330df5b69c1e726a0e49b27df0461c08ca12674bcc19ef136"},
- {file = "coverage-7.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:5fa567e99765fe98f4e7d7394ce623e794d7cabb170f2ca2ac5a4174437e90dd"},
- {file = "coverage-7.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b6cf3764c030e5338e7f61f95bd21147963cf6aa16e09d2f74f1fa52013c1206"},
- {file = "coverage-7.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ec92012fefebee89a6b9c79bc39051a6cb3891d562b9270ab10ecfdadbc0c34"},
- {file = "coverage-7.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16db7f26000a07efcf6aea00316f6ac57e7d9a96501e990a36f40c965ec7a95d"},
- {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beccf7b8a10b09c4ae543582c1319c6df47d78fd732f854ac68d518ee1fb97fa"},
- {file = "coverage-7.5.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8748731ad392d736cc9ccac03c9845b13bb07d020a33423fa5b3a36521ac6e4e"},
- {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7352b9161b33fd0b643ccd1f21f3a3908daaddf414f1c6cb9d3a2fd618bf2572"},
- {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7a588d39e0925f6a2bff87154752481273cdb1736270642aeb3635cb9b4cad07"},
- {file = "coverage-7.5.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:68f962d9b72ce69ea8621f57551b2fa9c70509af757ee3b8105d4f51b92b41a7"},
- {file = "coverage-7.5.1-cp312-cp312-win32.whl", hash = "sha256:f152cbf5b88aaeb836127d920dd0f5e7edff5a66f10c079157306c4343d86c19"},
- {file = "coverage-7.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:5a5740d1fb60ddf268a3811bcd353de34eb56dc24e8f52a7f05ee513b2d4f596"},
- {file = "coverage-7.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e2213def81a50519d7cc56ed643c9e93e0247f5bbe0d1247d15fa520814a7cd7"},
- {file = "coverage-7.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5037f8fcc2a95b1f0e80585bd9d1ec31068a9bcb157d9750a172836e98bc7a90"},
- {file = "coverage-7.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3721c2c9e4c4953a41a26c14f4cef64330392a6d2d675c8b1db3b645e31f0e"},
- {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca498687ca46a62ae590253fba634a1fe9836bc56f626852fb2720f334c9e4e5"},
- {file = "coverage-7.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cdcbc320b14c3e5877ee79e649677cb7d89ef588852e9583e6b24c2e5072661"},
- {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:57e0204b5b745594e5bc14b9b50006da722827f0b8c776949f1135677e88d0b8"},
- {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fe7502616b67b234482c3ce276ff26f39ffe88adca2acf0261df4b8454668b4"},
- {file = "coverage-7.5.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9e78295f4144f9dacfed4f92935fbe1780021247c2fabf73a819b17f0ccfff8d"},
- {file = "coverage-7.5.1-cp38-cp38-win32.whl", hash = "sha256:1434e088b41594baa71188a17533083eabf5609e8e72f16ce8c186001e6b8c41"},
- {file = "coverage-7.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:0646599e9b139988b63704d704af8e8df7fa4cbc4a1f33df69d97f36cb0a38de"},
- {file = "coverage-7.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4cc37def103a2725bc672f84bd939a6fe4522310503207aae4d56351644682f1"},
- {file = "coverage-7.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fc0b4d8bfeabd25ea75e94632f5b6e047eef8adaed0c2161ada1e922e7f7cece"},
- {file = "coverage-7.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d0a0f5e06881ecedfe6f3dd2f56dcb057b6dbeb3327fd32d4b12854df36bf26"},
- {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9735317685ba6ec7e3754798c8871c2f49aa5e687cc794a0b1d284b2389d1bd5"},
- {file = "coverage-7.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d21918e9ef11edf36764b93101e2ae8cc82aa5efdc7c5a4e9c6c35a48496d601"},
- {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c3e757949f268364b96ca894b4c342b41dc6f8f8b66c37878aacef5930db61be"},
- {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:79afb6197e2f7f60c4824dd4b2d4c2ec5801ceb6ba9ce5d2c3080e5660d51a4f"},
- {file = "coverage-7.5.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d1d0d98d95dd18fe29dc66808e1accf59f037d5716f86a501fc0256455219668"},
- {file = "coverage-7.5.1-cp39-cp39-win32.whl", hash = "sha256:1cc0fe9b0b3a8364093c53b0b4c0c2dd4bb23acbec4c9240b5f284095ccf7981"},
- {file = "coverage-7.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:dde0070c40ea8bb3641e811c1cfbf18e265d024deff6de52c5950677a8fb1e0f"},
- {file = "coverage-7.5.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:6537e7c10cc47c595828b8a8be04c72144725c383c4702703ff4e42e44577312"},
- {file = "coverage-7.5.1.tar.gz", hash = "sha256:54de9ef3a9da981f7af93eafde4ede199e0846cd819eb27c88e2b712aae9708c"},
+ {file = "coverage-7.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16"},
+ {file = "coverage-7.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36"},
+ {file = "coverage-7.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02"},
+ {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc"},
+ {file = "coverage-7.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23"},
+ {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34"},
+ {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c"},
+ {file = "coverage-7.6.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959"},
+ {file = "coverage-7.6.1-cp310-cp310-win32.whl", hash = "sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232"},
+ {file = "coverage-7.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0"},
+ {file = "coverage-7.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93"},
+ {file = "coverage-7.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3"},
+ {file = "coverage-7.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff"},
+ {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d"},
+ {file = "coverage-7.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6"},
+ {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56"},
+ {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234"},
+ {file = "coverage-7.6.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133"},
+ {file = "coverage-7.6.1-cp311-cp311-win32.whl", hash = "sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c"},
+ {file = "coverage-7.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6"},
+ {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"},
+ {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"},
+ {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"},
+ {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"},
+ {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"},
+ {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"},
+ {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"},
+ {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"},
+ {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"},
+ {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"},
+ {file = "coverage-7.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106"},
+ {file = "coverage-7.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9"},
+ {file = "coverage-7.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c"},
+ {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a"},
+ {file = "coverage-7.6.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060"},
+ {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862"},
+ {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388"},
+ {file = "coverage-7.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155"},
+ {file = "coverage-7.6.1-cp313-cp313-win32.whl", hash = "sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a"},
+ {file = "coverage-7.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129"},
+ {file = "coverage-7.6.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e"},
+ {file = "coverage-7.6.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962"},
+ {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb"},
+ {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704"},
+ {file = "coverage-7.6.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b"},
+ {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f"},
+ {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223"},
+ {file = "coverage-7.6.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3"},
+ {file = "coverage-7.6.1-cp313-cp313t-win32.whl", hash = "sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f"},
+ {file = "coverage-7.6.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657"},
+ {file = "coverage-7.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0"},
+ {file = "coverage-7.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a"},
+ {file = "coverage-7.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b"},
+ {file = "coverage-7.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3"},
+ {file =
"coverage-7.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569"}, + {file = "coverage-7.6.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989"}, + {file = "coverage-7.6.1-cp38-cp38-win32.whl", hash = "sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7"}, + {file = "coverage-7.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255"}, + {file = "coverage-7.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a"}, + {file = "coverage-7.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb"}, + {file = "coverage-7.6.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36"}, + {file = "coverage-7.6.1-cp39-cp39-win32.whl", hash = "sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c"}, + {file = "coverage-7.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca"}, + {file = "coverage-7.6.1-pp38.pp39.pp310-none-any.whl", hash = "sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df"}, + {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, ] [package.dependencies] @@ -904,43 +991,38 @@ toml = ["tomli"] [[package]] name = "cryptography" -version = "42.0.8" +version = "43.0.0" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false
python-versions = ">=3.7"
files = [
- {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"},
- {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"},
- {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"},
- {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"},
- {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"},
- {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"},
- {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"},
- {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"},
- {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"},
- {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"},
- {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"},
- {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"},
- {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"},
- {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"},
- {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"},
- {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"},
- {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"},
- {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"},
- {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"},
- {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"},
- {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"},
- {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"},
- {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"},
- {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"},
- {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"},
- {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"},
- {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"},
- {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"},
- {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"},
- {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"},
- {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"},
- {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"},
+ {file = "cryptography-43.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74"},
+ {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895"},
+ {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22"},
+ {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47"},
+ {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf"},
+ {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55"},
+ {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431"},
+ {file = "cryptography-43.0.0-cp37-abi3-win32.whl", hash = "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc"},
+ {file = "cryptography-43.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778"},
+ {file = "cryptography-43.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66"},
+ {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5"},
+ {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e"},
+ {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5"},
+ {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f"},
+ {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0"},
+ {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b"},
+ {file = "cryptography-43.0.0-cp39-abi3-win32.whl", hash = "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf"},
+ {file = "cryptography-43.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709"},
+ {file = "cryptography-43.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70"},
+ {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66"},
+ {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f"},
+ {file = "cryptography-43.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f"},
+ {file = "cryptography-43.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2"},
+ {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947"},
+ {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069"},
+ {file = "cryptography-43.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1"},
+ {file = "cryptography-43.0.0.tar.gz", hash = "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e"},
]

[package.dependencies]
@@ -953,38 +1035,38 @@ nox = ["nox"]
pep8test = ["check-sdist", "click", "mypy", "ruff"]
sdist = ["build"]
ssh = ["bcrypt (>=3.1.5)"]
-test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
+test = ["certifi", "cryptography-vectors (==43.0.0)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
test-randomorder = ["pytest-randomly"]

[[package]]
name = "debugpy"
-version = "1.8.1"
+version = "1.8.5"
description = "An implementation of the Debug Adapter Protocol for Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "debugpy-1.8.1-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:3bda0f1e943d386cc7a0e71bfa59f4137909e2ed947fb3946c506e113000f741"},
- {file = "debugpy-1.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dda73bf69ea479c8577a0448f8c707691152e6c4de7f0c4dec5a4bc11dee516e"},
- {file = "debugpy-1.8.1-cp310-cp310-win32.whl", hash = "sha256:3a79c6f62adef994b2dbe9fc2cc9cc3864a23575b6e387339ab739873bea53d0"},
- {file = "debugpy-1.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:7eb7bd2b56ea3bedb009616d9e2f64aab8fc7000d481faec3cd26c98a964bcdd"},
- {file = "debugpy-1.8.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:016a9fcfc2c6b57f939673c874310d8581d51a0fe0858e7fac4e240c5eb743cb"},
- {file = "debugpy-1.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd97ed11a4c7f6d042d320ce03d83b20c3fb40da892f994bc041bbc415d7a099"},
- {file = "debugpy-1.8.1-cp311-cp311-win32.whl", hash = "sha256:0de56aba8249c28a300bdb0672a9b94785074eb82eb672db66c8144fff673146"},
- {file = "debugpy-1.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:1a9fe0829c2b854757b4fd0a338d93bc17249a3bf69ecf765c61d4c522bb92a8"},
- {file = "debugpy-1.8.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3ebb70ba1a6524d19fa7bb122f44b74170c447d5746a503e36adc244a20ac539"},
- {file = "debugpy-1.8.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2e658a9630f27534e63922ebf655a6ab60c370f4d2fc5c02a5b19baf4410ace"},
- {file = "debugpy-1.8.1-cp312-cp312-win32.whl", hash = "sha256:caad2846e21188797a1f17fc09c31b84c7c3c23baf2516fed5b40b378515bbf0"},
- {file = "debugpy-1.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:edcc9f58ec0fd121a25bc950d4578df47428d72e1a0d66c07403b04eb93bcf98"},
- {file = "debugpy-1.8.1-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:7a3afa222f6fd3d9dfecd52729bc2e12c93e22a7491405a0ecbf9e1d32d45b39"},
- {file = "debugpy-1.8.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d915a18f0597ef685e88bb35e5d7ab968964b7befefe1aaea1eb5b2640b586c7"},
- {file = "debugpy-1.8.1-cp38-cp38-win32.whl", hash = "sha256:92116039b5500633cc8d44ecc187abe2dfa9b90f7a82bbf81d079fcdd506bae9"},
- {file = "debugpy-1.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:e38beb7992b5afd9d5244e96ad5fa9135e94993b0c551ceebf3fe1a5d9beb234"},
- {file = "debugpy-1.8.1-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:bfb20cb57486c8e4793d41996652e5a6a885b4d9175dd369045dad59eaacea42"},
- {file = "debugpy-1.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efd3fdd3f67a7e576dd869c184c5dd71d9aaa36ded271939da352880c012e703"},
- {file = "debugpy-1.8.1-cp39-cp39-win32.whl", hash = "sha256:58911e8521ca0c785ac7a0539f1e77e0ce2df753f786188f382229278b4cdf23"},
- {file = "debugpy-1.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:6df9aa9599eb05ca179fb0b810282255202a66835c6efb1d112d21ecb830ddd3"},
- {file = "debugpy-1.8.1-py2.py3-none-any.whl", hash = "sha256:28acbe2241222b87e255260c76741e1fbf04fdc3b6d094fcf57b6c6f75ce1242"},
- {file = "debugpy-1.8.1.zip", hash = "sha256:f696d6be15be87aef621917585f9bb94b1dc9e8aced570db1b8a6fc14e8f9b42"},
+ {file = "debugpy-1.8.5-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:7e4d594367d6407a120b76bdaa03886e9eb652c05ba7f87e37418426ad2079f7"},
+ {file = "debugpy-1.8.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4413b7a3ede757dc33a273a17d685ea2b0c09dbd312cc03f5534a0fd4d40750a"},
+ {file = "debugpy-1.8.5-cp310-cp310-win32.whl", hash = "sha256:dd3811bd63632bb25eda6bd73bea8e0521794cda02be41fa3160eb26fc29e7ed"},
+ {file = "debugpy-1.8.5-cp310-cp310-win_amd64.whl", hash = "sha256:b78c1250441ce893cb5035dd6f5fc12db968cc07f91cc06996b2087f7cefdd8e"},
+ {file = "debugpy-1.8.5-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:606bccba19f7188b6ea9579c8a4f5a5364ecd0bf5a0659c8a5d0e10dcee3032a"},
+ {file = "debugpy-1.8.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db9fb642938a7a609a6c865c32ecd0d795d56c1aaa7a7a5722d77855d5e77f2b"},
+ {file = "debugpy-1.8.5-cp311-cp311-win32.whl", hash = "sha256:4fbb3b39ae1aa3e5ad578f37a48a7a303dad9a3d018d369bc9ec629c1cfa7408"},
+ {file = "debugpy-1.8.5-cp311-cp311-win_amd64.whl", hash = "sha256:345d6a0206e81eb68b1493ce2fbffd57c3088e2ce4b46592077a943d2b968ca3"},
+ {file = "debugpy-1.8.5-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:5b5c770977c8ec6c40c60d6f58cacc7f7fe5a45960363d6974ddb9b62dbee156"},
+ {file = "debugpy-1.8.5-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a65b00b7cdd2ee0c2cf4c7335fef31e15f1b7056c7fdbce9e90193e1a8c8cb"},
+ {file = "debugpy-1.8.5-cp312-cp312-win32.whl", hash = "sha256:c9f7c15ea1da18d2fcc2709e9f3d6de98b69a5b0fff1807fb80bc55f906691f7"},
+ {file = "debugpy-1.8.5-cp312-cp312-win_amd64.whl", hash = "sha256:28ced650c974aaf179231668a293ecd5c63c0a671ae6d56b8795ecc5d2f48d3c"},
+ {file = "debugpy-1.8.5-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:3df6692351172a42af7558daa5019651f898fc67450bf091335aa8a18fbf6f3a"},
+ {file = "debugpy-1.8.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cd04a73eb2769eb0bfe43f5bfde1215c5923d6924b9b90f94d15f207a402226"},
+ {file = "debugpy-1.8.5-cp38-cp38-win32.whl", hash = "sha256:8f913ee8e9fcf9d38a751f56e6de12a297ae7832749d35de26d960f14280750a"},
+ {file = "debugpy-1.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:a697beca97dad3780b89a7fb525d5e79f33821a8bc0c06faf1f1289e549743cf"},
+ {file = "debugpy-1.8.5-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:0a1029a2869d01cb777216af8c53cda0476875ef02a2b6ff8b2f2c9a4b04176c"},
+ {file = "debugpy-1.8.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84c276489e141ed0b93b0af648eef891546143d6a48f610945416453a8ad406"},
+ {file = "debugpy-1.8.5-cp39-cp39-win32.whl", hash = "sha256:ad84b7cde7fd96cf6eea34ff6c4a1b7887e0fe2ea46e099e53234856f9d99a34"},
+ {file = "debugpy-1.8.5-cp39-cp39-win_amd64.whl", hash = "sha256:7b0fe36ed9d26cb6836b0a51453653f8f2e347ba7348f2bbfe76bfeb670bfb1c"},
+ {file = "debugpy-1.8.5-py2.py3-none-any.whl", hash = "sha256:55919dce65b471eff25901acf82d328bbd5b833526b6c1364bd5133754777a44"},
+ {file = "debugpy-1.8.5.zip", hash = "sha256:b2112cfeb34b4507399d298fe7023a16656fc553ed5246536060ca7bd0e668d0"},
]

[[package]]
@@ -1069,20 +1151,16 @@ trio = ["trio (>=0.23)"]
wmi = ["wmi (>=1.5.1)"]

[[package]]
-name = "email-validator"
-version = "2.1.1"
-description = "A robust email address syntax and deliverability validation library."
+name = "docstring-parser" +version = "0.16" +description = "Parse Python docstrings in reST, Google and Numpydoc format" optional = false -python-versions = ">=3.8" +python-versions = ">=3.6,<4.0" files = [ - {file = "email_validator-2.1.1-py3-none-any.whl", hash = "sha256:97d882d174e2a65732fb43bfce81a3a834cbc1bde8bf419e30ef5ea976370a05"}, - {file = "email_validator-2.1.1.tar.gz", hash = "sha256:200a70680ba08904be6d1eef729205cc0d687634399a5924d842533efb824b84"}, + {file = "docstring_parser-0.16-py3-none-any.whl", hash = "sha256:bf0a1387354d3691d102edef7ec124f219ef639982d096e26e3b60aeffa90637"}, + {file = "docstring_parser-0.16.tar.gz", hash = "sha256:538beabd0af1e2db0146b6bd3caa526c35a34d61af9fd2887f3a8a27a739aa6e"}, ] -[package.dependencies] -dnspython = ">=2.0.0" -idna = ">=2.0.0" - [[package]] name = "environs" version = "9.5.0" @@ -1106,18 +1184,32 @@ tests = ["dj-database-url", "dj-email-url", "django-cache-url", "pytest"] [[package]] name = "exceptiongroup" -version = "1.2.1" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, - {file = "exceptiongroup-1.2.1.tar.gz", hash = "sha256:a4785e48b045528f5bfe627b6ad554ff32def154f42372786903b7abcfe1aa16"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "execnet" +version = "2.1.1" +description = "execnet: rapid multi-Python deployment" +optional = false +python-versions = ">=3.8" +files = [ + {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"}, + {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"}, +] + +[package.extras] +testing = ["hatch", "pre-commit", "pytest", "tox"] + [[package]] name = "executing" version = "2.0.1" @@ -1134,57 +1226,33 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth [[package]] name = "fastapi" -version = "0.111.0" +version = "0.112.0" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" files = [ - {file = "fastapi-0.111.0-py3-none-any.whl", hash = "sha256:97ecbf994be0bcbdadedf88c3150252bed7b2087075ac99735403b1b76cc8fc0"}, - {file = "fastapi-0.111.0.tar.gz", hash = "sha256:b9db9dd147c91cb8b769f7183535773d8741dd46f9dc6676cd82eab510228cd7"}, + {file = "fastapi-0.112.0-py3-none-any.whl", hash = "sha256:3487ded9778006a45834b8c816ec4a48d522e2631ca9e75ec5a774f1b052f821"}, + {file = "fastapi-0.112.0.tar.gz", hash = "sha256:d262bc56b7d101d1f4e8fc0ad2ac75bb9935fec504d2b7117686cec50710cf05"}, ] [package.dependencies] -email_validator = ">=2.0.0" -fastapi-cli = ">=0.0.2" -httpx = ">=0.23.0" -jinja2 = ">=2.11.2" -orjson = ">=3.2.1" pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" -python-multipart = ">=0.0.7" starlette = ">=0.37.2,<0.38.0" typing-extensions = ">=4.8.0" -ujson = ">=4.0.1,<4.0.2 || >4.0.2,<4.1.0 || >4.1.0,<4.2.0 || >4.2.0,<4.3.0 || >4.3.0,<5.0.0 || >5.0.0,<5.1.0 || >5.1.0" -uvicorn = {version = ">=0.12.0", extras = 
["standard"]} [package.extras] -all = ["email_validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] - -[[package]] -name = "fastapi-cli" -version = "0.0.4" -description = "Run and manage FastAPI apps from the command line with FastAPI CLI. 🚀" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fastapi_cli-0.0.4-py3-none-any.whl", hash = "sha256:a2552f3a7ae64058cdbb530be6fa6dbfc975dc165e4fa66d224c3d396e25e809"}, - {file = "fastapi_cli-0.0.4.tar.gz", hash = "sha256:e2e9ffaffc1f7767f488d6da34b6f5a377751c996f397902eb6abb99a67bde32"}, -] - -[package.dependencies] -typer = ">=0.12.3" - -[package.extras] -standard = ["fastapi", "uvicorn[standard] (>=0.15.0)"] +all = ["email_validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +standard = ["email_validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=2.11.2)", "python-multipart (>=0.0.7)", "uvicorn[standard] (>=0.12.0)"] [[package]] name = "fastjsonschema" -version = "2.19.1" +version = "2.20.0" description = "Fastest Python implementation of JSON schema" optional = false python-versions = "*" files = [ - {file = "fastjsonschema-2.19.1-py3-none-any.whl", hash = "sha256:3672b47bc94178c9f23dbb654bf47440155d4db9df5f7bc47643315f9c405cd0"}, - {file = "fastjsonschema-2.19.1.tar.gz", hash = "sha256:e3126a94bdc4623d3de4485f8d468a12f02a67921315ddc87836d6e456dc789d"}, + {file = "fastjsonschema-2.20.0-py3-none-any.whl", hash = "sha256:5875f0b0fa7a0043a91e93a9b8f793bcbbba9691e7fd83dca95c28ba26d21f0a"}, + {file = "fastjsonschema-2.20.0.tar.gz", hash = "sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23"}, ] [package.extras] @@ -1192,18 +1260,18 @@ devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benc [[package]] name = "filelock" -version = "3.14.0" +version = "3.15.4" description = "A platform independent file lock." 
optional = false
python-versions = ">=3.8"
files = [
- {file = "filelock-3.14.0-py3-none-any.whl", hash = "sha256:43339835842f110ca7ae60f1e1c160714c5a6afd15a2873419ab185334975c0f"},
- {file = "filelock-3.14.0.tar.gz", hash = "sha256:6ea72da3be9b8c82afd3edcf99f2fffbb5076335a5ae4d03248bb5b6c3eae78a"},
+ {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"},
+ {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"},
]

[package.extras]
docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
-testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"]
typing = ["typing-extensions (>=4.8)"]

[[package]]
@@ -1305,13 +1373,13 @@ files = [

[[package]]
name = "fsspec"
-version = "2024.5.0"
+version = "2024.6.1"
description = "File-system specification"
optional = false
python-versions = ">=3.8"
files = [
- {file = "fsspec-2024.5.0-py3-none-any.whl", hash = "sha256:e0fdbc446d67e182f49a70b82cf7889028a63588fde6b222521f10937b2b670c"},
- {file = "fsspec-2024.5.0.tar.gz", hash = "sha256:1d021b0b0f933e3b3029ed808eb400c08ba101ca2de4b3483fbc9ca23fcee94a"},
+ {file = "fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"},
+ {file = "fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49"},
]

[package.extras]
@@ -1320,6 +1388,7 @@ adl = ["adlfs"]
arrow = ["pyarrow (>=1)"]
dask = ["dask", "distributed"]
dev = ["pre-commit", "ruff"]
+doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"]
dropbox = ["dropbox", "dropboxdrivefs", "requests"]
full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"]
fuse = ["fusepy"]
@@ -1391,13 +1460,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]

[[package]]
name = "google-api-python-client"
-version = "2.137.0"
+version = "2.140.0"
description = "Google API Client Library for Python"
optional = false
python-versions = ">=3.7"
files = [
- {file = "google_api_python_client-2.137.0-py2.py3-none-any.whl", hash = "sha256:a8b5c5724885e5be9f5368739aa0ccf416627da4ebd914b410a090c18f84d692"},
- {file = "google_api_python_client-2.137.0.tar.gz", hash = "sha256:e739cb74aac8258b1886cb853b0722d47c81fe07ad649d7f2206f06530513c04"},
+ {file = "google_api_python_client-2.140.0-py2.py3-none-any.whl", hash = "sha256:aeb4bb99e9fdd241473da5ff35464a0658fea0db76fe89c0f8c77ecfc3813404"},
+ {file = "google_api_python_client-2.140.0.tar.gz", hash = "sha256:0bb973adccbe66a3d0a70abe4e49b3f2f004d849416bfec38d22b75649d389d8"},
]

[package.dependencies]
@@ -1409,13 +1478,13 @@ uritemplate = ">=3.0.1,<5"

[[package]]
name = "google-auth"
-version = "2.29.0"
+version = "2.33.0"
description = "Google Authentication Library"
optional = false
python-versions = ">=3.7"
files = [
- {file = "google-auth-2.29.0.tar.gz", hash = "sha256:672dff332d073227550ffc7457868ac4218d6c500b155fe6cc17d2b13602c360"},
- {file = "google_auth-2.29.0-py2.py3-none-any.whl", hash = "sha256:d452ad095688cd52bae0ad6fafe027f6a6d6f560e810fec20914e17a09526415"},
+ {file = "google_auth-2.33.0-py2.py3-none-any.whl", hash = "sha256:8eff47d0d4a34ab6265c50a106a3362de6a9975bb08998700e389f857e4d39df"},
+ {file = "google_auth-2.33.0.tar.gz", hash = "sha256:d6a52342160d7290e334b4d47ba390767e4438ad0d45b7630774533e82655b95"},
]

[package.dependencies]
@@ -1445,6 +1514,224 @@ files = [
google-auth = "*"
httplib2 = ">=0.19.0"

+[[package]]
+name = "google-cloud-aiplatform"
+version = "1.62.0"
+description = "Vertex AI API client library"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "google-cloud-aiplatform-1.62.0.tar.gz", hash = "sha256:e15d5b2a99e30d4a16f4c51cfb8129962e6da41a9027d2ea696abe0e2f006fe8"},
+ {file = "google_cloud_aiplatform-1.62.0-py2.py3-none-any.whl", hash = "sha256:d7738e0fd4494a54ae08a51755a2143d58937cba2db826189771f45566c9ee3c"},
+]
+
+[package.dependencies]
+docstring-parser = "<1"
+google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.8.dev0,<3.0.0dev", extras = ["grpc"]}
+google-auth = ">=2.14.1,<3.0.0dev"
+google-cloud-bigquery = ">=1.15.0,<3.20.0 || >3.20.0,<4.0.0dev"
+google-cloud-resource-manager = ">=1.3.3,<3.0.0dev"
+google-cloud-storage = ">=1.32.0,<3.0.0dev"
+packaging = ">=14.3"
+proto-plus = ">=1.22.3,<2.0.0dev"
+protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev"
+pydantic = "<3"
+shapely = "<3.0.0dev"
+
+[package.extras]
+autologging = ["mlflow (>=1.27.0,<=2.1.1)"]
+cloud-profiler = ["tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "werkzeug (>=2.0.0,<2.1.0dev)"]
+datasets = ["pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)"]
+endpoint = ["requests (>=2.28.1)"]
+full = ["cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "tqdm (>=4.23.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)"]
+langchain = ["langchain (>=0.1.16,<0.3)", "langchain-core (<0.3)", "langchain-google-vertexai (<2)", "openinference-instrumentation-langchain (>=0.1.19,<0.2)", "orjson (<=3.10.6)", "tenacity (<=8.3)"]
+langchain-testing = ["absl-py", "cloudpickle (>=3.0,<4.0)", "google-cloud-trace (<2)", "langchain (>=0.1.16,<0.3)", "langchain-core (<0.3)", "langchain-google-vertexai (<2)", "openinference-instrumentation-langchain (>=0.1.19,<0.2)", "opentelemetry-exporter-gcp-trace (<2)", "opentelemetry-sdk (<2)", "orjson (<=3.10.6)", "pydantic (>=2.6.3,<3)", "pytest-xdist", "tenacity (<=8.3)"]
+lit = ["explainable-ai-sdk (>=1.0.0)", "lit-nlp (==0.4.0)", "pandas (>=1.0.0)", "tensorflow (>=2.3.0,<3.0.0dev)"]
+metadata = ["numpy (>=1.15.0)", "pandas (>=1.0.0)"]
+pipelines = ["pyyaml (>=5.3.1,<7)"]
+prediction = ["docker (>=5.0.3)", "fastapi (>=0.71.0,<=0.109.1)", "httpx (>=0.23.0,<0.25.0)", "starlette (>=0.17.1)", "uvicorn[standard] (>=0.16.0)"]
+preview = ["cloudpickle (<3.0)", "google-cloud-logging (<4.0)"]
+private-endpoints = ["requests (>=2.28.1)", "urllib3 (>=1.21.1,<1.27)"]
+rapid-evaluation = ["pandas (>=1.0.0,<2.2.0)", "tqdm (>=4.23.0)"]
+ray = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "setuptools (<70.0.0)"]
+ray-testing = ["google-cloud-bigquery", "google-cloud-bigquery-storage", "immutabledict", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pytest-xdist", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "ray[train] (==2.9.3)", "scikit-learn", "setuptools (<70.0.0)", "tensorflow", "torch (>=2.0.0,<2.1.0)", "xgboost", "xgboost-ray"]
+reasoningengine = ["cloudpickle (>=3.0,<4.0)", "google-cloud-trace (<2)", "opentelemetry-exporter-gcp-trace (<2)", "opentelemetry-sdk (<2)", "pydantic (>=2.6.3,<3)"]
+tensorboard = ["tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "werkzeug (>=2.0.0,<2.1.0dev)"]
+testing = ["bigframes", "cloudpickle (<3.0)", "docker (>=5.0.3)", "explainable-ai-sdk (>=1.0.0)", "fastapi (>=0.71.0,<=0.109.1)", "google-api-core (>=2.11,<3.0.0)", "google-cloud-bigquery", "google-cloud-bigquery-storage", "google-cloud-logging (<4.0)", "google-vizier (>=0.1.6)", "grpcio-testing", "httpx (>=0.23.0,<0.25.0)", "immutabledict", "ipython", "kfp (>=2.6.0,<3.0.0)", "lit-nlp (==0.4.0)", "mlflow (>=1.27.0,<=2.1.1)", "nltk", "numpy (>=1.15.0)", "pandas (>=1.0.0)", "pandas (>=1.0.0,<2.2.0)", "pyarrow (>=10.0.1)", "pyarrow (>=14.0.0)", "pyarrow (>=3.0.0,<8.0dev)", "pyarrow (>=6.0.1)", "pydantic (<2)", "pyfakefs", "pytest-asyncio", "pytest-xdist", "pyyaml (>=5.3.1,<7)", "ray[default] (>=2.4,<2.5.dev0 || >2.9.0,!=2.9.1,!=2.9.2,<=2.9.3)", "ray[default] (>=2.5,<=2.9.3)", "requests (>=2.28.1)", "requests-toolbelt (<1.0.0)", "scikit-learn", "sentencepiece (>=0.2.0)", "setuptools (<70.0.0)", "starlette (>=0.17.1)", "tensorboard-plugin-profile (>=2.4.0,<3.0.0dev)", "tensorflow (==2.13.0)", "tensorflow (==2.16.1)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.3.0,<3.0.0dev)", "tensorflow (>=2.4.0,<3.0.0dev)", "torch (>=2.0.0,<2.1.0)", "torch (>=2.2.0)", "tqdm (>=4.23.0)", "urllib3 (>=1.21.1,<1.27)", "uvicorn[standard] (>=0.16.0)", "werkzeug (>=2.0.0,<2.1.0dev)", "xgboost"]
+tokenization = ["sentencepiece (>=0.2.0)"]
+vizier = ["google-vizier (>=0.1.6)"]
+xai = ["tensorflow (>=2.3.0,<3.0.0dev)"]
+
+[[package]]
+name = "google-cloud-bigquery"
+version = "3.25.0"
+description = "Google BigQuery API client library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "google-cloud-bigquery-3.25.0.tar.gz", hash = "sha256:5b2aff3205a854481117436836ae1403f11f2594e6810a98886afd57eda28509"},
+ {file = "google_cloud_bigquery-3.25.0-py2.py3-none-any.whl", hash = "sha256:7f0c371bc74d2a7fb74dacbc00ac0f90c8c2bec2289b51dd6685a275873b1ce9"},
+]
+
+[package.dependencies]
+google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]}
+google-auth = ">=2.14.1,<3.0.0dev"
+google-cloud-core = ">=1.6.0,<3.0.0dev"
+google-resumable-media = ">=0.6.0,<3.0dev"
+packaging = ">=20.0.0"
+python-dateutil = ">=2.7.2,<3.0dev"
+requests = ">=2.21.0,<3.0.0dev"
+
+[package.extras]
+all = ["Shapely (>=1.8.4,<3.0.0dev)", "db-dtypes (>=0.3.0,<2.0.0dev)", "geopandas (>=0.9.0,<1.0dev)", "google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "importlib-metadata (>=1.0.0)", "ipykernel (>=6.0.0)", "ipython (>=7.23.1,!=8.1.0)", "ipywidgets (>=7.7.0)", "opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)", "pandas (>=1.1.0)", "proto-plus (>=1.15.0,<2.0.0dev)", "protobuf (>=3.19.5,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev)", "pyarrow (>=3.0.0)", "tqdm (>=4.7.4,<5.0.0dev)"]
+bigquery-v2 = ["proto-plus (>=1.15.0,<2.0.0dev)", "protobuf (>=3.19.5,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev)"]
+bqstorage = ["google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "pyarrow (>=3.0.0)"]
+geopandas = ["Shapely (>=1.8.4,<3.0.0dev)", "geopandas (>=0.9.0,<1.0dev)"]
+ipython = ["ipykernel (>=6.0.0)", "ipython (>=7.23.1,!=8.1.0)"]
+ipywidgets = ["ipykernel (>=6.0.0)", "ipywidgets (>=7.7.0)"]
+opentelemetry = ["opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)"]
+pandas = ["db-dtypes (>=0.3.0,<2.0.0dev)", "importlib-metadata (>=1.0.0)", "pandas (>=1.1.0)", "pyarrow (>=3.0.0)"]
+tqdm = ["tqdm (>=4.7.4,<5.0.0dev)"]
+
+[[package]]
+name = "google-cloud-core"
+version = "2.4.1"
+description = "Google Cloud API client core library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "google-cloud-core-2.4.1.tar.gz", hash = "sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073"},
+ {file = "google_cloud_core-2.4.1-py2.py3-none-any.whl", hash = "sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61"},
+]
+
+[package.dependencies]
+google-api-core = ">=1.31.6,<2.0.dev0 || >2.3.0,<3.0.0dev"
+google-auth = ">=1.25.0,<3.0dev"
+
+[package.extras]
+grpc = ["grpcio (>=1.38.0,<2.0dev)", "grpcio-status (>=1.38.0,<2.0.dev0)"]
+
+[[package]]
+name = "google-cloud-resource-manager"
+version = "1.12.5"
+description = "Google Cloud Resource Manager API client library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "google_cloud_resource_manager-1.12.5-py2.py3-none-any.whl", hash = "sha256:2708a718b45c79464b7b21559c701b5c92e6b0b1ab2146d0a256277a623dc175"},
+ {file = "google_cloud_resource_manager-1.12.5.tar.gz", hash = "sha256:b7af4254401ed4efa3aba3a929cb3ddb803fa6baf91a78485e45583597de5891"},
+]
+
+[package.dependencies]
+google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]}
+google-auth = ">=2.14.1,<2.24.0 || >2.24.0,<2.25.0 || >2.25.0,<3.0.0dev"
+grpc-google-iam-v1 = ">=0.12.4,<1.0.0dev"
+proto-plus = ">=1.22.3,<2.0.0dev"
+protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev"
+
+[[package]]
+name = "google-cloud-storage"
+version = "2.18.2"
+description = "Google Cloud Storage API client library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "google_cloud_storage-2.18.2-py2.py3-none-any.whl", hash = "sha256:97a4d45c368b7d401ed48c4fdfe86e1e1cb96401c9e199e419d289e2c0370166"},
+ {file = "google_cloud_storage-2.18.2.tar.gz", hash = "sha256:aaf7acd70cdad9f274d29332673fcab98708d0e1f4dceb5a5356aaef06af4d99"},
+]
+
+[package.dependencies]
+google-api-core = ">=2.15.0,<3.0.0dev"
+google-auth = ">=2.26.1,<3.0dev"
+google-cloud-core = ">=2.3.0,<3.0dev"
+google-crc32c = ">=1.0,<2.0dev"
+google-resumable-media = ">=2.7.2"
+requests = ">=2.18.0,<3.0.0dev"
+
+[package.extras]
+protobuf = ["protobuf (<6.0.0dev)"]
+tracing = ["opentelemetry-api (>=1.1.0)"]
+
+[[package]]
+name = "google-crc32c"
+version = "1.5.0"
+description = "A python wrapper of the C library 'Google CRC32C'"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "google-crc32c-1.5.0.tar.gz", hash = "sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7"},
+ {file = "google_crc32c-1.5.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13"},
+ {file = "google_crc32c-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346"},
+ {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65"},
+ {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b"},
+ {file = "google_crc32c-1.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02"},
+ {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4"},
+ {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e"},
+ {file = "google_crc32c-1.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c"},
+ {file = "google_crc32c-1.5.0-cp310-cp310-win32.whl", hash = "sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee"},
+ {file = "google_crc32c-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289"},
+ {file = "google_crc32c-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273"},
+ {file = "google_crc32c-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298"},
+ {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57"},
+ {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438"},
+ {file = "google_crc32c-1.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906"},
+ {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183"},
+ {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd"},
+ {file = "google_crc32c-1.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c"},
+ {file = "google_crc32c-1.5.0-cp311-cp311-win32.whl", hash = "sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709"},
+ {file = "google_crc32c-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968"},
+ {file = "google_crc32c-1.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae"},
+ {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556"},
+ {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f"},
+ {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876"},
+ {file = "google_crc32c-1.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc"},
+ {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c"},
+ {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a"},
+ {file = "google_crc32c-1.5.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e"},
+ {file = "google_crc32c-1.5.0-cp37-cp37m-win32.whl", hash = "sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94"},
+ {file = "google_crc32c-1.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-win32.whl", hash = "sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4"},
+ {file = "google_crc32c-1.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-win32.whl", hash = "sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c"},
+ {file = "google_crc32c-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541"},
+ {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325"},
+ {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd"},
+ {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091"},
+ {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178"},
+ {file = "google_crc32c-1.5.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2"},
+ {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d"},
+ {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2"},
+ {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5"},
+ {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462"},
+ {file = "google_crc32c-1.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314"},
+ {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728"},
+ {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88"},
+ {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb"},
+ {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31"},
+ {file = "google_crc32c-1.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93"},
+]
+
+[package.extras]
+testing = ["pytest"]
+
[[package]]
name = "google-generativeai"
version = "0.7.2"
@@ -1468,23 +1755,58 @@ typing-extensions = "*"

[package.extras]
dev = ["Pillow", "absl-py", "black", "ipython", "nose2", "pandas", "pytype", "pyyaml"]

+[[package]]
+name = "google-resumable-media"
+version = "2.7.2"
+description = "Utilities for Google Media Downloads and Resumable Uploads"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "google_resumable_media-2.7.2-py2.py3-none-any.whl", hash = "sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa"},
+ {file = "google_resumable_media-2.7.2.tar.gz", hash = "sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0"},
+]
+
+[package.dependencies]
+google-crc32c = ">=1.0,<2.0dev"
+
+[package.extras]
+aiohttp = ["aiohttp (>=3.6.2,<4.0.0dev)", "google-auth (>=1.22.0,<2.0dev)"]
+requests = ["requests (>=2.18.0,<3.0.0dev)"]
+
[[package]]
name = "googleapis-common-protos"
-version = "1.63.0"
+version = "1.63.2"
description = "Common protobufs used in Google APIs"
optional = false
python-versions = ">=3.7"
files = [
- {file = "googleapis-common-protos-1.63.0.tar.gz", hash = "sha256:17ad01b11d5f1d0171c06d3ba5c04c54474e883b66b949722b4938ee2694ef4e"},
- {file = "googleapis_common_protos-1.63.0-py2.py3-none-any.whl", hash = "sha256:ae45f75702f7c08b541f750854a678bd8f534a1a6bace6afe975f1d0a82d6632"},
+ {file = "googleapis-common-protos-1.63.2.tar.gz", hash = "sha256:27c5abdffc4911f28101e635de1533fb4cfd2c37fbaa9174587c799fac90aa87"},
+ {file = "googleapis_common_protos-1.63.2-py2.py3-none-any.whl", hash = "sha256:27a2499c7e8aff199665b22741997e485eccc8645aa9176c7c988e6fae507945"},
]

[package.dependencies]
-protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0"
+grpcio = {version = ">=1.44.0,<2.0.0.dev0", optional = true, markers = "extra == \"grpc\""}
+protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0"

[package.extras]
grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"]

+[[package]]
+name = "grpc-google-iam-v1"
+version = "0.13.1"
+description = "IAM API client library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "grpc-google-iam-v1-0.13.1.tar.gz", hash = "sha256:3ff4b2fd9d990965e410965253c0da6f66205d5a8291c4c31c6ebecca18a9001"},
+ {file = "grpc_google_iam_v1-0.13.1-py2.py3-none-any.whl", hash = "sha256:c3e86151a981811f30d5e7330f271cee53e73bb87755e88cc3b6f0c7b5fe374e"},
+]
+
+[package.dependencies]
+googleapis-common-protos = {version = ">=1.56.0,<2.0.0dev", extras = ["grpc"]}
+grpcio = ">=1.44.0,<2.0.0dev"
+protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0dev"
+
[[package]] name = "grpcio" version = "1.63.0" @@ -1545,100 +1867,94 @@ protobuf = ["grpcio-tools (>=1.63.0)"] [[package]] name = "grpcio-health-checking" -version = "1.60.0" +version = "1.62.3" description = "Standard Health Checking Service for gRPC" optional = false python-versions = ">=3.6" files = [ - {file = "grpcio-health-checking-1.60.0.tar.gz", hash = "sha256:478b5300778120fed9f6d134d72b157a59f9c06689789218cbff47fafca2f119"}, - {file = "grpcio_health_checking-1.60.0-py3-none-any.whl", hash = "sha256:13caf28bc93795bd6bdb580b21832ebdd1aa3f5b648ea47ed17362d85bed96d3"}, + {file = "grpcio-health-checking-1.62.3.tar.gz", hash = "sha256:5074ba0ce8f0dcfe328408ec5c7551b2a835720ffd9b69dade7fa3e0dc1c7a93"}, + {file = "grpcio_health_checking-1.62.3-py3-none-any.whl", hash = "sha256:f29da7dd144d73b4465fe48f011a91453e9ff6c8af0d449254cf80021cab3e0d"}, ] [package.dependencies] -grpcio = ">=1.60.0" +grpcio = ">=1.62.3" protobuf = ">=4.21.6" [[package]] name = "grpcio-status" -version = "1.62.2" +version = "1.62.3" description = "Status proto mapping for gRPC" optional = false python-versions = ">=3.6" files = [ - {file = "grpcio-status-1.62.2.tar.gz", hash = "sha256:62e1bfcb02025a1cd73732a2d33672d3e9d0df4d21c12c51e0bbcaf09bab742a"}, - {file = "grpcio_status-1.62.2-py3-none-any.whl", hash = "sha256:206ddf0eb36bc99b033f03b2c8e95d319f0044defae9b41ae21408e7e0cda48f"}, + {file = "grpcio-status-1.62.3.tar.gz", hash = "sha256:289bdd7b2459794a12cf95dc0cb727bd4a1742c37bd823f760236c937e53a485"}, + {file = "grpcio_status-1.62.3-py3-none-any.whl", hash = "sha256:f9049b762ba8de6b1086789d8315846e094edac2c50beaf462338b301a8fd4b8"}, ] [package.dependencies] googleapis-common-protos = ">=1.5.5" -grpcio = ">=1.62.2" +grpcio = ">=1.62.3" protobuf = ">=4.21.6" [[package]] name = "grpcio-tools" -version = "1.60.0" +version = "1.62.3" description = "Protobuf code generator for gRPC" optional = false python-versions = ">=3.7" files = [ - {file = "grpcio-tools-1.60.0.tar.gz", hash = "sha256:ed30499340228d733ff69fcf4a66590ed7921f94eb5a2bf692258b1280b9dac7"}, - {file = "grpcio_tools-1.60.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:6807b7a3f3e6e594566100bd7fe04a2c42ce6d5792652677f1aaf5aa5adaef3d"}, - {file = "grpcio_tools-1.60.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:857c5351e9dc33a019700e171163f94fcc7e3ae0f6d2b026b10fda1e3c008ef1"}, - {file = "grpcio_tools-1.60.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:ec0e401e9a43d927d216d5169b03c61163fb52b665c5af2fed851357b15aef88"}, - {file = "grpcio_tools-1.60.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e68dc4474f30cad11a965f0eb5d37720a032b4720afa0ec19dbcea2de73b5aae"}, - {file = "grpcio_tools-1.60.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbf0ed772d2ae7e8e5d7281fcc00123923ab130b94f7a843eee9af405918f924"}, - {file = "grpcio_tools-1.60.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c771b19dce2bfe06899247168c077d7ab4e273f6655d8174834f9a6034415096"}, - {file = "grpcio_tools-1.60.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e5614cf0960456d21d8a0f4902e3e5e3bcacc4e400bf22f196e5dd8aabb978b7"}, - {file = "grpcio_tools-1.60.0-cp310-cp310-win32.whl", hash = "sha256:87cf439178f3eb45c1a889b2e4a17cbb4c450230d92c18d9c57e11271e239c55"}, - {file = "grpcio_tools-1.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:687f576d7ff6ce483bc9a196d1ceac45144e8733b953620a026daed8e450bc38"}, - {file = "grpcio_tools-1.60.0-cp311-cp311-linux_armv7l.whl", hash = 
"sha256:2a8a758701f3ac07ed85f5a4284c6a9ddefcab7913a8e552497f919349e72438"}, - {file = "grpcio_tools-1.60.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:7c1cde49631732356cb916ee1710507967f19913565ed5f9991e6c9cb37e3887"}, - {file = "grpcio_tools-1.60.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:d941749bd8dc3f8be58fe37183143412a27bec3df8482d5abd6b4ec3f1ac2924"}, - {file = "grpcio_tools-1.60.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ee35234f1da8fba7ddbc544856ff588243f1128ea778d7a1da3039be829a134"}, - {file = "grpcio_tools-1.60.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8f7a5094adb49e85db13ea3df5d99a976c2bdfd83b0ba26af20ebb742ac6786"}, - {file = "grpcio_tools-1.60.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:24c4ead4a03037beaeb8ef2c90d13d70101e35c9fae057337ed1a9144ef10b53"}, - {file = "grpcio_tools-1.60.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:811abb9c4fb6679e0058dfa123fb065d97b158b71959c0e048e7972bbb82ba0f"}, - {file = "grpcio_tools-1.60.0-cp311-cp311-win32.whl", hash = "sha256:bd2a17b0193fbe4793c215d63ce1e01ae00a8183d81d7c04e77e1dfafc4b2b8a"}, - {file = "grpcio_tools-1.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:b22b1299b666eebd5752ba7719da536075eae3053abcf2898b65f763c314d9da"}, - {file = "grpcio_tools-1.60.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:74025fdd6d1cb7ba4b5d087995339e9a09f0c16cf15dfe56368b23e41ffeaf7a"}, - {file = "grpcio_tools-1.60.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:5a907a4f1ffba86501b2cdb8682346249ea032b922fc69a92f082ba045cca548"}, - {file = "grpcio_tools-1.60.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:1fbb9554466d560472f07d906bfc8dcaf52f365c2a407015185993e30372a886"}, - {file = "grpcio_tools-1.60.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f10ef47460ce3c6fd400f05fe757b90df63486c9b84d1ecad42dcc5f80c8ac14"}, - {file = "grpcio_tools-1.60.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:321b18f42a70813545e416ddcb8bf20defa407a8114906711c9710a69596ceda"}, - {file = "grpcio_tools-1.60.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:081336d8258f1a56542aa8a7a5dec99a2b38d902e19fbdd744594783301b0210"}, - {file = "grpcio_tools-1.60.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:addc9b23d6ff729d9f83d4a2846292d4c84f5eb2ec38f08489a6a0d66ac2b91e"}, - {file = "grpcio_tools-1.60.0-cp312-cp312-win32.whl", hash = "sha256:e87cabac7969bdde309575edc2456357667a1b28262b2c1f12580ef48315b19d"}, - {file = "grpcio_tools-1.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:e70d867c120d9849093b0ac24d861e378bc88af2552e743d83b9f642d2caa7c2"}, - {file = "grpcio_tools-1.60.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:559ce714fe212aaf4abbe1493c5bb8920def00cc77ce0d45266f4fd9d8b3166f"}, - {file = "grpcio_tools-1.60.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:7a5263a0f2ddb7b1cfb2349e392cfc4f318722e0f48f886393e06946875d40f3"}, - {file = "grpcio_tools-1.60.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:18976684a931ca4bcba65c78afa778683aefaae310f353e198b1823bf09775a0"}, - {file = "grpcio_tools-1.60.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5c519a0d4ba1ab44a004fa144089738c59278233e2010b2cf4527dc667ff297"}, - {file = "grpcio_tools-1.60.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6170873b1e5b6580ebb99e87fb6e4ea4c48785b910bd7af838cc6e44b2bccb04"}, - {file = 
"grpcio_tools-1.60.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fb4df80868b3e397d5fbccc004c789d2668b622b51a9d2387b4c89c80d31e2c5"}, - {file = "grpcio_tools-1.60.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dba6e32c87b4af29b5f475fb2f470f7ee3140bfc128644f17c6c59ddeb670680"}, - {file = "grpcio_tools-1.60.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f610384dee4b1ca705e8da66c5b5fe89a2de3d165c5282c3d1ddf40cb18924e4"}, - {file = "grpcio_tools-1.60.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:4041538f55aad5b3ae7e25ab314d7995d689e968bfc8aa169d939a3160b1e4c6"}, - {file = "grpcio_tools-1.60.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:2fb4cf74bfe1e707cf10bc9dd38a1ebaa145179453d150febb121c7e9cd749bf"}, - {file = "grpcio_tools-1.60.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:2fd1671c52f96e79a2302c8b1c1f78b8a561664b8b3d6946f20d8f1cc6b4225a"}, - {file = "grpcio_tools-1.60.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd1e68c232fe01dd5312a8dbe52c50ecd2b5991d517d7f7446af4ba6334ba872"}, - {file = "grpcio_tools-1.60.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17a32b3da4fc0798cdcec0a9c974ac2a1e98298f151517bf9148294a3b1a5742"}, - {file = "grpcio_tools-1.60.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9970d384fb0c084b00945ef57d98d57a8d32be106d8f0bd31387f7cbfe411b5b"}, - {file = "grpcio_tools-1.60.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5ce6bbd4936977ec1114f2903eb4342781960d521b0d82f73afedb9335251f6f"}, - {file = "grpcio_tools-1.60.0-cp38-cp38-win32.whl", hash = "sha256:2e00de389729ca8d8d1a63c2038703078a887ff738dc31be640b7da9c26d0d4f"}, - {file = "grpcio_tools-1.60.0-cp38-cp38-win_amd64.whl", hash = "sha256:6192184b1f99372ff1d9594bd4b12264e3ff26440daba7eb043726785200ff77"}, - {file = "grpcio_tools-1.60.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:eae27f9b16238e2aaee84c77b5923c6924d6dccb0bdd18435bf42acc8473ae1a"}, - {file = "grpcio_tools-1.60.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:b96981f3a31b85074b73d97c8234a5ed9053d65a36b18f4a9c45a2120a5b7a0a"}, - {file = "grpcio_tools-1.60.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:1748893efd05cf4a59a175d7fa1e4fbb652f4d84ccaa2109f7869a2be48ed25e"}, - {file = "grpcio_tools-1.60.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a6fe752205caae534f29fba907e2f59ff79aa42c6205ce9a467e9406cbac68c"}, - {file = "grpcio_tools-1.60.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3456df087ea61a0972a5bc165aed132ed6ddcc63f5749e572f9fff84540bdbad"}, - {file = "grpcio_tools-1.60.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f3d916606dcf5610d4367918245b3d9d8cd0d2ec0b7043d1bbb8c50fe9815c3a"}, - {file = "grpcio_tools-1.60.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fc01bc1079279ec342f0f1b6a107b3f5dc3169c33369cf96ada6e2e171f74e86"}, - {file = "grpcio_tools-1.60.0-cp39-cp39-win32.whl", hash = "sha256:2dd01257e4feff986d256fa0bac9f56de59dc735eceeeb83de1c126e2e91f653"}, - {file = "grpcio_tools-1.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:1b93ae8ffd18e9af9a965ebca5fa521e89066267de7abdde20721edc04e42721"}, -] - -[package.dependencies] -grpcio = ">=1.60.0" + {file = "grpcio-tools-1.62.3.tar.gz", hash = "sha256:7c7136015c3d62c3eef493efabaf9e3380e3e66d24ee8e94c01cb71377f57833"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:2f968b049c2849540751ec2100ab05e8086c24bead769ca734fdab58698408c1"}, + {file = 
"grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:0a8c0c4724ae9c2181b7dbc9b186df46e4f62cb18dc184e46d06c0ebeccf569e"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5782883a27d3fae8c425b29a9d3dcf5f47d992848a1b76970da3b5a28d424b26"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3d812daffd0c2d2794756bd45a353f89e55dc8f91eb2fc840c51b9f6be62667"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b47d0dda1bdb0a0ba7a9a6de88e5a1ed61f07fad613964879954961e36d49193"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ca246dffeca0498be9b4e1ee169b62e64694b0f92e6d0be2573e65522f39eea9"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-win32.whl", hash = "sha256:6a56d344b0bab30bf342a67e33d386b0b3c4e65868ffe93c341c51e1a8853ca5"}, + {file = "grpcio_tools-1.62.3-cp310-cp310-win_amd64.whl", hash = "sha256:710fecf6a171dcbfa263a0a3e7070e0df65ba73158d4c539cec50978f11dad5d"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:703f46e0012af83a36082b5f30341113474ed0d91e36640da713355cd0ea5d23"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:7cc83023acd8bc72cf74c2edbe85b52098501d5b74d8377bfa06f3e929803492"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ff7d58a45b75df67d25f8f144936a3e44aabd91afec833ee06826bd02b7fbe7"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f2483ea232bd72d98a6dc6d7aefd97e5bc80b15cd909b9e356d6f3e326b6e43"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:962c84b4da0f3b14b3cdb10bc3837ebc5f136b67d919aea8d7bb3fd3df39528a"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8ad0473af5544f89fc5a1ece8676dd03bdf160fb3230f967e05d0f4bf89620e3"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-win32.whl", hash = "sha256:db3bc9fa39afc5e4e2767da4459df82b095ef0cab2f257707be06c44a1c2c3e5"}, + {file = "grpcio_tools-1.62.3-cp311-cp311-win_amd64.whl", hash = "sha256:e0898d412a434e768a0c7e365acabe13ff1558b767e400936e26b5b6ed1ee51f"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d102b9b21c4e1e40af9a2ab3c6d41afba6bd29c0aa50ca013bf85c99cdc44ac5"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:0a52cc9444df978438b8d2332c0ca99000521895229934a59f94f37ed896b133"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141d028bf5762d4a97f981c501da873589df3f7e02f4c1260e1921e565b376fa"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47a5c093ab256dec5714a7a345f8cc89315cb57c298b276fa244f37a0ba507f0"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f6831fdec2b853c9daa3358535c55eed3694325889aa714070528cf8f92d7d6d"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e02d7c1a02e3814c94ba0cfe43d93e872c758bd8fd5c2797f894d0c49b4a1dfc"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-win32.whl", hash = "sha256:b881fd9505a84457e9f7e99362eeedd86497b659030cf57c6f0070df6d9c2b9b"}, + {file = "grpcio_tools-1.62.3-cp312-cp312-win_amd64.whl", hash = 
"sha256:11c625eebefd1fd40a228fc8bae385e448c7e32a6ae134e43cf13bbc23f902b7"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:ec6fbded0c61afe6f84e3c2a43e6d656791d95747d6d28b73eff1af64108c434"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:bfda6ee8990997a9df95c5606f3096dae65f09af7ca03a1e9ca28f088caca5cf"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b77f9f9cee87cd798f0fe26b7024344d1b03a7cd2d2cba7035f8433b13986325"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e02d3b96f2d0e4bab9ceaa30f37d4f75571e40c6272e95364bff3125a64d184"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1da38070738da53556a4b35ab67c1b9884a5dd48fa2f243db35dc14079ea3d0c"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ace43b26d88a58dcff16c20d23ff72b04d0a415f64d2820f4ff06b1166f50557"}, + {file = "grpcio_tools-1.62.3-cp37-cp37m-win_amd64.whl", hash = "sha256:350a80485e302daaa95d335a931f97b693e170e02d43767ab06552c708808950"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:c3a1ac9d394f8e229eb28eec2e04b9a6f5433fa19c9d32f1cb6066e3c5114a1d"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:11f363570dea661dde99e04a51bd108a5807b5df32a6f8bdf4860e34e94a4dbf"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9ad9950119d8ae27634e68b7663cc8d340ae535a0f80d85a55e56a6973ab1f"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c5d22b252dcef11dd1e0fbbe5bbfb9b4ae048e8880d33338215e8ccbdb03edc"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:27cd9ef5c5d68d5ed104b6dcb96fe9c66b82050e546c9e255716903c3d8f0373"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f4b1615adf67bd8bb71f3464146a6f9949972d06d21a4f5e87e73f6464d97f57"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-win32.whl", hash = "sha256:e18e15287c31baf574fcdf8251fb7f997d64e96c6ecf467906e576da0a079af6"}, + {file = "grpcio_tools-1.62.3-cp38-cp38-win_amd64.whl", hash = "sha256:6c3064610826f50bd69410c63101954676edc703e03f9e8f978a135f1aaf97c1"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:8e62cc7164b0b7c5128e637e394eb2ef3db0e61fc798e80c301de3b2379203ed"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c8ad5cce554e2fcaf8842dee5d9462583b601a3a78f8b76a153c38c963f58c10"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec279dcf3518201fc592c65002754f58a6b542798cd7f3ecd4af086422f33f29"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c989246c2aebc13253f08be32538a4039a64e12d9c18f6d662d7aee641dc8b5"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ca4f5eeadbb57cf03317d6a2857823239a63a59cc935f5bd6cf6e8b7af7a7ecc"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0cb3a3436ac119cbd37a7d3331d9bdf85dad21a6ac233a3411dff716dcbf401e"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-win32.whl", hash = "sha256:3eae6ea76d62fcac091e1f15c2dcedf1dc3f114f8df1a972a8a0745e89f4cf61"}, + {file = "grpcio_tools-1.62.3-cp39-cp39-win_amd64.whl", hash = 
"sha256:eec73a005443061f4759b71a056f745e3b000dc0dc125c9f20560232dfbcbd14"}, +] + +[package.dependencies] +grpcio = ">=1.62.3" protobuf = ">=4.21.6,<5.0dev" setuptools = "*" @@ -1668,6 +1984,109 @@ files = [ hpack = ">=4.0,<5" hyperframe = ">=6.0,<7" +[[package]] +name = "hiredis" +version = "3.0.0" +description = "Python wrapper for hiredis" +optional = false +python-versions = ">=3.8" +files = [ + {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:4b182791c41c5eb1d9ed736f0ff81694b06937ca14b0d4dadde5dadba7ff6dae"}, + {file = "hiredis-3.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:13c275b483a052dd645eb2cb60d6380f1f5215e4c22d6207e17b86be6dd87ffa"}, + {file = "hiredis-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1018cc7f12824506f165027eabb302735b49e63af73eb4d5450c66c88f47026"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83a29cc7b21b746cb6a480189e49f49b2072812c445e66a9e38d2004d496b81c"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e241fab6332e8fb5f14af00a4a9c6aefa22f19a336c069b7ddbf28ef8341e8d6"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1fb8de899f0145d6c4d5d4bd0ee88a78eb980a7ffabd51e9889251b8f58f1785"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b23291951959141173eec10f8573538e9349fa27f47a0c34323d1970bf891ee5"}, + {file = "hiredis-3.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e421ac9e4b5efc11705a0d5149e641d4defdc07077f748667f359e60dc904420"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:77c8006c12154c37691b24ff293c077300c22944018c3ff70094a33e10c1d795"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:41afc0d3c18b59eb50970479a9c0e5544fb4b95e3a79cf2fbaece6ddefb926fe"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:04ccae6dcd9647eae6025425ab64edb4d79fde8b9e6e115ebfabc6830170e3b2"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fe91d62b0594db5ea7d23fc2192182b1a7b6973f628a9b8b2e0a42a2be721ac6"}, + {file = "hiredis-3.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:99516d99316062824a24d145d694f5b0d030c80da693ea6f8c4ecf71a251d8bb"}, + {file = "hiredis-3.0.0-cp310-cp310-win32.whl", hash = "sha256:562eaf820de045eb487afaa37e6293fe7eceb5b25e158b5a1974b7e40bf04543"}, + {file = "hiredis-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a1c81c89ed765198da27412aa21478f30d54ef69bf5e4480089d9c3f77b8f882"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:4664dedcd5933364756d7251a7ea86d60246ccf73a2e00912872dacbfcef8978"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:47de0bbccf4c8a9f99d82d225f7672b9dd690d8fd872007b933ef51a302c9fa6"}, + {file = "hiredis-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e43679eca508ba8240d016d8cca9d27342d70184773c15bea78a23c87a1922f1"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13c345e7278c210317e77e1934b27b61394fee0dec2e8bd47e71570900f75823"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00018f22f38530768b73ea86c11f47e8d4df65facd4e562bd78773bd1baef35e"}, + {file = 
"hiredis-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ea3a86405baa8eb0d3639ced6926ad03e07113de54cb00fd7510cb0db76a89d"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c073848d2b1d5561f3903879ccf4e1a70c9b1e7566c7bdcc98d082fa3e7f0a1d"}, + {file = "hiredis-3.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a8dffb5f5b3415a4669d25de48b617fd9d44b0bccfc4c2ab24b06406ecc9ecb"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:22c17c96143c2a62dfd61b13803bc5de2ac526b8768d2141c018b965d0333b66"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c3ece960008dab66c6b8bb3a1350764677ee7c74ccd6270aaf1b1caf9ccebb46"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f75999ae00a920f7dce6ecae76fa5e8674a3110e5a75f12c7a2c75ae1af53396"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e069967cbd5e1900aafc4b5943888f6d34937fc59bf8918a1a546cb729b4b1e4"}, + {file = "hiredis-3.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0aacc0a78e1d94d843a6d191f224a35893e6bdfeb77a4a89264155015c65f126"}, + {file = "hiredis-3.0.0-cp311-cp311-win32.whl", hash = "sha256:719c32147ba29528cb451f037bf837dcdda4ff3ddb6cdb12c4216b0973174718"}, + {file = "hiredis-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:bdc144d56333c52c853c31b4e2e52cfbdb22d3da4374c00f5f3d67c42158970f"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:484025d2eb8f6348f7876fc5a2ee742f568915039fcb31b478fd5c242bb0fe3a"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:fcdb552ffd97151dab8e7bc3ab556dfa1512556b48a367db94b5c20253a35ee1"}, + {file = "hiredis-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bb6f9fd92f147ba11d338ef5c68af4fd2908739c09e51f186e1d90958c68cc1"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa86bf9a0ed339ec9e8a9a9d0ae4dccd8671625c83f9f9f2640729b15e07fbfd"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e194a0d5df9456995d8f510eab9f529213e7326af6b94770abf8f8b7952ddcaa"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a1df39d74ec507d79c7a82c8063eee60bf80537cdeee652f576059b9cdd15c"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f91456507427ba36fd81b2ca11053a8e112c775325acc74e993201ea912d63e9"}, + {file = "hiredis-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9862db92ef67a8a02e0d5370f07d380e14577ecb281b79720e0d7a89aedb9ee5"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d10fcd9e0eeab835f492832b2a6edb5940e2f1230155f33006a8dfd3bd2c94e4"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:48727d7d405d03977d01885f317328dc21d639096308de126c2c4e9950cbd3c9"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e0bb6102ebe2efecf8a3292c6660a0e6fac98176af6de67f020bea1c2343717"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:df274e3abb4df40f4c7274dd3e587dfbb25691826c948bc98d5fead019dfb001"}, + {file = "hiredis-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:034925b5fb514f7b11aac38cd55b3fd7e9d3af23bd6497f3f20aa5b8ba58e232"}, + {file = "hiredis-3.0.0-cp312-cp312-win32.whl", hash = "sha256:120f2dda469b28d12ccff7c2230225162e174657b49cf4cd119db525414ae281"}, + {file = "hiredis-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:e584fe5f4e6681d8762982be055f1534e0170f6308a7a90f58d737bab12ff6a8"}, + {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:122171ff47d96ed8dd4bba6c0e41d8afaba3e8194949f7720431a62aa29d8895"}, + {file = "hiredis-3.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ba9fc605ac558f0de67463fb588722878641e6fa1dabcda979e8e69ff581d0bd"}, + {file = "hiredis-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a631e2990b8be23178f655cae8ac6c7422af478c420dd54e25f2e26c29e766f1"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63482db3fadebadc1d01ad33afa6045ebe2ea528eb77ccaabd33ee7d9c2bad48"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f669212c390eebfbe03c4e20181f5970b82c5d0a0ad1df1785f7ffbe7d61150"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a49ef161739f8018c69b371528bdb47d7342edfdee9ddc75a4d8caddf45a6e"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98a152052b8878e5e43a2e3a14075218adafc759547c98668a21e9485882696c"}, + {file = "hiredis-3.0.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50a196af0ce657fcde9bf8a0bbe1032e22c64d8fcec2bc926a35e7ff68b3a166"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f2f312eef8aafc2255e3585dcf94d5da116c43ef837db91db9ecdc1bc930072d"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:6ca41fa40fa019cde42c21add74aadd775e71458051a15a352eabeb12eb4d084"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:6eecb343c70629f5af55a8b3e53264e44fa04e155ef7989de13668a0cb102a90"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:c3fdad75e7837a475900a1d3a5cc09aa024293c3b0605155da2d42f41bc0e482"}, + {file = "hiredis-3.0.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:8854969e7480e8d61ed7549eb232d95082a743e94138d98d7222ba4e9f7ecacd"}, + {file = "hiredis-3.0.0-cp38-cp38-win32.whl", hash = "sha256:f114a6c86edbf17554672b050cce72abf489fe58d583c7921904d5f1c9691605"}, + {file = "hiredis-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:7d99b91e42217d7b4b63354b15b41ce960e27d216783e04c4a350224d55842a4"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:4c6efcbb5687cf8d2aedcc2c3ed4ac6feae90b8547427d417111194873b66b06"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5b5cff42a522a0d81c2ae7eae5e56d0ee7365e0c4ad50c4de467d8957aff4414"}, + {file = "hiredis-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:82f794d564f4bc76b80c50b03267fe5d6589e93f08e66b7a2f674faa2fa76ebc"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a4c1791d7aa7e192f60fe028ae409f18ccdd540f8b1e6aeb0df7816c77e4a4"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2537b2cd98192323fce4244c8edbf11f3cac548a9d633dbbb12b48702f379f4"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:8fed69bbaa307040c62195a269f82fc3edf46b510a17abb6b30a15d7dab548df"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:869f6d5537d243080f44253491bb30aa1ec3c21754003b3bddeadedeb65842b0"}, + {file = "hiredis-3.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d435ae89073d7cd51e6b6bf78369c412216261c9c01662e7008ff00978153729"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:204b79b30a0e6be0dc2301a4d385bb61472809f09c49f400497f1cdd5a165c66"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3ea635101b739c12effd189cc19b2671c268abb03013fd1f6321ca29df3ca625"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:f359175197fd833c8dd7a8c288f1516be45415bb5c939862ab60c2918e1e1943"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ac6d929cb33dd12ad3424b75725975f0a54b5b12dbff95f2a2d660c510aa106d"}, + {file = "hiredis-3.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:100431e04d25a522ef2c3b94f294c4219c4de3bfc7d557b6253296145a144c11"}, + {file = "hiredis-3.0.0-cp39-cp39-win32.whl", hash = "sha256:e1a9c14ae9573d172dc050a6f63a644457df5d01ec4d35a6a0f097f812930f83"}, + {file = "hiredis-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:54a6dd7b478e6eb01ce15b3bb5bf771e108c6c148315bf194eb2ab776a3cac4d"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:50da7a9edf371441dfcc56288d790985ee9840d982750580710a9789b8f4a290"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9b285ef6bf1581310b0d5e8f6ce64f790a1c40e89c660e1320b35f7515433672"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dcfa684966f25b335072115de2f920228a3c2caf79d4bfa2b30f6e4f674a948"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a41be8af1fd78ca97bc948d789a09b730d1e7587d07ca53af05758f31f4b985d"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:038756db735e417ab36ee6fd7725ce412385ed2bd0767e8179a4755ea11b804f"}, + {file = "hiredis-3.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:fcecbd39bd42cef905c0b51c9689c39d0cc8b88b1671e7f40d4fb213423aef3a"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a131377493a59fb0f5eaeb2afd49c6540cafcfba5b0b3752bed707be9e7c4eaf"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d22c53f0ec5c18ecb3d92aa9420563b1c5d657d53f01356114978107b00b860"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a91e9520fbc65a799943e5c970ffbcd67905744d8becf2e75f9f0a5e8414f0"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc8043959b50141df58ab4f398e8ae84c6f9e673a2c9407be65fc789138f4a6"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:51b99cfac514173d7b8abdfe10338193e8a0eccdfe1870b646009d2fb7cbe4b5"}, + {file = "hiredis-3.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:fa1fcad89d8a41d8dc10b1e54951ec1e161deabd84ed5a2c95c3c7213bdb3514"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:898636a06d9bf575d2c594129085ad6b713414038276a4bfc5db7646b8a5be78"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:466f836dbcf86de3f9692097a7a01533dc9926986022c6617dc364a402b265c5"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23142a8af92a13fc1e3f2ca1d940df3dcf2af1d176be41fe8d89e30a837a0b60"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:793c80a3d6b0b0e8196a2d5de37a08330125668c8012922685e17aa9108c33ac"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:467d28112c7faa29b7db743f40803d927c8591e9da02b6ce3d5fadc170a542a2"}, + {file = "hiredis-3.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:dc384874a719c767b50a30750f937af18842ee5e288afba95a5a3ed703b1515a"}, + {file = "hiredis-3.0.0.tar.gz", hash = "sha256:fed8581ae26345dea1f1e0d1a96e05041a727a45e7d8d459164583e23c6ac441"}, +] + [[package]] name = "hpack" version = "4.0.0" @@ -1789,13 +2208,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" -version = "0.23.5" +version = "0.24.5" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.23.5-py3-none-any.whl", hash = "sha256:d7a7d337615e11a45cc14a0ce5a605db6b038dc24af42866f731684825226e90"}, - {file = "huggingface_hub-0.23.5.tar.gz", hash = "sha256:67a9caba79b71235be3752852ca27da86bd54311d2424ca8afdb8dda056edf98"}, + {file = "huggingface_hub-0.24.5-py3-none-any.whl", hash = "sha256:d93fb63b1f1a919a22ce91a14518974e81fc4610bf344dfe7572343ce8d3aced"}, + {file = "huggingface_hub-0.24.5.tar.gz", hash = "sha256:7b45d6744dd53ce9cbf9880957de00e9d10a9ae837f1c9b7255fc8fa4e8264f3"}, ] [package.dependencies] @@ -1808,17 +2227,17 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.3.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", 
"pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] hf-transfer = ["hf-transfer (>=0.1.4)"] inference = ["aiohttp", "minijinja (>=1.0)"] -quality = ["mypy (==1.5.1)", "ruff (>=0.3.0)"] +quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] tensorflow-testing = ["keras (<3.0)", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] -torch = ["safetensors", "torch"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["safetensors[torch]", "torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] @@ -1848,13 +2267,13 @@ files = [ [[package]] name = "identify" -version = "2.5.36" +version = "2.6.0" description = "File identification library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "identify-2.5.36-py2.py3-none-any.whl", hash = "sha256:37d93f380f4de590500d9dba7db359d0d3da95ffe7f9de1753faa159e71e7dfa"}, - {file = "identify-2.5.36.tar.gz", hash = "sha256:e5e00f54165f9047fbebeb4a560f9acfb8af4c88232be60a488e9b68d122745d"}, + {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"}, + {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"}, ] [package.extras] @@ -1873,22 +2292,22 @@ files = [ [[package]] name = "importlib-metadata" -version = "7.0.0" +version = "8.0.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-7.0.0-py3-none-any.whl", hash = "sha256:d97503976bb81f40a193d41ee6570868479c69d5068651eb039c40d850c59d67"}, - {file = "importlib_metadata-7.0.0.tar.gz", hash = "sha256:7fc841f8b8332803464e5dc1c63a2e59121f46ca186c0e2e182e80bf8c1319f7"}, + {file = "importlib_metadata-8.0.0-py3-none-any.whl", hash = "sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f"}, + {file = "importlib_metadata-8.0.0.tar.gz", hash = "sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812"}, ] [package.dependencies] zipp = ">=0.5" [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] +test = ["flufl.flake8", 
"importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] [[package]] name = "importlib-resources" @@ -1916,29 +2335,15 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] -[[package]] -name = "intel-openmp" -version = "2021.4.0" -description = "Intel OpenMP* Runtime Library" -optional = false -python-versions = "*" -files = [ - {file = "intel_openmp-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:41c01e266a7fdb631a7609191709322da2bbf24b252ba763f125dd651bcc7675"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:3b921236a38384e2016f0f3d65af6732cf2c12918087128a9163225451e776f2"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:e2240ab8d01472fed04f3544a878cda5da16c26232b7ea1b59132dbfb48b186e"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:6e863d8fd3d7e8ef389d52cf97a50fe2afe1a19247e8c0d168ce021546f96fc9"}, - {file = "intel_openmp-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:eef4c8bcc8acefd7f5cd3b9384dbf73d59e2c99fc56545712ded913f43c4a94f"}, -] - [[package]] name = "ipykernel" -version = "6.29.4" +version = "6.29.5" description = "IPython Kernel for Jupyter" optional = false python-versions = ">=3.8" files = [ - {file = "ipykernel-6.29.4-py3-none-any.whl", hash = "sha256:1181e653d95c6808039c509ef8e67c4126b3b3af7781496c7cbfb5ed938a27da"}, - {file = "ipykernel-6.29.4.tar.gz", hash = "sha256:3d44070060f9475ac2092b760123fadf105d2e2493c24848b6691a7c4f42af5c"}, + {file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"}, + {file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"}, ] [package.dependencies] @@ -1965,13 +2370,13 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio [[package]] name = "ipython" -version = "8.24.0" +version = "8.26.0" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.10" files = [ - {file = "ipython-8.24.0-py3-none-any.whl", hash = "sha256:d7bf2f6c4314984e3e02393213bab8703cf163ede39672ce5918c51fe253a2a3"}, - {file = "ipython-8.24.0.tar.gz", hash = "sha256:010db3f8a728a578bb641fdd06c063b9fb8e96a9464c63aec6310fbcb5e80501"}, + {file = "ipython-8.26.0-py3-none-any.whl", hash = "sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff"}, + {file = "ipython-8.26.0.tar.gz", hash = "sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c"}, ] [package.dependencies] @@ -1990,7 +2395,7 @@ typing-extensions = {version = ">=4.6", markers = "python_version < \"3.12\""} [package.extras] all = ["ipython[black,doc,kernel,matplotlib,nbconvert,nbformat,notebook,parallel,qtconsole]", "ipython[test,test-extra]"] black = ["black"] -doc = ["docrepr", "exceptiongroup", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "stack-data", "typing-extensions"] +doc = ["docrepr", "exceptiongroup", "intersphinx-registry", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "tomli", "typing-extensions"] kernel = ["ipykernel"] 
matplotlib = ["matplotlib"] nbconvert = ["nbconvert"] @@ -1998,7 +2403,7 @@ nbformat = ["nbformat"] notebook = ["ipywidgets", "notebook"] parallel = ["ipyparallel"] qtconsole = ["qtconsole"] -test = ["pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] +test = ["packaging", "pickleshare", "pytest", "pytest-asyncio (<0.22)", "testpath"] test-extra = ["curio", "ipython[test]", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"] [[package]] @@ -2051,6 +2456,76 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "jiter" +version = "0.5.0" +description = "Fast iterable JSON parser." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jiter-0.5.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b599f4e89b3def9a94091e6ee52e1d7ad7bc33e238ebb9c4c63f211d74822c3f"}, + {file = "jiter-0.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a063f71c4b06225543dddadbe09d203dc0c95ba352d8b85f1221173480a71d5"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acc0d5b8b3dd12e91dd184b87273f864b363dfabc90ef29a1092d269f18c7e28"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c22541f0b672f4d741382a97c65609332a783501551445ab2df137ada01e019e"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63314832e302cc10d8dfbda0333a384bf4bcfce80d65fe99b0f3c0da8945a91a"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a25fbd8a5a58061e433d6fae6d5298777c0814a8bcefa1e5ecfff20c594bd749"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:503b2c27d87dfff5ab717a8200fbbcf4714516c9d85558048b1fc14d2de7d8dc"}, + {file = "jiter-0.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d1f3d27cce923713933a844872d213d244e09b53ec99b7a7fdf73d543529d6d"}, + {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c95980207b3998f2c3b3098f357994d3fd7661121f30669ca7cb945f09510a87"}, + {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:afa66939d834b0ce063f57d9895e8036ffc41c4bd90e4a99631e5f261d9b518e"}, + {file = "jiter-0.5.0-cp310-none-win32.whl", hash = "sha256:f16ca8f10e62f25fd81d5310e852df6649af17824146ca74647a018424ddeccf"}, + {file = "jiter-0.5.0-cp310-none-win_amd64.whl", hash = "sha256:b2950e4798e82dd9176935ef6a55cf6a448b5c71515a556da3f6b811a7844f1e"}, + {file = "jiter-0.5.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4c8e1ed0ef31ad29cae5ea16b9e41529eb50a7fba70600008e9f8de6376d553"}, + {file = "jiter-0.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c6f16e21276074a12d8421692515b3fd6d2ea9c94fd0734c39a12960a20e85f3"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5280e68e7740c8c128d3ae5ab63335ce6d1fb6603d3b809637b11713487af9e6"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:583c57fc30cc1fec360e66323aadd7fc3edeec01289bfafc35d3b9dcb29495e4"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26351cc14507bdf466b5f99aba3df3143a59da75799bf64a53a3ad3155ecded9"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829df14d656b3fb87e50ae8b48253a8851c707da9f30d45aacab2aa2ba2d614"}, + {file = 
"jiter-0.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42a4bdcf7307b86cb863b2fb9bb55029b422d8f86276a50487982d99eed7c6e"}, + {file = "jiter-0.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04d461ad0aebf696f8da13c99bc1b3e06f66ecf6cfd56254cc402f6385231c06"}, + {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6375923c5f19888c9226582a124b77b622f8fd0018b843c45eeb19d9701c403"}, + {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cec323a853c24fd0472517113768c92ae0be8f8c384ef4441d3632da8baa646"}, + {file = "jiter-0.5.0-cp311-none-win32.whl", hash = "sha256:aa1db0967130b5cab63dfe4d6ff547c88b2a394c3410db64744d491df7f069bb"}, + {file = "jiter-0.5.0-cp311-none-win_amd64.whl", hash = "sha256:aa9d2b85b2ed7dc7697597dcfaac66e63c1b3028652f751c81c65a9f220899ae"}, + {file = "jiter-0.5.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9f664e7351604f91dcdd557603c57fc0d551bc65cc0a732fdacbf73ad335049a"}, + {file = "jiter-0.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:044f2f1148b5248ad2c8c3afb43430dccf676c5a5834d2f5089a4e6c5bbd64df"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:702e3520384c88b6e270c55c772d4bd6d7b150608dcc94dea87ceba1b6391248"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:528d742dcde73fad9d63e8242c036ab4a84389a56e04efd854062b660f559544"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cf80e5fe6ab582c82f0c3331df27a7e1565e2dcf06265afd5173d809cdbf9ba"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:44dfc9ddfb9b51a5626568ef4e55ada462b7328996294fe4d36de02fce42721f"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c451f7922992751a936b96c5f5b9bb9312243d9b754c34b33d0cb72c84669f4e"}, + {file = "jiter-0.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:308fce789a2f093dca1ff91ac391f11a9f99c35369117ad5a5c6c4903e1b3e3a"}, + {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7f5ad4a7c6b0d90776fdefa294f662e8a86871e601309643de30bf94bb93a64e"}, + {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ea189db75f8eca08807d02ae27929e890c7d47599ce3d0a6a5d41f2419ecf338"}, + {file = "jiter-0.5.0-cp312-none-win32.whl", hash = "sha256:e3bbe3910c724b877846186c25fe3c802e105a2c1fc2b57d6688b9f8772026e4"}, + {file = "jiter-0.5.0-cp312-none-win_amd64.whl", hash = "sha256:a586832f70c3f1481732919215f36d41c59ca080fa27a65cf23d9490e75b2ef5"}, + {file = "jiter-0.5.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f04bc2fc50dc77be9d10f73fcc4e39346402ffe21726ff41028f36e179b587e6"}, + {file = "jiter-0.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6f433a4169ad22fcb550b11179bb2b4fd405de9b982601914ef448390b2954f3"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad4a6398c85d3a20067e6c69890ca01f68659da94d74c800298581724e426c7e"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6baa88334e7af3f4d7a5c66c3a63808e5efbc3698a1c57626541ddd22f8e4fbf"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ece0a115c05efca597c6d938f88c9357c843f8c245dbbb53361a1c01afd7148"}, + {file = 
"jiter-0.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:335942557162ad372cc367ffaf93217117401bf930483b4b3ebdb1223dbddfa7"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649b0ee97a6e6da174bffcb3c8c051a5935d7d4f2f52ea1583b5b3e7822fbf14"}, + {file = "jiter-0.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4be354c5de82157886ca7f5925dbda369b77344b4b4adf2723079715f823989"}, + {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5206144578831a6de278a38896864ded4ed96af66e1e63ec5dd7f4a1fce38a3a"}, + {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8120c60f8121ac3d6f072b97ef0e71770cc72b3c23084c72c4189428b1b1d3b6"}, + {file = "jiter-0.5.0-cp38-none-win32.whl", hash = "sha256:6f1223f88b6d76b519cb033a4d3687ca157c272ec5d6015c322fc5b3074d8a5e"}, + {file = "jiter-0.5.0-cp38-none-win_amd64.whl", hash = "sha256:c59614b225d9f434ea8fc0d0bec51ef5fa8c83679afedc0433905994fb36d631"}, + {file = "jiter-0.5.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0af3838cfb7e6afee3f00dc66fa24695199e20ba87df26e942820345b0afc566"}, + {file = "jiter-0.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:550b11d669600dbc342364fd4adbe987f14d0bbedaf06feb1b983383dcc4b961"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:489875bf1a0ffb3cb38a727b01e6673f0f2e395b2aad3c9387f94187cb214bbf"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b250ca2594f5599ca82ba7e68785a669b352156260c5362ea1b4e04a0f3e2389"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ea18e01f785c6667ca15407cd6dabbe029d77474d53595a189bdc813347218e"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:462a52be85b53cd9bffd94e2d788a09984274fe6cebb893d6287e1c296d50653"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92cc68b48d50fa472c79c93965e19bd48f40f207cb557a8346daa020d6ba973b"}, + {file = "jiter-0.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1c834133e59a8521bc87ebcad773608c6fa6ab5c7a022df24a45030826cf10bc"}, + {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab3a71ff31cf2d45cb216dc37af522d335211f3a972d2fe14ea99073de6cb104"}, + {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cccd3af9c48ac500c95e1bcbc498020c87e1781ff0345dd371462d67b76643eb"}, + {file = "jiter-0.5.0-cp39-none-win32.whl", hash = "sha256:368084d8d5c4fc40ff7c3cc513c4f73e02c85f6009217922d0823a48ee7adf61"}, + {file = "jiter-0.5.0-cp39-none-win_amd64.whl", hash = "sha256:ce03f7b4129eb72f1687fa11300fbf677b02990618428934662406d2a76742a1"}, + {file = "jiter-0.5.0.tar.gz", hash = "sha256:1d916ba875bcab5c5f7d927df998c4cb694d27dceddf3392e58beaf10563368a"}, +] + [[package]] name = "joblib" version = "1.4.2" @@ -2064,13 +2539,13 @@ files = [ [[package]] name = "jsonschema" -version = "4.22.0" +version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" files = [ - {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"}, - {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"}, + {file = "jsonschema-4.23.0-py3-none-any.whl", hash = 
"sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, + {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, ] [package.dependencies] @@ -2081,23 +2556,23 @@ rpds-py = ">=0.7.1" [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] -format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"] [[package]] name = "jsonschema-path" -version = "0.3.2" +version = "0.3.3" description = "JSONSchema Spec with object-oriented paths" optional = false -python-versions = ">=3.8.0,<4.0.0" +python-versions = "<4.0.0,>=3.8.0" files = [ - {file = "jsonschema_path-0.3.2-py3-none-any.whl", hash = "sha256:271aedfefcd161a0f467bdf23e1d9183691a61eaabf4b761046a914e369336c7"}, - {file = "jsonschema_path-0.3.2.tar.gz", hash = "sha256:4d0dababf341e36e9b91a5fb2a3e3fd300b0150e7fe88df4e55cc8253c5a3989"}, + {file = "jsonschema_path-0.3.3-py3-none-any.whl", hash = "sha256:203aff257f8038cd3c67be614fe6b2001043408cb1b4e36576bc4921e09d83c4"}, + {file = "jsonschema_path-0.3.3.tar.gz", hash = "sha256:f02e5481a4288ec062f8e68c808569e427d905bedfecb7f2e4c69ef77957c382"}, ] [package.dependencies] pathable = ">=0.4.1,<0.5.0" PyYAML = ">=5.1" -referencing = ">=0.28.0,<0.32.0" +referencing = ">=0.28.0,<0.36.0" requests = ">=2.31.0,<3.0.0" [[package]] @@ -2169,13 +2644,13 @@ files = [ [[package]] name = "kubernetes" -version = "29.0.0" +version = "30.1.0" description = "Kubernetes python client" optional = false python-versions = ">=3.6" files = [ - {file = "kubernetes-29.0.0-py2.py3-none-any.whl", hash = "sha256:ab8cb0e0576ccdfb71886366efb102c6a20f268d817be065ce7f9909c631e43e"}, - {file = "kubernetes-29.0.0.tar.gz", hash = "sha256:c4812e227ae74d07d53c88293e564e54b850452715a59a927e7e1bc6b9a60459"}, + {file = "kubernetes-30.1.0-py2.py3-none-any.whl", hash = "sha256:e212e8b7579031dd2e512168b617373bc1e03888d41ac4e04039240a292d478d"}, + {file = "kubernetes-30.1.0.tar.gz", hash = "sha256:41e4c77af9f28e7a6c314e3bd06a8c6229ddd787cad684e0ab9f69b498e98ebc"}, ] [package.dependencies] @@ -2334,13 +2809,13 @@ files = [ [[package]] name = "marshmallow" -version = "3.21.2" +version = "3.21.3" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
optional = false python-versions = ">=3.8" files = [ - {file = "marshmallow-3.21.2-py3-none-any.whl", hash = "sha256:70b54a6282f4704d12c0a41599682c5c5450e843b9ec406308653b47c59648a1"}, - {file = "marshmallow-3.21.2.tar.gz", hash = "sha256:82408deadd8b33d56338d2182d455db632c6313aa2af61916672146bb32edc56"}, + {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, + {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, ] [package.dependencies] @@ -2412,13 +2887,13 @@ opentelemetry-sdk = ">=1.20.0" [[package]] name = "microsoft-kiota-http" -version = "1.3.1" +version = "1.3.3" description = "Kiota http request adapter implementation for httpx library" optional = false python-versions = "*" files = [ - {file = "microsoft_kiota_http-1.3.1-py2.py3-none-any.whl", hash = "sha256:d62972c6ed4c785f9808a15479a7421abb38a9519b39e6933e5d05555b9fb427"}, - {file = "microsoft_kiota_http-1.3.1.tar.gz", hash = "sha256:09d85310379f88af0a0967925d1fcbe82f2520a9fe6fa1fd50e79af813bc451d"}, + {file = "microsoft_kiota_http-1.3.3-py2.py3-none-any.whl", hash = "sha256:21109a34140bf42e18855b7cf983939b891ae30739f21a9ce045c3a715f325fd"}, + {file = "microsoft_kiota_http-1.3.3.tar.gz", hash = "sha256:0b40f37c6c158c2e5b2dffa963a7fc342d368c1a64b8cca08631ba19d0ff94a9"}, ] [package.dependencies] @@ -2444,13 +2919,13 @@ pendulum = ">=3.0.0" [[package]] name = "microsoft-kiota-serialization-json" -version = "1.2.0" +version = "1.3.0" description = "Implementation of Kiota Serialization interfaces for JSON" optional = false python-versions = "*" files = [ - {file = "microsoft_kiota_serialization_json-1.2.0-py2.py3-none-any.whl", hash = "sha256:cf68ef323157b3566b043d2282b292479bca6af0ffcf08385c806c812e507a58"}, - {file = "microsoft_kiota_serialization_json-1.2.0.tar.gz", hash = "sha256:89a4ec0128958bc92287db0cf5b6616a9f66ac42f6c7bcfe8894393d2156bed9"}, + {file = "microsoft_kiota_serialization_json-1.3.0-py2.py3-none-any.whl", hash = "sha256:fbf82835d8b77ef21b496aa711a512fe4494fa94dfe88f7fd014dffe33778e20"}, + {file = "microsoft_kiota_serialization_json-1.3.0.tar.gz", hash = "sha256:235b680e6eb646479ffb7b59d2a6f0216c4f7e1c2ff1219fd4d59e898fa6b124"}, ] [package.dependencies] @@ -2503,16 +2978,20 @@ client = ["pymilvus (>=2.3.0b1,<2.4.0)"] [[package]] name = "milvus-lite" -version = "2.4.7" +version = "2.4.9" description = "A lightweight version of Milvus wrapped with Python." 
optional = false python-versions = ">=3.7" files = [ - {file = "milvus_lite-2.4.7-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:c828190118b104b05b8c8e0b5a4147811c86b54b8fb67bc2e726ad10fc0b544e"}, - {file = "milvus_lite-2.4.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e1537633c39879714fb15082be56a4b97f74c905a6e98e302ec01320561081af"}, - {file = "milvus_lite-2.4.7-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f016474d663045787dddf1c3aad13b7d8b61fd329220318f858184918143dcbf"}, + {file = "milvus_lite-2.4.9-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d3e617b3d68c09ad656d54bc3d8cc4ef6ef56c54015e1563d4fe4bcec6b7c90a"}, + {file = "milvus_lite-2.4.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6e7029282d6829b277ebb92f64e2370be72b938e34770e1eb649346bda5d1d7f"}, + {file = "milvus_lite-2.4.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9b8e991e4e433596f6a399a165c1a506f823ec9133332e03d7f8a114bff4550d"}, + {file = "milvus_lite-2.4.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:7f53e674602101cfbcf0a4a59d19eaa139dfd5580639f3040ad73d901f24fc0b"}, ] +[package.dependencies] +tqdm = "*" + [[package]] name = "mistralai" version = "0.4.2" @@ -2540,24 +3019,6 @@ files = [ {file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"}, ] -[[package]] -name = "mkl" -version = "2021.4.0" -description = "Intel® oneAPI Math Kernel Library" -optional = false -python-versions = "*" -files = [ - {file = "mkl-2021.4.0-py2.py3-none-macosx_10_15_x86_64.macosx_11_0_x86_64.whl", hash = "sha256:67460f5cd7e30e405b54d70d1ed3ca78118370b65f7327d495e9c8847705e2fb"}, - {file = "mkl-2021.4.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:636d07d90e68ccc9630c654d47ce9fdeb036bb46e2b193b3a9ac8cfea683cce5"}, - {file = "mkl-2021.4.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:398dbf2b0d12acaf54117a5210e8f191827f373d362d796091d161f610c1ebfb"}, - {file = "mkl-2021.4.0-py2.py3-none-win32.whl", hash = "sha256:439c640b269a5668134e3dcbcea4350459c4a8bc46469669b2d67e07e3d330e8"}, - {file = "mkl-2021.4.0-py2.py3-none-win_amd64.whl", hash = "sha256:ceef3cafce4c009dd25f65d7ad0d833a0fbadc3d8903991ec92351fe5de1e718"}, -] - -[package.dependencies] -intel-openmp = "==2021.*" -tbb = "==2021.*" - [[package]] name = "mmh3" version = "4.1.0" @@ -2663,24 +3124,24 @@ files = [ [[package]] name = "more-itertools" -version = "10.2.0" +version = "10.4.0" description = "More routines for operating on iterables, beyond itertools" optional = false python-versions = ">=3.8" files = [ - {file = "more-itertools-10.2.0.tar.gz", hash = "sha256:8fccb480c43d3e99a00087634c06dd02b0d50fbf088b380de5a41a015ec239e1"}, - {file = "more_itertools-10.2.0-py3-none-any.whl", hash = "sha256:686b06abe565edfab151cb8fd385a05651e1fdf8f0a14191e4439283421f8684"}, + {file = "more-itertools-10.4.0.tar.gz", hash = "sha256:fe0e63c4ab068eac62410ab05cccca2dc71ec44ba8ef29916a0090df061cf923"}, + {file = "more_itertools-10.4.0-py3-none-any.whl", hash = "sha256:0f7d9f83a0a8dcfa8a2694a770590d98a67ea943e3d9f5298309a484758c4e27"}, ] [[package]] name = "motor" -version = "3.5.0" +version = "3.5.1" description = "Non-blocking MongoDB driver for Tornado or asyncio" optional = false python-versions = ">=3.8" files = [ - {file = "motor-3.5.0-py3-none-any.whl", hash = "sha256:e8f1d7a3370e8dd30eb4c68aaaee46dc608fbac70a757e58f3e828124f5e7693"}, - {file = "motor-3.5.0.tar.gz", hash = "sha256:2b38e405e5a0c52d499edb8d23fa029debdf0158da092c21b44d92cac7f59942"}, + {file = "motor-3.5.1-py3-none-any.whl", hash = 
"sha256:f95a9ea0f011464235e0bd72910baa291db3a6009e617ac27b82f57885abafb8"}, + {file = "motor-3.5.1.tar.gz", hash = "sha256:1622bd7b39c3e6375607c14736f6e1d498128eadf6f5f93f8786cf17d37062ac"}, ] [package.dependencies] @@ -2715,51 +3176,47 @@ tests = ["pytest (>=4.6)"] [[package]] name = "msal" -version = "1.28.0" +version = "1.30.0" description = "The Microsoft Authentication Library (MSAL) for Python library enables your app to access the Microsoft Cloud by supporting authentication of users with Microsoft Azure Active Directory accounts (AAD) and Microsoft Accounts (MSA) using industry standard OAuth2 and OpenID Connect." optional = false python-versions = ">=3.7" files = [ - {file = "msal-1.28.0-py3-none-any.whl", hash = "sha256:3064f80221a21cd535ad8c3fafbb3a3582cd9c7e9af0bb789ae14f726a0ca99b"}, - {file = "msal-1.28.0.tar.gz", hash = "sha256:80bbabe34567cb734efd2ec1869b2d98195c927455369d8077b3c542088c5c9d"}, + {file = "msal-1.30.0-py3-none-any.whl", hash = "sha256:423872177410cb61683566dc3932db7a76f661a5d2f6f52f02a047f101e1c1de"}, + {file = "msal-1.30.0.tar.gz", hash = "sha256:b4bf00850092e465157d814efa24a18f788284c9a479491024d62903085ea2fb"}, ] [package.dependencies] -cryptography = ">=0.6,<45" +cryptography = ">=2.5,<45" PyJWT = {version = ">=1.0.0,<3", extras = ["crypto"]} requests = ">=2.0.0,<3" [package.extras] -broker = ["pymsalruntime (>=0.13.2,<0.15)"] +broker = ["pymsalruntime (>=0.13.2,<0.17)"] [[package]] name = "msal-extensions" -version = "1.1.0" +version = "1.2.0" description = "Microsoft Authentication Library extensions (MSAL EX) provides a persistence API that can save your data on disk, encrypted on Windows, macOS and Linux. Concurrent data access will be coordinated by a file lock mechanism." optional = false python-versions = ">=3.7" files = [ - {file = "msal-extensions-1.1.0.tar.gz", hash = "sha256:6ab357867062db7b253d0bd2df6d411c7891a0ee7308d54d1e4317c1d1c54252"}, - {file = "msal_extensions-1.1.0-py3-none-any.whl", hash = "sha256:01be9711b4c0b1a151450068eeb2c4f0997df3bba085ac299de3a66f585e382f"}, + {file = "msal_extensions-1.2.0-py3-none-any.whl", hash = "sha256:cf5ba83a2113fa6dc011a254a72f1c223c88d7dfad74cc30617c4679a417704d"}, + {file = "msal_extensions-1.2.0.tar.gz", hash = "sha256:6f41b320bfd2933d631a215c91ca0dd3e67d84bd1a2f50ce917d5874ec646bef"}, ] [package.dependencies] -msal = ">=0.4.1,<2.0.0" -packaging = "*" -portalocker = [ - {version = ">=1.0,<3", markers = "platform_system != \"Windows\""}, - {version = ">=1.6,<3", markers = "platform_system == \"Windows\""}, -] +msal = ">=1.29,<2" +portalocker = ">=1.4,<3" [[package]] name = "msgraph-core" -version = "1.0.0" +version = "1.1.2" description = "Core component of the Microsoft Graph Python SDK" optional = false python-versions = ">=3.8" files = [ - {file = "msgraph-core-1.0.0.tar.gz", hash = "sha256:f26bcbbb3cd149dd7f1613159e0c2ed862888d61bfd20ef0b08b9408eb670c9d"}, - {file = "msgraph_core-1.0.0-py3-none-any.whl", hash = "sha256:f3de5149e246833b4b03605590d0b4eacf58d9c5a10fd951c37e53f0a345afd5"}, + {file = "msgraph_core-1.1.2-py3-none-any.whl", hash = "sha256:ed0695275d66914994a6ff71e7d71736ee4c4db3548a1021b2dd3a9605247def"}, + {file = "msgraph_core-1.1.2.tar.gz", hash = "sha256:c533cad1a23980487a4aa229dc5d9b00975fc6590e157e9f51046c6e80349288"}, ] [package.dependencies] @@ -2773,25 +3230,25 @@ dev = ["bumpver", "isort", "mypy", "pylint", "pytest", "yapf"] [[package]] name = "msgraph-sdk" -version = "1.4.0" +version = "1.5.4" description = "The Microsoft Graph Python SDK" optional = false python-versions = 
">=3.8" files = [ - {file = "msgraph_sdk-1.4.0-py3-none-any.whl", hash = "sha256:24f99082475ea129c3d45e44269bd64a7c6bfef8dda4f8ea692bbc9e47b71b78"}, - {file = "msgraph_sdk-1.4.0.tar.gz", hash = "sha256:715907272c240e579d7669a690504488e25ae15fec904e2918c49ca328dc4a14"}, + {file = "msgraph_sdk-1.5.4-py3-none-any.whl", hash = "sha256:9ea349f30cc4a03edb587e26554c7a4839a38c2ef30d4b5396882fd2be82dcac"}, + {file = "msgraph_sdk-1.5.4.tar.gz", hash = "sha256:b0e146328d136d1db175938d8fc901f3bb32acf3ea6fe93c0dc7c5a0abc45e39"}, ] [package.dependencies] azure-identity = ">=1.12.0" -microsoft-kiota-abstractions = ">=1.0.0,<2.0.0" +microsoft-kiota-abstractions = ">=1.3.0,<2.0.0" microsoft-kiota-authentication-azure = ">=1.0.0,<2.0.0" microsoft-kiota-http = ">=1.0.0,<2.0.0" microsoft-kiota-serialization-form = ">=0.1.0" -microsoft-kiota-serialization-json = ">=1.0.0,<2.0.0" +microsoft-kiota-serialization-json = ">=1.3.0,<2.0.0" microsoft-kiota-serialization-multipart = ">=0.1.0" microsoft-kiota-serialization-text = ">=1.0.0,<2.0.0" -msgraph-core = ">=1.0.0" +msgraph_core = ">=1.0.0" [package.extras] dev = ["bumpver", "isort", "mypy", "pylint", "pytest", "yapf"] @@ -2897,44 +3354,44 @@ files = [ [[package]] name = "mypy" -version = "1.10.0" +version = "1.11.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2"}, - {file = "mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99"}, - {file = "mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2"}, - {file = "mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9"}, - {file = "mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051"}, - {file = "mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1"}, - {file = "mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee"}, - {file = "mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de"}, - {file = "mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7"}, - {file = "mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53"}, - {file = "mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b"}, - {file = "mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30"}, - {file = "mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e"}, - {file = "mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5"}, - {file = "mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda"}, - {file = "mypy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9fd50226364cd2737351c79807775136b0abe084433b55b2e29181a4c3c878c0"}, - {file = "mypy-1.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f90cff89eea89273727d8783fef5d4a934be2fdca11b47def50cf5d311aff727"}, - {file = "mypy-1.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fcfc70599efde5c67862a07a1aaf50e55bce629ace26bb19dc17cece5dd31ca4"}, - {file = "mypy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:075cbf81f3e134eadaf247de187bd604748171d6b79736fa9b6c9685b4083061"}, - {file = "mypy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:3f298531bca95ff615b6e9f2fc0333aae27fa48052903a0ac90215021cdcfa4f"}, - {file = "mypy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa7ef5244615a2523b56c034becde4e9e3f9b034854c93639adb667ec9ec2976"}, - {file = "mypy-1.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3236a4c8f535a0631f85f5fcdffba71c7feeef76a6002fcba7c1a8e57c8be1ec"}, - {file = "mypy-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2b5cdbb5dd35aa08ea9114436e0d79aceb2f38e32c21684dcf8e24e1e92821"}, - {file = "mypy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92f93b21c0fe73dc00abf91022234c79d793318b8a96faac147cd579c1671746"}, - {file = "mypy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:28d0e038361b45f099cc086d9dd99c15ff14d0188f44ac883010e172ce86c38a"}, - {file = "mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee"}, - {file = "mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131"}, + {file = "mypy-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a32fc80b63de4b5b3e65f4be82b4cfa362a46702672aa6a0f443b4689af7008c"}, + {file = "mypy-1.11.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c1952f5ea8a5a959b05ed5f16452fddadbaae48b5d39235ab4c3fc444d5fd411"}, + {file = "mypy-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1e30dc3bfa4e157e53c1d17a0dad20f89dc433393e7702b813c10e200843b03"}, + {file = "mypy-1.11.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2c63350af88f43a66d3dfeeeb8d77af34a4f07d760b9eb3a8697f0386c7590b4"}, + {file = "mypy-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:a831671bad47186603872a3abc19634f3011d7f83b083762c942442d51c58d58"}, + {file = "mypy-1.11.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7b6343d338390bb946d449677726edf60102a1c96079b4f002dedff375953fc5"}, + {file = "mypy-1.11.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4fe9f4e5e521b458d8feb52547f4bade7ef8c93238dfb5bbc790d9ff2d770ca"}, + {file = "mypy-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:886c9dbecc87b9516eff294541bf7f3655722bf22bb898ee06985cd7269898de"}, + {file = "mypy-1.11.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fca4a60e1dd9fd0193ae0067eaeeb962f2d79e0d9f0f66223a0682f26ffcc809"}, + {file = "mypy-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:0bd53faf56de9643336aeea1c925012837432b5faf1701ccca7fde70166ccf72"}, + {file = "mypy-1.11.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f39918a50f74dc5969807dcfaecafa804fa7f90c9d60506835036cc1bc891dc8"}, + {file = "mypy-1.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bc71d1fb27a428139dd78621953effe0d208aed9857cb08d002280b0422003a"}, + 
{file = "mypy-1.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b868d3bcff720dd7217c383474008ddabaf048fad8d78ed948bb4b624870a417"}, + {file = "mypy-1.11.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a707ec1527ffcdd1c784d0924bf5cb15cd7f22683b919668a04d2b9c34549d2e"}, + {file = "mypy-1.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:64f4a90e3ea07f590c5bcf9029035cf0efeae5ba8be511a8caada1a4893f5525"}, + {file = "mypy-1.11.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:749fd3213916f1751fff995fccf20c6195cae941dc968f3aaadf9bb4e430e5a2"}, + {file = "mypy-1.11.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b639dce63a0b19085213ec5fdd8cffd1d81988f47a2dec7100e93564f3e8fb3b"}, + {file = "mypy-1.11.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4c956b49c5d865394d62941b109728c5c596a415e9c5b2be663dd26a1ff07bc0"}, + {file = "mypy-1.11.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45df906e8b6804ef4b666af29a87ad9f5921aad091c79cc38e12198e220beabd"}, + {file = "mypy-1.11.1-cp38-cp38-win_amd64.whl", hash = "sha256:d44be7551689d9d47b7abc27c71257adfdb53f03880841a5db15ddb22dc63edb"}, + {file = "mypy-1.11.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2684d3f693073ab89d76da8e3921883019ea8a3ec20fa5d8ecca6a2db4c54bbe"}, + {file = "mypy-1.11.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:79c07eb282cb457473add5052b63925e5cc97dfab9812ee65a7c7ab5e3cb551c"}, + {file = "mypy-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11965c2f571ded6239977b14deebd3f4c3abd9a92398712d6da3a772974fad69"}, + {file = "mypy-1.11.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a2b43895a0f8154df6519706d9bca8280cda52d3d9d1514b2d9c3e26792a0b74"}, + {file = "mypy-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:1a81cf05975fd61aec5ae16501a091cfb9f605dc3e3c878c0da32f250b74760b"}, + {file = "mypy-1.11.1-py3-none-any.whl", hash = "sha256:0624bdb940255d2dd24e829d99a13cfeb72e4e9031f9492148f410ed30bcab54"}, + {file = "mypy-1.11.1.tar.gz", hash = "sha256:f404a0b069709f18bbdb702eb3dcfe51910602995de00bd39cea3050b5772d08"}, ] [package.dependencies] mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -3064,18 +3521,15 @@ test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] [[package]] name = "nodeenv" -version = "1.8.0" +version = "1.9.1" description = "Node.js virtual environment builder" optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ - {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, - {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, ] -[package.dependencies] -setuptools = "*" - [[package]] name = "numpy" version = "1.26.4" @@ -3232,25 +3686,24 @@ nvidia-nvjitlink-cu12 = "*" [[package]] name = "nvidia-nccl-cu12" -version = "2.20.5" +version = "2.19.3" description = "NVIDIA Collective 
Communication Library (NCCL) Runtime" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01"}, - {file = "nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56"}, + {file = "nvidia_nccl_cu12-2.19.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:a9734707a2c96443331c1e48c717024aa6678a0e2a4cb66b2c364d18cee6b48d"}, ] [[package]] name = "nvidia-nvjitlink-cu12" -version = "12.5.40" +version = "12.6.20" description = "Nvidia JIT LTO Library" optional = false python-versions = ">=3" files = [ - {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-manylinux2014_aarch64.whl", hash = "sha256:004186d5ea6a57758fd6d57052a123c73a4815adf365eb8dd6a85c9eaa7535ff"}, - {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d9714f27c1d0f0895cd8915c07a87a1d0029a0aa36acaf9156952ec2a8a12189"}, - {file = "nvidia_nvjitlink_cu12-12.5.40-py3-none-win_amd64.whl", hash = "sha256:c3401dc8543b52d3a8158007a0c1ab4e9c768fcbd24153a48c86972102197ddd"}, + {file = "nvidia_nvjitlink_cu12-12.6.20-py3-none-manylinux2014_aarch64.whl", hash = "sha256:84fb38465a5bc7c70cbc320cfd0963eb302ee25a5e939e9f512bbba55b6072fb"}, + {file = "nvidia_nvjitlink_cu12-12.6.20-py3-none-manylinux2014_x86_64.whl", hash = "sha256:562ab97ea2c23164823b2a89cb328d01d45cb99634b8c65fe7cd60d14562bd79"}, + {file = "nvidia_nvjitlink_cu12-12.6.20-py3-none-win_amd64.whl", hash = "sha256:ed3c43a17f37b0c922a919203d2d36cbef24d41cc3e6b625182f8b58203644f6"}, ] [[package]] @@ -3296,65 +3749,66 @@ httpx = ">=0.27.0,<0.28.0" [[package]] name = "onnxruntime" -version = "1.18.0" +version = "1.18.1" description = "ONNX Runtime is a runtime accelerator for Machine Learning models" optional = false python-versions = "*" files = [ - {file = "onnxruntime-1.18.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:5a3b7993a5ecf4a90f35542a4757e29b2d653da3efe06cdd3164b91167bbe10d"}, - {file = "onnxruntime-1.18.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:15b944623b2cdfe7f7945690bfb71c10a4531b51997c8320b84e7b0bb59af902"}, - {file = "onnxruntime-1.18.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e61ce5005118064b1a0ed73ebe936bc773a102f067db34108ea6c64dd62a179"}, - {file = "onnxruntime-1.18.0-cp310-cp310-win32.whl", hash = "sha256:a4fc8a2a526eb442317d280610936a9f73deece06c7d5a91e51570860802b93f"}, - {file = "onnxruntime-1.18.0-cp310-cp310-win_amd64.whl", hash = "sha256:71ed219b768cab004e5cd83e702590734f968679bf93aa488c1a7ffbe6e220c3"}, - {file = "onnxruntime-1.18.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:3d24bd623872a72a7fe2f51c103e20fcca2acfa35d48f2accd6be1ec8633d960"}, - {file = "onnxruntime-1.18.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f15e41ca9b307a12550bfd2ec93f88905d9fba12bab7e578f05138ad0ae10d7b"}, - {file = "onnxruntime-1.18.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f45ca2887f62a7b847d526965686b2923efa72538c89b7703c7b3fe970afd59"}, - {file = "onnxruntime-1.18.0-cp311-cp311-win32.whl", hash = "sha256:9e24d9ecc8781323d9e2eeda019b4b24babc4d624e7d53f61b1fe1a929b0511a"}, - {file = "onnxruntime-1.18.0-cp311-cp311-win_amd64.whl", hash = "sha256:f8608398976ed18aef450d83777ff6f77d0b64eced1ed07a985e1a7db8ea3771"}, - {file = 
"onnxruntime-1.18.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:f1d79941f15fc40b1ee67738b2ca26b23e0181bf0070b5fb2984f0988734698f"}, - {file = "onnxruntime-1.18.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99e8caf3a8565c853a22d323a3eebc2a81e3de7591981f085a4f74f7a60aab2d"}, - {file = "onnxruntime-1.18.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:498d2b8380635f5e6ebc50ec1b45f181588927280f32390fb910301d234f97b8"}, - {file = "onnxruntime-1.18.0-cp312-cp312-win32.whl", hash = "sha256:ba7cc0ce2798a386c082aaa6289ff7e9bedc3dee622eef10e74830cff200a72e"}, - {file = "onnxruntime-1.18.0-cp312-cp312-win_amd64.whl", hash = "sha256:1fa175bd43f610465d5787ae06050c81f7ce09da2bf3e914eb282cb8eab363ef"}, - {file = "onnxruntime-1.18.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:0284c579c20ec8b1b472dd190290a040cc68b6caec790edb960f065d15cf164a"}, - {file = "onnxruntime-1.18.0-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d47353d036d8c380558a5643ea5f7964d9d259d31c86865bad9162c3e916d1f6"}, - {file = "onnxruntime-1.18.0-cp38-cp38-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:885509d2b9ba4b01f08f7fa28d31ee54b6477953451c7ccf124a84625f07c803"}, - {file = "onnxruntime-1.18.0-cp38-cp38-win32.whl", hash = "sha256:8614733de3695656411d71fc2f39333170df5da6c7efd6072a59962c0bc7055c"}, - {file = "onnxruntime-1.18.0-cp38-cp38-win_amd64.whl", hash = "sha256:47af3f803752fce23ea790fd8d130a47b2b940629f03193f780818622e856e7a"}, - {file = "onnxruntime-1.18.0-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:9153eb2b4d5bbab764d0aea17adadffcfc18d89b957ad191b1c3650b9930c59f"}, - {file = "onnxruntime-1.18.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2c7fd86eca727c989bb8d9c5104f3c45f7ee45f445cc75579ebe55d6b99dfd7c"}, - {file = "onnxruntime-1.18.0-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac67a4de9c1326c4d87bcbfb652c923039b8a2446bb28516219236bec3b494f5"}, - {file = "onnxruntime-1.18.0-cp39-cp39-win32.whl", hash = "sha256:6ffb445816d06497df7a6dd424b20e0b2c39639e01e7fe210e247b82d15a23b9"}, - {file = "onnxruntime-1.18.0-cp39-cp39-win_amd64.whl", hash = "sha256:46de6031cb6745f33f7eca9e51ab73e8c66037fb7a3b6b4560887c5b55ab5d5d"}, + {file = "onnxruntime-1.18.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:29ef7683312393d4ba04252f1b287d964bd67d5e6048b94d2da3643986c74d80"}, + {file = "onnxruntime-1.18.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fc706eb1df06ddf55776e15a30519fb15dda7697f987a2bbda4962845e3cec05"}, + {file = "onnxruntime-1.18.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7de69f5ced2a263531923fa68bbec52a56e793b802fcd81a03487b5e292bc3a"}, + {file = "onnxruntime-1.18.1-cp310-cp310-win32.whl", hash = "sha256:221e5b16173926e6c7de2cd437764492aa12b6811f45abd37024e7cf2ae5d7e3"}, + {file = "onnxruntime-1.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:75211b619275199c861ee94d317243b8a0fcde6032e5a80e1aa9ded8ab4c6060"}, + {file = "onnxruntime-1.18.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:f26582882f2dc581b809cfa41a125ba71ad9e715738ec6402418df356969774a"}, + {file = "onnxruntime-1.18.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef36f3a8b768506d02be349ac303fd95d92813ba3ba70304d40c3cd5c25d6a4c"}, + {file = "onnxruntime-1.18.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:170e711393e0618efa8ed27b59b9de0ee2383bd2a1f93622a97006a5ad48e434"}, + {file = "onnxruntime-1.18.1-cp311-cp311-win32.whl", hash = "sha256:9b6a33419b6949ea34e0dc009bc4470e550155b6da644571ecace4b198b0d88f"}, + {file = "onnxruntime-1.18.1-cp311-cp311-win_amd64.whl", hash = "sha256:5c1380a9f1b7788da742c759b6a02ba771fe1ce620519b2b07309decbd1a2fe1"}, + {file = "onnxruntime-1.18.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:31bd57a55e3f983b598675dfc7e5d6f0877b70ec9864b3cc3c3e1923d0a01919"}, + {file = "onnxruntime-1.18.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b9e03c4ba9f734500691a4d7d5b381cd71ee2f3ce80a1154ac8f7aed99d1ecaa"}, + {file = "onnxruntime-1.18.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:781aa9873640f5df24524f96f6070b8c550c66cb6af35710fd9f92a20b4bfbf6"}, + {file = "onnxruntime-1.18.1-cp312-cp312-win32.whl", hash = "sha256:3a2d9ab6254ca62adbb448222e630dc6883210f718065063518c8f93a32432be"}, + {file = "onnxruntime-1.18.1-cp312-cp312-win_amd64.whl", hash = "sha256:ad93c560b1c38c27c0275ffd15cd7f45b3ad3fc96653c09ce2931179982ff204"}, + {file = "onnxruntime-1.18.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:3b55dc9d3c67626388958a3eb7ad87eb7c70f75cb0f7ff4908d27b8b42f2475c"}, + {file = "onnxruntime-1.18.1-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f80dbcfb6763cc0177a31168b29b4bd7662545b99a19e211de8c734b657e0669"}, + {file = "onnxruntime-1.18.1-cp38-cp38-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f1ff2c61a16d6c8631796c54139bafea41ee7736077a0fc64ee8ae59432f5c58"}, + {file = "onnxruntime-1.18.1-cp38-cp38-win32.whl", hash = "sha256:219855bd272fe0c667b850bf1a1a5a02499269a70d59c48e6f27f9c8bcb25d02"}, + {file = "onnxruntime-1.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:afdf16aa607eb9a2c60d5ca2d5abf9f448e90c345b6b94c3ed14f4fb7e6a2d07"}, + {file = "onnxruntime-1.18.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:128df253ade673e60cea0955ec9d0e89617443a6d9ce47c2d79eb3f72a3be3de"}, + {file = "onnxruntime-1.18.1-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9839491e77e5c5a175cab3621e184d5a88925ee297ff4c311b68897197f4cde9"}, + {file = "onnxruntime-1.18.1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ad3187c1faff3ac15f7f0e7373ef4788c582cafa655a80fdbb33eaec88976c66"}, + {file = "onnxruntime-1.18.1-cp39-cp39-win32.whl", hash = "sha256:34657c78aa4e0b5145f9188b550ded3af626651b15017bf43d280d7e23dbf195"}, + {file = "onnxruntime-1.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:9c14fd97c3ddfa97da5feef595e2c73f14c2d0ec1d4ecbea99c8d96603c89589"}, ] [package.dependencies] coloredlogs = "*" flatbuffers = "*" -numpy = ">=1.21.6" +numpy = ">=1.21.6,<2.0" packaging = "*" protobuf = "*" sympy = "*" [[package]] name = "openai" -version = "1.31.0" +version = "1.41.1" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.31.0-py3-none-any.whl", hash = "sha256:82044ee3122113f2a468a1f308a8882324d09556ba5348687c535d3655ee331c"}, - {file = "openai-1.31.0.tar.gz", hash = "sha256:54ae0625b005d6a3b895db2b8438dae1059cffff0cd262a26e9015c13a29ab06"}, + {file = "openai-1.41.1-py3-none-any.whl", hash = "sha256:56fb04105263f79559aff3ceea2e1dd16f8c5385e8238cb66cf0e6888fa8bfcf"}, + {file = "openai-1.41.1.tar.gz", hash = "sha256:e38e376efd91e0d4db071e2a6517b6b4cac1c2a6fd63efdc5ec6be10c5967c1b"}, ] [package.dependencies] anyio = ">=3.5.0,<5" 
distro = ">=1.7.0,<2" httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" pydantic = ">=1.9.0,<3" sniffio = "*" tqdm = ">4" -typing-extensions = ">=4.7,<5" +typing-extensions = ">=4.11,<5" [package.extras] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] @@ -3424,42 +3878,42 @@ openapi-schema-validator = ">=0.6.0,<0.7.0" [[package]] name = "opentelemetry-api" -version = "1.24.0" +version = "1.26.0" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_api-1.24.0-py3-none-any.whl", hash = "sha256:0f2c363d98d10d1ce93330015ca7fd3a65f60be64e05e30f557c61de52c80ca2"}, - {file = "opentelemetry_api-1.24.0.tar.gz", hash = "sha256:42719f10ce7b5a9a73b10a4baf620574fb8ad495a9cbe5c18d76b75d8689c67e"}, + {file = "opentelemetry_api-1.26.0-py3-none-any.whl", hash = "sha256:7d7ea33adf2ceda2dd680b18b1677e4152000b37ca76e679da71ff103b943064"}, + {file = "opentelemetry_api-1.26.0.tar.gz", hash = "sha256:2bd639e4bed5b18486fef0b5a520aaffde5a18fc225e808a1ac4df363f43a1ce"}, ] [package.dependencies] deprecated = ">=1.2.6" -importlib-metadata = ">=6.0,<=7.0" +importlib-metadata = ">=6.0,<=8.0.0" [[package]] name = "opentelemetry-exporter-otlp-proto-common" -version = "1.24.0" +version = "1.26.0" description = "OpenTelemetry Protobuf encoding" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.24.0-py3-none-any.whl", hash = "sha256:e51f2c9735054d598ad2df5d3eca830fecfb5b0bda0a2fa742c9c7718e12f641"}, - {file = "opentelemetry_exporter_otlp_proto_common-1.24.0.tar.gz", hash = "sha256:5d31fa1ff976cacc38be1ec4e3279a3f88435c75b38b1f7a099a1faffc302461"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.26.0-py3-none-any.whl", hash = "sha256:ee4d8f8891a1b9c372abf8d109409e5b81947cf66423fd998e56880057afbc71"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.26.0.tar.gz", hash = "sha256:bdbe50e2e22a1c71acaa0c8ba6efaadd58882e5a5978737a44a4c4b10d304c92"}, ] [package.dependencies] -opentelemetry-proto = "1.24.0" +opentelemetry-proto = "1.26.0" [[package]] name = "opentelemetry-exporter-otlp-proto-grpc" -version = "1.24.0" +version = "1.26.0" description = "OpenTelemetry Collector Protobuf over gRPC Exporter" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_exporter_otlp_proto_grpc-1.24.0-py3-none-any.whl", hash = "sha256:f40d62aa30a0a43cc1657428e59fcf82ad5f7ea8fff75de0f9d9cb6f739e0a3b"}, - {file = "opentelemetry_exporter_otlp_proto_grpc-1.24.0.tar.gz", hash = "sha256:217c6e30634f2c9797999ea9da29f7300479a94a610139b9df17433f915e7baa"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.26.0-py3-none-any.whl", hash = "sha256:e2be5eff72ebcb010675b818e8d7c2e7d61ec451755b8de67a140bc49b9b0280"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.26.0.tar.gz", hash = "sha256:a65b67a9a6b06ba1ec406114568e21afe88c1cdb29c464f2507d529eb906d8ae"}, ] [package.dependencies] @@ -3467,22 +3921,19 @@ deprecated = ">=1.2.6" googleapis-common-protos = ">=1.52,<2.0" grpcio = ">=1.0.0,<2.0.0" opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.24.0" -opentelemetry-proto = "1.24.0" -opentelemetry-sdk = ">=1.24.0,<1.25.0" - -[package.extras] -test = ["pytest-grpc"] +opentelemetry-exporter-otlp-proto-common = "1.26.0" +opentelemetry-proto = "1.26.0" +opentelemetry-sdk = ">=1.26.0,<1.27.0" [[package]] name = "opentelemetry-instrumentation" -version = "0.45b0" +version = "0.47b0" description = "Instrumentation Tools & Auto Instrumentation for 
OpenTelemetry Python" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_instrumentation-0.45b0-py3-none-any.whl", hash = "sha256:06c02e2c952c1b076e8eaedf1b82f715e2937ba7eeacab55913dd434fbcec258"}, - {file = "opentelemetry_instrumentation-0.45b0.tar.gz", hash = "sha256:6c47120a7970bbeb458e6a73686ee9ba84b106329a79e4a4a66761f933709c7e"}, + {file = "opentelemetry_instrumentation-0.47b0-py3-none-any.whl", hash = "sha256:88974ee52b1db08fc298334b51c19d47e53099c33740e48c4f084bd1afd052d5"}, + {file = "opentelemetry_instrumentation-0.47b0.tar.gz", hash = "sha256:96f9885e450c35e3f16a4f33145f2ebf620aea910c9fd74a392bbc0f807a350f"}, ] [package.dependencies] @@ -3492,55 +3943,55 @@ wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-instrumentation-asgi" -version = "0.45b0" +version = "0.47b0" description = "ASGI instrumentation for OpenTelemetry" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_instrumentation_asgi-0.45b0-py3-none-any.whl", hash = "sha256:8be1157ed62f0db24e45fdf7933c530c4338bd025c5d4af7830e903c0756021b"}, - {file = "opentelemetry_instrumentation_asgi-0.45b0.tar.gz", hash = "sha256:97f55620f163fd3d20323e9fd8dc3aacc826c03397213ff36b877e0f4b6b08a6"}, + {file = "opentelemetry_instrumentation_asgi-0.47b0-py3-none-any.whl", hash = "sha256:b798dc4957b3edc9dfecb47a4c05809036a4b762234c5071212fda39ead80ade"}, + {file = "opentelemetry_instrumentation_asgi-0.47b0.tar.gz", hash = "sha256:e78b7822c1bca0511e5e9610ec484b8994a81670375e570c76f06f69af7c506a"}, ] [package.dependencies] asgiref = ">=3.0,<4.0" opentelemetry-api = ">=1.12,<2.0" -opentelemetry-instrumentation = "0.45b0" -opentelemetry-semantic-conventions = "0.45b0" -opentelemetry-util-http = "0.45b0" +opentelemetry-instrumentation = "0.47b0" +opentelemetry-semantic-conventions = "0.47b0" +opentelemetry-util-http = "0.47b0" [package.extras] instruments = ["asgiref (>=3.0,<4.0)"] [[package]] name = "opentelemetry-instrumentation-fastapi" -version = "0.45b0" +version = "0.47b0" description = "OpenTelemetry FastAPI Instrumentation" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_instrumentation_fastapi-0.45b0-py3-none-any.whl", hash = "sha256:77d9c123a363129148f5f66d44094f3d67aaaa2b201396d94782b4a7f9ce4314"}, - {file = "opentelemetry_instrumentation_fastapi-0.45b0.tar.gz", hash = "sha256:5a6b91e1c08a01601845fcfcfdefd0a2aecdb3c356d4a436a3210cb58c21487e"}, + {file = "opentelemetry_instrumentation_fastapi-0.47b0-py3-none-any.whl", hash = "sha256:5ac28dd401160b02e4f544a85a9e4f61a8cbe5b077ea0379d411615376a2bd21"}, + {file = "opentelemetry_instrumentation_fastapi-0.47b0.tar.gz", hash = "sha256:0c7c10b5d971e99a420678ffd16c5b1ea4f0db3b31b62faf305fbb03b4ebee36"}, ] [package.dependencies] opentelemetry-api = ">=1.12,<2.0" -opentelemetry-instrumentation = "0.45b0" -opentelemetry-instrumentation-asgi = "0.45b0" -opentelemetry-semantic-conventions = "0.45b0" -opentelemetry-util-http = "0.45b0" +opentelemetry-instrumentation = "0.47b0" +opentelemetry-instrumentation-asgi = "0.47b0" +opentelemetry-semantic-conventions = "0.47b0" +opentelemetry-util-http = "0.47b0" [package.extras] -instruments = ["fastapi (>=0.58,<1.0)"] +instruments = ["fastapi (>=0.58,<1.0)", "fastapi-slim (>=0.111.0,<0.112.0)"] [[package]] name = "opentelemetry-proto" -version = "1.24.0" +version = "1.26.0" description = "OpenTelemetry Python Proto" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_proto-1.24.0-py3-none-any.whl", hash = 
"sha256:bcb80e1e78a003040db71ccf83f2ad2019273d1e0828089d183b18a1476527ce"}, - {file = "opentelemetry_proto-1.24.0.tar.gz", hash = "sha256:ff551b8ad63c6cabb1845ce217a6709358dfaba0f75ea1fa21a61ceddc78cab8"}, + {file = "opentelemetry_proto-1.26.0-py3-none-any.whl", hash = "sha256:6c4d7b4d4d9c88543bcf8c28ae3f8f0448a753dc291c18c5390444c90b76a725"}, + {file = "opentelemetry_proto-1.26.0.tar.gz", hash = "sha256:c5c18796c0cab3751fc3b98dee53855835e90c0422924b484432ac852d93dc1e"}, ] [package.dependencies] @@ -3548,95 +3999,110 @@ protobuf = ">=3.19,<5.0" [[package]] name = "opentelemetry-sdk" -version = "1.24.0" +version = "1.26.0" description = "OpenTelemetry Python SDK" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_sdk-1.24.0-py3-none-any.whl", hash = "sha256:fa731e24efe832e98bcd90902085b359dcfef7d9c9c00eb5b9a18587dae3eb59"}, - {file = "opentelemetry_sdk-1.24.0.tar.gz", hash = "sha256:75bc0563affffa827700e0f4f4a68e1e257db0df13372344aebc6f8a64cde2e5"}, + {file = "opentelemetry_sdk-1.26.0-py3-none-any.whl", hash = "sha256:feb5056a84a88670c041ea0ded9921fca559efec03905dddeb3885525e0af897"}, + {file = "opentelemetry_sdk-1.26.0.tar.gz", hash = "sha256:c90d2868f8805619535c05562d699e2f4fb1f00dbd55a86dcefca4da6fa02f85"}, ] [package.dependencies] -opentelemetry-api = "1.24.0" -opentelemetry-semantic-conventions = "0.45b0" +opentelemetry-api = "1.26.0" +opentelemetry-semantic-conventions = "0.47b0" typing-extensions = ">=3.7.4" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.45b0" +version = "0.47b0" description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_semantic_conventions-0.45b0-py3-none-any.whl", hash = "sha256:a4a6fb9a7bacd9167c082aa4681009e9acdbfa28ffb2387af50c2fef3d30c864"}, - {file = "opentelemetry_semantic_conventions-0.45b0.tar.gz", hash = "sha256:7c84215a44ac846bc4b8e32d5e78935c5c43482e491812a0bb8aaf87e4d92118"}, + {file = "opentelemetry_semantic_conventions-0.47b0-py3-none-any.whl", hash = "sha256:4ff9d595b85a59c1c1413f02bba320ce7ea6bf9e2ead2b0913c4395c7bbc1063"}, + {file = "opentelemetry_semantic_conventions-0.47b0.tar.gz", hash = "sha256:a8d57999bbe3495ffd4d510de26a97dadc1dace53e0275001b2c1b2f67992a7e"}, ] +[package.dependencies] +deprecated = ">=1.2.6" +opentelemetry-api = "1.26.0" + [[package]] name = "opentelemetry-util-http" -version = "0.45b0" +version = "0.47b0" description = "Web util for OpenTelemetry" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_util_http-0.45b0-py3-none-any.whl", hash = "sha256:6628868b501b3004e1860f976f410eeb3d3499e009719d818000f24ce17b6e33"}, - {file = "opentelemetry_util_http-0.45b0.tar.gz", hash = "sha256:4ce08b6a7d52dd7c96b7705b5b4f06fdb6aa3eac1233b3b0bfef8a0cab9a92cd"}, + {file = "opentelemetry_util_http-0.47b0-py3-none-any.whl", hash = "sha256:3d3215e09c4a723b12da6d0233a31395aeb2bb33a64d7b15a1500690ba250f19"}, + {file = "opentelemetry_util_http-0.47b0.tar.gz", hash = "sha256:352a07664c18eef827eb8ddcbd64c64a7284a39dd1655e2f16f577eb046ccb32"}, ] [[package]] name = "orjson" -version = "3.10.3" +version = "3.10.7" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.3-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9fb6c3f9f5490a3eb4ddd46fc1b6eadb0d6fc16fb3f07320149c3286a1409dd8"}, - {file = 
"orjson-3.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:252124b198662eee80428f1af8c63f7ff077c88723fe206a25df8dc57a57b1fa"}, - {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9f3e87733823089a338ef9bbf363ef4de45e5c599a9bf50a7a9b82e86d0228da"}, - {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8334c0d87103bb9fbbe59b78129f1f40d1d1e8355bbed2ca71853af15fa4ed3"}, - {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1952c03439e4dce23482ac846e7961f9d4ec62086eb98ae76d97bd41d72644d7"}, - {file = "orjson-3.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c0403ed9c706dcd2809f1600ed18f4aae50be263bd7112e54b50e2c2bc3ebd6d"}, - {file = "orjson-3.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:382e52aa4270a037d41f325e7d1dfa395b7de0c367800b6f337d8157367bf3a7"}, - {file = "orjson-3.10.3-cp310-none-win32.whl", hash = "sha256:be2aab54313752c04f2cbaab4515291ef5af8c2256ce22abc007f89f42f49109"}, - {file = "orjson-3.10.3-cp310-none-win_amd64.whl", hash = "sha256:416b195f78ae461601893f482287cee1e3059ec49b4f99479aedf22a20b1098b"}, - {file = "orjson-3.10.3-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:73100d9abbbe730331f2242c1fc0bcb46a3ea3b4ae3348847e5a141265479700"}, - {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:544a12eee96e3ab828dbfcb4d5a0023aa971b27143a1d35dc214c176fdfb29b3"}, - {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:520de5e2ef0b4ae546bea25129d6c7c74edb43fc6cf5213f511a927f2b28148b"}, - {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccaa0a401fc02e8828a5bedfd80f8cd389d24f65e5ca3954d72c6582495b4bcf"}, - {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7bc9e8bc11bac40f905640acd41cbeaa87209e7e1f57ade386da658092dc16"}, - {file = "orjson-3.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3582b34b70543a1ed6944aca75e219e1192661a63da4d039d088a09c67543b08"}, - {file = "orjson-3.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c23dfa91481de880890d17aa7b91d586a4746a4c2aa9a145bebdbaf233768d5"}, - {file = "orjson-3.10.3-cp311-none-win32.whl", hash = "sha256:1770e2a0eae728b050705206d84eda8b074b65ee835e7f85c919f5705b006c9b"}, - {file = "orjson-3.10.3-cp311-none-win_amd64.whl", hash = "sha256:93433b3c1f852660eb5abdc1f4dd0ced2be031ba30900433223b28ee0140cde5"}, - {file = "orjson-3.10.3-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a39aa73e53bec8d410875683bfa3a8edf61e5a1c7bb4014f65f81d36467ea098"}, - {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0943a96b3fa09bee1afdfccc2cb236c9c64715afa375b2af296c73d91c23eab2"}, - {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e852baafceff8da3c9defae29414cc8513a1586ad93e45f27b89a639c68e8176"}, - {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18566beb5acd76f3769c1d1a7ec06cdb81edc4d55d2765fb677e3eaa10fa99e0"}, - {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bd2218d5a3aa43060efe649ec564ebedec8ce6ae0a43654b81376216d5ebd42"}, - {file = 
"orjson-3.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cf20465e74c6e17a104ecf01bf8cd3b7b252565b4ccee4548f18b012ff2f8069"}, - {file = "orjson-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ba7f67aa7f983c4345eeda16054a4677289011a478ca947cd69c0a86ea45e534"}, - {file = "orjson-3.10.3-cp312-none-win32.whl", hash = "sha256:17e0713fc159abc261eea0f4feda611d32eabc35708b74bef6ad44f6c78d5ea0"}, - {file = "orjson-3.10.3-cp312-none-win_amd64.whl", hash = "sha256:4c895383b1ec42b017dd2c75ae8a5b862fc489006afde06f14afbdd0309b2af0"}, - {file = "orjson-3.10.3-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:be2719e5041e9fb76c8c2c06b9600fe8e8584e6980061ff88dcbc2691a16d20d"}, - {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0175a5798bdc878956099f5c54b9837cb62cfbf5d0b86ba6d77e43861bcec2"}, - {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:978be58a68ade24f1af7758626806e13cff7748a677faf95fbb298359aa1e20d"}, - {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16bda83b5c61586f6f788333d3cf3ed19015e3b9019188c56983b5a299210eb5"}, - {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ad1f26bea425041e0a1adad34630c4825a9e3adec49079b1fb6ac8d36f8b754"}, - {file = "orjson-3.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9e253498bee561fe85d6325ba55ff2ff08fb5e7184cd6a4d7754133bd19c9195"}, - {file = "orjson-3.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0a62f9968bab8a676a164263e485f30a0b748255ee2f4ae49a0224be95f4532b"}, - {file = "orjson-3.10.3-cp38-none-win32.whl", hash = "sha256:8d0b84403d287d4bfa9bf7d1dc298d5c1c5d9f444f3737929a66f2fe4fb8f134"}, - {file = "orjson-3.10.3-cp38-none-win_amd64.whl", hash = "sha256:8bc7a4df90da5d535e18157220d7915780d07198b54f4de0110eca6b6c11e290"}, - {file = "orjson-3.10.3-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9059d15c30e675a58fdcd6f95465c1522b8426e092de9fff20edebfdc15e1cb0"}, - {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d40c7f7938c9c2b934b297412c067936d0b54e4b8ab916fd1a9eb8f54c02294"}, - {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4a654ec1de8fdaae1d80d55cee65893cb06494e124681ab335218be6a0691e7"}, - {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:831c6ef73f9aa53c5f40ae8f949ff7681b38eaddb6904aab89dca4d85099cb78"}, - {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99b880d7e34542db89f48d14ddecbd26f06838b12427d5a25d71baceb5ba119d"}, - {file = "orjson-3.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e5e176c994ce4bd434d7aafb9ecc893c15f347d3d2bbd8e7ce0b63071c52e25"}, - {file = "orjson-3.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b69a58a37dab856491bf2d3bbf259775fdce262b727f96aafbda359cb1d114d8"}, - {file = "orjson-3.10.3-cp39-none-win32.whl", hash = "sha256:b8d4d1a6868cde356f1402c8faeb50d62cee765a1f7ffcfd6de732ab0581e063"}, - {file = "orjson-3.10.3-cp39-none-win_amd64.whl", hash = "sha256:5102f50c5fc46d94f2033fe00d392588564378260d64377aec702f21a7a22912"}, - {file = "orjson-3.10.3.tar.gz", hash = "sha256:2b166507acae7ba2f7c315dcf185a9111ad5e992ac81f2d507aac39193c2c818"}, + {file = 
"orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bf6ba8ebc8ef5792e2337fb0419f8009729335bb400ece005606336b7fd7bab7"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7cf6222b29fbda9e3a472b41e6a5538b48f2c8f99261eecd60aafbdb60690c"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de817e2f5fc75a9e7dd350c4b0f54617b280e26d1631811a43e7e968fa71e3e9"}, + {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:348bdd16b32556cf8d7257b17cf2bdb7ab7976af4af41ebe79f9796c218f7e91"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:479fd0844ddc3ca77e0fd99644c7fe2de8e8be1efcd57705b5c92e5186e8a250"}, + {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fdf5197a21dd660cf19dfd2a3ce79574588f8f5e2dbf21bda9ee2d2b46924d84"}, + {file = "orjson-3.10.7-cp310-none-win32.whl", hash = "sha256:d374d36726746c81a49f3ff8daa2898dccab6596864ebe43d50733275c629175"}, + {file = "orjson-3.10.7-cp310-none-win_amd64.whl", hash = "sha256:cb61938aec8b0ffb6eef484d480188a1777e67b05d58e41b435c74b9d84e0b9c"}, + {file = "orjson-3.10.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7db8539039698ddfb9a524b4dd19508256107568cdad24f3682d5773e60504a2"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:480f455222cb7a1dea35c57a67578848537d2602b46c464472c995297117fa09"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a9c9b168b3a19e37fe2778c0003359f07822c90fdff8f98d9d2a91b3144d8e0"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8de062de550f63185e4c1c54151bdddfc5625e37daf0aa1e75d2a1293e3b7d9a"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6b0dd04483499d1de9c8f6203f8975caf17a6000b9c0c54630cef02e44ee624e"}, + {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b58d3795dafa334fc8fd46f7c5dc013e6ad06fd5b9a4cc98cb1456e7d3558bd6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33cfb96c24034a878d83d1a9415799a73dc77480e6c40417e5dda0710d559ee6"}, + {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e724cebe1fadc2b23c6f7415bad5ee6239e00a69f30ee423f319c6af70e2a5c0"}, + {file = "orjson-3.10.7-cp311-none-win32.whl", hash = "sha256:82763b46053727a7168d29c772ed5c870fdae2f61aa8a25994c7984a19b1021f"}, + {file = "orjson-3.10.7-cp311-none-win_amd64.whl", hash = "sha256:eb8d384a24778abf29afb8e41d68fdd9a156cf6e5390c04cc07bbc24b89e98b5"}, + {file = "orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3"}, + {file = 
"orjson-3.10.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bdbb61dcc365dd9be94e8f7df91975edc9364d6a78c8f7adb69c1cdff318ec93"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b48b3db6bb6e0a08fa8c83b47bc169623f801e5cc4f24442ab2b6617da3b5313"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23820a1563a1d386414fef15c249040042b8e5d07b40ab3fe3efbfbbcbcb8864"}, + {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d352ee8ac1926d6193f602cbe36b1643bbd1bbcb25e3c1a657a4390f3000c9a5"}, + {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d2d9f990623f15c0ae7ac608103c33dfe1486d2ed974ac3f40b693bad1a22a7b"}, + {file = "orjson-3.10.7-cp312-none-win32.whl", hash = "sha256:7c4c17f8157bd520cdb7195f75ddbd31671997cbe10aee559c2d613592e7d7eb"}, + {file = "orjson-3.10.7-cp312-none-win_amd64.whl", hash = "sha256:1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1"}, + {file = "orjson-3.10.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:77d325ed866876c0fa6492598ec01fe30e803272a6e8b10e992288b009cbe149"}, + {file = "orjson-3.10.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ea2c232deedcb605e853ae1db2cc94f7390ac776743b699b50b071b02bea6fe"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3dcfbede6737fdbef3ce9c37af3fb6142e8e1ebc10336daa05872bfb1d87839c"}, + {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11748c135f281203f4ee695b7f80bb1358a82a63905f9f0b794769483ea854ad"}, + {file = "orjson-3.10.7-cp313-none-win32.whl", hash = "sha256:a7e19150d215c7a13f39eb787d84db274298d3f83d85463e61d277bbd7f401d2"}, + {file = "orjson-3.10.7-cp313-none-win_amd64.whl", hash = "sha256:eef44224729e9525d5261cc8d28d6b11cafc90e6bd0be2157bde69a52ec83024"}, + {file = "orjson-3.10.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6ea2b2258eff652c82652d5e0f02bd5e0463a6a52abb78e49ac288827aaa1469"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:430ee4d85841e1483d487e7b81401785a5dfd69db5de01314538f31f8fbf7ee1"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b6146e439af4c2472c56f8540d799a67a81226e11992008cb47e1267a9b3225"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:084e537806b458911137f76097e53ce7bf5806dda33ddf6aaa66a028f8d43a23"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829cf2195838e3f93b70fd3b4292156fc5e097aac3739859ac0dcc722b27ac0"}, + {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1193b2416cbad1a769f868b1749535d5da47626ac29445803dae7cc64b3f5c98"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4e6c3da13e5a57e4b3dca2de059f243ebec705857522f188f0180ae88badd354"}, + {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c31008598424dfbe52ce8c5b47e0752dca918a4fdc4a2a32004efd9fab41d866"}, + {file = "orjson-3.10.7-cp38-none-win32.whl", hash = 
"sha256:7122a99831f9e7fe977dc45784d3b2edc821c172d545e6420c375e5a935f5a1c"}, + {file = "orjson-3.10.7-cp38-none-win_amd64.whl", hash = "sha256:a763bc0e58504cc803739e7df040685816145a6f3c8a589787084b54ebc9f16e"}, + {file = "orjson-3.10.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e76be12658a6fa376fcd331b1ea4e58f5a06fd0220653450f0d415b8fd0fbe20"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed350d6978d28b92939bfeb1a0570c523f6170efc3f0a0ef1f1df287cd4f4960"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144888c76f8520e39bfa121b31fd637e18d4cc2f115727865fdf9fa325b10412"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09b2d92fd95ad2402188cf51573acde57eb269eddabaa60f69ea0d733e789fe9"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b24a579123fa884f3a3caadaed7b75eb5715ee2b17ab5c66ac97d29b18fe57f"}, + {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591bcfe7512353bd609875ab38050efe3d55e18934e2f18950c108334b4ff"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f4db56635b58cd1a200b0a23744ff44206ee6aa428185e2b6c4a65b3197abdcd"}, + {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0fa5886854673222618638c6df7718ea7fe2f3f2384c452c9ccedc70b4a510a5"}, + {file = "orjson-3.10.7-cp39-none-win32.whl", hash = "sha256:8272527d08450ab16eb405f47e0f4ef0e5ff5981c3d82afe0efd25dcbef2bcd2"}, + {file = "orjson-3.10.7-cp39-none-win_amd64.whl", hash = "sha256:974683d4618c0c7dbf4f69c95a979734bf183d0658611760017f6e70a145af58"}, + {file = "orjson-3.10.7.tar.gz", hash = "sha256:75ef0640403f945f3a1f9f6400686560dbfb0fb5b16589ad62cd477043c4eee3"}, ] [[package]] @@ -3652,13 +4118,13 @@ files = [ [[package]] name = "packaging" -version = "24.0" +version = "24.1" description = "Core utilities for Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, - {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] [[package]] @@ -3747,13 +4213,13 @@ files = [ [[package]] name = "parse" -version = "1.20.1" +version = "1.20.2" description = "parse() is the opposite of format()" optional = false python-versions = "*" files = [ - {file = "parse-1.20.1-py2.py3-none-any.whl", hash = "sha256:76ddd5214255ae711db4c512be636151fbabaa948c6f30115aecc440422ca82c"}, - {file = "parse-1.20.1.tar.gz", hash = "sha256:09002ca350ad42e76629995f71f7b518670bcf93548bdde3684fd55d2be51975"}, + {file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"}, + {file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"}, ] [[package]] @@ -3897,84 +4363,95 @@ ptyprocess = ">=0.5" [[package]] name = "pillow" -version = "10.3.0" +version = "10.4.0" description = "Python Imaging Library (Fork)" optional = false 
python-versions = ">=3.8" files = [ - {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, - {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, - {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, - {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, - {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, - {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, - {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, - {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, - {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, - {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, - {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, - {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, - {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, - {file = 
"pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, - {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, - {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, - {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, - {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] + {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = 
"sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, + {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, + {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, + {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, + {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, + {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, + {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, + {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, + {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, + {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, + {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, + {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, + {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, + {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, + {file = 
"pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, + {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, + {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, + {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, + {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, + {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, + {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, + {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, + {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, + {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, + {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, + {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"}, + {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"}, + {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"}, + {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"}, + {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"}, + {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"}, + {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"}, + {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"}, + {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"}, + {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"}, + {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"}, + {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, + {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, + {file = 
"pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"}, + {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"}, + {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] @@ -3983,17 +4460,18 @@ xmp = ["defusedxml"] [[package]] name = "pinecone-client" -version = "4.1.1" +version = "5.0.1" description = "Pinecone client and SDK" optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "pinecone_client-4.1.1-py3-none-any.whl", hash = "sha256:e74ea91a0129a80f301662e286b1883f2eb896683ff7d2cdb03ea06346844d0d"}, - {file = "pinecone_client-4.1.1.tar.gz", hash = "sha256:b2e78c29de50c180dbfe75e15f08c87ec1a3a4f1bc6b2be1f0ccaee1ab4434fa"}, + {file = "pinecone_client-5.0.1-py3-none-any.whl", hash = "sha256:c8f7835e1045ba84e295f217a8e85573ffb80b41501bbc1af6d92c9631c567a7"}, + {file = "pinecone_client-5.0.1.tar.gz", hash = "sha256:11c33ff5d1c38a6ce69e69fe532c0f22f312fb28d761bb30b3767816d3181d64"}, ] [package.dependencies] certifi = ">=2019.11.17" +pinecone-plugin-inference = ">=1.0.3,<2.0.0" pinecone-plugin-interface = ">=0.0.7,<0.0.8" tqdm = ">=4.64.1" typing-extensions = ">=3.7.4" @@ -4005,6 +4483,20 @@ urllib3 = [ [package.extras] grpc = ["googleapis-common-protos (>=1.53.0)", "grpcio (>=1.44.0)", "grpcio (>=1.59.0)", "lz4 (>=3.1.3)", "protobuf (>=4.25,<5.0)", "protoc-gen-openapiv2 (>=0.0.1,<0.0.2)"] +[[package]] +name = "pinecone-plugin-inference" +version = "1.0.3" +description = "Embeddings plugin for Pinecone SDK" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "pinecone_plugin_inference-1.0.3-py3-none-any.whl", hash = "sha256:bbdfe5dba99a87374d9e3315b62b8e1bbca52d5fe069a64cd6b212efbc8b9afd"}, + {file = "pinecone_plugin_inference-1.0.3.tar.gz", hash = "sha256:c6519ba730123713a181c010f0db9d6449d11de451b8e79bec4efd662b096f41"}, +] + +[package.dependencies] +pinecone-plugin-interface = ">=0.0.7,<0.0.8" + [[package]] name = "pinecone-plugin-interface" version = "0.0.7" @@ -4049,13 +4541,13 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "portalocker" -version = "2.8.2" +version = "2.10.1" 
description = "Wraps the portalocker recipe for easy usage" optional = false python-versions = ">=3.8" files = [ - {file = "portalocker-2.8.2-py3-none-any.whl", hash = "sha256:cfb86acc09b9aa7c3b43594e19be1345b9d16af3feb08bf92f23d4dce513a28e"}, - {file = "portalocker-2.8.2.tar.gz", hash = "sha256:2b035aa7828e46c58e9b31390ee1f169b98e1066ab10b9a6a861fe7e25ee4f33"}, + {file = "portalocker-2.10.1-py3-none-any.whl", hash = "sha256:53a5984ebc86a025552264b459b46a2086e269b21823cb572f8f28ee759e45bf"}, + {file = "portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f"}, ] [package.dependencies] @@ -4117,13 +4609,13 @@ ssv = ["swagger-spec-validator (>=2.4,<3.0)"] [[package]] name = "pre-commit" -version = "3.7.1" +version = "3.8.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." optional = false python-versions = ">=3.9" files = [ - {file = "pre_commit-3.7.1-py2.py3-none-any.whl", hash = "sha256:fae36fd1d7ad7d6a5a1c0b0d5adb2ed1a3bda5a21bf6c3e5372073d7a11cd4c5"}, - {file = "pre_commit-3.7.1.tar.gz", hash = "sha256:8ca3ad567bc78a4972a3f1a477e94a79d4597e8140a6e0b651c5e33899c3654a"}, + {file = "pre_commit-3.8.0-py2.py3-none-any.whl", hash = "sha256:9a90a53bf82fdd8778d58085faf8d83df56e40dfe18f45b19446e26bf1b3a63f"}, + {file = "pre_commit-3.8.0.tar.gz", hash = "sha256:8bb6494d4a20423842e198980c9ecf9f96607a07ea29549e180eef9ae80fe7af"}, ] [package.dependencies] @@ -4135,13 +4627,13 @@ virtualenv = ">=20.10.0" [[package]] name = "prompt-toolkit" -version = "3.0.43" +version = "3.0.47" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, - {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, + {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, + {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, ] [package.dependencies] @@ -4166,47 +4658,48 @@ testing = ["google-api-core (>=1.31.5)"] [[package]] name = "protobuf" -version = "4.25.3" +version = "4.25.4" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.3-cp310-abi3-win32.whl", hash = "sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa"}, - {file = "protobuf-4.25.3-cp310-abi3-win_amd64.whl", hash = "sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8"}, - {file = "protobuf-4.25.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c"}, - {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019"}, - {file = "protobuf-4.25.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d"}, - {file = "protobuf-4.25.3-cp38-cp38-win32.whl", hash = "sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2"}, - {file = "protobuf-4.25.3-cp38-cp38-win_amd64.whl", hash = "sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4"}, - {file = "protobuf-4.25.3-cp39-cp39-win32.whl", hash = 
"sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4"}, - {file = "protobuf-4.25.3-cp39-cp39-win_amd64.whl", hash = "sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c"}, - {file = "protobuf-4.25.3-py3-none-any.whl", hash = "sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9"}, - {file = "protobuf-4.25.3.tar.gz", hash = "sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c"}, + {file = "protobuf-4.25.4-cp310-abi3-win32.whl", hash = "sha256:db9fd45183e1a67722cafa5c1da3e85c6492a5383f127c86c4c4aa4845867dc4"}, + {file = "protobuf-4.25.4-cp310-abi3-win_amd64.whl", hash = "sha256:ba3d8504116a921af46499471c63a85260c1a5fc23333154a427a310e015d26d"}, + {file = "protobuf-4.25.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:eecd41bfc0e4b1bd3fa7909ed93dd14dd5567b98c941d6c1ad08fdcab3d6884b"}, + {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:4c8a70fdcb995dcf6c8966cfa3a29101916f7225e9afe3ced4395359955d3835"}, + {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:3319e073562e2515c6ddc643eb92ce20809f5d8f10fead3332f71c63be6a7040"}, + {file = "protobuf-4.25.4-cp38-cp38-win32.whl", hash = "sha256:7e372cbbda66a63ebca18f8ffaa6948455dfecc4e9c1029312f6c2edcd86c4e1"}, + {file = "protobuf-4.25.4-cp38-cp38-win_amd64.whl", hash = "sha256:051e97ce9fa6067a4546e75cb14f90cf0232dcb3e3d508c448b8d0e4265b61c1"}, + {file = "protobuf-4.25.4-cp39-cp39-win32.whl", hash = "sha256:90bf6fd378494eb698805bbbe7afe6c5d12c8e17fca817a646cd6a1818c696ca"}, + {file = "protobuf-4.25.4-cp39-cp39-win_amd64.whl", hash = "sha256:ac79a48d6b99dfed2729ccccee547b34a1d3d63289c71cef056653a846a2240f"}, + {file = "protobuf-4.25.4-py3-none-any.whl", hash = "sha256:bfbebc1c8e4793cfd58589acfb8a1026be0003e852b9da7db5a4285bde996978"}, + {file = "protobuf-4.25.4.tar.gz", hash = "sha256:0dc4a62cc4052a036ee2204d26fe4d835c62827c855c8a03f29fe6da146b380d"}, ] [[package]] name = "psutil" -version = "5.9.8" +version = "6.0.0" description = "Cross-platform lib for process and system monitoring in Python." 
optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" -files = [ - {file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"}, - {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"}, - {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7"}, - {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36"}, - {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d"}, - {file = "psutil-5.9.8-cp27-none-win32.whl", hash = "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e"}, - {file = "psutil-5.9.8-cp27-none-win_amd64.whl", hash = "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631"}, - {file = "psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81"}, - {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421"}, - {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4"}, - {file = "psutil-5.9.8-cp36-cp36m-win32.whl", hash = "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee"}, - {file = "psutil-5.9.8-cp36-cp36m-win_amd64.whl", hash = "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2"}, - {file = "psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0"}, - {file = "psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf"}, - {file = "psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"}, - {file = "psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c"}, +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, + {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, + {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"}, + {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"}, + {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"}, + {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"}, + {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"}, + {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", 
hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, + {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"}, + {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"}, + {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, + {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, + {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, + {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, ] [package.extras] @@ -4326,13 +4819,13 @@ files = [ [[package]] name = "pure-eval" -version = "0.2.2" +version = "0.2.3" description = "Safely evaluate AST nodes without side effects" optional = false python-versions = "*" files = [ - {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, - {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, + {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, + {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, ] [package.extras] @@ -4340,52 +4833,55 @@ tests = ["pytest"] [[package]] name = "pyarrow" -version = "16.1.0" +version = "17.0.0" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.8" files = [ - {file = "pyarrow-16.1.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:17e23b9a65a70cc733d8b738baa6ad3722298fa0c81d88f63ff94bf25eaa77b9"}, - {file = "pyarrow-16.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4740cc41e2ba5d641071d0ab5e9ef9b5e6e8c7611351a5cb7c1d175eaf43674a"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98100e0268d04e0eec47b73f20b39c45b4006f3c4233719c3848aa27a03c1aef"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f68f409e7b283c085f2da014f9ef81e885d90dcd733bd648cfba3ef265961848"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:a8914cd176f448e09746037b0c6b3a9d7688cef451ec5735094055116857580c"}, - {file = "pyarrow-16.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:48be160782c0556156d91adbdd5a4a7e719f8d407cb46ae3bb4eaee09b3111bd"}, - {file = "pyarrow-16.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9cf389d444b0f41d9fe1444b70650fea31e9d52cfcb5f818b7888b91b586efff"}, - {file = "pyarrow-16.1.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = 
"sha256:d0ebea336b535b37eee9eee31761813086d33ed06de9ab6fc6aaa0bace7b250c"}, - {file = "pyarrow-16.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e73cfc4a99e796727919c5541c65bb88b973377501e39b9842ea71401ca6c1c"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf9251264247ecfe93e5f5a0cd43b8ae834f1e61d1abca22da55b20c788417f6"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddf5aace92d520d3d2a20031d8b0ec27b4395cab9f74e07cc95edf42a5cc0147"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:25233642583bf658f629eb230b9bb79d9af4d9f9229890b3c878699c82f7d11e"}, - {file = "pyarrow-16.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a33a64576fddfbec0a44112eaf844c20853647ca833e9a647bfae0582b2ff94b"}, - {file = "pyarrow-16.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:185d121b50836379fe012753cf15c4ba9638bda9645183ab36246923875f8d1b"}, - {file = "pyarrow-16.1.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:2e51ca1d6ed7f2e9d5c3c83decf27b0d17bb207a7dea986e8dc3e24f80ff7d6f"}, - {file = "pyarrow-16.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06ebccb6f8cb7357de85f60d5da50e83507954af617d7b05f48af1621d331c9a"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b04707f1979815f5e49824ce52d1dceb46e2f12909a48a6a753fe7cafbc44a0c"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d32000693deff8dc5df444b032b5985a48592c0697cb6e3071a5d59888714e2"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:8785bb10d5d6fd5e15d718ee1d1f914fe768bf8b4d1e5e9bf253de8a26cb1628"}, - {file = "pyarrow-16.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:e1369af39587b794873b8a307cc6623a3b1194e69399af0efd05bb202195a5a7"}, - {file = "pyarrow-16.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:febde33305f1498f6df85e8020bca496d0e9ebf2093bab9e0f65e2b4ae2b3444"}, - {file = "pyarrow-16.1.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b5f5705ab977947a43ac83b52ade3b881eb6e95fcc02d76f501d549a210ba77f"}, - {file = "pyarrow-16.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0d27bf89dfc2576f6206e9cd6cf7a107c9c06dc13d53bbc25b0bd4556f19cf5f"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d07de3ee730647a600037bc1d7b7994067ed64d0eba797ac74b2bc77384f4c2"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbef391b63f708e103df99fbaa3acf9f671d77a183a07546ba2f2c297b361e83"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:19741c4dbbbc986d38856ee7ddfdd6a00fc3b0fc2d928795b95410d38bb97d15"}, - {file = "pyarrow-16.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f2c5fb249caa17b94e2b9278b36a05ce03d3180e6da0c4c3b3ce5b2788f30eed"}, - {file = "pyarrow-16.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:e6b6d3cd35fbb93b70ade1336022cc1147b95ec6af7d36906ca7fe432eb09710"}, - {file = "pyarrow-16.1.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:18da9b76a36a954665ccca8aa6bd9f46c1145f79c0bb8f4f244f5f8e799bca55"}, - {file = "pyarrow-16.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:99f7549779b6e434467d2aa43ab2b7224dd9e41bdde486020bae198978c9e05e"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f07fdffe4fd5b15f5ec15c8b64584868d063bc22b86b46c9695624ca3505b7b4"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddfe389a08ea374972bd4065d5f25d14e36b43ebc22fc75f7b951f24378bf0b5"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3b20bd67c94b3a2ea0a749d2a5712fc845a69cb5d52e78e6449bbd295611f3aa"}, - {file = "pyarrow-16.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ba8ac20693c0bb0bf4b238751d4409e62852004a8cf031c73b0e0962b03e45e3"}, - {file = "pyarrow-16.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:31a1851751433d89a986616015841977e0a188662fcffd1a5677453f1df2de0a"}, - {file = "pyarrow-16.1.0.tar.gz", hash = "sha256:15fbb22ea96d11f0b5768504a3f961edab25eaf4197c341720c4a387f6c60315"}, + {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"}, + {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8"}, + {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047"}, + {file = "pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977"}, + {file = "pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420"}, + {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4"}, + {file = "pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22"}, + {file = "pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a"}, + {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b"}, + {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"}, + {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"}, + {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"}, + {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"}, + {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"}, + {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"}, + {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"}, + {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"}, ] [package.dependencies] numpy = ">=1.16.6" +[package.extras] +test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] + [[package]] name = "pyasn1" version = "0.6.0" @@ -4437,18 +4933,18 @@ files = [ [[package]] name = "pydantic" -version = "2.8.0" +version = "2.8.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.8.0-py3-none-any.whl", hash = "sha256:ead4f3a1e92386a734ca1411cb25d94147cf8778ed5be6b56749047676d6364e"}, - {file = "pydantic-2.8.0.tar.gz", hash = "sha256:d970ffb9d030b710795878940bd0489842c638e7252fc4a19c3ae2f7da4d6141"}, + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = 
"sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.20.0" +pydantic-core = "2.20.1" typing-extensions = {version = ">=4.6.1", markers = "python_version < \"3.13\""} [package.extras] @@ -4456,99 +4952,100 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.20.0" +version = "2.20.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e9dcd7fb34f7bfb239b5fa420033642fff0ad676b765559c3737b91f664d4fa9"}, - {file = "pydantic_core-2.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:649a764d9b0da29816889424697b2a3746963ad36d3e0968784ceed6e40c6355"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7701df088d0b05f3460f7ba15aec81ac8b0fb5690367dfd072a6c38cf5b7fdb5"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ab760f17c3e792225cdaef31ca23c0aea45c14ce80d8eff62503f86a5ab76bff"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cb1ad5b4d73cde784cf64580166568074f5ccd2548d765e690546cff3d80937d"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b81ec2efc04fc1dbf400647d4357d64fb25543bae38d2d19787d69360aad21c9"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4a9732a5cad764ba37f3aa873dccb41b584f69c347a57323eda0930deec8e10"}, - {file = "pydantic_core-2.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6dc85b9e10cc21d9c1055f15684f76fa4facadddcb6cd63abab702eb93c98943"}, - {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:21d9f7e24f63fdc7118e6cc49defaab8c1d27570782f7e5256169d77498cf7c7"}, - {file = "pydantic_core-2.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8b315685832ab9287e6124b5d74fc12dda31e6421d7f6b08525791452844bc2d"}, - {file = "pydantic_core-2.20.0-cp310-none-win32.whl", hash = "sha256:c3dc8ec8b87c7ad534c75b8855168a08a7036fdb9deeeed5705ba9410721c84d"}, - {file = "pydantic_core-2.20.0-cp310-none-win_amd64.whl", hash = "sha256:85770b4b37bb36ef93a6122601795231225641003e0318d23c6233c59b424279"}, - {file = "pydantic_core-2.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:58e251bb5a5998f7226dc90b0b753eeffa720bd66664eba51927c2a7a2d5f32c"}, - {file = "pydantic_core-2.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:78d584caac52c24240ef9ecd75de64c760bbd0e20dbf6973631815e3ef16ef8b"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5084ec9721f82bef5ff7c4d1ee65e1626783abb585f8c0993833490b63fe1792"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6d0f52684868db7c218437d260e14d37948b094493f2646f22d3dda7229bbe3f"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1def125d59a87fe451212a72ab9ed34c118ff771e5473fef4f2f95d8ede26d75"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b34480fd6778ab356abf1e9086a4ced95002a1e195e8d2fd182b0def9d944d11"}, - {file = 
"pydantic_core-2.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d42669d319db366cb567c3b444f43caa7ffb779bf9530692c6f244fc635a41eb"}, - {file = "pydantic_core-2.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:53b06aea7a48919a254b32107647be9128c066aaa6ee6d5d08222325f25ef175"}, - {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1f038156b696a1c39d763b2080aeefa87ddb4162c10aa9fabfefffc3dd8180fa"}, - {file = "pydantic_core-2.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3f0f3a4a23717280a5ee3ac4fb1f81d6fde604c9ec5100f7f6f987716bb8c137"}, - {file = "pydantic_core-2.20.0-cp311-none-win32.whl", hash = "sha256:316fe7c3fec017affd916a0c83d6f1ec697cbbbdf1124769fa73328e7907cc2e"}, - {file = "pydantic_core-2.20.0-cp311-none-win_amd64.whl", hash = "sha256:2d06a7fa437f93782e3f32d739c3ec189f82fca74336c08255f9e20cea1ed378"}, - {file = "pydantic_core-2.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:d6f8c49657f3eb7720ed4c9b26624063da14937fc94d1812f1e04a2204db3e17"}, - {file = "pydantic_core-2.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ad1bd2f377f56fec11d5cfd0977c30061cd19f4fa199bf138b200ec0d5e27eeb"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed741183719a5271f97d93bbcc45ed64619fa38068aaa6e90027d1d17e30dc8d"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d82e5ed3a05f2dcb89c6ead2fd0dbff7ac09bc02c1b4028ece2d3a3854d049ce"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2ba34a099576234671f2e4274e5bc6813b22e28778c216d680eabd0db3f7dad"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:879ae6bb08a063b3e1b7ac8c860096d8fd6b48dd9b2690b7f2738b8c835e744b"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0eefc7633a04c0694340aad91fbfd1986fe1a1e0c63a22793ba40a18fcbdc8"}, - {file = "pydantic_core-2.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:73deadd6fd8a23e2f40b412b3ac617a112143c8989a4fe265050fd91ba5c0608"}, - {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:35681445dc85446fb105943d81ae7569aa7e89de80d1ca4ac3229e05c311bdb1"}, - {file = "pydantic_core-2.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0f6dd3612a3b9f91f2e63924ea18a4476656c6d01843ca20a4c09e00422195af"}, - {file = "pydantic_core-2.20.0-cp312-none-win32.whl", hash = "sha256:7e37b6bb6e90c2b8412b06373c6978d9d81e7199a40e24a6ef480e8acdeaf918"}, - {file = "pydantic_core-2.20.0-cp312-none-win_amd64.whl", hash = "sha256:7d4df13d1c55e84351fab51383520b84f490740a9f1fec905362aa64590b7a5d"}, - {file = "pydantic_core-2.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:d43e7ab3b65e4dc35a7612cfff7b0fd62dce5bc11a7cd198310b57f39847fd6c"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b6a24d7b5893392f2b8e3b7a0031ae3b14c6c1942a4615f0d8794fdeeefb08b"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2f13c3e955a087c3ec86f97661d9f72a76e221281b2262956af381224cfc243"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72432fd6e868c8d0a6849869e004b8bcae233a3c56383954c228316694920b38"}, - 
{file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d70a8ff2d4953afb4cbe6211f17268ad29c0b47e73d3372f40e7775904bc28fc"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e49524917b8d3c2f42cd0d2df61178e08e50f5f029f9af1f402b3ee64574392"}, - {file = "pydantic_core-2.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4f0f71653b1c1bad0350bc0b4cc057ab87b438ff18fa6392533811ebd01439c"}, - {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:16197e6f4fdecb9892ed2436e507e44f0a1aa2cff3b9306d1c879ea2f9200997"}, - {file = "pydantic_core-2.20.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:763602504bf640b3ded3bba3f8ed8a1cc2fc6a87b8d55c1c5689f428c49c947e"}, - {file = "pydantic_core-2.20.0-cp313-none-win32.whl", hash = "sha256:a3f243f318bd9523277fa123b3163f4c005a3e8619d4b867064de02f287a564d"}, - {file = "pydantic_core-2.20.0-cp313-none-win_amd64.whl", hash = "sha256:03aceaf6a5adaad3bec2233edc5a7905026553916615888e53154807e404545c"}, - {file = "pydantic_core-2.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d6f2d8b8da1f03f577243b07bbdd3412eee3d37d1f2fd71d1513cbc76a8c1239"}, - {file = "pydantic_core-2.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a272785a226869416c6b3c1b7e450506152d3844207331f02f27173562c917e0"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efbb412d55a4ffe73963fed95c09ccb83647ec63b711c4b3752be10a56f0090b"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1e4f46189d8740561b43655263a41aac75ff0388febcb2c9ec4f1b60a0ec12f3"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3df115f4a3c8c5e4d5acf067d399c6466d7e604fc9ee9acbe6f0c88a0c3cf"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a340d2bdebe819d08f605e9705ed551c3feb97e4fd71822d7147c1e4bdbb9508"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:616b9c2f882393d422ba11b40e72382fe975e806ad693095e9a3b67c59ea6150"}, - {file = "pydantic_core-2.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25c46bb2ff6084859bbcfdf4f1a63004b98e88b6d04053e8bf324e115398e9e7"}, - {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:23425eccef8f2c342f78d3a238c824623836c6c874d93c726673dbf7e56c78c0"}, - {file = "pydantic_core-2.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:52527e8f223ba29608d999d65b204676398009725007c9336651c2ec2d93cffc"}, - {file = "pydantic_core-2.20.0-cp38-none-win32.whl", hash = "sha256:1c3c5b7f70dd19a6845292b0775295ea81c61540f68671ae06bfe4421b3222c2"}, - {file = "pydantic_core-2.20.0-cp38-none-win_amd64.whl", hash = "sha256:8093473d7b9e908af1cef30025609afc8f5fd2a16ff07f97440fd911421e4432"}, - {file = "pydantic_core-2.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ee7785938e407418795e4399b2bf5b5f3cf6cf728077a7f26973220d58d885cf"}, - {file = "pydantic_core-2.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e75794883d635071cf6b4ed2a5d7a1e50672ab7a051454c76446ef1ebcdcc91"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:344e352c96e53b4f56b53d24728217c69399b8129c16789f70236083c6ceb2ac"}, - {file = 
"pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:978d4123ad1e605daf1ba5e01d4f235bcf7b6e340ef07e7122e8e9cfe3eb61ab"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c05eaf6c863781eb834ab41f5963604ab92855822a2062897958089d1335dad"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bc7e43b4a528ffca8c9151b6a2ca34482c2fdc05e6aa24a84b7f475c896fc51d"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:658287a29351166510ebbe0a75c373600cc4367a3d9337b964dada8d38bcc0f4"}, - {file = "pydantic_core-2.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1dacf660d6de692fe351e8c806e7efccf09ee5184865893afbe8e59be4920b4a"}, - {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3e147fc6e27b9a487320d78515c5f29798b539179f7777018cedf51b7749e4f4"}, - {file = "pydantic_core-2.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c867230d715a3dd1d962c8d9bef0d3168994ed663e21bf748b6e3a529a129aab"}, - {file = "pydantic_core-2.20.0-cp39-none-win32.whl", hash = "sha256:22b813baf0dbf612752d8143a2dbf8e33ccb850656b7850e009bad2e101fc377"}, - {file = "pydantic_core-2.20.0-cp39-none-win_amd64.whl", hash = "sha256:3a7235b46c1bbe201f09b6f0f5e6c36b16bad3d0532a10493742f91fbdc8035f"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cafde15a6f7feaec2f570646e2ffc5b73412295d29134a29067e70740ec6ee20"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2aec8eeea0b08fd6bc2213d8e86811a07491849fd3d79955b62d83e32fa2ad5f"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840200827984f1c4e114008abc2f5ede362d6e11ed0b5931681884dd41852ff1"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ea1d8b7df522e5ced34993c423c3bf3735c53df8b2a15688a2f03a7d678800"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d5b8376a867047bf08910573deb95d3c8dfb976eb014ee24f3b5a61ccc5bee1b"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d08264b4460326cefacc179fc1411304d5af388a79910832835e6f641512358b"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7a3639011c2e8a9628466f616ed7fb413f30032b891898e10895a0a8b5857d6c"}, - {file = "pydantic_core-2.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:05e83ce2f7eba29e627dd8066aa6c4c0269b2d4f889c0eba157233a353053cea"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:603a843fea76a595c8f661cd4da4d2281dff1e38c4a836a928eac1a2f8fe88e4"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac76f30d5d3454f4c28826d891fe74d25121a346c69523c9810ebba43f3b1cec"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e3b1d4b1b3f6082849f9b28427ef147a5b46a6132a3dbaf9ca1baa40c88609"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2761f71faed820e25ec62eacba670d1b5c2709bb131a19fcdbfbb09884593e5a"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:a0586cddbf4380e24569b8a05f234e7305717cc8323f50114dfb2051fcbce2a3"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b8c46a8cf53e849eea7090f331ae2202cd0f1ceb090b00f5902c423bd1e11805"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b4a085bd04af7245e140d1b95619fe8abb445a3d7fdf219b3f80c940853268ef"}, - {file = "pydantic_core-2.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:116b326ac82c8b315e7348390f6d30bcfe6e688a7d3f1de50ff7bcc2042a23c2"}, - {file = "pydantic_core-2.20.0.tar.gz", hash = "sha256:366be8e64e0cb63d87cf79b4e1765c0703dd6313c729b22e7b9e378db6b96877"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = 
"pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, ] [package.dependencies] @@ -4556,13 +5053,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pydantic-settings" -version = "2.3.4" +version = "2.4.0" description = "Settings management using Pydantic" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_settings-2.3.4-py3-none-any.whl", hash = "sha256:11ad8bacb68a045f00e4f862c7a718c8a9ec766aa8fd4c32e39a0594b207b53a"}, - {file = "pydantic_settings-2.3.4.tar.gz", hash = "sha256:c5802e3d62b78e82522319bbc9b8f8ffb28ad1c988a99311d04f2a6051fca0a7"}, + {file = "pydantic_settings-2.4.0-py3-none-any.whl", hash = "sha256:bb6849dc067f1687574c12a639e231f3a6feeed0a12d710c1382045c5db1c315"}, + {file = "pydantic_settings-2.4.0.tar.gz", hash = "sha256:ed81c3a0f46392b4d7c0a565c05884e6e54b3456e6f0fe4d8814981172dc9a88"}, ] [package.dependencies] @@ -4570,6 +5067,7 @@ pydantic = ">=2.7.0" python-dotenv = ">=0.21.0" [package.extras] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] toml = ["tomli (>=2.0.1)"] yaml = ["pyyaml (>=6.0.1)"] @@ -4589,13 +5087,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pyjwt" -version = "2.8.0" +version = "2.9.0" description = "JSON Web Token implementation in Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"}, - {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"}, + {file = "PyJWT-2.9.0-py3-none-any.whl", hash = "sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850"}, + {file = "pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c"}, ] [package.dependencies] @@ -4603,8 +5101,8 @@ cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"cryp [package.extras] crypto = ["cryptography (>=3.4.0)"] -dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] -docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] @@ -4619,22 +5117,22 @@ files = [ [[package]] name = "pymilvus" -version = "2.4.3" +version = "2.4.5" 
description = "Python Sdk for Milvus" optional = false python-versions = ">=3.8" files = [ - {file = "pymilvus-2.4.3-py3-none-any.whl", hash = "sha256:38239e89f8d739f665141d0b80908990b5f59681e889e135c234a4a45669a5c8"}, - {file = "pymilvus-2.4.3.tar.gz", hash = "sha256:703ac29296cdce03d6dc2aaebbe959e57745c141a94150e371dc36c61c226cc1"}, + {file = "pymilvus-2.4.5-py3-none-any.whl", hash = "sha256:dc4f2d1eac8db9cf3951de39566a1a244695760bb94d8310fbfc73d6d62bb267"}, + {file = "pymilvus-2.4.5.tar.gz", hash = "sha256:1a497fe9b41d6bf62b1d5e1c412960922dde1598576fcbb8818040c8af11149f"}, ] [package.dependencies] environs = "<=9.5.0" grpcio = ">=1.49.1,<=1.63.0" -milvus-lite = ">=2.4.0,<2.5.0" +milvus-lite = {version = ">=2.4.0,<2.5.0", markers = "sys_platform != \"win32\""} pandas = ">=1.2.4" protobuf = ">=3.20.0" -setuptools = ">=67" +setuptools = ">69" ujson = ">=2.0.0" [package.extras] @@ -4644,71 +5142,61 @@ model = ["milvus-model (>=0.1.0)"] [[package]] name = "pymongo" -version = "4.7.2" +version = "4.8.0" description = "Python driver for MongoDB " optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pymongo-4.7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:268d8578c0500012140c5460755ea405cbfe541ef47c81efa9d6744f0f99aeca"}, - {file = "pymongo-4.7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:827611beb6c483260d520cfa6a49662d980dfa5368a04296f65fa39e78fccea7"}, - {file = "pymongo-4.7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a754e366c404d19ff3f077ddeed64be31e0bb515e04f502bf11987f1baa55a16"}, - {file = "pymongo-4.7.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c44efab10d9a3db920530f7bcb26af8f408b7273d2f0214081d3891979726328"}, - {file = "pymongo-4.7.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35b3f0c7d49724859d4df5f0445818d525824a6cd55074c42573d9b50764df67"}, - {file = "pymongo-4.7.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e37faf298a37ffb3e0809e77fbbb0a32b6a2d18a83c59cfc2a7b794ea1136b0"}, - {file = "pymongo-4.7.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1bcd58669e56c08f1e72c5758868b5df169fe267501c949ee83c418e9df9155"}, - {file = "pymongo-4.7.2-cp310-cp310-win32.whl", hash = "sha256:c72d16fede22efe7cdd1f422e8da15760e9498024040429362886f946c10fe95"}, - {file = "pymongo-4.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:12d1fef77d25640cb78893d07ff7d2fac4c4461d8eec45bd3b9ad491a1115d6e"}, - {file = "pymongo-4.7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fc5af24fcf5fc6f7f40d65446400d45dd12bea933d0299dc9e90c5b22197f1e9"}, - {file = "pymongo-4.7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:730778b6f0964b164c187289f906bbc84cb0524df285b7a85aa355bbec43eb21"}, - {file = "pymongo-4.7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47a1a4832ef2f4346dcd1a10a36ade7367ad6905929ddb476459abb4fd1b98cb"}, - {file = "pymongo-4.7.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6eab12c6385526d386543d6823b07187fefba028f0da216506e00f0e1855119"}, - {file = "pymongo-4.7.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37e9ea81fa59ee9274457ed7d59b6c27f6f2a5fe8e26f184ecf58ea52a019cb8"}, - {file = "pymongo-4.7.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e9d9d2c0aae73aa4369bd373ac2ac59f02c46d4e56c4b6d6e250cfe85f76802"}, - {file = 
"pymongo-4.7.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb6e00a79dff22c9a72212ad82021b54bdb3b85f38a85f4fc466bde581d7d17a"}, - {file = "pymongo-4.7.2-cp311-cp311-win32.whl", hash = "sha256:02efd1bb3397e24ef2af45923888b41a378ce00cb3a4259c5f4fc3c70497a22f"}, - {file = "pymongo-4.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:87bb453ac3eb44db95cb6d5a616fbc906c1c00661eec7f55696253a6245beb8a"}, - {file = "pymongo-4.7.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:12c466e02133b7f8f4ff1045c6b5916215c5f7923bc83fd6e28e290cba18f9f6"}, - {file = "pymongo-4.7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f91073049c43d14e66696970dd708d319b86ee57ef9af359294eee072abaac79"}, - {file = "pymongo-4.7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87032f818bf5052ab742812c715eff896621385c43f8f97cdd37d15b5d394e95"}, - {file = "pymongo-4.7.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6a87eef394039765679f75c6a47455a4030870341cb76eafc349c5944408c882"}, - {file = "pymongo-4.7.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d275596f840018858757561840767b39272ac96436fcb54f5cac6d245393fd97"}, - {file = "pymongo-4.7.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82102e353be13f1a6769660dd88115b1da382447672ba1c2662a0fbe3df1d861"}, - {file = "pymongo-4.7.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:194065c9d445017b3c82fb85f89aa2055464a080bde604010dc8eb932a6b3c95"}, - {file = "pymongo-4.7.2-cp312-cp312-win32.whl", hash = "sha256:db4380d1e69fdad1044a4b8f3bb105200542c49a0dde93452d938ff9db1d6d29"}, - {file = "pymongo-4.7.2-cp312-cp312-win_amd64.whl", hash = "sha256:fadc6e8db7707c861ebe25b13ad6aca19ea4d2c56bf04a26691f46c23dadf6e4"}, - {file = "pymongo-4.7.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2cb77d09bd012cb4b30636e7e38d00b5f9be5eb521c364bde66490c45ee6c4b4"}, - {file = "pymongo-4.7.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56bf8b706946952acdea0fe478f8e44f1ed101c4b87f046859e6c3abe6c0a9f4"}, - {file = "pymongo-4.7.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcf337d1b252405779d9c79978d6ca15eab3cdaa2f44c100a79221bddad97c8a"}, - {file = "pymongo-4.7.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ffd1519edbe311df73c74ec338de7d294af535b2748191c866ea3a7c484cd15"}, - {file = "pymongo-4.7.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d59776f435564159196d971aa89422ead878174aff8fe18e06d9a0bc6d648c"}, - {file = "pymongo-4.7.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:347c49cf7f0ba49ea87c1a5a1984187ecc5516b7c753f31938bf7b37462824fd"}, - {file = "pymongo-4.7.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:84bc00200c3cbb6c98a2bb964c9e8284b641e4a33cf10c802390552575ee21de"}, - {file = "pymongo-4.7.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fcaf8c911cb29316a02356f89dbc0e0dfcc6a712ace217b6b543805690d2aefd"}, - {file = "pymongo-4.7.2-cp37-cp37m-win32.whl", hash = "sha256:b48a5650ee5320d59f6d570bd99a8d5c58ac6f297a4e9090535f6561469ac32e"}, - {file = "pymongo-4.7.2-cp37-cp37m-win_amd64.whl", hash = "sha256:5239ef7e749f1326ea7564428bf861d5250aa39d7f26d612741b1b1273227062"}, - {file = 
"pymongo-4.7.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d2dcf608d35644e8d276d61bf40a93339d8d66a0e5f3e3f75b2c155a421a1b71"}, - {file = "pymongo-4.7.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:25eeb2c18ede63891cbd617943dd9e6b9cbccc54f276e0b2e693a0cc40f243c5"}, - {file = "pymongo-4.7.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9349f0bb17a31371d4cacb64b306e4ca90413a3ad1fffe73ac7cd495570d94b5"}, - {file = "pymongo-4.7.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ffd4d7cb2e6c6e100e2b39606d38a9ffc934e18593dc9bb326196afc7d93ce3d"}, - {file = "pymongo-4.7.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9a8bd37f5dabc86efceb8d8cbff5969256523d42d08088f098753dba15f3b37a"}, - {file = "pymongo-4.7.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c78f156edc59b905c80c9003e022e1a764c54fd40ac4fea05b0764f829790e2"}, - {file = "pymongo-4.7.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9d892fb91e81cccb83f507cdb2ea0aa026ec3ced7f12a1d60f6a5bf0f20f9c1f"}, - {file = "pymongo-4.7.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87832d6076c2c82f42870157414fd876facbb6554d2faf271ffe7f8f30ce7bed"}, - {file = "pymongo-4.7.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ce1a374ea0e49808e0380ffc64284c0ce0f12bd21042b4bef1af3eb7bdf49054"}, - {file = "pymongo-4.7.2-cp38-cp38-win32.whl", hash = "sha256:eb0642e5f0dd7e86bb358749cc278e70b911e617f519989d346f742dc9520dfb"}, - {file = "pymongo-4.7.2-cp38-cp38-win_amd64.whl", hash = "sha256:4bdb5ffe1cd3728c9479671a067ef44dacafc3743741d4dc700c377c4231356f"}, - {file = "pymongo-4.7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:743552033c63f0afdb56b9189ab04b5c1dbffd7310cf7156ab98eebcecf24621"}, - {file = "pymongo-4.7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5239776633f7578b81207e5646245415a5a95f6ae5ef5dff8e7c2357e6264bfc"}, - {file = "pymongo-4.7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:727ad07952c155cd20045f2ce91143c7dc4fb01a5b4e8012905a89a7da554b0c"}, - {file = "pymongo-4.7.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9385654f01a90f73827af4db90c290a1519f7d9102ba43286e187b373e9a78e9"}, - {file = "pymongo-4.7.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d833651f1ba938bb7501f13e326b96cfbb7d98867b2d545ca6d69c7664903e0"}, - {file = "pymongo-4.7.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf17ea9cea14d59b0527403dd7106362917ced7c4ec936c4ba22bd36c912c8e0"}, - {file = "pymongo-4.7.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cecd2df037249d1c74f0af86fb5b766104a5012becac6ff63d85d1de53ba8b98"}, - {file = "pymongo-4.7.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65b4c00dedbd333698b83cd2095a639a6f0d7c4e2a617988f6c65fb46711f028"}, - {file = "pymongo-4.7.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d9b6cbc037108ff1a0a867e7670d8513c37f9bcd9ee3d2464411bfabf70ca002"}, - {file = "pymongo-4.7.2-cp39-cp39-win32.whl", hash = "sha256:cf28430ec1924af1bffed37b69a812339084697fd3f3e781074a0148e6475803"}, - {file = "pymongo-4.7.2-cp39-cp39-win_amd64.whl", hash = "sha256:e004527ea42a6b99a8b8d5b42b42762c3bdf80f88fbdb5c3a9d47f3808495b86"}, - {file = "pymongo-4.7.2.tar.gz", hash = 
"sha256:9024e1661c6e40acf468177bf90ce924d1bc681d2b244adda3ed7b2f4c4d17d7"}, + {file = "pymongo-4.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f2b7bec27e047e84947fbd41c782f07c54c30c76d14f3b8bf0c89f7413fac67a"}, + {file = "pymongo-4.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c68fe128a171493018ca5c8020fc08675be130d012b7ab3efe9e22698c612a1"}, + {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:920d4f8f157a71b3cb3f39bc09ce070693d6e9648fb0e30d00e2657d1dca4e49"}, + {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52b4108ac9469febba18cea50db972605cc43978bedaa9fea413378877560ef8"}, + {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:180d5eb1dc28b62853e2f88017775c4500b07548ed28c0bd9c005c3d7bc52526"}, + {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aec2b9088cdbceb87e6ca9c639d0ff9b9d083594dda5ca5d3c4f6774f4c81b33"}, + {file = "pymongo-4.8.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0cf61450feadca81deb1a1489cb1a3ae1e4266efd51adafecec0e503a8dcd84"}, + {file = "pymongo-4.8.0-cp310-cp310-win32.whl", hash = "sha256:8b18c8324809539c79bd6544d00e0607e98ff833ca21953df001510ca25915d1"}, + {file = "pymongo-4.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e5df28f74002e37bcbdfdc5109799f670e4dfef0fb527c391ff84f078050e7b5"}, + {file = "pymongo-4.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6b50040d9767197b77ed420ada29b3bf18a638f9552d80f2da817b7c4a4c9c68"}, + {file = "pymongo-4.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:417369ce39af2b7c2a9c7152c1ed2393edfd1cbaf2a356ba31eb8bcbd5c98dd7"}, + {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf821bd3befb993a6db17229a2c60c1550e957de02a6ff4dd0af9476637b2e4d"}, + {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9365166aa801c63dff1a3cb96e650be270da06e3464ab106727223123405510f"}, + {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc8b8582f4209c2459b04b049ac03c72c618e011d3caa5391ff86d1bda0cc486"}, + {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16e5019f75f6827bb5354b6fef8dfc9d6c7446894a27346e03134d290eb9e758"}, + {file = "pymongo-4.8.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b5802151fc2b51cd45492c80ed22b441d20090fb76d1fd53cd7760b340ff554"}, + {file = "pymongo-4.8.0-cp311-cp311-win32.whl", hash = "sha256:4bf58e6825b93da63e499d1a58de7de563c31e575908d4e24876234ccb910eba"}, + {file = "pymongo-4.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:b747c0e257b9d3e6495a018309b9e0c93b7f0d65271d1d62e572747f4ffafc88"}, + {file = "pymongo-4.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e6a720a3d22b54183352dc65f08cd1547204d263e0651b213a0a2e577e838526"}, + {file = "pymongo-4.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:31e4d21201bdf15064cf47ce7b74722d3e1aea2597c6785882244a3bb58c7eab"}, + {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6b804bb4f2d9dc389cc9e827d579fa327272cdb0629a99bfe5b83cb3e269ebf"}, + {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:f2fbdb87fe5075c8beb17a5c16348a1ea3c8b282a5cb72d173330be2fecf22f5"}, + {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd39455b7ee70aabee46f7399b32ab38b86b236c069ae559e22be6b46b2bbfc4"}, + {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:940d456774b17814bac5ea7fc28188c7a1338d4a233efbb6ba01de957bded2e8"}, + {file = "pymongo-4.8.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:236bbd7d0aef62e64caf4b24ca200f8c8670d1a6f5ea828c39eccdae423bc2b2"}, + {file = "pymongo-4.8.0-cp312-cp312-win32.whl", hash = "sha256:47ec8c3f0a7b2212dbc9be08d3bf17bc89abd211901093e3ef3f2adea7de7a69"}, + {file = "pymongo-4.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:e84bc7707492f06fbc37a9f215374d2977d21b72e10a67f1b31893ec5a140ad8"}, + {file = "pymongo-4.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:519d1bab2b5e5218c64340b57d555d89c3f6c9d717cecbf826fb9d42415e7750"}, + {file = "pymongo-4.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:87075a1feb1e602e539bdb1ef8f4324a3427eb0d64208c3182e677d2c0718b6f"}, + {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f53429515d2b3e86dcc83dadecf7ff881e538c168d575f3688698a8707b80a"}, + {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdc20cd1e1141b04696ffcdb7c71e8a4a665db31fe72e51ec706b3bdd2d09f36"}, + {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:284d0717d1a7707744018b0b6ee7801b1b1ff044c42f7be7a01bb013de639470"}, + {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5bf0eb8b6ef40fa22479f09375468c33bebb7fe49d14d9c96c8fd50355188b0"}, + {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ecd71b9226bd1d49416dc9f999772038e56f415a713be51bf18d8676a0841c8"}, + {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0061af6e8c5e68b13f1ec9ad5251247726653c5af3c0bbdfbca6cf931e99216"}, + {file = "pymongo-4.8.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:658d0170f27984e0d89c09fe5c42296613b711a3ffd847eb373b0dbb5b648d5f"}, + {file = "pymongo-4.8.0-cp38-cp38-win32.whl", hash = "sha256:3ed1c316718a2836f7efc3d75b4b0ffdd47894090bc697de8385acd13c513a70"}, + {file = "pymongo-4.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:7148419eedfea9ecb940961cfe465efaba90595568a1fb97585fb535ea63fe2b"}, + {file = "pymongo-4.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8400587d594761e5136a3423111f499574be5fd53cf0aefa0d0f05b180710b0"}, + {file = "pymongo-4.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af3e98dd9702b73e4e6fd780f6925352237f5dce8d99405ff1543f3771201704"}, + {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de3a860f037bb51f968de320baef85090ff0bbb42ec4f28ec6a5ddf88be61871"}, + {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0fc18b3a093f3db008c5fea0e980dbd3b743449eee29b5718bc2dc15ab5088bb"}, + {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18c9d8f975dd7194c37193583fd7d1eb9aea0c21ee58955ecf35362239ff31ac"}, + {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:408b2f8fdbeca3c19e4156f28fff1ab11c3efb0407b60687162d49f68075e63c"}, + {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6564780cafd6abeea49759fe661792bd5a67e4f51bca62b88faab497ab5fe89"}, + {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d18d86bc9e103f4d3d4f18b85a0471c0e13ce5b79194e4a0389a224bb70edd53"}, + {file = "pymongo-4.8.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9097c331577cecf8034422956daaba7ec74c26f7b255d718c584faddd7fa2e3c"}, + {file = "pymongo-4.8.0-cp39-cp39-win32.whl", hash = "sha256:d5428dbcd43d02f6306e1c3c95f692f68b284e6ee5390292242f509004c9e3a8"}, + {file = "pymongo-4.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:ef7225755ed27bfdb18730c68f6cb023d06c28f2b734597480fb4c0e500feb6f"}, + {file = "pymongo-4.8.0.tar.gz", hash = "sha256:454f2295875744dc70f1881e4b2eb99cdad008a33574bc8aaf120530f66c0cde"}, ] [package.dependencies] @@ -4716,6 +5204,7 @@ dnspython = ">=1.16.0,<3.0.0" [package.extras] aws = ["pymongo-auth-aws (>=1.1.0,<2.0.0)"] +docs = ["furo (==2023.9.10)", "readthedocs-sphinx-search (>=0.3,<1.0)", "sphinx (>=5.3,<8)", "sphinx-rtd-theme (>=2,<3)", "sphinxcontrib-shellcheck (>=1,<2)"] encryption = ["certifi", "pymongo-auth-aws (>=1.1.0,<2.0.0)", "pymongocrypt (>=1.6.0,<2.0.0)"] gssapi = ["pykerberos", "winkerberos (>=0.5.0)"] ocsp = ["certifi", "cryptography (>=2.5)", "pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service-identity (>=18.1.0)"] @@ -4771,13 +5260,13 @@ files = [ [[package]] name = "pytest" -version = "8.2.2" +version = "8.3.2" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.2.2-py3-none-any.whl", hash = "sha256:c434598117762e2bd304e526244f67bf66bbd7b5d6cf22138be51ff661980343"}, - {file = "pytest-8.2.2.tar.gz", hash = "sha256:de4bb8104e201939ccdc688b27a89a7be2079b22e2bd2b07f806b6ba71117977"}, + {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, + {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, ] [package.dependencies] @@ -4785,7 +5274,7 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=1.5,<2.0" +pluggy = ">=1.5,<2" tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] @@ -4793,13 +5282,13 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments [[package]] name = "pytest-asyncio" -version = "0.23.7" +version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" files = [ - {file = "pytest_asyncio-0.23.7-py3-none-any.whl", hash = "sha256:009b48127fbe44518a547bddd25611551b0e43ccdbf1e67d12479f569832c20b"}, - {file = "pytest_asyncio-0.23.7.tar.gz", hash = "sha256:5f5c72948f4c49e7db4f29f2521d4031f1c27f86e57b046126654083d4770268"}, + {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, + {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, ] [package.dependencies] @@ -4827,6 +5316,27 @@ pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] 
+[[package]]
+name = "pytest-xdist"
+version = "3.6.1"
+description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
+ {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
+]
+
+[package.dependencies]
+execnet = ">=2.1"
+psutil = {version = ">=3.0", optional = true, markers = "extra == \"psutil\""}
+pytest = ">=7.0.0"
+
+[package.extras]
+psutil = ["psutil (>=3.0)"]
+setproctitle = ["setproctitle"]
+testing = ["filelock"]
+
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -4855,20 +5365,6 @@ files = [
[package.extras]
cli = ["click (>=5.0)"]

-[[package]]
-name = "python-multipart"
-version = "0.0.9"
-description = "A streaming multipart parser for Python"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"},
- {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"},
-]
-
-[package.extras]
-dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"]
-
[[package]]
name = "pytz"
version = "2024.1"
@@ -4905,159 +5401,182 @@ files = [
[[package]]
name = "pyyaml"
-version = "6.0.1"
+version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.8"
files = [
- {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
- {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
- {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
- {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
- {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
- {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
- {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
- {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
- {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
- {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
- {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
{file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = 
"sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = 
"PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, ] [[package]] name = "pyzmq" -version = "26.0.3" +version = "26.1.0" description = "Python bindings for 0MQ" optional = false python-versions = ">=3.7" files = [ - {file = "pyzmq-26.0.3-cp310-cp310-macosx_10_15_universal2.whl", hash = 
"sha256:44dd6fc3034f1eaa72ece33588867df9e006a7303725a12d64c3dff92330f625"}, - {file = "pyzmq-26.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:acb704195a71ac5ea5ecf2811c9ee19ecdc62b91878528302dd0be1b9451cc90"}, - {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dbb9c997932473a27afa93954bb77a9f9b786b4ccf718d903f35da3232317de"}, - {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6bcb34f869d431799c3ee7d516554797f7760cb2198ecaa89c3f176f72d062be"}, - {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ece17ec5f20d7d9b442e5174ae9f020365d01ba7c112205a4d59cf19dc38ee"}, - {file = "pyzmq-26.0.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ba6e5e6588e49139a0979d03a7deb9c734bde647b9a8808f26acf9c547cab1bf"}, - {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3bf8b000a4e2967e6dfdd8656cd0757d18c7e5ce3d16339e550bd462f4857e59"}, - {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2136f64fbb86451dbbf70223635a468272dd20075f988a102bf8a3f194a411dc"}, - {file = "pyzmq-26.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e8918973fbd34e7814f59143c5f600ecd38b8038161239fd1a3d33d5817a38b8"}, - {file = "pyzmq-26.0.3-cp310-cp310-win32.whl", hash = "sha256:0aaf982e68a7ac284377d051c742610220fd06d330dcd4c4dbb4cdd77c22a537"}, - {file = "pyzmq-26.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:f1a9b7d00fdf60b4039f4455afd031fe85ee8305b019334b72dcf73c567edc47"}, - {file = "pyzmq-26.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:80b12f25d805a919d53efc0a5ad7c0c0326f13b4eae981a5d7b7cc343318ebb7"}, - {file = "pyzmq-26.0.3-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:a72a84570f84c374b4c287183debc776dc319d3e8ce6b6a0041ce2e400de3f32"}, - {file = "pyzmq-26.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ca684ee649b55fd8f378127ac8462fb6c85f251c2fb027eb3c887e8ee347bcd"}, - {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e222562dc0f38571c8b1ffdae9d7adb866363134299264a1958d077800b193b7"}, - {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f17cde1db0754c35a91ac00b22b25c11da6eec5746431d6e5092f0cd31a3fea9"}, - {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b7c0c0b3244bb2275abe255d4a30c050d541c6cb18b870975553f1fb6f37527"}, - {file = "pyzmq-26.0.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:ac97a21de3712afe6a6c071abfad40a6224fd14fa6ff0ff8d0c6e6cd4e2f807a"}, - {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:88b88282e55fa39dd556d7fc04160bcf39dea015f78e0cecec8ff4f06c1fc2b5"}, - {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:72b67f966b57dbd18dcc7efbc1c7fc9f5f983e572db1877081f075004614fcdd"}, - {file = "pyzmq-26.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f4b6cecbbf3b7380f3b61de3a7b93cb721125dc125c854c14ddc91225ba52f83"}, - {file = "pyzmq-26.0.3-cp311-cp311-win32.whl", hash = "sha256:eed56b6a39216d31ff8cd2f1d048b5bf1700e4b32a01b14379c3b6dde9ce3aa3"}, - {file = "pyzmq-26.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:3191d312c73e3cfd0f0afdf51df8405aafeb0bad71e7ed8f68b24b63c4f36500"}, - {file = "pyzmq-26.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:b6907da3017ef55139cf0e417c5123a84c7332520e73a6902ff1f79046cd3b94"}, - {file = "pyzmq-26.0.3-cp312-cp312-macosx_10_15_universal2.whl", 
hash = "sha256:068ca17214038ae986d68f4a7021f97e187ed278ab6dccb79f837d765a54d753"}, - {file = "pyzmq-26.0.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7821d44fe07335bea256b9f1f41474a642ca55fa671dfd9f00af8d68a920c2d4"}, - {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eeb438a26d87c123bb318e5f2b3d86a36060b01f22fbdffd8cf247d52f7c9a2b"}, - {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69ea9d6d9baa25a4dc9cef5e2b77b8537827b122214f210dd925132e34ae9b12"}, - {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7daa3e1369355766dea11f1d8ef829905c3b9da886ea3152788dc25ee6079e02"}, - {file = "pyzmq-26.0.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6ca7a9a06b52d0e38ccf6bca1aeff7be178917893f3883f37b75589d42c4ac20"}, - {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1b7d0e124948daa4d9686d421ef5087c0516bc6179fdcf8828b8444f8e461a77"}, - {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e746524418b70f38550f2190eeee834db8850088c834d4c8406fbb9bc1ae10b2"}, - {file = "pyzmq-26.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6b3146f9ae6af82c47a5282ac8803523d381b3b21caeae0327ed2f7ecb718798"}, - {file = "pyzmq-26.0.3-cp312-cp312-win32.whl", hash = "sha256:2b291d1230845871c00c8462c50565a9cd6026fe1228e77ca934470bb7d70ea0"}, - {file = "pyzmq-26.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:926838a535c2c1ea21c903f909a9a54e675c2126728c21381a94ddf37c3cbddf"}, - {file = "pyzmq-26.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:5bf6c237f8c681dfb91b17f8435b2735951f0d1fad10cc5dfd96db110243370b"}, - {file = "pyzmq-26.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c0991f5a96a8e620f7691e61178cd8f457b49e17b7d9cfa2067e2a0a89fc1d5"}, - {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dbf012d8fcb9f2cf0643b65df3b355fdd74fc0035d70bb5c845e9e30a3a4654b"}, - {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:01fbfbeb8249a68d257f601deb50c70c929dc2dfe683b754659569e502fbd3aa"}, - {file = "pyzmq-26.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c8eb19abe87029c18f226d42b8a2c9efdd139d08f8bf6e085dd9075446db450"}, - {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5344b896e79800af86ad643408ca9aa303a017f6ebff8cee5a3163c1e9aec987"}, - {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:204e0f176fd1d067671157d049466869b3ae1fc51e354708b0dc41cf94e23a3a"}, - {file = "pyzmq-26.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a42db008d58530efa3b881eeee4991146de0b790e095f7ae43ba5cc612decbc5"}, - {file = "pyzmq-26.0.3-cp37-cp37m-win32.whl", hash = "sha256:8d7a498671ca87e32b54cb47c82a92b40130a26c5197d392720a1bce1b3c77cf"}, - {file = "pyzmq-26.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:3b4032a96410bdc760061b14ed6a33613ffb7f702181ba999df5d16fb96ba16a"}, - {file = "pyzmq-26.0.3-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2cc4e280098c1b192c42a849de8de2c8e0f3a84086a76ec5b07bfee29bda7d18"}, - {file = "pyzmq-26.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bde86a2ed3ce587fa2b207424ce15b9a83a9fa14422dcc1c5356a13aed3df9d"}, - {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:34106f68e20e6ff253c9f596ea50397dbd8699828d55e8fa18bd4323d8d966e6"}, - {file = 
"pyzmq-26.0.3-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ebbbd0e728af5db9b04e56389e2299a57ea8b9dd15c9759153ee2455b32be6ad"}, - {file = "pyzmq-26.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6b1d1c631e5940cac5a0b22c5379c86e8df6a4ec277c7a856b714021ab6cfad"}, - {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e891ce81edd463b3b4c3b885c5603c00141151dd9c6936d98a680c8c72fe5c67"}, - {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9b273ecfbc590a1b98f014ae41e5cf723932f3b53ba9367cfb676f838038b32c"}, - {file = "pyzmq-26.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b32bff85fb02a75ea0b68f21e2412255b5731f3f389ed9aecc13a6752f58ac97"}, - {file = "pyzmq-26.0.3-cp38-cp38-win32.whl", hash = "sha256:f6c21c00478a7bea93caaaef9e7629145d4153b15a8653e8bb4609d4bc70dbfc"}, - {file = "pyzmq-26.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:3401613148d93ef0fd9aabdbddb212de3db7a4475367f49f590c837355343972"}, - {file = "pyzmq-26.0.3-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:2ed8357f4c6e0daa4f3baf31832df8a33334e0fe5b020a61bc8b345a3db7a606"}, - {file = "pyzmq-26.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c1c8f2a2ca45292084c75bb6d3a25545cff0ed931ed228d3a1810ae3758f975f"}, - {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b63731993cdddcc8e087c64e9cf003f909262b359110070183d7f3025d1c56b5"}, - {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b3cd31f859b662ac5d7f4226ec7d8bd60384fa037fc02aee6ff0b53ba29a3ba8"}, - {file = "pyzmq-26.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:115f8359402fa527cf47708d6f8a0f8234f0e9ca0cab7c18c9c189c194dbf620"}, - {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:715bdf952b9533ba13dfcf1f431a8f49e63cecc31d91d007bc1deb914f47d0e4"}, - {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e1258c639e00bf5e8a522fec6c3eaa3e30cf1c23a2f21a586be7e04d50c9acab"}, - {file = "pyzmq-26.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:15c59e780be8f30a60816a9adab900c12a58d79c1ac742b4a8df044ab2a6d920"}, - {file = "pyzmq-26.0.3-cp39-cp39-win32.whl", hash = "sha256:d0cdde3c78d8ab5b46595054e5def32a755fc028685add5ddc7403e9f6de9879"}, - {file = "pyzmq-26.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:ce828058d482ef860746bf532822842e0ff484e27f540ef5c813d516dd8896d2"}, - {file = "pyzmq-26.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:788f15721c64109cf720791714dc14afd0f449d63f3a5487724f024345067381"}, - {file = "pyzmq-26.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c18645ef6294d99b256806e34653e86236eb266278c8ec8112622b61db255de"}, - {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e6bc96ebe49604df3ec2c6389cc3876cabe475e6bfc84ced1bf4e630662cb35"}, - {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:971e8990c5cc4ddcff26e149398fc7b0f6a042306e82500f5e8db3b10ce69f84"}, - {file = "pyzmq-26.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8416c23161abd94cc7da80c734ad7c9f5dbebdadfdaa77dad78244457448223"}, - {file = "pyzmq-26.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:082a2988364b60bb5de809373098361cf1dbb239623e39e46cb18bc035ed9c0c"}, - {file = "pyzmq-26.0.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:d57dfbf9737763b3a60d26e6800e02e04284926329aee8fb01049635e957fe81"}, - {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:77a85dca4c2430ac04dc2a2185c2deb3858a34fe7f403d0a946fa56970cf60a1"}, - {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4c82a6d952a1d555bf4be42b6532927d2a5686dd3c3e280e5f63225ab47ac1f5"}, - {file = "pyzmq-26.0.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4496b1282c70c442809fc1b151977c3d967bfb33e4e17cedbf226d97de18f709"}, - {file = "pyzmq-26.0.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:e4946d6bdb7ba972dfda282f9127e5756d4f299028b1566d1245fa0d438847e6"}, - {file = "pyzmq-26.0.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:03c0ae165e700364b266876d712acb1ac02693acd920afa67da2ebb91a0b3c09"}, - {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:3e3070e680f79887d60feeda051a58d0ac36622e1759f305a41059eff62c6da7"}, - {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6ca08b840fe95d1c2bd9ab92dac5685f949fc6f9ae820ec16193e5ddf603c3b2"}, - {file = "pyzmq-26.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e76654e9dbfb835b3518f9938e565c7806976c07b37c33526b574cc1a1050480"}, - {file = "pyzmq-26.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:871587bdadd1075b112e697173e946a07d722459d20716ceb3d1bd6c64bd08ce"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d0a2d1bd63a4ad79483049b26514e70fa618ce6115220da9efdff63688808b17"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0270b49b6847f0d106d64b5086e9ad5dc8a902413b5dbbb15d12b60f9c1747a4"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:703c60b9910488d3d0954ca585c34f541e506a091a41930e663a098d3b794c67"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74423631b6be371edfbf7eabb02ab995c2563fee60a80a30829176842e71722a"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4adfbb5451196842a88fda3612e2c0414134874bffb1c2ce83ab4242ec9e027d"}, - {file = "pyzmq-26.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3516119f4f9b8671083a70b6afaa0a070f5683e431ab3dc26e9215620d7ca1ad"}, - {file = "pyzmq-26.0.3.tar.gz", hash = "sha256:dba7d9f2e047dfa2bca3b01f4f84aa5246725203d6284e3790f2ca15fba6b40a"}, + {file = "pyzmq-26.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:263cf1e36862310bf5becfbc488e18d5d698941858860c5a8c079d1511b3b18e"}, + {file = "pyzmq-26.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d5c8b17f6e8f29138678834cf8518049e740385eb2dbf736e8f07fc6587ec682"}, + {file = "pyzmq-26.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75a95c2358fcfdef3374cb8baf57f1064d73246d55e41683aaffb6cfe6862917"}, + {file = "pyzmq-26.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f99de52b8fbdb2a8f5301ae5fc0f9e6b3ba30d1d5fc0421956967edcc6914242"}, + {file = "pyzmq-26.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bcbfbab4e1895d58ab7da1b5ce9a327764f0366911ba5b95406c9104bceacb0"}, + {file = "pyzmq-26.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:77ce6a332c7e362cb59b63f5edf730e83590d0ab4e59c2aa5bd79419a42e3449"}, 
+ {file = "pyzmq-26.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ba0a31d00e8616149a5ab440d058ec2da621e05d744914774c4dde6837e1f545"}, + {file = "pyzmq-26.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8b88641384e84a258b740801cd4dbc45c75f148ee674bec3149999adda4a8598"}, + {file = "pyzmq-26.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2fa76ebcebe555cce90f16246edc3ad83ab65bb7b3d4ce408cf6bc67740c4f88"}, + {file = "pyzmq-26.1.0-cp310-cp310-win32.whl", hash = "sha256:fbf558551cf415586e91160d69ca6416f3fce0b86175b64e4293644a7416b81b"}, + {file = "pyzmq-26.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:a7b8aab50e5a288c9724d260feae25eda69582be84e97c012c80e1a5e7e03fb2"}, + {file = "pyzmq-26.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:08f74904cb066e1178c1ec706dfdb5c6c680cd7a8ed9efebeac923d84c1f13b1"}, + {file = "pyzmq-26.1.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:46d6800b45015f96b9d92ece229d92f2aef137d82906577d55fadeb9cf5fcb71"}, + {file = "pyzmq-26.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5bc2431167adc50ba42ea3e5e5f5cd70d93e18ab7b2f95e724dd8e1bd2c38120"}, + {file = "pyzmq-26.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3bb34bebaa1b78e562931a1687ff663d298013f78f972a534f36c523311a84d"}, + {file = "pyzmq-26.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3f6329340cef1c7ba9611bd038f2d523cea79f09f9c8f6b0553caba59ec562"}, + {file = "pyzmq-26.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:471880c4c14e5a056a96cd224f5e71211997d40b4bf5e9fdded55dafab1f98f2"}, + {file = "pyzmq-26.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:ce6f2b66799971cbae5d6547acefa7231458289e0ad481d0be0740535da38d8b"}, + {file = "pyzmq-26.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a1f6ea5b1d6cdbb8cfa0536f0d470f12b4b41ad83625012e575f0e3ecfe97f0"}, + {file = "pyzmq-26.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b45e6445ac95ecb7d728604bae6538f40ccf4449b132b5428c09918523abc96d"}, + {file = "pyzmq-26.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:94c4262626424683feea0f3c34951d39d49d354722db2745c42aa6bb50ecd93b"}, + {file = "pyzmq-26.1.0-cp311-cp311-win32.whl", hash = "sha256:a0f0ab9df66eb34d58205913f4540e2ad17a175b05d81b0b7197bc57d000e829"}, + {file = "pyzmq-26.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:8efb782f5a6c450589dbab4cb0f66f3a9026286333fe8f3a084399149af52f29"}, + {file = "pyzmq-26.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f133d05aaf623519f45e16ab77526e1e70d4e1308e084c2fb4cedb1a0c764bbb"}, + {file = "pyzmq-26.1.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:3d3146b1c3dcc8a1539e7cc094700b2be1e605a76f7c8f0979b6d3bde5ad4072"}, + {file = "pyzmq-26.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d9270fbf038bf34ffca4855bcda6e082e2c7f906b9eb8d9a8ce82691166060f7"}, + {file = "pyzmq-26.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:995301f6740a421afc863a713fe62c0aaf564708d4aa057dfdf0f0f56525294b"}, + {file = "pyzmq-26.1.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7eca8b89e56fb8c6c26dd3e09bd41b24789022acf1cf13358e96f1cafd8cae3"}, + {file = "pyzmq-26.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d4feb2e83dfe9ace6374a847e98ee9d1246ebadcc0cb765482e272c34e5820"}, + {file = "pyzmq-26.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = 
"sha256:d4fafc2eb5d83f4647331267808c7e0c5722c25a729a614dc2b90479cafa78bd"}, + {file = "pyzmq-26.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:58c33dc0e185dd97a9ac0288b3188d1be12b756eda67490e6ed6a75cf9491d79"}, + {file = "pyzmq-26.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:68a0a1d83d33d8367ddddb3e6bb4afbb0f92bd1dac2c72cd5e5ddc86bdafd3eb"}, + {file = "pyzmq-26.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2ae7c57e22ad881af78075e0cea10a4c778e67234adc65c404391b417a4dda83"}, + {file = "pyzmq-26.1.0-cp312-cp312-win32.whl", hash = "sha256:347e84fc88cc4cb646597f6d3a7ea0998f887ee8dc31c08587e9c3fd7b5ccef3"}, + {file = "pyzmq-26.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:9f136a6e964830230912f75b5a116a21fe8e34128dcfd82285aa0ef07cb2c7bd"}, + {file = "pyzmq-26.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:a4b7a989c8f5a72ab1b2bbfa58105578753ae77b71ba33e7383a31ff75a504c4"}, + {file = "pyzmq-26.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d416f2088ac8f12daacffbc2e8918ef4d6be8568e9d7155c83b7cebed49d2322"}, + {file = "pyzmq-26.1.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:ecb6c88d7946166d783a635efc89f9a1ff11c33d680a20df9657b6902a1d133b"}, + {file = "pyzmq-26.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:471312a7375571857a089342beccc1a63584315188560c7c0da7e0a23afd8a5c"}, + {file = "pyzmq-26.1.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e6cea102ffa16b737d11932c426f1dc14b5938cf7bc12e17269559c458ac334"}, + {file = "pyzmq-26.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec7248673ffc7104b54e4957cee38b2f3075a13442348c8d651777bf41aa45ee"}, + {file = "pyzmq-26.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:0614aed6f87d550b5cecb03d795f4ddbb1544b78d02a4bd5eecf644ec98a39f6"}, + {file = "pyzmq-26.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:e8746ce968be22a8a1801bf4a23e565f9687088580c3ed07af5846580dd97f76"}, + {file = "pyzmq-26.1.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:7688653574392d2eaeef75ddcd0b2de5b232d8730af29af56c5adf1df9ef8d6f"}, + {file = "pyzmq-26.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:8d4dac7d97f15c653a5fedcafa82626bd6cee1450ccdaf84ffed7ea14f2b07a4"}, + {file = "pyzmq-26.1.0-cp313-cp313-win32.whl", hash = "sha256:ccb42ca0a4a46232d716779421bbebbcad23c08d37c980f02cc3a6bd115ad277"}, + {file = "pyzmq-26.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e1e5d0a25aea8b691a00d6b54b28ac514c8cc0d8646d05f7ca6cb64b97358250"}, + {file = "pyzmq-26.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:fc82269d24860cfa859b676d18850cbb8e312dcd7eada09e7d5b007e2f3d9eb1"}, + {file = "pyzmq-26.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:416ac51cabd54f587995c2b05421324700b22e98d3d0aa2cfaec985524d16f1d"}, + {file = "pyzmq-26.1.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:ff832cce719edd11266ca32bc74a626b814fff236824aa1aeaad399b69fe6eae"}, + {file = "pyzmq-26.1.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:393daac1bcf81b2a23e696b7b638eedc965e9e3d2112961a072b6cd8179ad2eb"}, + {file = "pyzmq-26.1.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9869fa984c8670c8ab899a719eb7b516860a29bc26300a84d24d8c1b71eae3ec"}, + {file = "pyzmq-26.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b3b8e36fd4c32c0825b4461372949ecd1585d326802b1321f8b6dc1d7e9318c"}, + {file = 
"pyzmq-26.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:3ee647d84b83509b7271457bb428cc347037f437ead4b0b6e43b5eba35fec0aa"}, + {file = "pyzmq-26.1.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:45cb1a70eb00405ce3893041099655265fabcd9c4e1e50c330026e82257892c1"}, + {file = "pyzmq-26.1.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:5cca7b4adb86d7470e0fc96037771981d740f0b4cb99776d5cb59cd0e6684a73"}, + {file = "pyzmq-26.1.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:91d1a20bdaf3b25f3173ff44e54b1cfbc05f94c9e8133314eb2962a89e05d6e3"}, + {file = "pyzmq-26.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c0665d85535192098420428c779361b8823d3d7ec4848c6af3abb93bc5c915bf"}, + {file = "pyzmq-26.1.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:96d7c1d35ee4a495df56c50c83df7af1c9688cce2e9e0edffdbf50889c167595"}, + {file = "pyzmq-26.1.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b281b5ff5fcc9dcbfe941ac5c7fcd4b6c065adad12d850f95c9d6f23c2652384"}, + {file = "pyzmq-26.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5384c527a9a004445c5074f1e20db83086c8ff1682a626676229aafd9cf9f7d1"}, + {file = "pyzmq-26.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:754c99a9840839375ee251b38ac5964c0f369306eddb56804a073b6efdc0cd88"}, + {file = "pyzmq-26.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9bdfcb74b469b592972ed881bad57d22e2c0acc89f5e8c146782d0d90fb9f4bf"}, + {file = "pyzmq-26.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bd13f0231f4788db619347b971ca5f319c5b7ebee151afc7c14632068c6261d3"}, + {file = "pyzmq-26.1.0-cp37-cp37m-win32.whl", hash = "sha256:c5668dac86a869349828db5fc928ee3f58d450dce2c85607067d581f745e4fb1"}, + {file = "pyzmq-26.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ad875277844cfaeca7fe299ddf8c8d8bfe271c3dc1caf14d454faa5cdbf2fa7a"}, + {file = "pyzmq-26.1.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:65c6e03cc0222eaf6aad57ff4ecc0a070451e23232bb48db4322cc45602cede0"}, + {file = "pyzmq-26.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:038ae4ffb63e3991f386e7fda85a9baab7d6617fe85b74a8f9cab190d73adb2b"}, + {file = "pyzmq-26.1.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:bdeb2c61611293f64ac1073f4bf6723b67d291905308a7de9bb2ca87464e3273"}, + {file = "pyzmq-26.1.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:61dfa5ee9d7df297c859ac82b1226d8fefaf9c5113dc25c2c00ecad6feeeb04f"}, + {file = "pyzmq-26.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3292d384537b9918010769b82ab3e79fca8b23d74f56fc69a679106a3e2c2cf"}, + {file = "pyzmq-26.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f9499c70c19ff0fbe1007043acb5ad15c1dec7d8e84ab429bca8c87138e8f85c"}, + {file = "pyzmq-26.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d3dd5523ed258ad58fed7e364c92a9360d1af8a9371e0822bd0146bdf017ef4c"}, + {file = "pyzmq-26.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baba2fd199b098c5544ef2536b2499d2e2155392973ad32687024bd8572a7d1c"}, + {file = "pyzmq-26.1.0-cp38-cp38-win32.whl", hash = "sha256:ddbb2b386128d8eca92bd9ca74e80f73fe263bcca7aa419f5b4cbc1661e19741"}, + {file = "pyzmq-26.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:79e45a4096ec8388cdeb04a9fa5e9371583bcb826964d55b8b66cbffe7b33c86"}, + {file = "pyzmq-26.1.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:add52c78a12196bc0fda2de087ba6c876ea677cbda2e3eba63546b26e8bf177b"}, + {file = 
"pyzmq-26.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:98c03bd7f3339ff47de7ea9ac94a2b34580a8d4df69b50128bb6669e1191a895"}, + {file = "pyzmq-26.1.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dcc37d9d708784726fafc9c5e1232de655a009dbf97946f117aefa38d5985a0f"}, + {file = "pyzmq-26.1.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5a6ed52f0b9bf8dcc64cc82cce0607a3dfed1dbb7e8c6f282adfccc7be9781de"}, + {file = "pyzmq-26.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:451e16ae8bea3d95649317b463c9f95cd9022641ec884e3d63fc67841ae86dfe"}, + {file = "pyzmq-26.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:906e532c814e1d579138177a00ae835cd6becbf104d45ed9093a3aaf658f6a6a"}, + {file = "pyzmq-26.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:05bacc4f94af468cc82808ae3293390278d5f3375bb20fef21e2034bb9a505b6"}, + {file = "pyzmq-26.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:57bb2acba798dc3740e913ffadd56b1fcef96f111e66f09e2a8db3050f1f12c8"}, + {file = "pyzmq-26.1.0-cp39-cp39-win32.whl", hash = "sha256:f774841bb0e8588505002962c02da420bcfb4c5056e87a139c6e45e745c0e2e2"}, + {file = "pyzmq-26.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:359c533bedc62c56415a1f5fcfd8279bc93453afdb0803307375ecf81c962402"}, + {file = "pyzmq-26.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:7907419d150b19962138ecec81a17d4892ea440c184949dc29b358bc730caf69"}, + {file = "pyzmq-26.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b24079a14c9596846bf7516fe75d1e2188d4a528364494859106a33d8b48be38"}, + {file = "pyzmq-26.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59d0acd2976e1064f1b398a00e2c3e77ed0a157529779e23087d4c2fb8aaa416"}, + {file = "pyzmq-26.1.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:911c43a4117915203c4cc8755e0f888e16c4676a82f61caee2f21b0c00e5b894"}, + {file = "pyzmq-26.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10163e586cc609f5f85c9b233195554d77b1e9a0801388907441aaeb22841c5"}, + {file = "pyzmq-26.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:28a8b2abb76042f5fd7bd720f7fea48c0fd3e82e9de0a1bf2c0de3812ce44a42"}, + {file = "pyzmq-26.1.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bef24d3e4ae2c985034439f449e3f9e06bf579974ce0e53d8a507a1577d5b2ab"}, + {file = "pyzmq-26.1.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2cd0f4d314f4a2518e8970b6f299ae18cff7c44d4a1fc06fc713f791c3a9e3ea"}, + {file = "pyzmq-26.1.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fa25a620eed2a419acc2cf10135b995f8f0ce78ad00534d729aa761e4adcef8a"}, + {file = "pyzmq-26.1.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef3b048822dca6d231d8a8ba21069844ae38f5d83889b9b690bf17d2acc7d099"}, + {file = "pyzmq-26.1.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:9a6847c92d9851b59b9f33f968c68e9e441f9a0f8fc972c5580c5cd7cbc6ee24"}, + {file = "pyzmq-26.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9b9305004d7e4e6a824f4f19b6d8f32b3578aad6f19fc1122aaf320cbe3dc83"}, + {file = "pyzmq-26.1.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:63c1d3a65acb2f9c92dce03c4e1758cc552f1ae5c78d79a44e3bb88d2fa71f3a"}, + {file = "pyzmq-26.1.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:d36b8fffe8b248a1b961c86fbdfa0129dfce878731d169ede7fa2631447331be"}, + {file = "pyzmq-26.1.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67976d12ebfd61a3bc7d77b71a9589b4d61d0422282596cf58c62c3866916544"}, + {file = "pyzmq-26.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:998444debc8816b5d8d15f966e42751032d0f4c55300c48cc337f2b3e4f17d03"}, + {file = "pyzmq-26.1.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e5c88b2f13bcf55fee78ea83567b9fe079ba1a4bef8b35c376043440040f7edb"}, + {file = "pyzmq-26.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d906d43e1592be4b25a587b7d96527cb67277542a5611e8ea9e996182fae410"}, + {file = "pyzmq-26.1.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80b0c9942430d731c786545da6be96d824a41a51742e3e374fedd9018ea43106"}, + {file = "pyzmq-26.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:314d11564c00b77f6224d12eb3ddebe926c301e86b648a1835c5b28176c83eab"}, + {file = "pyzmq-26.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:093a1a3cae2496233f14b57f4b485da01b4ff764582c854c0f42c6dd2be37f3d"}, + {file = "pyzmq-26.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3c397b1b450f749a7e974d74c06d69bd22dd362142f370ef2bd32a684d6b480c"}, + {file = "pyzmq-26.1.0.tar.gz", hash = "sha256:6c5aeea71f018ebd3b9115c7cb13863dd850e98ca6b9258509de1246461a7e7f"}, ] [package.dependencies] @@ -5065,13 +5584,13 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} [[package]] name = "qdrant-client" -version = "1.10.0" +version = "1.11.0" description = "Client library for the Qdrant vector search engine" optional = false python-versions = ">=3.8" files = [ - {file = "qdrant_client-1.10.0-py3-none-any.whl", hash = "sha256:423c2586709ccf3db20850cd85c3d18954692a8faff98367dfa9dc82ab7f91d9"}, - {file = "qdrant_client-1.10.0.tar.gz", hash = "sha256:47c4f7abfab152fb7e5e4902ab0e2e9e33483c49ea5e80128ccd0295f342cf9b"}, + {file = "qdrant_client-1.11.0-py3-none-any.whl", hash = "sha256:1f574ccebb91c0bc8a620c9a41a5a010084fbc4d8c6f1cd0ab7b2eeb97336fc0"}, + {file = "qdrant_client-1.11.0.tar.gz", hash = "sha256:7c1d4d7a96cfd1ee0cde2a21c607e9df86bcca795ad8d1fd274d295ab64b8458"}, ] [package.dependencies] @@ -5087,36 +5606,37 @@ pydantic = ">=1.10.8" urllib3 = ">=1.26.14,<3" [package.extras] -fastembed = ["fastembed (==0.2.7)"] -fastembed-gpu = ["fastembed-gpu (==0.2.7)"] +fastembed = ["fastembed (==0.3.4)"] +fastembed-gpu = ["fastembed-gpu (==0.3.4)"] [[package]] name = "redis" -version = "4.6.0" +version = "5.0.8" description = "Python client for Redis database and key-value store" optional = false python-versions = ">=3.7" files = [ - {file = "redis-4.6.0-py3-none-any.whl", hash = "sha256:e2b03db868160ee4591de3cb90d40ebb50a90dd302138775937f6a42b7ed183c"}, - {file = "redis-4.6.0.tar.gz", hash = "sha256:585dc516b9eb042a619ef0a39c3d7d55fe81bdb4df09a52c9cdde0d07bf1aa7d"}, + {file = "redis-5.0.8-py3-none-any.whl", hash = "sha256:56134ee08ea909106090934adc36f65c9bcbbaecea5b21ba704ba6fb561f8eb4"}, + {file = "redis-5.0.8.tar.gz", hash = "sha256:0c5b10d387568dfe0698c6fad6615750c24170e548ca2deac10c649d463e9870"}, ] [package.dependencies] -async-timeout = {version = ">=4.0.2", markers = "python_full_version <= \"3.11.2\""} +async-timeout = {version = ">=4.0.3", markers = "python_full_version < \"3.11.3\""} +hiredis = {version = ">1.0.0", optional = true, markers = "extra == \"hiredis\""} [package.extras] -hiredis = ["hiredis 
(>=1.0.0)"] +hiredis = ["hiredis (>1.0.0)"] ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)"] [[package]] name = "referencing" -version = "0.31.1" +version = "0.35.1" description = "JSON Referencing + Python" optional = false python-versions = ">=3.8" files = [ - {file = "referencing-0.31.1-py3-none-any.whl", hash = "sha256:c19c4d006f1757e3dd75c4f784d38f8698d87b649c54f9ace14e5e8c9667c01d"}, - {file = "referencing-0.31.1.tar.gz", hash = "sha256:81a1471c68c9d5e3831c30ad1dd9815c45b558e596653db751a2bfdd17b3b9ec"}, + {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, + {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, ] [package.dependencies] @@ -5125,101 +5645,101 @@ rpds-py = ">=0.7.0" [[package]] name = "regex" -version = "2024.5.15" +version = "2024.7.24" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" files = [ - {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a81e3cfbae20378d75185171587cbf756015ccb14840702944f014e0d93ea09f"}, - {file = "regex-2024.5.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7b59138b219ffa8979013be7bc85bb60c6f7b7575df3d56dc1e403a438c7a3f6"}, - {file = "regex-2024.5.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0bd000c6e266927cb7a1bc39d55be95c4b4f65c5be53e659537537e019232b1"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eaa7ddaf517aa095fa8da0b5015c44d03da83f5bd49c87961e3c997daed0de7"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba68168daedb2c0bab7fd7e00ced5ba90aebf91024dea3c88ad5063c2a562cca"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e8d717bca3a6e2064fc3a08df5cbe366369f4b052dcd21b7416e6d71620dca1"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1337b7dbef9b2f71121cdbf1e97e40de33ff114801263b275aafd75303bd62b5"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9ebd0a36102fcad2f03696e8af4ae682793a5d30b46c647eaf280d6cfb32796"}, - {file = "regex-2024.5.15-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9efa1a32ad3a3ea112224897cdaeb6aa00381627f567179c0314f7b65d354c62"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1595f2d10dff3d805e054ebdc41c124753631b6a471b976963c7b28543cf13b0"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b802512f3e1f480f41ab5f2cfc0e2f761f08a1f41092d6718868082fc0d27143"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:a0981022dccabca811e8171f913de05720590c915b033b7e601f35ce4ea7019f"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:19068a6a79cf99a19ccefa44610491e9ca02c2be3305c7760d3831d38a467a6f"}, - {file = "regex-2024.5.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:1b5269484f6126eee5e687785e83c6b60aad7663dafe842b34691157e5083e53"}, - {file = "regex-2024.5.15-cp310-cp310-win32.whl", hash = "sha256:ada150c5adfa8fbcbf321c30c751dc67d2f12f15bd183ffe4ec7cde351d945b3"}, - {file = "regex-2024.5.15-cp310-cp310-win_amd64.whl", hash = 
"sha256:ac394ff680fc46b97487941f5e6ae49a9f30ea41c6c6804832063f14b2a5a145"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f5b1dff3ad008dccf18e652283f5e5339d70bf8ba7c98bf848ac33db10f7bc7a"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c6a2b494a76983df8e3d3feea9b9ffdd558b247e60b92f877f93a1ff43d26656"}, - {file = "regex-2024.5.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a32b96f15c8ab2e7d27655969a23895eb799de3665fa94349f3b2fbfd547236f"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10002e86e6068d9e1c91eae8295ef690f02f913c57db120b58fdd35a6bb1af35"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ec54d5afa89c19c6dd8541a133be51ee1017a38b412b1321ccb8d6ddbeb4cf7d"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10e4ce0dca9ae7a66e6089bb29355d4432caed736acae36fef0fdd7879f0b0cb"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e507ff1e74373c4d3038195fdd2af30d297b4f0950eeda6f515ae3d84a1770f"}, - {file = "regex-2024.5.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1f059a4d795e646e1c37665b9d06062c62d0e8cc3c511fe01315973a6542e40"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0721931ad5fe0dda45d07f9820b90b2148ccdd8e45bb9e9b42a146cb4f695649"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:833616ddc75ad595dee848ad984d067f2f31be645d603e4d158bba656bbf516c"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:287eb7f54fc81546346207c533ad3c2c51a8d61075127d7f6d79aaf96cdee890"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:19dfb1c504781a136a80ecd1fff9f16dddf5bb43cec6871778c8a907a085bb3d"}, - {file = "regex-2024.5.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:119af6e56dce35e8dfb5222573b50c89e5508d94d55713c75126b753f834de68"}, - {file = "regex-2024.5.15-cp311-cp311-win32.whl", hash = "sha256:1c1c174d6ec38d6c8a7504087358ce9213d4332f6293a94fbf5249992ba54efa"}, - {file = "regex-2024.5.15-cp311-cp311-win_amd64.whl", hash = "sha256:9e717956dcfd656f5055cc70996ee2cc82ac5149517fc8e1b60261b907740201"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:632b01153e5248c134007209b5c6348a544ce96c46005d8456de1d552455b014"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e64198f6b856d48192bf921421fdd8ad8eb35e179086e99e99f711957ffedd6e"}, - {file = "regex-2024.5.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68811ab14087b2f6e0fc0c2bae9ad689ea3584cad6917fc57be6a48bbd012c49"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ec0c2fea1e886a19c3bee0cd19d862b3aa75dcdfb42ebe8ed30708df64687a"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0c0c0003c10f54a591d220997dd27d953cd9ccc1a7294b40a4be5312be8797b"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2431b9e263af1953c55abbd3e2efca67ca80a3de8a0437cb58e2421f8184717a"}, - {file = "regex-2024.5.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a605586358893b483976cffc1723fb0f83e526e8f14c6e6614e75919d9862cf"}, 
- {file = "regex-2024.5.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391d7f7f1e409d192dba8bcd42d3e4cf9e598f3979cdaed6ab11288da88cb9f2"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9ff11639a8d98969c863d4617595eb5425fd12f7c5ef6621a4b74b71ed8726d5"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4eee78a04e6c67e8391edd4dad3279828dd66ac4b79570ec998e2155d2e59fd5"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8fe45aa3f4aa57faabbc9cb46a93363edd6197cbc43523daea044e9ff2fea83e"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d0a3d8d6acf0c78a1fff0e210d224b821081330b8524e3e2bc5a68ef6ab5803d"}, - {file = "regex-2024.5.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c486b4106066d502495b3025a0a7251bf37ea9540433940a23419461ab9f2a80"}, - {file = "regex-2024.5.15-cp312-cp312-win32.whl", hash = "sha256:c49e15eac7c149f3670b3e27f1f28a2c1ddeccd3a2812cba953e01be2ab9b5fe"}, - {file = "regex-2024.5.15-cp312-cp312-win_amd64.whl", hash = "sha256:673b5a6da4557b975c6c90198588181029c60793835ce02f497ea817ff647cb2"}, - {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:87e2a9c29e672fc65523fb47a90d429b70ef72b901b4e4b1bd42387caf0d6835"}, - {file = "regex-2024.5.15-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c3bea0ba8b73b71b37ac833a7f3fd53825924165da6a924aec78c13032f20850"}, - {file = "regex-2024.5.15-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bfc4f82cabe54f1e7f206fd3d30fda143f84a63fe7d64a81558d6e5f2e5aaba9"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5bb9425fe881d578aeca0b2b4b3d314ec88738706f66f219c194d67179337cb"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:64c65783e96e563103d641760664125e91bd85d8e49566ee560ded4da0d3e704"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf2430df4148b08fb4324b848672514b1385ae3807651f3567871f130a728cc3"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5397de3219a8b08ae9540c48f602996aa6b0b65d5a61683e233af8605c42b0f2"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:455705d34b4154a80ead722f4f185b04c4237e8e8e33f265cd0798d0e44825fa"}, - {file = "regex-2024.5.15-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2b6f1b3bb6f640c1a92be3bbfbcb18657b125b99ecf141fb3310b5282c7d4ed"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3ad070b823ca5890cab606c940522d05d3d22395d432f4aaaf9d5b1653e47ced"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5b5467acbfc153847d5adb21e21e29847bcb5870e65c94c9206d20eb4e99a384"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e6662686aeb633ad65be2a42b4cb00178b3fbf7b91878f9446075c404ada552f"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:2b4c884767504c0e2401babe8b5b7aea9148680d2e157fa28f01529d1f7fcf67"}, - {file = "regex-2024.5.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3cd7874d57f13bf70078f1ff02b8b0aa48d5b9ed25fc48547516c6aba36f5741"}, - {file = "regex-2024.5.15-cp38-cp38-win32.whl", hash = 
"sha256:e4682f5ba31f475d58884045c1a97a860a007d44938c4c0895f41d64481edbc9"}, - {file = "regex-2024.5.15-cp38-cp38-win_amd64.whl", hash = "sha256:d99ceffa25ac45d150e30bd9ed14ec6039f2aad0ffa6bb87a5936f5782fc1569"}, - {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13cdaf31bed30a1e1c2453ef6015aa0983e1366fad2667657dbcac7b02f67133"}, - {file = "regex-2024.5.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cac27dcaa821ca271855a32188aa61d12decb6fe45ffe3e722401fe61e323cd1"}, - {file = "regex-2024.5.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7dbe2467273b875ea2de38ded4eba86cbcbc9a1a6d0aa11dcf7bd2e67859c435"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64f18a9a3513a99c4bef0e3efd4c4a5b11228b48aa80743be822b71e132ae4f5"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d347a741ea871c2e278fde6c48f85136c96b8659b632fb57a7d1ce1872547600"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1878b8301ed011704aea4c806a3cadbd76f84dece1ec09cc9e4dc934cfa5d4da"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4babf07ad476aaf7830d77000874d7611704a7fcf68c9c2ad151f5d94ae4bfc4"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35cb514e137cb3488bce23352af3e12fb0dbedd1ee6e60da053c69fb1b29cc6c"}, - {file = "regex-2024.5.15-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cdd09d47c0b2efee9378679f8510ee6955d329424c659ab3c5e3a6edea696294"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:72d7a99cd6b8f958e85fc6ca5b37c4303294954eac1376535b03c2a43eb72629"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a094801d379ab20c2135529948cb84d417a2169b9bdceda2a36f5f10977ebc16"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:c0c18345010870e58238790a6779a1219b4d97bd2e77e1140e8ee5d14df071aa"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:16093f563098448ff6b1fa68170e4acbef94e6b6a4e25e10eae8598bb1694b5d"}, - {file = "regex-2024.5.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e38a7d4e8f633a33b4c7350fbd8bad3b70bf81439ac67ac38916c4a86b465456"}, - {file = "regex-2024.5.15-cp39-cp39-win32.whl", hash = "sha256:71a455a3c584a88f654b64feccc1e25876066c4f5ef26cd6dd711308aa538694"}, - {file = "regex-2024.5.15-cp39-cp39-win_amd64.whl", hash = "sha256:cab12877a9bdafde5500206d1020a584355a97884dfd388af3699e9137bf7388"}, - {file = "regex-2024.5.15.tar.gz", hash = "sha256:d3ee02d9e5f482cc8309134a91eeaacbdd2261ba111b0fef3748eeb4913e6a2c"}, + {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b0d3f567fafa0633aee87f08b9276c7062da9616931382993c03808bb68ce"}, + {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3426de3b91d1bc73249042742f45c2148803c111d1175b283270177fdf669024"}, + {file = "regex-2024.7.24-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f273674b445bcb6e4409bf8d1be67bc4b58e8b46fd0d560055d515b8830063cd"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23acc72f0f4e1a9e6e9843d6328177ae3074b4182167e34119ec7233dfeccf53"}, + {file = 
"regex-2024.7.24-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65fd3d2e228cae024c411c5ccdffae4c315271eee4a8b839291f84f796b34eca"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c414cbda77dbf13c3bc88b073a1a9f375c7b0cb5e115e15d4b73ec3a2fbc6f59"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7a89eef64b5455835f5ed30254ec19bf41f7541cd94f266ab7cbd463f00c41"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19c65b00d42804e3fbea9708f0937d157e53429a39b7c61253ff15670ff62cb5"}, + {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7a5486ca56c8869070a966321d5ab416ff0f83f30e0e2da1ab48815c8d165d46"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f51f9556785e5a203713f5efd9c085b4a45aecd2a42573e2b5041881b588d1f"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a4997716674d36a82eab3e86f8fa77080a5d8d96a389a61ea1d0e3a94a582cf7"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c0abb5e4e8ce71a61d9446040c1e86d4e6d23f9097275c5bd49ed978755ff0fe"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:18300a1d78cf1290fa583cd8b7cde26ecb73e9f5916690cf9d42de569c89b1ce"}, + {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:416c0e4f56308f34cdb18c3f59849479dde5b19febdcd6e6fa4d04b6c31c9faa"}, + {file = "regex-2024.7.24-cp310-cp310-win32.whl", hash = "sha256:fb168b5924bef397b5ba13aabd8cf5df7d3d93f10218d7b925e360d436863f66"}, + {file = "regex-2024.7.24-cp310-cp310-win_amd64.whl", hash = "sha256:6b9fc7e9cc983e75e2518496ba1afc524227c163e43d706688a6bb9eca41617e"}, + {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:382281306e3adaaa7b8b9ebbb3ffb43358a7bbf585fa93821300a418bb975281"}, + {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4fdd1384619f406ad9037fe6b6eaa3de2749e2e12084abc80169e8e075377d3b"}, + {file = "regex-2024.7.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3d974d24edb231446f708c455fd08f94c41c1ff4f04bcf06e5f36df5ef50b95a"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2ec4419a3fe6cf8a4795752596dfe0adb4aea40d3683a132bae9c30b81e8d73"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb563dd3aea54c797adf513eeec819c4213d7dbfc311874eb4fd28d10f2ff0f2"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45104baae8b9f67569f0f1dca5e1f1ed77a54ae1cd8b0b07aba89272710db61e"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:994448ee01864501912abf2bad9203bffc34158e80fe8bfb5b031f4f8e16da51"}, + {file = "regex-2024.7.24-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fac296f99283ac232d8125be932c5cd7644084a30748fda013028c815ba3364"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7e37e809b9303ec3a179085415cb5f418ecf65ec98cdfe34f6a078b46ef823ee"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:01b689e887f612610c869421241e075c02f2e3d1ae93a037cb14f88ab6a8934c"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f6442f0f0ff81775eaa5b05af8a0ffa1dda36e9cf6ec1e0d3d245e8564b684ce"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:871e3ab2838fbcb4e0865a6e01233975df3a15e6fce93b6f99d75cacbd9862d1"}, + {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c918b7a1e26b4ab40409820ddccc5d49871a82329640f5005f73572d5eaa9b5e"}, + {file = "regex-2024.7.24-cp311-cp311-win32.whl", hash = "sha256:2dfbb8baf8ba2c2b9aa2807f44ed272f0913eeeba002478c4577b8d29cde215c"}, + {file = "regex-2024.7.24-cp311-cp311-win_amd64.whl", hash = "sha256:538d30cd96ed7d1416d3956f94d54e426a8daf7c14527f6e0d6d425fcb4cca52"}, + {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fe4ebef608553aff8deb845c7f4f1d0740ff76fa672c011cc0bacb2a00fbde86"}, + {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:74007a5b25b7a678459f06559504f1eec2f0f17bca218c9d56f6a0a12bfffdad"}, + {file = "regex-2024.7.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7df9ea48641da022c2a3c9c641650cd09f0cd15e8908bf931ad538f5ca7919c9"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1141a1dcc32904c47f6846b040275c6e5de0bf73f17d7a409035d55b76f289"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80c811cfcb5c331237d9bad3bea2c391114588cf4131707e84d9493064d267f9"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7214477bf9bd195894cf24005b1e7b496f46833337b5dedb7b2a6e33f66d962c"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d55588cba7553f0b6ec33130bc3e114b355570b45785cebdc9daed8c637dd440"}, + {file = "regex-2024.7.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558a57cfc32adcf19d3f791f62b5ff564922942e389e3cfdb538a23d65a6b610"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a512eed9dfd4117110b1881ba9a59b31433caed0c4101b361f768e7bcbaf93c5"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86b17ba823ea76256b1885652e3a141a99a5c4422f4a869189db328321b73799"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5eefee9bfe23f6df09ffb6dfb23809f4d74a78acef004aa904dc7c88b9944b05"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:731fcd76bbdbf225e2eb85b7c38da9633ad3073822f5ab32379381e8c3c12e94"}, + {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eaef80eac3b4cfbdd6de53c6e108b4c534c21ae055d1dbea2de6b3b8ff3def38"}, + {file = "regex-2024.7.24-cp312-cp312-win32.whl", hash = "sha256:185e029368d6f89f36e526764cf12bf8d6f0e3a2a7737da625a76f594bdfcbfc"}, + {file = "regex-2024.7.24-cp312-cp312-win_amd64.whl", hash = "sha256:2f1baff13cc2521bea83ab2528e7a80cbe0ebb2c6f0bfad15be7da3aed443908"}, + {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:66b4c0731a5c81921e938dcf1a88e978264e26e6ac4ec96a4d21ae0354581ae0"}, + {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:88ecc3afd7e776967fa16c80f974cb79399ee8dc6c96423321d6f7d4b881c92b"}, + {file = "regex-2024.7.24-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:64bd50cf16bcc54b274e20235bf8edbb64184a30e1e53873ff8d444e7ac656b2"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb462f0e346fcf41a901a126b50f8781e9a474d3927930f3490f38a6e73b6950"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a82465ebbc9b1c5c50738536fdfa7cab639a261a99b469c9d4c7dcbb2b3f1e57"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68a8f8c046c6466ac61a36b65bb2395c74451df2ffb8458492ef49900efed293"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac8e84fff5d27420f3c1e879ce9929108e873667ec87e0c8eeb413a5311adfe"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba2537ef2163db9e6ccdbeb6f6424282ae4dea43177402152c67ef869cf3978b"}, + {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:43affe33137fcd679bdae93fb25924979517e011f9dea99163f80b82eadc7e53"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c9bb87fdf2ab2370f21e4d5636e5317775e5d51ff32ebff2cf389f71b9b13750"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:945352286a541406f99b2655c973852da7911b3f4264e010218bbc1cc73168f2"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:8bc593dcce679206b60a538c302d03c29b18e3d862609317cb560e18b66d10cf"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3f3b6ca8eae6d6c75a6cff525c8530c60e909a71a15e1b731723233331de4169"}, + {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c51edc3541e11fbe83f0c4d9412ef6c79f664a3745fab261457e84465ec9d5a8"}, + {file = "regex-2024.7.24-cp38-cp38-win32.whl", hash = "sha256:d0a07763776188b4db4c9c7fb1b8c494049f84659bb387b71c73bbc07f189e96"}, + {file = "regex-2024.7.24-cp38-cp38-win_amd64.whl", hash = "sha256:8fd5afd101dcf86a270d254364e0e8dddedebe6bd1ab9d5f732f274fa00499a5"}, + {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0ffe3f9d430cd37d8fa5632ff6fb36d5b24818c5c986893063b4e5bdb84cdf24"}, + {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25419b70ba00a16abc90ee5fce061228206173231f004437730b67ac77323f0d"}, + {file = "regex-2024.7.24-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33e2614a7ce627f0cdf2ad104797d1f68342d967de3695678c0cb84f530709f8"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d33a0021893ede5969876052796165bab6006559ab845fd7b515a30abdd990dc"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04ce29e2c5fedf296b1a1b0acc1724ba93a36fb14031f3abfb7abda2806c1535"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b16582783f44fbca6fcf46f61347340c787d7530d88b4d590a397a47583f31dd"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:836d3cc225b3e8a943d0b02633fb2f28a66e281290302a79df0e1eaa984ff7c1"}, + {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:438d9f0f4bc64e8dea78274caa5af971ceff0f8771e1a2333620969936ba10be"}, + {file = 
"regex-2024.7.24-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:973335b1624859cb0e52f96062a28aa18f3a5fc77a96e4a3d6d76e29811a0e6e"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c5e69fd3eb0b409432b537fe3c6f44ac089c458ab6b78dcec14478422879ec5f"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fbf8c2f00904eaf63ff37718eb13acf8e178cb940520e47b2f05027f5bb34ce3"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2757ace61bc4061b69af19e4689fa4416e1a04840f33b441034202b5cd02d4"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:44fc61b99035fd9b3b9453f1713234e5a7c92a04f3577252b45feefe1b327759"}, + {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:84c312cdf839e8b579f504afcd7b65f35d60b6285d892b19adea16355e8343c9"}, + {file = "regex-2024.7.24-cp39-cp39-win32.whl", hash = "sha256:ca5b2028c2f7af4e13fb9fc29b28d0ce767c38c7facdf64f6c2cd040413055f1"}, + {file = "regex-2024.7.24-cp39-cp39-win_amd64.whl", hash = "sha256:7c479f5ae937ec9985ecaf42e2e10631551d909f203e31308c12d703922742f9"}, + {file = "regex-2024.7.24.tar.gz", hash = "sha256:9cfd009eed1a46b27c14039ad5bbc5e71b6367c5b2e6d5f5da0ea91600817506"}, ] [[package]] name = "requests" -version = "2.32.2" +version = "2.32.3" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" files = [ - {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, - {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -5284,110 +5804,114 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" -version = "0.18.1" +version = "0.20.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" files = [ - {file = "rpds_py-0.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d31dea506d718693b6b2cffc0648a8929bdc51c70a311b2770f09611caa10d53"}, - {file = "rpds_py-0.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:732672fbc449bab754e0b15356c077cc31566df874964d4801ab14f71951ea80"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a98a1f0552b5f227a3d6422dbd61bc6f30db170939bd87ed14f3c339aa6c7c9"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f1944ce16401aad1e3f7d312247b3d5de7981f634dc9dfe90da72b87d37887d"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38e14fb4e370885c4ecd734f093a2225ee52dc384b86fa55fe3f74638b2cfb09"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08d74b184f9ab6289b87b19fe6a6d1a97fbfea84b8a3e745e87a5de3029bf944"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d70129cef4a8d979caa37e7fe957202e7eee8ea02c5e16455bc9808a59c6b2f0"}, - {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:ce0bb20e3a11bd04461324a6a798af34d503f8d6f1aa3d2aa8901ceaf039176d"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81c5196a790032e0fc2464c0b4ab95f8610f96f1f2fa3d4deacce6a79852da60"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f3027be483868c99b4985fda802a57a67fdf30c5d9a50338d9db646d590198da"}, - {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d44607f98caa2961bab4fa3c4309724b185b464cdc3ba6f3d7340bac3ec97cc1"}, - {file = "rpds_py-0.18.1-cp310-none-win32.whl", hash = "sha256:c273e795e7a0f1fddd46e1e3cb8be15634c29ae8ff31c196debb620e1edb9333"}, - {file = "rpds_py-0.18.1-cp310-none-win_amd64.whl", hash = "sha256:8352f48d511de5f973e4f2f9412736d7dea76c69faa6d36bcf885b50c758ab9a"}, - {file = "rpds_py-0.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6b5ff7e1d63a8281654b5e2896d7f08799378e594f09cf3674e832ecaf396ce8"}, - {file = "rpds_py-0.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8927638a4d4137a289e41d0fd631551e89fa346d6dbcfc31ad627557d03ceb6d"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:154bf5c93d79558b44e5b50cc354aa0459e518e83677791e6adb0b039b7aa6a7"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07f2139741e5deb2c5154a7b9629bc5aa48c766b643c1a6750d16f865a82c5fc"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c7672e9fba7425f79019db9945b16e308ed8bc89348c23d955c8c0540da0a07"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:489bdfe1abd0406eba6b3bb4fdc87c7fa40f1031de073d0cfb744634cc8fa261"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c20f05e8e3d4fc76875fc9cb8cf24b90a63f5a1b4c5b9273f0e8225e169b100"}, - {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:967342e045564cef76dfcf1edb700b1e20838d83b1aa02ab313e6a497cf923b8"}, - {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2cc7c1a47f3a63282ab0f422d90ddac4aa3034e39fc66a559ab93041e6505da7"}, - {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f7afbfee1157e0f9376c00bb232e80a60e59ed716e3211a80cb8506550671e6e"}, - {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e6934d70dc50f9f8ea47081ceafdec09245fd9f6032669c3b45705dea096b88"}, - {file = "rpds_py-0.18.1-cp311-none-win32.whl", hash = "sha256:c69882964516dc143083d3795cb508e806b09fc3800fd0d4cddc1df6c36e76bb"}, - {file = "rpds_py-0.18.1-cp311-none-win_amd64.whl", hash = "sha256:70a838f7754483bcdc830444952fd89645569e7452e3226de4a613a4c1793fb2"}, - {file = "rpds_py-0.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3dd3cd86e1db5aadd334e011eba4e29d37a104b403e8ca24dcd6703c68ca55b3"}, - {file = "rpds_py-0.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05f3d615099bd9b13ecf2fc9cf2d839ad3f20239c678f461c753e93755d629ee"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35b2b771b13eee8729a5049c976197ff58a27a3829c018a04341bcf1ae409b2b"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ee17cd26b97d537af8f33635ef38be873073d516fd425e80559f4585a7b90c43"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b646bf655b135ccf4522ed43d6902af37d3f5dbcf0da66c769a2b3938b9d8184"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19ba472b9606c36716062c023afa2484d1e4220548751bda14f725a7de17b4f6"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e30ac5e329098903262dc5bdd7e2086e0256aa762cc8b744f9e7bf2a427d3f8"}, - {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d58ad6317d188c43750cb76e9deacf6051d0f884d87dc6518e0280438648a9ac"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e1735502458621921cee039c47318cb90b51d532c2766593be6207eec53e5c4c"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f5bab211605d91db0e2995a17b5c6ee5edec1270e46223e513eaa20da20076ac"}, - {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2fc24a329a717f9e2448f8cd1f960f9dac4e45b6224d60734edeb67499bab03a"}, - {file = "rpds_py-0.18.1-cp312-none-win32.whl", hash = "sha256:1805d5901779662d599d0e2e4159d8a82c0b05faa86ef9222bf974572286b2b6"}, - {file = "rpds_py-0.18.1-cp312-none-win_amd64.whl", hash = "sha256:720edcb916df872d80f80a1cc5ea9058300b97721efda8651efcd938a9c70a72"}, - {file = "rpds_py-0.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c827576e2fa017a081346dce87d532a5310241648eb3700af9a571a6e9fc7e74"}, - {file = "rpds_py-0.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aa3679e751408d75a0b4d8d26d6647b6d9326f5e35c00a7ccd82b78ef64f65f8"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0abeee75434e2ee2d142d650d1e54ac1f8b01e6e6abdde8ffd6eeac6e9c38e20"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed402d6153c5d519a0faf1bb69898e97fb31613b49da27a84a13935ea9164dfc"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:338dee44b0cef8b70fd2ef54b4e09bb1b97fc6c3a58fea5db6cc083fd9fc2724"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7750569d9526199c5b97e5a9f8d96a13300950d910cf04a861d96f4273d5b104"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:607345bd5912aacc0c5a63d45a1f73fef29e697884f7e861094e443187c02be5"}, - {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:207c82978115baa1fd8d706d720b4a4d2b0913df1c78c85ba73fe6c5804505f0"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6d1e42d2735d437e7e80bab4d78eb2e459af48c0a46e686ea35f690b93db792d"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5463c47c08630007dc0fe99fb480ea4f34a89712410592380425a9b4e1611d8e"}, - {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:06d218939e1bf2ca50e6b0ec700ffe755e5216a8230ab3e87c059ebb4ea06afc"}, - {file = "rpds_py-0.18.1-cp38-none-win32.whl", hash = "sha256:312fe69b4fe1ffbe76520a7676b1e5ac06ddf7826d764cc10265c3b53f96dbe9"}, - {file = "rpds_py-0.18.1-cp38-none-win_amd64.whl", hash = "sha256:9437ca26784120a279f3137ee080b0e717012c42921eb07861b412340f85bae2"}, - {file = "rpds_py-0.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:19e515b78c3fc1039dd7da0a33c28c3154458f947f4dc198d3c72db2b6b5dc93"}, - {file = "rpds_py-0.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:a7b28c5b066bca9a4eb4e2f2663012debe680f097979d880657f00e1c30875a0"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:673fdbbf668dd958eff750e500495ef3f611e2ecc209464f661bc82e9838991e"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d960de62227635d2e61068f42a6cb6aae91a7fe00fca0e3aeed17667c8a34611"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352a88dc7892f1da66b6027af06a2e7e5d53fe05924cc2cfc56495b586a10b72"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e0ee01ad8260184db21468a6e1c37afa0529acc12c3a697ee498d3c2c4dcaf3"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c39ad2f512b4041343ea3c7894339e4ca7839ac38ca83d68a832fc8b3748ab"}, - {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aaa71ee43a703c321906813bb252f69524f02aa05bf4eec85f0c41d5d62d0f4c"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6cd8098517c64a85e790657e7b1e509b9fe07487fd358e19431cb120f7d96338"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4adec039b8e2928983f885c53b7cc4cda8965b62b6596501a0308d2703f8af1b"}, - {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32b7daaa3e9389db3695964ce8e566e3413b0c43e3394c05e4b243a4cd7bef26"}, - {file = "rpds_py-0.18.1-cp39-none-win32.whl", hash = "sha256:2625f03b105328729f9450c8badda34d5243231eef6535f80064d57035738360"}, - {file = "rpds_py-0.18.1-cp39-none-win_amd64.whl", hash = "sha256:bf18932d0003c8c4d51a39f244231986ab23ee057d235a12b2684ea26a353590"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cbfbea39ba64f5e53ae2915de36f130588bba71245b418060ec3330ebf85678e"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a3d456ff2a6a4d2adcdf3c1c960a36f4fd2fec6e3b4902a42a384d17cf4e7a65"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7700936ef9d006b7ef605dc53aa364da2de5a3aa65516a1f3ce73bf82ecfc7ae"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51584acc5916212e1bf45edd17f3a6b05fe0cbb40482d25e619f824dccb679de"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:942695a206a58d2575033ff1e42b12b2aece98d6003c6bc739fbf33d1773b12f"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b906b5f58892813e5ba5c6056d6a5ad08f358ba49f046d910ad992196ea61397"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f8e3fecca256fefc91bb6765a693d96692459d7d4c644660a9fff32e517843"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7732770412bab81c5a9f6d20aeb60ae943a9b36dcd990d876a773526468e7163"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:bd1105b50ede37461c1d51b9698c4f4be6e13e69a908ab7751e3807985fc0346"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:618916f5535784960f3ecf8111581f4ad31d347c3de66d02e728de460a46303c"}, - {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:17c6d2155e2423f7e79e3bb18151c686d40db42d8645e7977442170c360194d4"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c4c4c3f878df21faf5fac86eda32671c27889e13570645a9eea0a1abdd50922"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:fab6ce90574645a0d6c58890e9bcaac8d94dff54fb51c69e5522a7358b80ab64"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:531796fb842b53f2695e94dc338929e9f9dbf473b64710c28af5a160b2a8927d"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:740884bc62a5e2bbb31e584f5d23b32320fd75d79f916f15a788d527a5e83644"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:998125738de0158f088aef3cb264a34251908dd2e5d9966774fdab7402edfab7"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2be6e9dd4111d5b31ba3b74d17da54a8319d8168890fbaea4b9e5c3de630ae5"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0cee71bc618cd93716f3c1bf56653740d2d13ddbd47673efa8bf41435a60daa"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c3caec4ec5cd1d18e5dd6ae5194d24ed12785212a90b37f5f7f06b8bedd7139"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:27bba383e8c5231cd559affe169ca0b96ec78d39909ffd817f28b166d7ddd4d8"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:a888e8bdb45916234b99da2d859566f1e8a1d2275a801bb8e4a9644e3c7e7909"}, - {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6031b25fb1b06327b43d841f33842b383beba399884f8228a6bb3df3088485ff"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48c2faaa8adfacefcbfdb5f2e2e7bdad081e5ace8d182e5f4ade971f128e6bb3"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d85164315bd68c0806768dc6bb0429c6f95c354f87485ee3593c4f6b14def2bd"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afd80f6c79893cfc0574956f78a0add8c76e3696f2d6a15bca2c66c415cf2d4"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa242ac1ff583e4ec7771141606aafc92b361cd90a05c30d93e343a0c2d82a89"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21be4770ff4e08698e1e8e0bce06edb6ea0626e7c8f560bc08222880aca6a6f"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c45a639e93a0c5d4b788b2613bd637468edd62f8f95ebc6fcc303d58ab3f0a8"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910e71711d1055b2768181efa0a17537b2622afeb0424116619817007f8a2b10"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9bb1f182a97880f6078283b3505a707057c42bf55d8fca604f70dedfdc0772a"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d54f74f40b1f7aaa595a02ff42ef38ca654b1469bef7d52867da474243cc633"}, - {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:8d2e182c9ee01135e11e9676e9a62dfad791a7a467738f06726872374a83db49"}, - {file = 
"rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:636a15acc588f70fda1661234761f9ed9ad79ebed3f2125d44be0862708b666e"}, - {file = "rpds_py-0.18.1.tar.gz", hash = "sha256:dc48b479d540770c811fbd1eb9ba2bb66951863e448efec2e2c102625328e92f"}, + {file = "rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2"}, + {file = "rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf"}, + {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140"}, + {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f"}, + {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce"}, + {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94"}, + {file = "rpds_py-0.20.0-cp310-none-win32.whl", hash = "sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee"}, + {file = "rpds_py-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399"}, + {file = "rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489"}, + {file = "rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209"}, + {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3"}, + {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272"}, + {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad"}, + {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58"}, + {file = "rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0"}, + {file = "rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c"}, + {file = "rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6"}, + {file = "rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4"}, + {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef"}, + {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821"}, + {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940"}, + {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174"}, + {file = "rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139"}, + {file = "rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585"}, + {file = "rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29"}, + {file = "rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879"}, + {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f"}, + {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c"}, + {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2"}, + {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57"}, + {file = "rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a"}, + {file = "rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2"}, + {file = "rpds_py-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f2fbf7db2012d4876fb0d66b5b9ba6591197b0f165db8d99371d976546472a24"}, + {file = "rpds_py-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e5f3cd7397c8f86c8cc72d5a791071431c108edd79872cdd96e00abd8497d29"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce9845054c13696f7af7f2b353e6b4f676dab1b4b215d7fe5e05c6f8bb06f965"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c3e130fd0ec56cb76eb49ef52faead8ff09d13f4527e9b0c400307ff72b408e1"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b16aa0107ecb512b568244ef461f27697164d9a68d8b35090e9b0c1c8b27752"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7f429242aae2947246587d2964fad750b79e8c233a2367f71b554e9447949c"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0fc424a5842a11e28956e69395fbbeab2c97c42253169d87e90aac2886d751"}, + {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8c00a3b1e70c1d3891f0db1b05292747f0dbcfb49c43f9244d04c70fbc40eb8"}, + {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40ce74fc86ee4645d0a225498d091d8bc61f39b709ebef8204cb8b5a464d3c0e"}, + {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4fe84294c7019456e56d93e8ababdad5a329cd25975be749c3f5f558abb48253"}, + {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:338ca4539aad4ce70a656e5187a3a31c5204f261aef9f6ab50e50bcdffaf050a"}, + {file = "rpds_py-0.20.0-cp38-none-win32.whl", hash = "sha256:54b43a2b07db18314669092bb2de584524d1ef414588780261e31e85846c26a5"}, + {file = "rpds_py-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:a1862d2d7ce1674cffa6d186d53ca95c6e17ed2b06b3f4c476173565c862d232"}, + {file = "rpds_py-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3fde368e9140312b6e8b6c09fb9f8c8c2f00999d1823403ae90cc00480221b22"}, + {file = "rpds_py-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:9824fb430c9cf9af743cf7aaf6707bf14323fb51ee74425c380f4c846ea70789"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11ef6ce74616342888b69878d45e9f779b95d4bd48b382a229fe624a409b72c5"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c52d3f2f82b763a24ef52f5d24358553e8403ce05f893b5347098014f2d9eff2"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d35cef91e59ebbeaa45214861874bc6f19eb35de96db73e467a8358d701a96c"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d72278a30111e5b5525c1dd96120d9e958464316f55adb030433ea905866f4de"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c29cbbba378759ac5786730d1c3cb4ec6f8ababf5c42a9ce303dc4b3d08cda"}, + {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6632f2d04f15d1bd6fe0eedd3b86d9061b836ddca4c03d5cf5c7e9e6b7c14580"}, + {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d0b67d87bb45ed1cd020e8fbf2307d449b68abc45402fe1a4ac9e46c3c8b192b"}, + {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec31a99ca63bf3cd7f1a5ac9fe95c5e2d060d3c768a09bc1d16e235840861420"}, + {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e6c9976e38f4d8c4a63bd8a8edac5307dffd3ee7e6026d97f3cc3a2dc02a0b"}, + {file = "rpds_py-0.20.0-cp39-none-win32.whl", hash = "sha256:569b3ea770c2717b730b61998b6c54996adee3cef69fc28d444f3e7920313cf7"}, + {file = "rpds_py-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:e6900ecdd50ce0facf703f7a00df12374b74bbc8ad9fe0f6559947fb20f82364"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = 
"sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344"}, + {file = "rpds_py-0.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f918a1a130a6dfe1d7fe0f105064141342e7dd1611f2e6a21cd2f5c8cb1cfb3e"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f60012a73aa396be721558caa3a6fd49b3dd0033d1675c6d59c4502e870fcf0c"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d2b1ad682a3dfda2a4e8ad8572f3100f95fad98cb99faf37ff0ddfe9cbf9d03"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:614fdafe9f5f19c63ea02817fa4861c606a59a604a77c8cdef5aa01d28b97921"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa518bcd7600c584bf42e6617ee8132869e877db2f76bcdc281ec6a4113a53ab"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0475242f447cc6cb8a9dd486d68b2ef7fbee84427124c232bff5f63b1fe11e5"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90a4cd061914a60bd51c68bcb4357086991bd0bb93d8aa66a6da7701370708f"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:def7400461c3a3f26e49078302e1c1b38f6752342c77e3cf72ce91ca69fb1bc1"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:65794e4048ee837494aea3c21a28ad5fc080994dfba5b036cf84de37f7ad5074"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:faefcc78f53a88f3076b7f8be0a8f8d35133a3ecf7f3770895c25f8813460f08"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5b4f105deeffa28bbcdff6c49b34e74903139afa690e35d2d9e3c2c2fba18cec"}, + {file = "rpds_py-0.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdfc3a892927458d98f3d55428ae46b921d1f7543b89382fdb483f5640daaec8"}, + {file = "rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121"}, ] [[package]] @@ -5483,138 +6007,148 @@ files = [ [[package]] name = "ruff" -version = "0.5.2" +version = "0.5.7" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.5.2-py3-none-linux_armv6l.whl", hash = "sha256:7bab8345df60f9368d5f4594bfb8b71157496b44c30ff035d1d01972e764d3be"}, - {file = "ruff-0.5.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:1aa7acad382ada0189dbe76095cf0a36cd0036779607c397ffdea16517f535b1"}, - {file = "ruff-0.5.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:aec618d5a0cdba5592c60c2dee7d9c865180627f1a4a691257dea14ac1aa264d"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b62adc5ce81780ff04077e88bac0986363e4a3260ad3ef11ae9c14aa0e67ef"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dc42ebf56ede83cb080a50eba35a06e636775649a1ffd03dc986533f878702a3"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c15c6e9f88c67ffa442681365d11df38afb11059fc44238e71a9d9f1fd51de70"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d3de9a5960f72c335ef00763d861fc5005ef0644cb260ba1b5a115a102157251"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe5a968ae933e8f7627a7b2fc8893336ac2be0eb0aace762d3421f6e8f7b7f83"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a04f54a9018f75615ae52f36ea1c5515e356e5d5e214b22609ddb546baef7132"}, - {file = "ruff-0.5.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed02fb52e3741f0738db5f93e10ae0fb5c71eb33a4f2ba87c9a2fa97462a649"}, - {file = "ruff-0.5.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3cf8fe659f6362530435d97d738eb413e9f090e7e993f88711b0377fbdc99f60"}, - {file = "ruff-0.5.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:237a37e673e9f3cbfff0d2243e797c4862a44c93d2f52a52021c1a1b0899f846"}, - {file = "ruff-0.5.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:2a2949ce7c1cbd8317432ada80fe32156df825b2fd611688814c8557824ef060"}, - {file = "ruff-0.5.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:481af57c8e99da92ad168924fd82220266043c8255942a1cb87958b108ac9335"}, - {file = "ruff-0.5.2-py3-none-win32.whl", hash = "sha256:f1aea290c56d913e363066d83d3fc26848814a1fed3d72144ff9c930e8c7c718"}, - {file = "ruff-0.5.2-py3-none-win_amd64.whl", hash = "sha256:8532660b72b5d94d2a0a7a27ae7b9b40053662d00357bb2a6864dd7e38819084"}, - {file = "ruff-0.5.2-py3-none-win_arm64.whl", hash = "sha256:73439805c5cb68f364d826a5c5c4b6c798ded6b7ebaa4011f01ce6c94e4d5583"}, - {file = "ruff-0.5.2.tar.gz", hash = "sha256:2c0df2d2de685433794a14d8d2e240df619b748fbe3367346baa519d8e6f1ca2"}, + {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"}, + {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"}, + {file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"}, + {file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"}, + {file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"}, + {file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"}, + {file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"}, + {file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"}, + {file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"}, + {file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"}, + {file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"}, + {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"}, ] [[package]] name = "safetensors" -version = "0.4.3" +version = "0.4.4" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "safetensors-0.4.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:dcf5705cab159ce0130cd56057f5f3425023c407e170bca60b4868048bae64fd"}, - {file = "safetensors-0.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb4f8c5d0358a31e9a08daeebb68f5e161cdd4018855426d3f0c23bb51087055"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70a5319ef409e7f88686a46607cbc3c428271069d8b770076feaf913664a07ac"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fb9c65bd82f9ef3ce4970dc19ee86be5f6f93d032159acf35e663c6bea02b237"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edb5698a7bc282089f64c96c477846950358a46ede85a1c040e0230344fdde10"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efcc860be094b8d19ac61b452ec635c7acb9afa77beb218b1d7784c6d41fe8ad"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d88b33980222085dd6001ae2cad87c6068e0991d4f5ccf44975d216db3b57376"}, - {file = "safetensors-0.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5fc6775529fb9f0ce2266edd3e5d3f10aab068e49f765e11f6f2a63b5367021d"}, - {file = "safetensors-0.4.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9c6ad011c1b4e3acff058d6b090f1da8e55a332fbf84695cf3100c649cc452d1"}, - {file = "safetensors-0.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:8c496c5401c1b9c46d41a7688e8ff5b0310a3b9bae31ce0f0ae870e1ea2b8caf"}, - {file = "safetensors-0.4.3-cp310-none-win32.whl", hash = "sha256:38e2a8666178224a51cca61d3cb4c88704f696eac8f72a49a598a93bbd8a4af9"}, - {file = "safetensors-0.4.3-cp310-none-win_amd64.whl", hash = "sha256:393e6e391467d1b2b829c77e47d726f3b9b93630e6a045b1d1fca67dc78bf632"}, - {file = "safetensors-0.4.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:22f3b5d65e440cec0de8edaa672efa888030802e11c09b3d6203bff60ebff05a"}, - {file = "safetensors-0.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c4fa560ebd4522adddb71dcd25d09bf211b5634003f015a4b815b7647d62ebe"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9afd5358719f1b2cf425fad638fc3c887997d6782da317096877e5b15b2ce93"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d8c5093206ef4b198600ae484230402af6713dab1bd5b8e231905d754022bec7"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0b2104df1579d6ba9052c0ae0e3137c9698b2d85b0645507e6fd1813b70931a"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8cf18888606dad030455d18f6c381720e57fc6a4170ee1966adb7ebc98d4d6a3"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0bf4f9d6323d9f86eef5567eabd88f070691cf031d4c0df27a40d3b4aaee755b"}, - {file = "safetensors-0.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:585c9ae13a205807b63bef8a37994f30c917ff800ab8a1ca9c9b5d73024f97ee"}, - {file = "safetensors-0.4.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faefeb3b81bdfb4e5a55b9bbdf3d8d8753f65506e1d67d03f5c851a6c87150e9"}, - {file = "safetensors-0.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:befdf0167ad626f22f6aac6163477fcefa342224a22f11fdd05abb3995c1783c"}, - {file = "safetensors-0.4.3-cp311-none-win32.whl", hash = "sha256:a7cef55929dcbef24af3eb40bedec35d82c3c2fa46338bb13ecf3c5720af8a61"}, - {file = "safetensors-0.4.3-cp311-none-win_amd64.whl", hash = "sha256:840b7ac0eff5633e1d053cc9db12fdf56b566e9403b4950b2dc85393d9b88d67"}, - {file = "safetensors-0.4.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:22d21760dc6ebae42e9c058d75aa9907d9f35e38f896e3c69ba0e7b213033856"}, - {file = "safetensors-0.4.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d22c1a10dff3f64d0d68abb8298a3fd88ccff79f408a3e15b3e7f637ef5c980"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1648568667f820b8c48317c7006221dc40aced1869908c187f493838a1362bc"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:446e9fe52c051aeab12aac63d1017e0f68a02a92a027b901c4f8e931b24e5397"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fef5d70683643618244a4f5221053567ca3e77c2531e42ad48ae05fae909f542"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a1f4430cc0c9d6afa01214a4b3919d0a029637df8e09675ceef1ca3f0dfa0df"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d603846a8585b9432a0fd415db1d4c57c0f860eb4aea21f92559ff9902bae4d"}, - {file = "safetensors-0.4.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:a844cdb5d7cbc22f5f16c7e2a0271170750763c4db08381b7f696dbd2c78a361"}, - {file = "safetensors-0.4.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:88887f69f7a00cf02b954cdc3034ffb383b2303bc0ab481d4716e2da51ddc10e"}, - {file = "safetensors-0.4.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ee463219d9ec6c2be1d331ab13a8e0cd50d2f32240a81d498266d77d07b7e71e"}, - {file = "safetensors-0.4.3-cp312-none-win32.whl", hash = "sha256:d0dd4a1db09db2dba0f94d15addc7e7cd3a7b0d393aa4c7518c39ae7374623c3"}, - {file = "safetensors-0.4.3-cp312-none-win_amd64.whl", hash = "sha256:d14d30c25897b2bf19b6fb5ff7e26cc40006ad53fd4a88244fdf26517d852dd7"}, - {file = "safetensors-0.4.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d1456f814655b224d4bf6e7915c51ce74e389b413be791203092b7ff78c936dd"}, - {file = "safetensors-0.4.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:455d538aa1aae4a8b279344a08136d3f16334247907b18a5c3c7fa88ef0d3c46"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf476bca34e1340ee3294ef13e2c625833f83d096cfdf69a5342475602004f95"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:02ef3a24face643456020536591fbd3c717c5abaa2737ec428ccbbc86dffa7a4"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7de32d0d34b6623bb56ca278f90db081f85fb9c5d327e3c18fd23ac64f465768"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a0deb16a1d3ea90c244ceb42d2c6c276059616be21a19ac7101aa97da448faf"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c59d51f182c729f47e841510b70b967b0752039f79f1de23bcdd86462a9b09ee"}, - {file = "safetensors-0.4.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1f598b713cc1a4eb31d3b3203557ac308acf21c8f41104cdd74bf640c6e538e3"}, - {file = "safetensors-0.4.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5757e4688f20df083e233b47de43845d1adb7e17b6cf7da5f8444416fc53828d"}, - {file = "safetensors-0.4.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fe746d03ed8d193674a26105e4f0fe6c726f5bb602ffc695b409eaf02f04763d"}, - {file = "safetensors-0.4.3-cp37-none-win32.whl", hash = "sha256:0d5ffc6a80f715c30af253e0e288ad1cd97a3d0086c9c87995e5093ebc075e50"}, - {file = "safetensors-0.4.3-cp37-none-win_amd64.whl", hash = "sha256:a11c374eb63a9c16c5ed146457241182f310902bd2a9c18255781bb832b6748b"}, - {file = "safetensors-0.4.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1e31be7945f66be23f4ec1682bb47faa3df34cb89fc68527de6554d3c4258a4"}, - {file = "safetensors-0.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:03a4447c784917c9bf01d8f2ac5080bc15c41692202cd5f406afba16629e84d6"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d244bcafeb1bc06d47cfee71727e775bca88a8efda77a13e7306aae3813fa7e4"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53c4879b9c6bd7cd25d114ee0ef95420e2812e676314300624594940a8d6a91f"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74707624b81f1b7f2b93f5619d4a9f00934d5948005a03f2c1845ffbfff42212"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d52c958dc210265157573f81d34adf54e255bc2b59ded6218500c9b15a750eb"}, - {file = 
"safetensors-0.4.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f9568f380f513a60139971169c4a358b8731509cc19112369902eddb33faa4d"}, - {file = "safetensors-0.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0d9cd8e1560dfc514b6d7859247dc6a86ad2f83151a62c577428d5102d872721"}, - {file = "safetensors-0.4.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:89f9f17b0dacb913ed87d57afbc8aad85ea42c1085bd5de2f20d83d13e9fc4b2"}, - {file = "safetensors-0.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1139eb436fd201c133d03c81209d39ac57e129f5e74e34bb9ab60f8d9b726270"}, - {file = "safetensors-0.4.3-cp38-none-win32.whl", hash = "sha256:d9c289f140a9ae4853fc2236a2ffc9a9f2d5eae0cb673167e0f1b8c18c0961ac"}, - {file = "safetensors-0.4.3-cp38-none-win_amd64.whl", hash = "sha256:622afd28968ef3e9786562d352659a37de4481a4070f4ebac883f98c5836563e"}, - {file = "safetensors-0.4.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8651c7299cbd8b4161a36cd6a322fa07d39cd23535b144d02f1c1972d0c62f3c"}, - {file = "safetensors-0.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e375d975159ac534c7161269de24ddcd490df2157b55c1a6eeace6cbb56903f0"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:084fc436e317f83f7071fc6a62ca1c513b2103db325cd09952914b50f51cf78f"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:41a727a7f5e6ad9f1db6951adee21bbdadc632363d79dc434876369a17de6ad6"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7dbbde64b6c534548696808a0e01276d28ea5773bc9a2dfb97a88cd3dffe3df"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bbae3b4b9d997971431c346edbfe6e41e98424a097860ee872721e176040a893"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01e4b22e3284cd866edeabe4f4d896229495da457229408d2e1e4810c5187121"}, - {file = "safetensors-0.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dd37306546b58d3043eb044c8103a02792cc024b51d1dd16bd3dd1f334cb3ed"}, - {file = "safetensors-0.4.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8815b5e1dac85fc534a97fd339e12404db557878c090f90442247e87c8aeaea"}, - {file = "safetensors-0.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e011cc162503c19f4b1fd63dfcddf73739c7a243a17dac09b78e57a00983ab35"}, - {file = "safetensors-0.4.3-cp39-none-win32.whl", hash = "sha256:01feb3089e5932d7e662eda77c3ecc389f97c0883c4a12b5cfdc32b589a811c3"}, - {file = "safetensors-0.4.3-cp39-none-win_amd64.whl", hash = "sha256:3f9cdca09052f585e62328c1c2923c70f46814715c795be65f0b93f57ec98a02"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1b89381517891a7bb7d1405d828b2bf5d75528299f8231e9346b8eba092227f9"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:cd6fff9e56df398abc5866b19a32124815b656613c1c5ec0f9350906fd798aac"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:840caf38d86aa7014fe37ade5d0d84e23dcfbc798b8078015831996ecbc206a3"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9650713b2cfa9537a2baf7dd9fee458b24a0aaaa6cafcea8bdd5fb2b8efdc34"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:e4119532cd10dba04b423e0f86aecb96cfa5a602238c0aa012f70c3a40c44b50"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e066e8861eef6387b7c772344d1fe1f9a72800e04ee9a54239d460c400c72aab"}, - {file = "safetensors-0.4.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:90964917f5b0fa0fa07e9a051fbef100250c04d150b7026ccbf87a34a54012e0"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c41e1893d1206aa7054029681778d9a58b3529d4c807002c156d58426c225173"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae7613a119a71a497d012ccc83775c308b9c1dab454806291427f84397d852fd"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9bac020faba7f5dc481e881b14b6425265feabb5bfc552551d21189c0eddc3"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:420a98f593ff9930f5822560d14c395ccbc57342ddff3b463bc0b3d6b1951550"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f5e6883af9a68c0028f70a4c19d5a6ab6238a379be36ad300a22318316c00cb0"}, - {file = "safetensors-0.4.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:cdd0a3b5da66e7f377474599814dbf5cbf135ff059cc73694de129b58a5e8a2c"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9bfb92f82574d9e58401d79c70c716985dc049b635fef6eecbb024c79b2c46ad"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3615a96dd2dcc30eb66d82bc76cda2565f4f7bfa89fcb0e31ba3cea8a1a9ecbb"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:868ad1b6fc41209ab6bd12f63923e8baeb1a086814cb2e81a65ed3d497e0cf8f"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffba80aa49bd09195145a7fd233a7781173b422eeb995096f2b30591639517"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0acbe31340ab150423347e5b9cc595867d814244ac14218932a5cf1dd38eb39"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19bbdf95de2cf64f25cd614c5236c8b06eb2cfa47cbf64311f4b5d80224623a3"}, - {file = "safetensors-0.4.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b852e47eb08475c2c1bd8131207b405793bfc20d6f45aff893d3baaad449ed14"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5d07cbca5b99babb692d76d8151bec46f461f8ad8daafbfd96b2fca40cadae65"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1ab6527a20586d94291c96e00a668fa03f86189b8a9defa2cdd34a1a01acc7d5"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02318f01e332cc23ffb4f6716e05a492c5f18b1d13e343c49265149396284a44"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec4b52ce9a396260eb9731eb6aea41a7320de22ed73a1042c2230af0212758ce"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:018b691383026a2436a22b648873ed11444a364324e7088b99cd2503dd828400"}, - {file = "safetensors-0.4.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:309b10dbcab63269ecbf0e2ca10ce59223bb756ca5d431ce9c9eeabd446569da"}, - {file = 
"safetensors-0.4.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b277482120df46e27a58082df06a15aebda4481e30a1c21eefd0921ae7e03f65"}, - {file = "safetensors-0.4.3.tar.gz", hash = "sha256:2f85fc50c4e07a21e95c24e07460fe6f7e2859d0ce88092838352b798ce711c2"}, + {file = "safetensors-0.4.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2adb497ada13097f30e386e88c959c0fda855a5f6f98845710f5bb2c57e14f12"}, + {file = "safetensors-0.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7db7fdc2d71fd1444d85ca3f3d682ba2df7d61a637dfc6d80793f439eae264ab"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d4f0eed76b430f009fbefca1a0028ddb112891b03cb556d7440d5cd68eb89a9"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:57d216fab0b5c432aabf7170883d7c11671622bde8bd1436c46d633163a703f6"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d9b76322e49c056bcc819f8bdca37a2daa5a6d42c07f30927b501088db03309"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32f0d1f6243e90ee43bc6ee3e8c30ac5b09ca63f5dd35dbc985a1fc5208c451a"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44d464bdc384874601a177375028012a5f177f1505279f9456fea84bbc575c7f"}, + {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63144e36209ad8e4e65384dbf2d52dd5b1866986079c00a72335402a38aacdc5"}, + {file = "safetensors-0.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:051d5ecd490af7245258000304b812825974d5e56f14a3ff7e1b8b2ba6dc2ed4"}, + {file = "safetensors-0.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51bc8429d9376224cd3cf7e8ce4f208b4c930cd10e515b6ac6a72cbc3370f0d9"}, + {file = "safetensors-0.4.4-cp310-none-win32.whl", hash = "sha256:fb7b54830cee8cf9923d969e2df87ce20e625b1af2fd194222ab902d3adcc29c"}, + {file = "safetensors-0.4.4-cp310-none-win_amd64.whl", hash = "sha256:4b3e8aa8226d6560de8c2b9d5ff8555ea482599c670610758afdc97f3e021e9c"}, + {file = "safetensors-0.4.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bbaa31f2cb49013818bde319232ccd72da62ee40f7d2aa532083eda5664e85ff"}, + {file = "safetensors-0.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9fdcb80f4e9fbb33b58e9bf95e7dbbedff505d1bcd1c05f7c7ce883632710006"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55c14c20be247b8a1aeaf3ab4476265e3ca83096bb8e09bb1a7aa806088def4f"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:949aaa1118660f992dbf0968487b3e3cfdad67f948658ab08c6b5762e90cc8b6"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c11a4ab7debc456326a2bac67f35ee0ac792bcf812c7562a4a28559a5c795e27"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0cea44bba5c5601b297bc8307e4075535b95163402e4906b2e9b82788a2a6df"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9d752c97f6bbe327352f76e5b86442d776abc789249fc5e72eacb49e6916482"}, + {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03f2bb92e61b055ef6cc22883ad1ae898010a95730fa988c60a23800eb742c2c"}, + {file = 
"safetensors-0.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:87bf3f91a9328a941acc44eceffd4e1f5f89b030985b2966637e582157173b98"}, + {file = "safetensors-0.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:20d218ec2b6899d29d6895419a58b6e44cc5ff8f0cc29fac8d236a8978ab702e"}, + {file = "safetensors-0.4.4-cp311-none-win32.whl", hash = "sha256:8079486118919f600c603536e2490ca37b3dbd3280e3ad6eaacfe6264605ac8a"}, + {file = "safetensors-0.4.4-cp311-none-win_amd64.whl", hash = "sha256:2f8c2eb0615e2e64ee27d478c7c13f51e5329d7972d9e15528d3e4cfc4a08f0d"}, + {file = "safetensors-0.4.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:baec5675944b4a47749c93c01c73d826ef7d42d36ba8d0dba36336fa80c76426"}, + {file = "safetensors-0.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f15117b96866401825f3e94543145028a2947d19974429246ce59403f49e77c6"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a13a9caea485df164c51be4eb0c87f97f790b7c3213d635eba2314d959fe929"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b54bc4ca5f9b9bba8cd4fb91c24b2446a86b5ae7f8975cf3b7a277353c3127c"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08332c22e03b651c8eb7bf5fc2de90044f3672f43403b3d9ac7e7e0f4f76495e"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb62841e839ee992c37bb75e75891c7f4904e772db3691c59daaca5b4ab960e1"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e5b927acc5f2f59547270b0309a46d983edc44be64e1ca27a7fcb0474d6cd67"}, + {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a69c71b1ae98a8021a09a0b43363b0143b0ce74e7c0e83cacba691b62655fb8"}, + {file = "safetensors-0.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23654ad162c02a5636f0cd520a0310902c4421aab1d91a0b667722a4937cc445"}, + {file = "safetensors-0.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0677c109d949cf53756859160b955b2e75b0eefe952189c184d7be30ecf7e858"}, + {file = "safetensors-0.4.4-cp312-none-win32.whl", hash = "sha256:a51d0ddd4deb8871c6de15a772ef40b3dbd26a3c0451bb9e66bc76fc5a784e5b"}, + {file = "safetensors-0.4.4-cp312-none-win_amd64.whl", hash = "sha256:2d065059e75a798bc1933c293b68d04d79b586bb7f8c921e0ca1e82759d0dbb1"}, + {file = "safetensors-0.4.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:9d625692578dd40a112df30c02a1adf068027566abd8e6a74893bb13d441c150"}, + {file = "safetensors-0.4.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7cabcf39c81e5b988d0adefdaea2eb9b4fd9bd62d5ed6559988c62f36bfa9a89"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8359bef65f49d51476e9811d59c015f0ddae618ee0e44144f5595278c9f8268c"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1a32c662e7df9226fd850f054a3ead0e4213a96a70b5ce37b2d26ba27004e013"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c329a4dcc395364a1c0d2d1574d725fe81a840783dda64c31c5a60fc7d41472c"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:239ee093b1db877c9f8fe2d71331a97f3b9c7c0d3ab9f09c4851004a11f44b65"}, + {file = 
"safetensors-0.4.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd574145d930cf9405a64f9923600879a5ce51d9f315443a5f706374841327b6"}, + {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f6784eed29f9e036acb0b7769d9e78a0dc2c72c2d8ba7903005350d817e287a4"}, + {file = "safetensors-0.4.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:65a4a6072436bf0a4825b1c295d248cc17e5f4651e60ee62427a5bcaa8622a7a"}, + {file = "safetensors-0.4.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:df81e3407630de060ae8313da49509c3caa33b1a9415562284eaf3d0c7705f9f"}, + {file = "safetensors-0.4.4-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:e4a0f374200e8443d9746e947ebb346c40f83a3970e75a685ade0adbba5c48d9"}, + {file = "safetensors-0.4.4-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:181fb5f3dee78dae7fd7ec57d02e58f7936498d587c6b7c1c8049ef448c8d285"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb4ac1d8f6b65ec84ddfacd275079e89d9df7c92f95675ba96c4f790a64df6e"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76897944cd9239e8a70955679b531b9a0619f76e25476e57ed373322d9c2075d"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a9e9d1a27e51a0f69e761a3d581c3af46729ec1c988fa1f839e04743026ae35"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:005ef9fc0f47cb9821c40793eb029f712e97278dae84de91cb2b4809b856685d"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26987dac3752688c696c77c3576f951dbbdb8c57f0957a41fb6f933cf84c0b62"}, + {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c05270b290acd8d249739f40d272a64dd597d5a4b90f27d830e538bc2549303c"}, + {file = "safetensors-0.4.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:068d3a33711fc4d93659c825a04480ff5a3854e1d78632cdc8f37fee917e8a60"}, + {file = "safetensors-0.4.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:063421ef08ca1021feea8b46951251b90ae91f899234dd78297cbe7c1db73b99"}, + {file = "safetensors-0.4.4-cp37-none-win32.whl", hash = "sha256:d52f5d0615ea83fd853d4e1d8acf93cc2e0223ad4568ba1e1f6ca72e94ea7b9d"}, + {file = "safetensors-0.4.4-cp37-none-win_amd64.whl", hash = "sha256:88a5ac3280232d4ed8e994cbc03b46a1807ce0aa123867b40c4a41f226c61f94"}, + {file = "safetensors-0.4.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3467ab511bfe3360967d7dc53b49f272d59309e57a067dd2405b4d35e7dcf9dc"}, + {file = "safetensors-0.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2ab4c96d922e53670ce25fbb9b63d5ea972e244de4fa1dd97b590d9fd66aacef"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87df18fce4440477c3ef1fd7ae17c704a69a74a77e705a12be135ee0651a0c2d"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e5fe345b2bc7d88587149ac11def1f629d2671c4c34f5df38aed0ba59dc37f8"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9f1a3e01dce3cd54060791e7e24588417c98b941baa5974700eeb0b8eb65b0a0"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c6bf35e9a8998d8339fd9a05ac4ce465a4d2a2956cc0d837b67c4642ed9e947"}, + {file = 
"safetensors-0.4.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:166c0c52f6488b8538b2a9f3fbc6aad61a7261e170698779b371e81b45f0440d"}, + {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87e9903b8668a16ef02c08ba4ebc91e57a49c481e9b5866e31d798632805014b"}, + {file = "safetensors-0.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a9c421153aa23c323bd8483d4155b4eee82c9a50ac11cccd83539104a8279c64"}, + {file = "safetensors-0.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a4b8617499b2371c7353302c5116a7e0a3a12da66389ce53140e607d3bf7b3d3"}, + {file = "safetensors-0.4.4-cp38-none-win32.whl", hash = "sha256:c6280f5aeafa1731f0a3709463ab33d8e0624321593951aefada5472f0b313fd"}, + {file = "safetensors-0.4.4-cp38-none-win_amd64.whl", hash = "sha256:6ceed6247fc2d33b2a7b7d25d8a0fe645b68798856e0bc7a9800c5fd945eb80f"}, + {file = "safetensors-0.4.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5cf6c6f6193797372adf50c91d0171743d16299491c75acad8650107dffa9269"}, + {file = "safetensors-0.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:419010156b914a3e5da4e4adf992bee050924d0fe423c4b329e523e2c14c3547"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88f6fd5a5c1302ce79993cc5feeadcc795a70f953c762544d01fb02b2db4ea33"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d468cffb82d90789696d5b4d8b6ab8843052cba58a15296691a7a3df55143cd2"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9353c2af2dd467333d4850a16edb66855e795561cd170685178f706c80d2c71e"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83c155b4a33368d9b9c2543e78f2452090fb030c52401ca608ef16fa58c98353"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9850754c434e636ce3dc586f534bb23bcbd78940c304775bee9005bf610e98f1"}, + {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:275f500b4d26f67b6ec05629a4600645231bd75e4ed42087a7c1801bff04f4b3"}, + {file = "safetensors-0.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5c2308de665b7130cd0e40a2329278226e4cf083f7400c51ca7e19ccfb3886f3"}, + {file = "safetensors-0.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e06a9ebc8656e030ccfe44634f2a541b4b1801cd52e390a53ad8bacbd65f8518"}, + {file = "safetensors-0.4.4-cp39-none-win32.whl", hash = "sha256:ef73df487b7c14b477016947c92708c2d929e1dee2bacdd6fff5a82ed4539537"}, + {file = "safetensors-0.4.4-cp39-none-win_amd64.whl", hash = "sha256:83d054818a8d1198d8bd8bc3ea2aac112a2c19def2bf73758321976788706398"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1d1f34c71371f0e034004a0b583284b45d233dd0b5f64a9125e16b8a01d15067"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1a8043a33d58bc9b30dfac90f75712134ca34733ec3d8267b1bd682afe7194f5"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8db8f0c59c84792c12661f8efa85de160f80efe16b87a9d5de91b93f9e0bce3c"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfc1fc38e37630dd12d519bdec9dcd4b345aec9930bb9ce0ed04461f49e58b52"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:e5c9d86d9b13b18aafa88303e2cd21e677f5da2a14c828d2c460fe513af2e9a5"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:43251d7f29a59120a26f5a0d9583b9e112999e500afabcfdcb91606d3c5c89e3"}, + {file = "safetensors-0.4.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:2c42e9b277513b81cf507e6121c7b432b3235f980cac04f39f435b7902857f91"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3daacc9a4e3f428a84dd56bf31f20b768eb0b204af891ed68e1f06db9edf546f"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:218bbb9b883596715fc9997bb42470bf9f21bb832c3b34c2bf744d6fa8f2bbba"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bd5efc26b39f7fc82d4ab1d86a7f0644c8e34f3699c33f85bfa9a717a030e1b"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56ad9776b65d8743f86698a1973292c966cf3abff627efc44ed60e66cc538ddd"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:30f23e6253c5f43a809dea02dc28a9f5fa747735dc819f10c073fe1b605e97d4"}, + {file = "safetensors-0.4.4-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5512078d00263de6cb04e9d26c9ae17611098f52357fea856213e38dc462f81f"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b96c3d9266439d17f35fc2173111d93afc1162f168e95aed122c1ca517b1f8f1"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:08d464aa72a9a13826946b4fb9094bb4b16554bbea2e069e20bd903289b6ced9"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:210160816d5a36cf41f48f38473b6f70d7bcb4b0527bedf0889cc0b4c3bb07db"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb276a53717f2bcfb6df0bcf284d8a12069002508d4c1ca715799226024ccd45"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a2c28c6487f17d8db0089e8b2cdc13de859366b94cc6cdc50e1b0a4147b56551"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7915f0c60e4e6e65d90f136d85dd3b429ae9191c36b380e626064694563dbd9f"}, + {file = "safetensors-0.4.4-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:00eea99ae422fbfa0b46065acbc58b46bfafadfcec179d4b4a32d5c45006af6c"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bb1ed4fcb0b3c2f3ea2c5767434622fe5d660e5752f21ac2e8d737b1e5e480bb"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:73fc9a0a4343188bdb421783e600bfaf81d0793cd4cce6bafb3c2ed567a74cd5"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c37e6b714200824c73ca6eaf007382de76f39466a46e97558b8dc4cf643cfbf"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f75698c5c5c542417ac4956acfc420f7d4a2396adca63a015fd66641ea751759"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca1a209157f242eb183e209040097118472e169f2e069bfbd40c303e24866543"}, + {file = "safetensors-0.4.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:177f2b60a058f92a3cec7a1786c9106c29eca8987ecdfb79ee88126e5f47fa31"}, + {file = 
"safetensors-0.4.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ee9622e84fe6e4cd4f020e5fda70d6206feff3157731df7151d457fdae18e541"}, + {file = "safetensors-0.4.4.tar.gz", hash = "sha256:5fe3e9b705250d0172ed4e100a811543108653fb2b66b9e702a088ad03772a07"}, ] [package.extras] @@ -5632,32 +6166,32 @@ torch = ["safetensors[numpy]", "torch (>=1.10)"] [[package]] name = "scikit-learn" -version = "1.5.0" +version = "1.5.1" description = "A set of python modules for machine learning and data mining" optional = false python-versions = ">=3.9" files = [ - {file = "scikit_learn-1.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:12e40ac48555e6b551f0a0a5743cc94cc5a765c9513fe708e01f0aa001da2801"}, - {file = "scikit_learn-1.5.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:f405c4dae288f5f6553b10c4ac9ea7754d5180ec11e296464adb5d6ac68b6ef5"}, - {file = "scikit_learn-1.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df8ccabbf583315f13160a4bb06037bde99ea7d8211a69787a6b7c5d4ebb6fc3"}, - {file = "scikit_learn-1.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c75ea812cd83b1385bbfa94ae971f0d80adb338a9523f6bbcb5e0b0381151d4"}, - {file = "scikit_learn-1.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:a90c5da84829a0b9b4bf00daf62754b2be741e66b5946911f5bdfaa869fcedd6"}, - {file = "scikit_learn-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a65af2d8a6cce4e163a7951a4cfbfa7fceb2d5c013a4b593686c7f16445cf9d"}, - {file = "scikit_learn-1.5.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:4c0c56c3005f2ec1db3787aeaabefa96256580678cec783986836fc64f8ff622"}, - {file = "scikit_learn-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f77547165c00625551e5c250cefa3f03f2fc92c5e18668abd90bfc4be2e0bff"}, - {file = "scikit_learn-1.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:118a8d229a41158c9f90093e46b3737120a165181a1b58c03461447aa4657415"}, - {file = "scikit_learn-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:a03b09f9f7f09ffe8c5efffe2e9de1196c696d811be6798ad5eddf323c6f4d40"}, - {file = "scikit_learn-1.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:460806030c666addee1f074788b3978329a5bfdc9b7d63e7aad3f6d45c67a210"}, - {file = "scikit_learn-1.5.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:1b94d6440603752b27842eda97f6395f570941857456c606eb1d638efdb38184"}, - {file = "scikit_learn-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d82c2e573f0f2f2f0be897e7a31fcf4e73869247738ab8c3ce7245549af58ab8"}, - {file = "scikit_learn-1.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3a10e1d9e834e84d05e468ec501a356226338778769317ee0b84043c0d8fb06"}, - {file = "scikit_learn-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:855fc5fa8ed9e4f08291203af3d3e5fbdc4737bd617a371559aaa2088166046e"}, - {file = "scikit_learn-1.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:40fb7d4a9a2db07e6e0cae4dc7bdbb8fada17043bac24104d8165e10e4cff1a2"}, - {file = "scikit_learn-1.5.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:47132440050b1c5beb95f8ba0b2402bbd9057ce96ec0ba86f2f445dd4f34df67"}, - {file = "scikit_learn-1.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:174beb56e3e881c90424e21f576fa69c4ffcf5174632a79ab4461c4c960315ac"}, - {file = "scikit_learn-1.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:261fe334ca48f09ed64b8fae13f9b46cc43ac5f580c4a605cbb0a517456c8f71"}, - {file = "scikit_learn-1.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:057b991ac64b3e75c9c04b5f9395eaf19a6179244c089afdebaad98264bff37c"}, - {file = "scikit_learn-1.5.0.tar.gz", hash = "sha256:789e3db01c750ed6d496fa2db7d50637857b451e57bcae863bff707c1247bef7"}, + {file = "scikit_learn-1.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:781586c414f8cc58e71da4f3d7af311e0505a683e112f2f62919e3019abd3745"}, + {file = "scikit_learn-1.5.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:f5b213bc29cc30a89a3130393b0e39c847a15d769d6e59539cd86b75d276b1a7"}, + {file = "scikit_learn-1.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ff4ba34c2abff5ec59c803ed1d97d61b036f659a17f55be102679e88f926fac"}, + {file = "scikit_learn-1.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:161808750c267b77b4a9603cf9c93579c7a74ba8486b1336034c2f1579546d21"}, + {file = "scikit_learn-1.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:10e49170691514a94bb2e03787aa921b82dbc507a4ea1f20fd95557862c98dc1"}, + {file = "scikit_learn-1.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:154297ee43c0b83af12464adeab378dee2d0a700ccd03979e2b821e7dd7cc1c2"}, + {file = "scikit_learn-1.5.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:b5e865e9bd59396220de49cb4a57b17016256637c61b4c5cc81aaf16bc123bbe"}, + {file = "scikit_learn-1.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:909144d50f367a513cee6090873ae582dba019cb3fca063b38054fa42704c3a4"}, + {file = "scikit_learn-1.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689b6f74b2c880276e365fe84fe4f1befd6a774f016339c65655eaff12e10cbf"}, + {file = "scikit_learn-1.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:9a07f90846313a7639af6a019d849ff72baadfa4c74c778821ae0fad07b7275b"}, + {file = "scikit_learn-1.5.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5944ce1faada31c55fb2ba20a5346b88e36811aab504ccafb9f0339e9f780395"}, + {file = "scikit_learn-1.5.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0828673c5b520e879f2af6a9e99eee0eefea69a2188be1ca68a6121b809055c1"}, + {file = "scikit_learn-1.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:508907e5f81390e16d754e8815f7497e52139162fd69c4fdbd2dfa5d6cc88915"}, + {file = "scikit_learn-1.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97625f217c5c0c5d0505fa2af28ae424bd37949bb2f16ace3ff5f2f81fb4498b"}, + {file = "scikit_learn-1.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:da3f404e9e284d2b0a157e1b56b6566a34eb2798205cba35a211df3296ab7a74"}, + {file = "scikit_learn-1.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:88e0672c7ac21eb149d409c74cc29f1d611d5158175846e7a9c2427bd12b3956"}, + {file = "scikit_learn-1.5.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:7b073a27797a283187a4ef4ee149959defc350b46cbf63a84d8514fe16b69855"}, + {file = "scikit_learn-1.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b59e3e62d2be870e5c74af4e793293753565c7383ae82943b83383fdcf5cc5c1"}, + {file = "scikit_learn-1.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bd8d3a19d4bd6dc5a7d4f358c8c3a60934dc058f363c34c0ac1e9e12a31421d"}, + {file = "scikit_learn-1.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:5f57428de0c900a98389c4a433d4a3cf89de979b3aa24d1c1d251802aa15e44d"}, + {file = "scikit_learn-1.5.1.tar.gz", hash = 
"sha256:0ea5d40c0e3951df445721927448755d3fe1d80833b0b7308ebff5d2a45e6414"}, ] [package.dependencies] @@ -5668,8 +6202,8 @@ threadpoolctl = ">=3.1.0" [package.extras] benchmark = ["matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "pandas (>=1.1.5)"] -build = ["cython (>=3.0.10)", "meson-python (>=0.15.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.23)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.15.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +build = ["cython (>=3.0.10)", "meson-python (>=0.16.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory_profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "polars (>=0.20.23)", "pooch (>=1.6.0)", "pydata-sphinx-theme (>=0.15.3)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=7.3.7)", "sphinx-copybutton (>=0.5.2)", "sphinx-design (>=0.5.0)", "sphinx-gallery (>=0.16.0)", "sphinx-prompt (>=1.4.0)", "sphinx-remove-toctrees (>=1.0.0.post1)", "sphinxcontrib-sass (>=0.3.4)", "sphinxext-opengraph (>=0.9.1)"] examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] install = ["joblib (>=1.2.0)", "numpy (>=1.19.5)", "scipy (>=1.6.0)", "threadpoolctl (>=3.1.0)"] maintenance = ["conda-lock (==2.5.6)"] @@ -5743,18 +6277,71 @@ dev = ["pre-commit", "pytest", "ruff (>=0.3.0)"] [[package]] name = "setuptools" -version = "70.0.0" +version = "72.1.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-70.0.0-py3-none-any.whl", hash = "sha256:54faa7f2e8d2d11bcd2c07bed282eef1046b5c080d1c32add737d7b5817b1ad4"}, - {file = "setuptools-70.0.0.tar.gz", hash = "sha256:f211a66637b8fa059bb28183da127d4e86396c991a942b028c6650d4319c3fd0"}, + {file = "setuptools-72.1.0-py3-none-any.whl", hash = "sha256:5a03e1860cf56bb6ef48ce186b0e557fdba433237481a9a625176c2831be15d1"}, + {file = "setuptools-72.1.0.tar.gz", hash = "sha256:8d243eff56d095e5817f796ede6ae32941278f542e0f941867cc05ae52b162ec"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mypy (==1.9)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "ordered-set (>=3.1.1)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +doc = ["furo", 
"jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (<0.4)", "pytest-ruff (>=0.2.1)", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "shapely" +version = "2.0.5" +description = "Manipulation and analysis of geometric objects" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shapely-2.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:89d34787c44f77a7d37d55ae821f3a784fa33592b9d217a45053a93ade899375"}, + {file = "shapely-2.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:798090b426142df2c5258779c1d8d5734ec6942f778dab6c6c30cfe7f3bf64ff"}, + {file = "shapely-2.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45211276900c4790d6bfc6105cbf1030742da67594ea4161a9ce6812a6721e68"}, + {file = "shapely-2.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e119444bc27ca33e786772b81760f2028d930ac55dafe9bc50ef538b794a8e1"}, + {file = "shapely-2.0.5-cp310-cp310-win32.whl", hash = "sha256:9a4492a2b2ccbeaebf181e7310d2dfff4fdd505aef59d6cb0f217607cb042fb3"}, + {file = "shapely-2.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:1e5cb5ee72f1bc7ace737c9ecd30dc174a5295fae412972d3879bac2e82c8fae"}, + {file = "shapely-2.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5bbfb048a74cf273db9091ff3155d373020852805a37dfc846ab71dde4be93ec"}, + {file = "shapely-2.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93be600cbe2fbaa86c8eb70656369f2f7104cd231f0d6585c7d0aa555d6878b8"}, + {file = "shapely-2.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f8e71bb9a46814019f6644c4e2560a09d44b80100e46e371578f35eaaa9da1c"}, + {file = "shapely-2.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5251c28a29012e92de01d2e84f11637eb1d48184ee8f22e2df6c8c578d26760"}, + {file = "shapely-2.0.5-cp311-cp311-win32.whl", hash = "sha256:35110e80070d664781ec7955c7de557456b25727a0257b354830abb759bf8311"}, + {file = "shapely-2.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c6b78c0007a34ce7144f98b7418800e0a6a5d9a762f2244b00ea560525290c9"}, + {file = "shapely-2.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:03bd7b5fa5deb44795cc0a503999d10ae9d8a22df54ae8d4a4cd2e8a93466195"}, + {file = "shapely-2.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ff9521991ed9e201c2e923da014e766c1aa04771bc93e6fe97c27dcf0d40ace"}, + {file = "shapely-2.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b65365cfbf657604e50d15161ffcc68de5cdb22a601bbf7823540ab4918a98d"}, + {file = "shapely-2.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21f64e647a025b61b19585d2247137b3a38a35314ea68c66aaf507a1c03ef6fe"}, + {file = 
"shapely-2.0.5-cp312-cp312-win32.whl", hash = "sha256:3ac7dc1350700c139c956b03d9c3df49a5b34aaf91d024d1510a09717ea39199"}, + {file = "shapely-2.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:30e8737983c9d954cd17feb49eb169f02f1da49e24e5171122cf2c2b62d65c95"}, + {file = "shapely-2.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ff7731fea5face9ec08a861ed351734a79475631b7540ceb0b66fb9732a5f529"}, + {file = "shapely-2.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff9e520af0c5a578e174bca3c18713cd47a6c6a15b6cf1f50ac17dc8bb8db6a2"}, + {file = "shapely-2.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49b299b91557b04acb75e9732645428470825061f871a2edc36b9417d66c1fc5"}, + {file = "shapely-2.0.5-cp37-cp37m-win32.whl", hash = "sha256:b5870633f8e684bf6d1ae4df527ddcb6f3895f7b12bced5c13266ac04f47d231"}, + {file = "shapely-2.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:401cb794c5067598f50518e5a997e270cd7642c4992645479b915c503866abed"}, + {file = "shapely-2.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e91ee179af539100eb520281ba5394919067c6b51824e6ab132ad4b3b3e76dd0"}, + {file = "shapely-2.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8af6f7260f809c0862741ad08b1b89cb60c130ae30efab62320bbf4ee9cc71fa"}, + {file = "shapely-2.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5456dd522800306ba3faef77c5ba847ec30a0bd73ab087a25e0acdd4db2514f"}, + {file = "shapely-2.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b714a840402cde66fd7b663bb08cacb7211fa4412ea2a209688f671e0d0631fd"}, + {file = "shapely-2.0.5-cp38-cp38-win32.whl", hash = "sha256:7e8cf5c252fac1ea51b3162be2ec3faddedc82c256a1160fc0e8ddbec81b06d2"}, + {file = "shapely-2.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:4461509afdb15051e73ab178fae79974387f39c47ab635a7330d7fee02c68a3f"}, + {file = "shapely-2.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7545a39c55cad1562be302d74c74586f79e07b592df8ada56b79a209731c0219"}, + {file = "shapely-2.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4c83a36f12ec8dee2066946d98d4d841ab6512a6ed7eb742e026a64854019b5f"}, + {file = "shapely-2.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89e640c2cd37378480caf2eeda9a51be64201f01f786d127e78eaeff091ec897"}, + {file = "shapely-2.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06efe39beafde3a18a21dde169d32f315c57da962826a6d7d22630025200c5e6"}, + {file = "shapely-2.0.5-cp39-cp39-win32.whl", hash = "sha256:8203a8b2d44dcb366becbc8c3d553670320e4acf0616c39e218c9561dd738d92"}, + {file = "shapely-2.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:7fed9dbfbcfec2682d9a047b9699db8dcc890dfca857ecba872c42185fc9e64e"}, + {file = "shapely-2.0.5.tar.gz", hash = "sha256:bff2366bc786bfa6cb353d6b47d0443c570c32776612e527ee47b6df63fcfe32"}, +] + +[package.dependencies] +numpy = ">=1.14,<3" + +[package.extras] +docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +test = ["pytest", "pytest-cov"] [[package]] name = "shellingham" @@ -5812,13 +6399,13 @@ tests = ["Django", "birdseye", "littleutils", "numpy (>=1.16.5)", "pandas (>=0.2 [[package]] name = "soupsieve" -version = "2.5" +version = "2.6" description = "A modern CSS selector implementation for Beautiful Soup." 
optional = false python-versions = ">=3.8" files = [ - {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"}, - {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, + {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, + {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, ] [[package]] @@ -5859,51 +6446,41 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7 [[package]] name = "std-uritemplate" -version = "0.0.57" +version = "1.0.5" description = "std-uritemplate implementation for Python" optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "std_uritemplate-0.0.57-py3-none-any.whl", hash = "sha256:66691cb6ff1d1b3612741053d6f5573ec7eb1c1a33ffb5ca49557e8aa2372aa8"}, - {file = "std_uritemplate-0.0.57.tar.gz", hash = "sha256:f4adc717aec138562e652b95da74fc6815a942231d971314856b81f434c1b94c"}, + {file = "std_uritemplate-1.0.5-py3-none-any.whl", hash = "sha256:8daf745b350ef3bc7b4ef82460a6c48aa459ca65fce8bda8657178959e3832d7"}, + {file = "std_uritemplate-1.0.5.tar.gz", hash = "sha256:6ea31e72f96ab2b54d93c774de2175ce5350a833fbf7c024bb3718a3a539f605"}, ] [[package]] name = "sympy" -version = "1.12" +version = "1.13.2" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" files = [ - {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, - {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, + {file = "sympy-1.13.2-py3-none-any.whl", hash = "sha256:c51d75517712f1aed280d4ce58506a4a88d635d6b5dd48b39102a7ae1f3fcfe9"}, + {file = "sympy-1.13.2.tar.gz", hash = "sha256:401449d84d07be9d0c7a46a64bd54fe097667d5e7181bfe67ec777be9e01cb13"}, ] [package.dependencies] -mpmath = ">=0.19" +mpmath = ">=1.1.0,<1.4" -[[package]] -name = "tbb" -version = "2021.12.0" -description = "Intel® oneAPI Threading Building Blocks (oneTBB)" -optional = false -python-versions = "*" -files = [ - {file = "tbb-2021.12.0-py2.py3-none-manylinux1_i686.whl", hash = "sha256:f2cc9a7f8ababaa506cbff796ce97c3bf91062ba521e15054394f773375d81d8"}, - {file = "tbb-2021.12.0-py2.py3-none-manylinux1_x86_64.whl", hash = "sha256:a925e9a7c77d3a46ae31c34b0bb7f801c4118e857d137b68f68a8e458fcf2bd7"}, - {file = "tbb-2021.12.0-py3-none-win32.whl", hash = "sha256:b1725b30c174048edc8be70bd43bb95473f396ce895d91151a474d0fa9f450a8"}, - {file = "tbb-2021.12.0-py3-none-win_amd64.whl", hash = "sha256:fc2772d850229f2f3df85f1109c4844c495a2db7433d38200959ee9265b34789"}, -] +[package.extras] +dev = ["hypothesis (>=6.70.0)", "pytest (>=7.1.0)"] [[package]] name = "tenacity" -version = "8.3.0" +version = "9.0.0" description = "Retry code until it succeeds" optional = false python-versions = ">=3.8" files = [ - {file = "tenacity-8.3.0-py3-none-any.whl", hash = "sha256:3649f6443dbc0d9b01b9d8020a9c4ec7a1ff5f6f3c6c8a036ef371f573fe9185"}, - {file = "tenacity-8.3.0.tar.gz", hash = "sha256:953d4e6ad24357bceffbc9707bc74349aca9d245f68eb65419cf0c249a1949a2"}, + {file = "tenacity-9.0.0-py3-none-any.whl", hash = "sha256:93de0c98785b27fcf659856aa9f54bfbd399e29969b0621bc7f762bd441b4539"}, + {file = "tenacity-9.0.0.tar.gz", hash = "sha256:807f37ca97d62aa361264d497b0e31e92b8027044942bfa756160d908320d73b"}, ] 
[package.extras] @@ -6069,38 +6646,42 @@ files = [ [[package]] name = "torch" -version = "2.3.1" +version = "2.2.2" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" optional = false python-versions = ">=3.8.0" files = [ - {file = "torch-2.3.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:605a25b23944be5ab7c3467e843580e1d888b8066e5aaf17ff7bf9cc30001cc3"}, - {file = "torch-2.3.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f2357eb0965583a0954d6f9ad005bba0091f956aef879822274b1bcdb11bd308"}, - {file = "torch-2.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:32b05fe0d1ada7f69c9f86c14ff69b0ef1957a5a54199bacba63d22d8fab720b"}, - {file = "torch-2.3.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:7c09a94362778428484bcf995f6004b04952106aee0ef45ff0b4bab484f5498d"}, - {file = "torch-2.3.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:b2ec81b61bb094ea4a9dee1cd3f7b76a44555375719ad29f05c0ca8ef596ad39"}, - {file = "torch-2.3.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:490cc3d917d1fe0bd027057dfe9941dc1d6d8e3cae76140f5dd9a7e5bc7130ab"}, - {file = "torch-2.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:5802530783bd465fe66c2df99123c9a54be06da118fbd785a25ab0a88123758a"}, - {file = "torch-2.3.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:a7dd4ed388ad1f3d502bf09453d5fe596c7b121de7e0cfaca1e2017782e9bbac"}, - {file = "torch-2.3.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:a486c0b1976a118805fc7c9641d02df7afbb0c21e6b555d3bb985c9f9601b61a"}, - {file = "torch-2.3.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:224259821fe3e4c6f7edf1528e4fe4ac779c77addaa74215eb0b63a5c474d66c"}, - {file = "torch-2.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:e5fdccbf6f1334b2203a61a0e03821d5845f1421defe311dabeae2fc8fbeac2d"}, - {file = "torch-2.3.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:3c333dc2ebc189561514eda06e81df22bf8fb64e2384746b2cb9f04f96d1d4c8"}, - {file = "torch-2.3.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:07e9ba746832b8d069cacb45f312cadd8ad02b81ea527ec9766c0e7404bb3feb"}, - {file = "torch-2.3.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:462d1c07dbf6bb5d9d2f3316fee73a24f3d12cd8dacf681ad46ef6418f7f6626"}, - {file = "torch-2.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:ff60bf7ce3de1d43ad3f6969983f321a31f0a45df3690921720bcad6a8596cc4"}, - {file = "torch-2.3.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:bee0bd33dc58aa8fc8a7527876e9b9a0e812ad08122054a5bff2ce5abf005b10"}, - {file = "torch-2.3.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:aaa872abde9a3d4f91580f6396d54888620f4a0b92e3976a6034759df4b961ad"}, - {file = "torch-2.3.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:3d7a7f7ef21a7520510553dc3938b0c57c116a7daee20736a9e25cbc0e832bdc"}, - {file = "torch-2.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:4777f6cefa0c2b5fa87223c213e7b6f417cf254a45e5829be4ccd1b2a4ee1011"}, - {file = "torch-2.3.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:2bb5af780c55be68fe100feb0528d2edebace1d55cb2e351de735809ba7391eb"}, + {file = "torch-2.2.2-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:bc889d311a855dd2dfd164daf8cc903a6b7273a747189cebafdd89106e4ad585"}, + {file = "torch-2.2.2-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:15dffa4cc3261fa73d02f0ed25f5fa49ecc9e12bf1ae0a4c1e7a88bbfaad9030"}, + {file = "torch-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:11e8fe261233aeabd67696d6b993eeb0896faa175c6b41b9a6c9f0334bdad1c5"}, + {file = "torch-2.2.2-cp310-none-macosx_10_9_x86_64.whl", hash = 
"sha256:b2e2200b245bd9f263a0d41b6a2dab69c4aca635a01b30cca78064b0ef5b109e"}, + {file = "torch-2.2.2-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:877b3e6593b5e00b35bbe111b7057464e76a7dd186a287280d941b564b0563c2"}, + {file = "torch-2.2.2-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:ad4c03b786e074f46606f4151c0a1e3740268bcf29fbd2fdf6666d66341c1dcb"}, + {file = "torch-2.2.2-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:32827fa1fbe5da8851686256b4cd94cc7b11be962862c2293811c94eea9457bf"}, + {file = "torch-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:f9ef0a648310435511e76905f9b89612e45ef2c8b023bee294f5e6f7e73a3e7c"}, + {file = "torch-2.2.2-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:95b9b44f3bcebd8b6cd8d37ec802048c872d9c567ba52c894bba90863a439059"}, + {file = "torch-2.2.2-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:49aa4126ede714c5aeef7ae92969b4b0bbe67f19665106463c39f22e0a1860d1"}, + {file = "torch-2.2.2-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:cf12cdb66c9c940227ad647bc9cf5dba7e8640772ae10dfe7569a0c1e2a28aca"}, + {file = "torch-2.2.2-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:89ddac2a8c1fb6569b90890955de0c34e1724f87431cacff4c1979b5f769203c"}, + {file = "torch-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:451331406b760f4b1ab298ddd536486ab3cfb1312614cfe0532133535be60bea"}, + {file = "torch-2.2.2-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:eb4d6e9d3663e26cd27dc3ad266b34445a16b54908e74725adb241aa56987533"}, + {file = "torch-2.2.2-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:bf9558da7d2bf7463390b3b2a61a6a3dbb0b45b161ee1dd5ec640bf579d479fc"}, + {file = "torch-2.2.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cd2bf7697c9e95fb5d97cc1d525486d8cf11a084c6af1345c2c2c22a6b0029d0"}, + {file = "torch-2.2.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:b421448d194496e1114d87a8b8d6506bce949544e513742b097e2ab8f7efef32"}, + {file = "torch-2.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:3dbcd563a9b792161640c0cffe17e3270d85e8f4243b1f1ed19cca43d28d235b"}, + {file = "torch-2.2.2-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:31f4310210e7dda49f1fb52b0ec9e59382cfcb938693f6d5378f25b43d7c1d29"}, + {file = "torch-2.2.2-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:c795feb7e8ce2e0ef63f75f8e1ab52e7fd5e1a4d7d0c31367ade1e3de35c9e95"}, + {file = "torch-2.2.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:a6e5770d68158d07456bfcb5318b173886f579fdfbf747543901ce718ea94782"}, + {file = "torch-2.2.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:67dcd726edff108e2cd6c51ff0e416fd260c869904de95750e80051358680d24"}, + {file = "torch-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:539d5ef6c4ce15bd3bd47a7b4a6e7c10d49d4d21c0baaa87c7d2ef8698632dfb"}, + {file = "torch-2.2.2-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:dff696de90d6f6d1e8200e9892861fd4677306d0ef604cb18f2134186f719f82"}, + {file = "torch-2.2.2-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:3a4dd910663fd7a124c056c878a52c2b0be4a5a424188058fe97109d4436ee42"}, ] [package.dependencies] filelock = "*" fsspec = "*" jinja2 = "*" -mkl = {version = ">=2021.1.1,<=2021.4.0", markers = "platform_system == \"Windows\""} networkx = "*" nvidia-cublas-cu12 = {version = "12.1.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cuda-cupti-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} @@ -6111,10 +6692,10 @@ nvidia-cufft-cu12 = {version = "11.0.2.54", markers = "platform_system == \"Linu 
nvidia-curand-cu12 = {version = "10.3.2.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cusolver-cu12 = {version = "11.4.5.107", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-cusparse-cu12 = {version = "12.1.0.106", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} -nvidia-nccl-cu12 = {version = "2.20.5", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +nvidia-nccl-cu12 = {version = "2.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} nvidia-nvtx-cu12 = {version = "12.1.105", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} sympy = "*" -triton = {version = "2.3.1", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\""} +triton = {version = "2.2.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and python_version < \"3.12\""} typing-extensions = ">=4.8.0" [package.extras] @@ -6143,13 +6724,13 @@ files = [ [[package]] name = "tqdm" -version = "4.66.4" +version = "4.66.5" description = "Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.4-py3-none-any.whl", hash = "sha256:b75ca56b413b030bc3f00af51fd2c1a1a5eac6a0c1cca83cbb37a5c52abce644"}, - {file = "tqdm-4.66.4.tar.gz", hash = "sha256:e4d936c9de8727928f3be6079590e97d9abfe8d39a590be678eb5919ffc186bb"}, + {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, + {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, ] [package.dependencies] @@ -6178,20 +6759,20 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0, [[package]] name = "transformers" -version = "4.42.4" +version = "4.44.0" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.8.0" files = [ - {file = "transformers-4.42.4-py3-none-any.whl", hash = "sha256:6d59061392d0f1da312af29c962df9017ff3c0108c681a56d1bc981004d16d24"}, - {file = "transformers-4.42.4.tar.gz", hash = "sha256:f956e25e24df851f650cb2c158b6f4352dfae9d702f04c113ed24fc36ce7ae2d"}, + {file = "transformers-4.44.0-py3-none-any.whl", hash = "sha256:ea0ff72def71e9f4812d9414d4803b22681b1617aa6f511bd51cfff2b44a6fca"}, + {file = "transformers-4.44.0.tar.gz", hash = "sha256:75699495e30b7635ca444d8d372e138c687ab51a875b387e33f1fb759c37f196"}, ] [package.dependencies] accelerate = {version = ">=0.21.0", optional = true, markers = "extra == \"torch\""} filelock = "*" huggingface-hub = ">=0.23.2,<1.0" -numpy = ">=1.17,<2.0" +numpy = ">=1.17" packaging = ">=20.0" pyyaml = ">=5.1" regex = "!=2019.12.17" @@ -6204,15 +6785,15 @@ tqdm = ">=4.27" [package.extras] accelerate = ["accelerate (>=0.21.0)"] agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] -all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", 
"sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"] audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] benchmark = ["optimum-benchmark (>=0.2.0)"] codecarbon = ["codecarbon (==1.2.0)"] deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"] -deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.4.4)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] -dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.4.4)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.4.4)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", 
"tokenizers (>=0.19,<0.20)", "urllib3 (<2.0.0)"] -dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.4.4)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece 
(>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.19,<0.20)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"] flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] ftfy = ["ftfy"] @@ -6223,42 +6804,42 @@ natten = ["natten (>=0.14.6,<0.15.0)"] onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] optuna = ["optuna"] -quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "ruff (==0.4.4)", "urllib3 (<2.0.0)"] +quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "ruff (==0.5.1)", "urllib3 (<2.0.0)"] ray = ["ray[tune] (>=2.7.0)"] retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] -ruff = ["ruff (==0.4.4)"] +ruff = ["ruff (==0.5.1)"] sagemaker = ["sagemaker (>=2.31.0)"] sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] serving = ["fastapi", "pydantic", "starlette", "uvicorn"] sigopt = ["sigopt"] sklearn = ["scikit-learn"] speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.4.4)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] -tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] -tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<0.24)", "tensorflow-text (<2.16)", "tf2onnx"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", 
"timeout-decorator"] +tf = ["keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] +tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<0.24)", "tensorflow-text (<2.16)", "tf2onnx"] tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] timm = ["timm (<=0.9.16)"] tokenizers = ["tokenizers (>=0.19,<0.20)"] torch = ["accelerate (>=0.21.0)", "torch"] torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] -torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17,<2.0)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.19,<0.20)", "torch", "tqdm (>=4.27)"] +torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.19,<0.20)", "torch", "tqdm (>=4.27)"] video = ["av (==9.2.0)", "decord (==0.6.0)"] vision = ["Pillow (>=10.0.1,<=15.0)"] [[package]] name = "triton" -version = "2.3.1" +version = "2.2.0" description = "A language and compiler for custom Deep Learning operations" optional = false python-versions = "*" files = [ - {file = "triton-2.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c84595cbe5e546b1b290d2a58b1494df5a2ef066dd890655e5b8a8a92205c33"}, - {file = "triton-2.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9d64ae33bcb3a7a18081e3a746e8cf87ca8623ca13d2c362413ce7a486f893e"}, - {file = "triton-2.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eaf80e8761a9e3498aa92e7bf83a085b31959c61f5e8ac14eedd018df6fccd10"}, - {file = "triton-2.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b13bf35a2b659af7159bf78e92798dc62d877aa991de723937329e2d382f1991"}, - {file = "triton-2.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63381e35ded3304704ea867ffde3b7cfc42c16a55b3062d41e017ef510433d66"}, - {file = "triton-2.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d968264523c7a07911c8fb51b4e0d1b920204dae71491b1fe7b01b62a31e124"}, + {file = "triton-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2294514340cfe4e8f4f9e5c66c702744c4a117d25e618bd08469d0bfed1e2e5"}, + {file = "triton-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da58a152bddb62cafa9a857dd2bc1f886dbf9f9c90a2b5da82157cd2b34392b0"}, + {file = "triton-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af58716e721460a61886668b205963dc4d1e4ac20508cc3f623aef0d70283d5"}, + {file = "triton-2.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8fe46d3ab94a8103e291bd44c741cc294b91d1d81c1a2888254cbf7ff846dab"}, + {file = "triton-2.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ce26093e539d727e7cf6f6f0d932b1ab0574dc02567e684377630d86723ace"}, + {file = "triton-2.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:227cc6f357c5efcb357f3867ac2a8e7ecea2298cd4606a8ba1e931d1d5a947df"}, ] [package.dependencies] @@ -6286,26 +6867,81 @@ rich = ">=10.11.0" 
shellingham = ">=1.3.0" typing-extensions = ">=3.7.4.3" +[[package]] +name = "types-cffi" +version = "1.16.0.20240331" +description = "Typing stubs for cffi" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-cffi-1.16.0.20240331.tar.gz", hash = "sha256:b8b20d23a2b89cfed5f8c5bc53b0cb8677c3aac6d970dbc771e28b9c698f5dee"}, + {file = "types_cffi-1.16.0.20240331-py3-none-any.whl", hash = "sha256:a363e5ea54a4eb6a4a105d800685fde596bc318089b025b27dee09849fe41ff0"}, +] + +[package.dependencies] +types-setuptools = "*" + +[[package]] +name = "types-pyopenssl" +version = "24.1.0.20240722" +description = "Typing stubs for pyOpenSSL" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-pyOpenSSL-24.1.0.20240722.tar.gz", hash = "sha256:47913b4678a01d879f503a12044468221ed8576263c1540dcb0484ca21b08c39"}, + {file = "types_pyOpenSSL-24.1.0.20240722-py3-none-any.whl", hash = "sha256:6a7a5d2ec042537934cfb4c9d4deb0e16c4c6250b09358df1f083682fe6fda54"}, +] + +[package.dependencies] +cryptography = ">=35.0.0" +types-cffi = "*" + [[package]] name = "types-pyyaml" -version = "6.0.12.20240311" +version = "6.0.12.20240808" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.8" files = [ - {file = "types-PyYAML-6.0.12.20240311.tar.gz", hash = "sha256:a9e0f0f88dc835739b0c1ca51ee90d04ca2a897a71af79de9aec5f38cb0a5342"}, - {file = "types_PyYAML-6.0.12.20240311-py3-none-any.whl", hash = "sha256:b845b06a1c7e54b8e5b4c683043de0d9caf205e7434b3edc678ff2411979b8f6"}, + {file = "types-PyYAML-6.0.12.20240808.tar.gz", hash = "sha256:b8f76ddbd7f65440a8bda5526a9607e4c7a322dc2f8e1a8c405644f9a6f4b9af"}, + {file = "types_PyYAML-6.0.12.20240808-py3-none-any.whl", hash = "sha256:deda34c5c655265fc517b546c902aa6eed2ef8d3e921e4765fe606fe2afe8d35"}, +] + +[[package]] +name = "types-redis" +version = "4.6.0.20240806" +description = "Typing stubs for redis" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-redis-4.6.0.20240806.tar.gz", hash = "sha256:60dd02c2b91ea2d42ad079ac58dedc31d71d6eedb1c21d3796811b02baac655d"}, + {file = "types_redis-4.6.0.20240806-py3-none-any.whl", hash = "sha256:9d8fbe0ce37e3660c0a06982db7812384295d10a93d637c7f8604a2f3c88b0e6"}, +] + +[package.dependencies] +cryptography = ">=35.0.0" +types-pyOpenSSL = "*" + +[[package]] +name = "types-setuptools" +version = "71.1.0.20240813" +description = "Typing stubs for setuptools" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-setuptools-71.1.0.20240813.tar.gz", hash = "sha256:94ff4f0af18c7c24ac88932bcb0f5655fb7187a001b7c61e53a1bfdaf9877b54"}, + {file = "types_setuptools-71.1.0.20240813-py3-none-any.whl", hash = "sha256:d9d9ba2936f5d3b47b59ae9bf65942a60063ac1d6bbee180a8a79fbb43f22ce5"}, ] [[package]] name = "typing-extensions" -version = "4.12.0" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.12.0-py3-none-any.whl", hash = "sha256:b349c66bea9016ac22978d800cfff206d5f9816951f12a7d0ec5578b0a819594"}, - {file = "typing_extensions-4.12.0.tar.gz", hash = "sha256:8cbcdc8606ebcb0d95453ad7dc5065e6237b6aa230a31e81d0f440c30fed5fd8"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] @@ -6436,61 +7072,61 @@ zstd = 
["zstandard (>=0.18.0)"] [[package]] name = "usearch" -version = "2.12.0" +version = "2.13.2" description = "Smaller & Faster Single-File Vector Search Engine from Unum" optional = false python-versions = "*" files = [ - {file = "usearch-2.12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:58b29fc5fa20c7cdd6cd8261f39fedaffd03061601c1624b33a80bdfb29a6844"}, - {file = "usearch-2.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:61e1d186f066507a230ca27e24eaeb051a901b3c5293c2c155f08f534a19d248"}, - {file = "usearch-2.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:28b8901b615a548c8ade2662e9051de9420c34a2d1a8c91d2ba11edb0c3db14f"}, - {file = "usearch-2.12.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7ba988719adb424caa786be318dfdbf1c44b066368f6eee99cf2f424b5f25091"}, - {file = "usearch-2.12.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a7e01373688bd7503868fc506b84765ce59cce65828d613147c0ee05241bdf9b"}, - {file = "usearch-2.12.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c24c0046d17a36f636f7a96f8b812dd7a40ef8b0cbec12fb8fdf2fa5be4a37cc"}, - {file = "usearch-2.12.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:88367f82ef931b98a8c9b1759dff69ac63dc8ef759ee73d2e7f5fdedca02f21b"}, - {file = "usearch-2.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:50380710ad6eb730ab1927b919e206c765fe2eb869444ceba80dc7a81a5fd656"}, - {file = "usearch-2.12.0-cp310-cp310-win_arm64.whl", hash = "sha256:a5edbaef570b084ec1db9d9669329c860bd4a72128efd5867eb93dd2bdc6d23c"}, - {file = "usearch-2.12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4af0d62027425d1d02ef29ee5072501d8395ec6532079aa7834d11b8eaf5972f"}, - {file = "usearch-2.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e91962e35738772ad7f6d15ca5cb9cb6b425a71a7fc9c7e495ce3783742a7df7"}, - {file = "usearch-2.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1bb80d3a6a16adad876088d18eadb9a50b40a4331e0f76a0bbbccd7d577d8016"}, - {file = "usearch-2.12.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ed2f229d2d80be82a09bd4b580c30e3a89228cfd295a3d9faa07b5c02a4aa10"}, - {file = "usearch-2.12.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:3ffe8e866d08fc7fc92148e81d96862893e23c260a45b73e81e19140870d0480"}, - {file = "usearch-2.12.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3fd47c8ef364f54a4737d64e905c5b0031ec8fbecd399cd41d2945819b67a269"}, - {file = "usearch-2.12.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:117bcebdab14057b9ac228a346af5dff65cfe0a780e1398e999ac20def6488e3"}, - {file = "usearch-2.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:522627dc95764ab70122db838a66807034183c1a6d26dcd5ed38fdd9e7d24beb"}, - {file = "usearch-2.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:58f027c2eeeabd75e235cbad2c479b1eea8a751453d5b2580955cdedaec20de1"}, - {file = "usearch-2.12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ac653eb025f75b59a75ef3b7da58c0a1139aca9d0d8c8af2554511ddb1c371e6"}, - {file = "usearch-2.12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ebc5ad46be372b98ef4f667a8cd3df47647de88dc0ee5435cf94195e148e8202"}, - {file = "usearch-2.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a0f2165b6427ed240f4277655ab754a67d3ed47bcbf2ea717c80e4ead095503a"}, - {file = "usearch-2.12.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b20bb4905a21efff7f391306d33a2ffc5bef647cf710c0b562b27b2c1dbe4b51"}, - {file = 
"usearch-2.12.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:48de7f35c1c7d259c35f6d1779ab773811126feec363c8ada5c0efa7cfe0e54b"}, - {file = "usearch-2.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f0e8b79b2dc4a322037eb904a240e7628e9d801a9d0d431e50a3b534c08c91a6"}, - {file = "usearch-2.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d0290c15fc4b441ef148feb398c1d94d6f4db5dbd4f51b8a77d37938656c3c85"}, - {file = "usearch-2.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:542469e287208cdd9b29c192de726d3bca7cb070dfe772a7b76b3e50ce4dbbf4"}, - {file = "usearch-2.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:f3ee8bf67606479d5f453dd2bbdb331a1681e5f21cc5329109d04c83661b20d1"}, - {file = "usearch-2.12.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:130d4bef17b44027061e4c66e745c411d71bc27760e0f269afc8dad3f5d364f9"}, - {file = "usearch-2.12.0-cp37-cp37m-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a90d20929fdc925a9083beb8a4cfdc00f6dac2675be460c83c91b59e5cc731b2"}, - {file = "usearch-2.12.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:b6f5b990c2c09d5d02d1125e610aae1cefeeb58bcd8e7a2f9877c00948ce0765"}, - {file = "usearch-2.12.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:4776973f3c3a7aa387ef070e1d50e438a021202d7b0b85600eb0444c79d60c2e"}, - {file = "usearch-2.12.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:f833ad91f4369eae0cce29ef1d6d3ddcea013243c28032ce5051c55c2ee326f7"}, - {file = "usearch-2.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b4661fc61a0cb6516cd985d4fcab9a513d330f761b08c3fcdd5f8da810aa6bf2"}, - {file = "usearch-2.12.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fca77f8e2b506830f8203b48bb1e3fefe9fa46bf57c8047ae30ffd17c13697c"}, - {file = "usearch-2.12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aaaeef87c7dad25053fc88866f5e48eea414e4937328027e8f74141f9c644a1e"}, - {file = "usearch-2.12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e1833fd5dcaa545892d217876c73f20ca209ae9a2dd30ba8d381cbff95bf689c"}, - {file = "usearch-2.12.0-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d95995accefffd2a6db83ebb25ac47bb149a4df487f197d14559b79801ba2c1"}, - {file = "usearch-2.12.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:e8a948a7f273054469a59f914140de705ad0bfdd41a4f21deba4d30d847191d1"}, - {file = "usearch-2.12.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ab89351fa1104456948b5052bec752fbda4747bc01c25b90991005053834a7ab"}, - {file = "usearch-2.12.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:44e0a7f103e6949eaf588018d1876b4adc563c819a0f7a97876dec4c1b4c3aa6"}, - {file = "usearch-2.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:26d001d0804bb1051b8eff16f1398cbf728ec23cacdf8d1476cf43e5b00665be"}, - {file = "usearch-2.12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b1ec392af176dfcdbd03bb30db2b0eddab10a3d4a789994fe71c678556df50f2"}, - {file = "usearch-2.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f144ea6b9baf4af2358f6a0425d3ea7be79b77a0b97cf236879104fd37dce9d7"}, - {file = "usearch-2.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:562a25fa49ed31f88d5798086c6b603952dd27146f3d1ac879cf0e15a3645656"}, - {file = "usearch-2.12.0-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eacbced5348a4703b93be9fc16cec826dfb782fb73924f3e6e6db60db7f6677d"}, - {file = "usearch-2.12.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:6098c4c0feae641195dc5f36d7f8009712ca4048a0e2472a39d0c8415b1c3ea8"}, - {file = 
"usearch-2.12.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:78f75e35aca2a1d085fc3f750dc4cde68cf8dcc79fdeff326abb0fc4c58f7674"}, - {file = "usearch-2.12.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a9fd31a99a989f463574ec6c029f066a7b39810b1849c0c30c6d5e860bbf383a"}, - {file = "usearch-2.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:8c7e2c1d5ca2ed0ada93484cced017607b802b334936c44158ce66a1cb0f15ab"}, - {file = "usearch-2.12.0-cp39-cp39-win_arm64.whl", hash = "sha256:eff6627db77d1b6865accafdd7068e577d68c1de296f31987dfc945e5dc64aec"}, + {file = "usearch-2.13.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9d1e39e46bc132df19930b8432a32722946f339ebbdbdd0075fbc0819ba00103"}, + {file = "usearch-2.13.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c6cb9ab2448c531c17847135e06cf00abdb6a45bfc06e13330144e0baf0b3fdb"}, + {file = "usearch-2.13.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1f649031009b4828ae87aba650ee620a617a98bfcacd501f76f0b92ad93aef77"}, + {file = "usearch-2.13.2-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5c13219c73e506663fcb577722c57a91bcdbafc7e8d20f9d3233efee643dba72"}, + {file = "usearch-2.13.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:2ce68c330273d7a1eb3e1ef39dc318f60bd74eca055877ece865c7c45c2440eb"}, + {file = "usearch-2.13.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3cc1ef99a7023d13d9c6e2d0cf182fe9f13b5fcafba559247c4cecfc12fa47ee"}, + {file = "usearch-2.13.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f57ff2e6d4c517b86908b9f77ebfb71e18db25110589f2b7c28b5f713d582ba2"}, + {file = "usearch-2.13.2-cp310-cp310-win_amd64.whl", hash = "sha256:a11768735610d221f775ad34a9a904a637d94e71c9b0746243da7383197ca03e"}, + {file = "usearch-2.13.2-cp310-cp310-win_arm64.whl", hash = "sha256:906ad9304b0dc678fa79afd5282869c48bb88039914c4d4c14cf98b3fd8596da"}, + {file = "usearch-2.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3ba61347e7eda059c2f02dec4ad4ff89b317e10c9d25a73b06f92b8b2f40a855"}, + {file = "usearch-2.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:11796b2d10bce16d373f9d2badc2ed361bd44b5b96e02fbd30c48adbb084c63d"}, + {file = "usearch-2.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f5de2eb6d6468a369c051f7523d5431fa64d3b2331c6191b6430d7344de575eb"}, + {file = "usearch-2.13.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:30ca771280bb47de63cb3d77d727b5c5537f60477b1da9857e40d9835db7a664"}, + {file = "usearch-2.13.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a2c75f980536893d6e7879d2be34ad426b0823c3197b4a5e8d07cd6944787784"}, + {file = "usearch-2.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:682b5b7b2935269269d862ac651356bc80b5005e3943d7cbaecb949828a82359"}, + {file = "usearch-2.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4905b65f00b02f609f3fff6b954b1e912b0349498e907f926290094838d5e996"}, + {file = "usearch-2.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:f3dfcabd448547f1cd1d315a4f7493c360e0972a4bce0d0217a95a58e60d6369"}, + {file = "usearch-2.13.2-cp311-cp311-win_arm64.whl", hash = "sha256:7881dc53571fbb8b81ee4c41ca4d666d76441fe69f3e99641fa8da99b98ecbf1"}, + {file = "usearch-2.13.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:cadf54a120e76472ae8a355ba5189d524ef0a0a0cadf07c399669283128a47c8"}, + {file = "usearch-2.13.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:14effe0f433847f41b7a2304165a23b6c6a0955e46a26731fc89cb4488d3debf"}, + {file = 
"usearch-2.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fa17338313cf50e04cf11785e5892976513152a4b5f37b019602f772e35c4cc3"}, + {file = "usearch-2.13.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed6f52e841fb49e244bcbcdad982febaacd782eff1e8cf31377de02baa4e504"}, + {file = "usearch-2.13.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9d66e3274dbb71f978df4acd741da288bbdb129b9af6f5ac6223182f7f7f9fb8"}, + {file = "usearch-2.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:14b0fb3ac8829e805e4971d846d248e80f7b5274c59d845678bcaa6fbe84426d"}, + {file = "usearch-2.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9ce2f471bf3e947841f446f8e44963edffa90db66f5d315d0e0e738f0369264f"}, + {file = "usearch-2.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:9a69c6ae3d35e9fa03366782984ff97df3a8ee4d6995d51dee5bdf59fb13c5be"}, + {file = "usearch-2.13.2-cp312-cp312-win_arm64.whl", hash = "sha256:9bfecb48814b77c439f8c0d72eb6e645d3a00a16f9385643f78732e4c207b68a"}, + {file = "usearch-2.13.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f6055b056557a02b63506c2c6bf30b97f7645f212accba1f4fdce8826ccfa823"}, + {file = "usearch-2.13.2-cp37-cp37m-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5115c25e1ba4a5beff0fa4780ea7db3b60a827efe3f72453b7fee6b299878d19"}, + {file = "usearch-2.13.2-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:04aca42da4dcccd20c6524a3ece6e4e3e458ea5a15fd51f2d39bc9b353d475c0"}, + {file = "usearch-2.13.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:ba46879670aa27fff4d5446296a95d1ff62e52d9165d8ac6ac3fdd949998d0c9"}, + {file = "usearch-2.13.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:d25bcea3f81d1bf2e836dc35f3c83d7d39b7123b4b39f77827af547fec5b8d15"}, + {file = "usearch-2.13.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7dcd9c9c1509dc6435d332569f05312eba6dab820b5ed28674e0b0444de23057"}, + {file = "usearch-2.13.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:16014dd2e8b798eb8012223c51847b59d9ad8b7a9424b6ae32101f3f31d6e711"}, + {file = "usearch-2.13.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:158aeb09fecc25d725465c0c6dee0793fe34eae668e23545eb927706e9ac1e35"}, + {file = "usearch-2.13.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fd0c6f27c07505929f09a90637c59f3719a0b2201faed61ee3cbeca65af56165"}, + {file = "usearch-2.13.2-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:64cf63b0e0a707d0064fd0d0eb73899d36a6ed6f694603d24e3fb6921903b09c"}, + {file = "usearch-2.13.2-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:2bbae49cabea6982cb1a8f68aab0a3772c8f9ce0e9e6a9842969b39d391c919b"}, + {file = "usearch-2.13.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:cb586334a6b801fe2a6ca7dae5af7a1b7c26aa01efffef708eff35cda45ce5a3"}, + {file = "usearch-2.13.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:94d144f5a5616a1b5f333219ee3f05420aa2fd44d6463e58affaf0e62bd1143d"}, + {file = "usearch-2.13.2-cp38-cp38-win_amd64.whl", hash = "sha256:30dac0f71a6f05c80075f62e32b1a535b41a5073499ecbe577ca0298c1be8a8c"}, + {file = "usearch-2.13.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b7eeeda7d2f9f3b5e0fbd0c6befc783461c43777a97ae46a358acd44500ce8a4"}, + {file = "usearch-2.13.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4b01ce27094c30e370766b145190842f2715362113da712322bc9eed7a1099d2"}, + {file = "usearch-2.13.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0a4afa048fec3893651841c6430e6b98f85c1a9690687823fdf6c31712bd09f"}, + {file = 
"usearch-2.13.2-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef1cce3580f946d97b9b58278b6960632abcd4b62c2be566f0ea11dd78cc0252"}, + {file = "usearch-2.13.2-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:8c48a1e24f37c97e698471ecd25393ef5291a71f0e90887a1fe0001dfbe19aa5"}, + {file = "usearch-2.13.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bfbd43571f42af16cd30796d7132edfe5514088bafc96f5178caf4990e1efd14"}, + {file = "usearch-2.13.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:486134f647b3ddc5baae49f57ef014618bb7c9f0d2b8c6adc178ab793ad2191f"}, + {file = "usearch-2.13.2-cp39-cp39-win_amd64.whl", hash = "sha256:a92a2fa400024a5bf0a09d0d49f86db6db787eb9d7de7b1f2f0249e796e9408c"}, + {file = "usearch-2.13.2-cp39-cp39-win_arm64.whl", hash = "sha256:bc39d38d8552325dd87ce2946ec94ab7f65e5895e8e681d5996d79197d8adfeb"}, ] [package.dependencies] @@ -6499,13 +7135,13 @@ tqdm = "*" [[package]] name = "uvicorn" -version = "0.29.0" +version = "0.30.6" description = "The lightning-fast ASGI server." optional = false python-versions = ">=3.8" files = [ - {file = "uvicorn-0.29.0-py3-none-any.whl", hash = "sha256:2c2aac7ff4f4365c206fd773a39bf4ebd1047c238f8b8268ad996829323473de"}, - {file = "uvicorn-0.29.0.tar.gz", hash = "sha256:6a69214c0b6a087462412670b3ef21224fa48cae0e452b5883e8e8bdfdd11dd0"}, + {file = "uvicorn-0.30.6-py3-none-any.whl", hash = "sha256:65fd46fe3fda5bdc1b03b94eb634923ff18cd35b2f084813ea79d1f103f711b5"}, + {file = "uvicorn-0.30.6.tar.gz", hash = "sha256:4b15decdda1e72be08209e860a1e10e92439ad5b97cf44cc945fcbee66fc5788"}, ] [package.dependencies] @@ -6569,24 +7205,27 @@ test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)" [[package]] name = "validators" -version = "0.28.3" +version = "0.33.0" description = "Python Data Validation for Humans™" optional = false python-versions = ">=3.8" files = [ - {file = "validators-0.28.3-py3-none-any.whl", hash = "sha256:53cafa854f13850156259d9cc479b864ee901f6a96e6b109e6fc33f98f37d99f"}, - {file = "validators-0.28.3.tar.gz", hash = "sha256:c6c79840bcde9ba77b19f6218f7738188115e27830cbaff43264bc4ed24c429d"}, + {file = "validators-0.33.0-py3-none-any.whl", hash = "sha256:134b586a98894f8139865953899fc2daeb3d0c35569552c5518f089ae43ed075"}, + {file = "validators-0.33.0.tar.gz", hash = "sha256:535867e9617f0100e676a1257ba1e206b9bfd847ddc171e4d44811f07ff0bfbf"}, ] +[package.extras] +crypto-eth-addresses = ["eth-hash[pycryptodome] (>=0.7.0)"] + [[package]] name = "virtualenv" -version = "20.26.2" +version = "20.26.3" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.26.2-py3-none-any.whl", hash = "sha256:a624db5e94f01ad993d476b9ee5346fdf7b9de43ccaee0e0197012dc838a0e9b"}, - {file = "virtualenv-20.26.2.tar.gz", hash = "sha256:82bf0f4eebbb78d36ddaee0283d43fe5736b53880b8a8cdcd37390a07ac3741c"}, + {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, + {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, ] [package.dependencies] @@ -6600,86 +7239,98 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [[package]] name = "watchfiles" -version = "0.21.0" +version = "0.23.0" description = "Simple, modern and high performance file watching and code reload in python." 
optional = false python-versions = ">=3.8" files = [ - {file = "watchfiles-0.21.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:27b4035013f1ea49c6c0b42d983133b136637a527e48c132d368eb19bf1ac6aa"}, - {file = "watchfiles-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c81818595eff6e92535ff32825f31c116f867f64ff8cdf6562cd1d6b2e1e8f3e"}, - {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c107ea3cf2bd07199d66f156e3ea756d1b84dfd43b542b2d870b77868c98c03"}, - {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d9ac347653ebd95839a7c607608703b20bc07e577e870d824fa4801bc1cb124"}, - {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5eb86c6acb498208e7663ca22dbe68ca2cf42ab5bf1c776670a50919a56e64ab"}, - {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f564bf68404144ea6b87a78a3f910cc8de216c6b12a4cf0b27718bf4ec38d303"}, - {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d0f32ebfaa9c6011f8454994f86108c2eb9c79b8b7de00b36d558cadcedaa3d"}, - {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6d45d9b699ecbac6c7bd8e0a2609767491540403610962968d258fd6405c17c"}, - {file = "watchfiles-0.21.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:aff06b2cac3ef4616e26ba17a9c250c1fe9dd8a5d907d0193f84c499b1b6e6a9"}, - {file = "watchfiles-0.21.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d9792dff410f266051025ecfaa927078b94cc7478954b06796a9756ccc7e14a9"}, - {file = "watchfiles-0.21.0-cp310-none-win32.whl", hash = "sha256:214cee7f9e09150d4fb42e24919a1e74d8c9b8a9306ed1474ecaddcd5479c293"}, - {file = "watchfiles-0.21.0-cp310-none-win_amd64.whl", hash = "sha256:1ad7247d79f9f55bb25ab1778fd47f32d70cf36053941f07de0b7c4e96b5d235"}, - {file = "watchfiles-0.21.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:668c265d90de8ae914f860d3eeb164534ba2e836811f91fecc7050416ee70aa7"}, - {file = "watchfiles-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a23092a992e61c3a6a70f350a56db7197242f3490da9c87b500f389b2d01eef"}, - {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e7941bbcfdded9c26b0bf720cb7e6fd803d95a55d2c14b4bd1f6a2772230c586"}, - {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11cd0c3100e2233e9c53106265da31d574355c288e15259c0d40a4405cbae317"}, - {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78f30cbe8b2ce770160d3c08cff01b2ae9306fe66ce899b73f0409dc1846c1b"}, - {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6674b00b9756b0af620aa2a3346b01f8e2a3dc729d25617e1b89cf6af4a54eb1"}, - {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd7ac678b92b29ba630d8c842d8ad6c555abda1b9ef044d6cc092dacbfc9719d"}, - {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c873345680c1b87f1e09e0eaf8cf6c891b9851d8b4d3645e7efe2ec20a20cc7"}, - {file = "watchfiles-0.21.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:49f56e6ecc2503e7dbe233fa328b2be1a7797d31548e7a193237dcdf1ad0eee0"}, - {file = "watchfiles-0.21.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:02d91cbac553a3ad141db016e3350b03184deaafeba09b9d6439826ee594b365"}, - {file = "watchfiles-0.21.0-cp311-none-win32.whl", hash = "sha256:ebe684d7d26239e23d102a2bad2a358dedf18e462e8808778703427d1f584400"}, - {file = "watchfiles-0.21.0-cp311-none-win_amd64.whl", hash = "sha256:4566006aa44cb0d21b8ab53baf4b9c667a0ed23efe4aaad8c227bfba0bf15cbe"}, - {file = "watchfiles-0.21.0-cp311-none-win_arm64.whl", hash = "sha256:c550a56bf209a3d987d5a975cdf2063b3389a5d16caf29db4bdddeae49f22078"}, - {file = "watchfiles-0.21.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:51ddac60b96a42c15d24fbdc7a4bfcd02b5a29c047b7f8bf63d3f6f5a860949a"}, - {file = "watchfiles-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:511f0b034120cd1989932bf1e9081aa9fb00f1f949fbd2d9cab6264916ae89b1"}, - {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cfb92d49dbb95ec7a07511bc9efb0faff8fe24ef3805662b8d6808ba8409a71a"}, - {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f92944efc564867bbf841c823c8b71bb0be75e06b8ce45c084b46411475a915"}, - {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:642d66b75eda909fd1112d35c53816d59789a4b38c141a96d62f50a3ef9b3360"}, - {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d23bcd6c8eaa6324fe109d8cac01b41fe9a54b8c498af9ce464c1aeeb99903d6"}, - {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18d5b4da8cf3e41895b34e8c37d13c9ed294954907929aacd95153508d5d89d7"}, - {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b8d1eae0f65441963d805f766c7e9cd092f91e0c600c820c764a4ff71a0764c"}, - {file = "watchfiles-0.21.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1fd9a5205139f3c6bb60d11f6072e0552f0a20b712c85f43d42342d162be1235"}, - {file = "watchfiles-0.21.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a1e3014a625bcf107fbf38eece0e47fa0190e52e45dc6eee5a8265ddc6dc5ea7"}, - {file = "watchfiles-0.21.0-cp312-none-win32.whl", hash = "sha256:9d09869f2c5a6f2d9df50ce3064b3391d3ecb6dced708ad64467b9e4f2c9bef3"}, - {file = "watchfiles-0.21.0-cp312-none-win_amd64.whl", hash = "sha256:18722b50783b5e30a18a8a5db3006bab146d2b705c92eb9a94f78c72beb94094"}, - {file = "watchfiles-0.21.0-cp312-none-win_arm64.whl", hash = "sha256:a3b9bec9579a15fb3ca2d9878deae789df72f2b0fdaf90ad49ee389cad5edab6"}, - {file = "watchfiles-0.21.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:4ea10a29aa5de67de02256a28d1bf53d21322295cb00bd2d57fcd19b850ebd99"}, - {file = "watchfiles-0.21.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:40bca549fdc929b470dd1dbfcb47b3295cb46a6d2c90e50588b0a1b3bd98f429"}, - {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9b37a7ba223b2f26122c148bb8d09a9ff312afca998c48c725ff5a0a632145f7"}, - {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec8c8900dc5c83650a63dd48c4d1d245343f904c4b64b48798c67a3767d7e165"}, - {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ad3fe0a3567c2f0f629d800409cd528cb6251da12e81a1f765e5c5345fd0137"}, - {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d353c4cfda586db2a176ce42c88f2fc31ec25e50212650c89fdd0f560ee507b"}, - {file = 
"watchfiles-0.21.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83a696da8922314ff2aec02987eefb03784f473281d740bf9170181829133765"}, - {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a03651352fc20975ee2a707cd2d74a386cd303cc688f407296064ad1e6d1562"}, - {file = "watchfiles-0.21.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3ad692bc7792be8c32918c699638b660c0de078a6cbe464c46e1340dadb94c19"}, - {file = "watchfiles-0.21.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06247538e8253975bdb328e7683f8515ff5ff041f43be6c40bff62d989b7d0b0"}, - {file = "watchfiles-0.21.0-cp38-none-win32.whl", hash = "sha256:9a0aa47f94ea9a0b39dd30850b0adf2e1cd32a8b4f9c7aa443d852aacf9ca214"}, - {file = "watchfiles-0.21.0-cp38-none-win_amd64.whl", hash = "sha256:8d5f400326840934e3507701f9f7269247f7c026d1b6cfd49477d2be0933cfca"}, - {file = "watchfiles-0.21.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:7f762a1a85a12cc3484f77eee7be87b10f8c50b0b787bb02f4e357403cad0c0e"}, - {file = "watchfiles-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6e9be3ef84e2bb9710f3f777accce25556f4a71e15d2b73223788d528fcc2052"}, - {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4c48a10d17571d1275701e14a601e36959ffada3add8cdbc9e5061a6e3579a5d"}, - {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c889025f59884423428c261f212e04d438de865beda0b1e1babab85ef4c0f01"}, - {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:66fac0c238ab9a2e72d026b5fb91cb902c146202bbd29a9a1a44e8db7b710b6f"}, - {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4a21f71885aa2744719459951819e7bf5a906a6448a6b2bbce8e9cc9f2c8128"}, - {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c9198c989f47898b2c22201756f73249de3748e0fc9de44adaf54a8b259cc0c"}, - {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f57c4461cd24fda22493109c45b3980863c58a25b8bec885ca8bea6b8d4b28"}, - {file = "watchfiles-0.21.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:853853cbf7bf9408b404754b92512ebe3e3a83587503d766d23e6bf83d092ee6"}, - {file = "watchfiles-0.21.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d5b1dc0e708fad9f92c296ab2f948af403bf201db8fb2eb4c8179db143732e49"}, - {file = "watchfiles-0.21.0-cp39-none-win32.whl", hash = "sha256:59137c0c6826bd56c710d1d2bda81553b5e6b7c84d5a676747d80caf0409ad94"}, - {file = "watchfiles-0.21.0-cp39-none-win_amd64.whl", hash = "sha256:6cb8fdc044909e2078c248986f2fc76f911f72b51ea4a4fbbf472e01d14faa58"}, - {file = "watchfiles-0.21.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab03a90b305d2588e8352168e8c5a1520b721d2d367f31e9332c4235b30b8994"}, - {file = "watchfiles-0.21.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:927c589500f9f41e370b0125c12ac9e7d3a2fd166b89e9ee2828b3dda20bfe6f"}, - {file = "watchfiles-0.21.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bd467213195e76f838caf2c28cd65e58302d0254e636e7c0fca81efa4a2e62c"}, - {file = "watchfiles-0.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02b73130687bc3f6bb79d8a170959042eb56eb3a42df3671c79b428cd73f17cc"}, - {file = "watchfiles-0.21.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = 
"sha256:08dca260e85ffae975448e344834d765983237ad6dc308231aa16e7933db763e"}, - {file = "watchfiles-0.21.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3ccceb50c611c433145502735e0370877cced72a6c70fd2410238bcbc7fe51d8"}, - {file = "watchfiles-0.21.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57d430f5fb63fea141ab71ca9c064e80de3a20b427ca2febcbfcef70ff0ce895"}, - {file = "watchfiles-0.21.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd5fad9b9c0dd89904bbdea978ce89a2b692a7ee8a0ce19b940e538c88a809c"}, - {file = "watchfiles-0.21.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:be6dd5d52b73018b21adc1c5d28ac0c68184a64769052dfeb0c5d9998e7f56a2"}, - {file = "watchfiles-0.21.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b3cab0e06143768499384a8a5efb9c4dc53e19382952859e4802f294214f36ec"}, - {file = "watchfiles-0.21.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c6ed10c2497e5fedadf61e465b3ca12a19f96004c15dcffe4bd442ebadc2d85"}, - {file = "watchfiles-0.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43babacef21c519bc6631c5fce2a61eccdfc011b4bcb9047255e9620732c8097"}, - {file = "watchfiles-0.21.0.tar.gz", hash = "sha256:c76c635fabf542bb78524905718c39f736a98e5ab25b23ec6d4abede1a85a6a3"}, + {file = "watchfiles-0.23.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:bee8ce357a05c20db04f46c22be2d1a2c6a8ed365b325d08af94358e0688eeb4"}, + {file = "watchfiles-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4ccd3011cc7ee2f789af9ebe04745436371d36afe610028921cab9f24bb2987b"}, + {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb02d41c33be667e6135e6686f1bb76104c88a312a18faa0ef0262b5bf7f1a0f"}, + {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf12ac34c444362f3261fb3ff548f0037ddd4c5bb85f66c4be30d2936beb3c5"}, + {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0b2c25040a3c0ce0e66c7779cc045fdfbbb8d59e5aabfe033000b42fe44b53e"}, + {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf2be4b9eece4f3da8ba5f244b9e51932ebc441c0867bd6af46a3d97eb068d6"}, + {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40cb8fa00028908211eb9f8d47744dca21a4be6766672e1ff3280bee320436f1"}, + {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f48c917ffd36ff9a5212614c2d0d585fa8b064ca7e66206fb5c095015bc8207"}, + {file = "watchfiles-0.23.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9d183e3888ada88185ab17064079c0db8c17e32023f5c278d7bf8014713b1b5b"}, + {file = "watchfiles-0.23.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9837edf328b2805346f91209b7e660f65fb0e9ca18b7459d075d58db082bf981"}, + {file = "watchfiles-0.23.0-cp310-none-win32.whl", hash = "sha256:296e0b29ab0276ca59d82d2da22cbbdb39a23eed94cca69aed274595fb3dfe42"}, + {file = "watchfiles-0.23.0-cp310-none-win_amd64.whl", hash = "sha256:4ea756e425ab2dfc8ef2a0cb87af8aa7ef7dfc6fc46c6f89bcf382121d4fff75"}, + {file = "watchfiles-0.23.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:e397b64f7aaf26915bf2ad0f1190f75c855d11eb111cc00f12f97430153c2eab"}, + {file = "watchfiles-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:b4ac73b02ca1824ec0a7351588241fd3953748d3774694aa7ddb5e8e46aef3e3"}, + {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130a896d53b48a1cecccfa903f37a1d87dbb74295305f865a3e816452f6e49e4"}, + {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c5e7803a65eb2d563c73230e9d693c6539e3c975ccfe62526cadde69f3fda0cf"}, + {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1aa4cc85202956d1a65c88d18c7b687b8319dbe6b1aec8969784ef7a10e7d1a"}, + {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87f889f6e58849ddb7c5d2cb19e2e074917ed1c6e3ceca50405775166492cca8"}, + {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37fd826dac84c6441615aa3f04077adcc5cac7194a021c9f0d69af20fb9fa788"}, + {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee7db6e36e7a2c15923072e41ea24d9a0cf39658cb0637ecc9307b09d28827e1"}, + {file = "watchfiles-0.23.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2368c5371c17fdcb5a2ea71c5c9d49f9b128821bfee69503cc38eae00feb3220"}, + {file = "watchfiles-0.23.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:857af85d445b9ba9178db95658c219dbd77b71b8264e66836a6eba4fbf49c320"}, + {file = "watchfiles-0.23.0-cp311-none-win32.whl", hash = "sha256:1d636c8aeb28cdd04a4aa89030c4b48f8b2954d8483e5f989774fa441c0ed57b"}, + {file = "watchfiles-0.23.0-cp311-none-win_amd64.whl", hash = "sha256:46f1d8069a95885ca529645cdbb05aea5837d799965676e1b2b1f95a4206313e"}, + {file = "watchfiles-0.23.0-cp311-none-win_arm64.whl", hash = "sha256:e495ed2a7943503766c5d1ff05ae9212dc2ce1c0e30a80d4f0d84889298fa304"}, + {file = "watchfiles-0.23.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1db691bad0243aed27c8354b12d60e8e266b75216ae99d33e927ff5238d270b5"}, + {file = "watchfiles-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62d2b18cb1edaba311fbbfe83fb5e53a858ba37cacb01e69bc20553bb70911b8"}, + {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e087e8fdf1270d000913c12e6eca44edd02aad3559b3e6b8ef00f0ce76e0636f"}, + {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dd41d5c72417b87c00b1b635738f3c283e737d75c5fa5c3e1c60cd03eac3af77"}, + {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e5f3ca0ff47940ce0a389457b35d6df601c317c1e1a9615981c474452f98de1"}, + {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6991e3a78f642368b8b1b669327eb6751439f9f7eaaa625fae67dd6070ecfa0b"}, + {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f7252f52a09f8fa5435dc82b6af79483118ce6bd51eb74e6269f05ee22a7b9f"}, + {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e01bcb8d767c58865207a6c2f2792ad763a0fe1119fb0a430f444f5b02a5ea0"}, + {file = "watchfiles-0.23.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8e56fbcdd27fce061854ddec99e015dd779cae186eb36b14471fc9ae713b118c"}, + {file = "watchfiles-0.23.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bd3e2d64500a6cad28bcd710ee6269fbeb2e5320525acd0cfab5f269ade68581"}, + {file = "watchfiles-0.23.0-cp312-none-win32.whl", hash = 
"sha256:eb99c954291b2fad0eff98b490aa641e128fbc4a03b11c8a0086de8b7077fb75"}, + {file = "watchfiles-0.23.0-cp312-none-win_amd64.whl", hash = "sha256:dccc858372a56080332ea89b78cfb18efb945da858fabeb67f5a44fa0bcb4ebb"}, + {file = "watchfiles-0.23.0-cp312-none-win_arm64.whl", hash = "sha256:6c21a5467f35c61eafb4e394303720893066897fca937bade5b4f5877d350ff8"}, + {file = "watchfiles-0.23.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ba31c32f6b4dceeb2be04f717811565159617e28d61a60bb616b6442027fd4b9"}, + {file = "watchfiles-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:85042ab91814fca99cec4678fc063fb46df4cbb57b4835a1cc2cb7a51e10250e"}, + {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24655e8c1c9c114005c3868a3d432c8aa595a786b8493500071e6a52f3d09217"}, + {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b1a950ab299a4a78fd6369a97b8763732bfb154fdb433356ec55a5bce9515c1"}, + {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8d3c5cd327dd6ce0edfc94374fb5883d254fe78a5e9d9dfc237a1897dc73cd1"}, + {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ff785af8bacdf0be863ec0c428e3288b817e82f3d0c1d652cd9c6d509020dd0"}, + {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:02b7ba9d4557149410747353e7325010d48edcfe9d609a85cb450f17fd50dc3d"}, + {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a1b05c0afb2cd2f48c1ed2ae5487b116e34b93b13074ed3c22ad5c743109f0"}, + {file = "watchfiles-0.23.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:109a61763e7318d9f821b878589e71229f97366fa6a5c7720687d367f3ab9eef"}, + {file = "watchfiles-0.23.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:9f8e6bb5ac007d4a4027b25f09827ed78cbbd5b9700fd6c54429278dacce05d1"}, + {file = "watchfiles-0.23.0-cp313-none-win32.whl", hash = "sha256:f46c6f0aec8d02a52d97a583782d9af38c19a29900747eb048af358a9c1d8e5b"}, + {file = "watchfiles-0.23.0-cp313-none-win_amd64.whl", hash = "sha256:f449afbb971df5c6faeb0a27bca0427d7b600dd8f4a068492faec18023f0dcff"}, + {file = "watchfiles-0.23.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:2dddc2487d33e92f8b6222b5fb74ae2cfde5e8e6c44e0248d24ec23befdc5366"}, + {file = "watchfiles-0.23.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e75695cc952e825fa3e0684a7f4a302f9128721f13eedd8dbd3af2ba450932b8"}, + {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2537ef60596511df79b91613a5bb499b63f46f01a11a81b0a2b0dedf645d0a9c"}, + {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20b423b58f5fdde704a226b598a2d78165fe29eb5621358fe57ea63f16f165c4"}, + {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b98732ec893975455708d6fc9a6daab527fc8bbe65be354a3861f8c450a632a4"}, + {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee1f5fcbf5bc33acc0be9dd31130bcba35d6d2302e4eceafafd7d9018c7755ab"}, + {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8f195338a5a7b50a058522b39517c50238358d9ad8284fd92943643144c0c03"}, + {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:524fcb8d59b0dbee2c9b32207084b67b2420f6431ed02c18bd191e6c575f5c48"}, + {file = "watchfiles-0.23.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0eff099a4df36afaa0eea7a913aa64dcf2cbd4e7a4f319a73012210af4d23810"}, + {file = "watchfiles-0.23.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a8323daae27ea290ba3350c70c836c0d2b0fb47897fa3b0ca6a5375b952b90d3"}, + {file = "watchfiles-0.23.0-cp38-none-win32.whl", hash = "sha256:aafea64a3ae698695975251f4254df2225e2624185a69534e7fe70581066bc1b"}, + {file = "watchfiles-0.23.0-cp38-none-win_amd64.whl", hash = "sha256:c846884b2e690ba62a51048a097acb6b5cd263d8bd91062cd6137e2880578472"}, + {file = "watchfiles-0.23.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a753993635eccf1ecb185dedcc69d220dab41804272f45e4aef0a67e790c3eb3"}, + {file = "watchfiles-0.23.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6bb91fa4d0b392f0f7e27c40981e46dda9eb0fbc84162c7fb478fe115944f491"}, + {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1f67312efa3902a8e8496bfa9824d3bec096ff83c4669ea555c6bdd213aa516"}, + {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7ca6b71dcc50d320c88fb2d88ecd63924934a8abc1673683a242a7ca7d39e781"}, + {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2aec5c29915caf08771d2507da3ac08e8de24a50f746eb1ed295584ba1820330"}, + {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1733b9bc2c8098c6bdb0ff7a3d7cb211753fecb7bd99bdd6df995621ee1a574b"}, + {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:02ff5d7bd066c6a7673b17c8879cd8ee903078d184802a7ee851449c43521bdd"}, + {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e2de19801b0eaa4c5292a223effb7cfb43904cb742c5317a0ac686ed604765"}, + {file = "watchfiles-0.23.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8ada449e22198c31fb013ae7e9add887e8d2bd2335401abd3cbc55f8c5083647"}, + {file = "watchfiles-0.23.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3af1b05361e1cc497bf1be654a664750ae61f5739e4bb094a2be86ec8c6db9b6"}, + {file = "watchfiles-0.23.0-cp39-none-win32.whl", hash = "sha256:486bda18be5d25ab5d932699ceed918f68eb91f45d018b0343e3502e52866e5e"}, + {file = "watchfiles-0.23.0-cp39-none-win_amd64.whl", hash = "sha256:d2d42254b189a346249424fb9bb39182a19289a2409051ee432fb2926bad966a"}, + {file = "watchfiles-0.23.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9265cf87a5b70147bfb2fec14770ed5b11a5bb83353f0eee1c25a81af5abfe"}, + {file = "watchfiles-0.23.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9f02a259fcbbb5fcfe7a0805b1097ead5ba7a043e318eef1db59f93067f0b49b"}, + {file = "watchfiles-0.23.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ebaebb53b34690da0936c256c1cdb0914f24fb0e03da76d185806df9328abed"}, + {file = "watchfiles-0.23.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd257f98cff9c6cb39eee1a83c7c3183970d8a8d23e8cf4f47d9a21329285cee"}, + {file = "watchfiles-0.23.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:aba037c1310dd108411d27b3d5815998ef0e83573e47d4219f45753c710f969f"}, + {file = "watchfiles-0.23.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:a96ac14e184aa86dc43b8a22bb53854760a58b2966c2b41580de938e9bf26ed0"}, + {file = 
"watchfiles-0.23.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11698bb2ea5e991d10f1f4f83a39a02f91e44e4bd05f01b5c1ec04c9342bf63c"}, + {file = "watchfiles-0.23.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efadd40fca3a04063d40c4448c9303ce24dd6151dc162cfae4a2a060232ebdcb"}, + {file = "watchfiles-0.23.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:556347b0abb4224c5ec688fc58214162e92a500323f50182f994f3ad33385dcb"}, + {file = "watchfiles-0.23.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1cf7f486169986c4b9d34087f08ce56a35126600b6fef3028f19ca16d5889071"}, + {file = "watchfiles-0.23.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f18de0f82c62c4197bea5ecf4389288ac755896aac734bd2cc44004c56e4ac47"}, + {file = "watchfiles-0.23.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:532e1f2c491274d1333a814e4c5c2e8b92345d41b12dc806cf07aaff786beb66"}, + {file = "watchfiles-0.23.0.tar.gz", hash = "sha256:9338ade39ff24f8086bb005d16c29f8e9f19e55b18dcb04dfa26fcbc09da497b"}, ] [package.dependencies] @@ -6698,13 +7349,13 @@ files = [ [[package]] name = "weaviate-client" -version = "4.6.5" +version = "4.7.1" description = "A python native Weaviate client" optional = false python-versions = ">=3.8" files = [ - {file = "weaviate_client-4.6.5-py3-none-any.whl", hash = "sha256:ed5b1c26c86081b5286e7b292de80e0380c964d34b4bffc842c1eb9dfadf7e15"}, - {file = "weaviate_client-4.6.5.tar.gz", hash = "sha256:3926fd0c350c54b668b824f9085959904562821ebb6fc237b7e253daf4645904"}, + {file = "weaviate_client-4.7.1-py3-none-any.whl", hash = "sha256:342f5c67b126cee4dc3a60467ad1ae74971cd5614e27af6fb13d687a345352c4"}, + {file = "weaviate_client-4.7.1.tar.gz", hash = "sha256:af99ac4e53613d2ff5b797372e95d004d0c8a1dd10a7f592068bcb423a30af30"}, ] [package.dependencies] @@ -6715,7 +7366,7 @@ grpcio-tools = ">=1.57.0,<2.0.0" httpx = ">=0.25.0,<=0.27.0" pydantic = ">=2.5.0,<3.0.0" requests = ">=2.30.0,<3.0.0" -validators = "0.28.3" +validators = "0.33.0" [[package]] name = "webencodings" @@ -7026,25 +7677,26 @@ multidict = ">=4.0" [[package]] name = "zipp" -version = "3.19.1" +version = "3.20.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"}, - {file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"}, + {file = "zipp-3.20.0-py3-none-any.whl", hash = "sha256:58da6168be89f0be59beb194da1250516fdaa062ccebd30127ac65d30045e10d"}, + {file = "zipp-3.20.0.tar.gz", hash = "sha256:0145e43d89664cfe1a2e533adc75adafed82fe2da404b4bbb6b026c0157bdb31"}, ] [package.extras] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] [extras] -all = ["azure-ai-inference", 
"azure-core", "azure-cosmos", "azure-identity", "azure-search-documents", "chromadb", "ipykernel", "milvus", "mistralai", "motor", "ollama", "pinecone-client", "psycopg", "pyarrow", "pymilvus", "qdrant-client", "redis", "sentence-transformers", "transformers", "usearch", "weaviate-client"] +all = ["anthropic", "azure-ai-inference", "azure-core", "azure-cosmos", "azure-identity", "azure-search-documents", "chromadb", "ipykernel", "milvus", "mistralai", "motor", "ollama", "pinecone-client", "psycopg", "pyarrow", "pymilvus", "qdrant-client", "redis", "sentence-transformers", "torch", "transformers", "usearch", "weaviate-client"] +anthropic = ["anthropic"] azure = ["azure-ai-inference", "azure-core", "azure-cosmos", "azure-identity", "azure-search-documents"] chromadb = ["chromadb"] -google = ["google-generativeai"] -hugging-face = ["sentence-transformers", "transformers"] +google = ["google-cloud-aiplatform", "google-generativeai"] +hugging-face = ["sentence-transformers", "torch", "transformers"] milvus = ["milvus", "pymilvus"] mistralai = ["mistralai"] mongo = ["motor"] @@ -7053,11 +7705,11 @@ ollama = ["ollama"] pinecone = ["pinecone-client"] postgres = ["psycopg"] qdrant = ["qdrant-client"] -redis = ["redis"] +redis = ["redis", "types-redis"] usearch = ["pyarrow", "usearch"] weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = "^3.10,<3.13" -content-hash = "c29fb1fca8d1da50daf3538331cce8f45bbdc9949d0699feaced0fe049787251" +content-hash = "45bffc6686e76fda8799c7a786d0618594cf8f8b7450bb8d805423882a0c20b3" diff --git a/python/pyproject.toml b/python/pyproject.toml index 96557cf8aa7a..da2c21769037 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,10 +1,16 @@ [tool.poetry] name = "semantic-kernel" -version = "1.3.0" +version = "1.6.0" description = "Semantic Kernel Python SDK" authors = ["Microsoft "] readme = "pip/README.md" packages = [{include = "semantic_kernel"}] +homepage = "https://learn.microsoft.com/en-us/semantic-kernel/overview/" +repository = "https://github.com/microsoft/semantic-kernel/" + +[tool.poetry.urls] +"Source Code" = "https://github.com/microsoft/semantic-kernel/tree/main/python" +"Release Notes" = "https://github.com/microsoft/semantic-kernel/releases?q=tag%3Apython-1&expanded=true" [tool.poetry.dependencies] python = "^3.10,<3.13" @@ -48,46 +54,52 @@ azure-cosmos = { version = "^4.7.0", optional = true} # chroma chromadb = { version = ">=0.4.13,<0.6.0", optional = true} # google +google-cloud-aiplatform = { version = "^1.60.0", optional = true} google-generativeai = { version = "^0.7.2", optional = true} # hugging face -transformers = { version = "^4.28.1", extras=["torch"], optional = true} +transformers = { version = "^4.28.1", extras=['torch'], optional = true} sentence-transformers = { version = "^2.2.2", optional = true} +torch = {version = "2.2.2", optional = true} # mongo motor = { version = "^3.3.2", optional = true } # notebooks ipykernel = { version = "^6.21.1", optional = true} # milvus -pymilvus = { version = ">=2.3,<2.4.4", optional = true} +pymilvus = { version = ">=2.3,<2.4.6", optional = true} milvus = { version = ">=2.3,<2.3.8", markers = 'sys_platform != "win32"', optional = true} # mistralai mistralai = { version = "^0.4.1", optional = true} # ollama ollama = { version = "^0.2.1", optional = true} +# anthropic +anthropic = { version = "^0.32.0", optional = true } # pinecone -pinecone-client = { version = ">=3.0.0", optional = true} +pinecone-client = { version = "^5.0.0", optional = true} # postgres 
-psycopg = { version="^3.1.9", extras=["binary","pool"], optional = true} +psycopg = { version="^3.2.1", extras=["binary","pool"], optional = true} # qdrant qdrant-client = { version = '^1.9', optional = true} # redis -redis = { version = "^4.6.0", optional = true} +redis = { version = "^5.0.7", extras=['hiredis'], optional = true} +types-redis = { version="^4.6.0.20240425", optional = true } # usearch usearch = { version = "^2.9", optional = true} -pyarrow = { version = ">=12.0.1,<17.0.0", optional = true} +pyarrow = { version = ">=12.0.1,<18.0.0", optional = true} weaviate-client = { version = ">=3.18,<5.0", optional = true} -ruff = "0.5.2" +pandas = {version = "^2.2.2", optional = true} [tool.poetry.group.dev.dependencies] pre-commit = ">=3.7.1" -ruff = ">=0.4.5" ipykernel = "^6.29.4" nbconvert = "^7.16.4" pytest = "^8.2.1" +pytest-xdist = { version="^3.6.1", extras=["psutil"]} +pytest-cov = ">=5.0.0" pytest-asyncio = "^0.23.7" snoop = "^0.4.3" -pytest-cov = ">=5.0.0" mypy = ">=1.10.0" types-PyYAML = "^6.0.12.20240311" +ruff = "^0.5.2" [tool.poetry.group.unit-tests] optional = true @@ -99,9 +111,17 @@ azure-core = "^1.28.0" azure-cosmos = "^4.7.0" mistralai = "^0.4.1" ollama = "^0.2.1" +google-cloud-aiplatform = "^1.60.0" +anthropic = "^0.32.0" google-generativeai = "^0.7.2" -transformers = { version = "^4.28.1", extras=["torch"]} -sentence-transformers = "^2.2.2" +transformers = { version = "^4.28.1", extras=['torch']} +sentence-transformers = { version = "^2.2.2"} +torch = {version = "2.2.2"} +# qdrant +qdrant-client = '^1.9' +# redis +redis = { version = "^5.0.7", extras=['hiredis']} +pandas = {version = "^2.2.2"} [tool.poetry.group.tests] optional = true @@ -117,62 +137,98 @@ msgraph-sdk = "^1.2.0" # chroma chromadb = ">=0.4.13,<0.6.0" # google +google-cloud-aiplatform = "^1.60.0" google-generativeai = "^0.7.2" # hugging face -transformers = { version = "^4.28.1", extras=["torch"]} -sentence-transformers = "^2.2.2" +transformers = { version = "^4.28.1", extras=['torch']} +sentence-transformers = { version = "^2.2.2"} +torch = {version = "2.2.2"} # milvus -pymilvus = ">=2.3,<2.4.4" +pymilvus = ">=2.3,<2.4.6" milvus = { version = ">=2.3,<2.3.8", markers = 'sys_platform != "win32"'} # mistralai mistralai = "^0.4.1" # ollama ollama = "^0.2.1" +# anthropic +anthropic = "^0.32.0" # mongodb motor = "^3.3.2" # pinecone -pinecone-client = ">=3.0.0" +pinecone-client = "^5.0.0" # postgres psycopg = { version="^3.1.9", extras=["binary","pool"]} # qdrant qdrant-client = '^1.9' # redis -redis = "^4.6.0" +redis = { version="^5.0.7", extras=['hiredis']} +types-redis = { version="^4.6.0.20240425" } # usearch usearch = "^2.9" -pyarrow = ">=12.0.1,<17.0.0" +pyarrow = ">=12.0.1,<18.0.0" # weaviate weaviate-client = ">=3.18,<5.0" +pandas = {version = "^2.2.2"} # Extras are exposed to pip, this allows a user to easily add the right dependencies to their environment [tool.poetry.extras] -all = ["transformers", "sentence-transformers", "qdrant-client", "chromadb", "pymilvus", "milvus", "mistralai", "ollama", "google", "weaviate-client", "pinecone-client", "psycopg", "redis", "azure-ai-inference", "azure-search-documents", "azure-core", "azure-identity", "azure-cosmos", "usearch", "pyarrow", "ipykernel", "motor"] +all = ["transformers", "sentence-transformers", "torch", "qdrant-client", "chromadb", "pymilvus", "milvus", "mistralai", "ollama", "anthropic", "google", "weaviate-client", "pinecone-client", "psycopg", "redis", "azure-ai-inference", "azure-search-documents", "azure-core", "azure-identity", "azure-cosmos", "usearch", "pyarrow", "ipykernel", "motor"] azure = ["azure-ai-inference", "azure-search-documents", "azure-core", "azure-identity", "azure-cosmos", "msgraph-sdk"] chromadb = ["chromadb"] -google = ["google-generativeai"] -hugging_face = ["transformers", "sentence-transformers"] +google = ["google-cloud-aiplatform", "google-generativeai"] +hugging_face = ["transformers", "sentence-transformers", "torch"] milvus = ["pymilvus", "milvus"] mistralai = ["mistralai"] ollama = ["ollama"] +anthropic = ["anthropic"] mongo = ["motor"] notebooks = ["ipykernel"] pinecone = ["pinecone-client"] postgres = ["psycopg"] qdrant = ["qdrant-client"] -redis = ["redis"] +redis = ["redis", "types-redis"] usearch = ["usearch", "pyarrow"] weaviate = ["weaviate-client"] +[tool.pytest.ini_options] +addopts = "-ra -q -r fEX" + [tool.ruff] line-length = 120 target-version = "py310" include = ["*.py", "*.pyi", "**/pyproject.toml", "*.ipynb"] [tool.ruff.lint] +fixable = ["ALL"] +unfixable = [] +preview = true +select = [ + "D", #pydocstyle checks + "E", #error checks + "F", #pyflakes checks + "I", #isort + "CPY", #copyright + "ISC", #implicit string concat + "INP", #implicit namespace package + "RSE", #raise exception parentheses check + "RET", #flake8-return check + "SIM", #flake8-simplify check + "TD", #todos + "FIX", #fixme checks + "ERA", #remove commented-out code + "RUF" #RUF specific rules +] +ignore = [ + "D100", #allow missing docstring in public module + "D104", #allow missing docstring in public package + "TD003", #allow missing link to todo issue + "FIX002" #allow todo +] + +[tool.ruff.format] preview = true -select = ["D", "E", "F", "I", "CPY", "ISC", "INP", "RSE102", "RET", "SIM", "TD", "FIX", "ERA001", "RUF"] -ignore = ["D100", "D101", "D104", "TD003", "FIX002"] +docstring-code-format = true [tool.ruff.lint.pydocstyle] convention = "google" diff --git a/python/samples/concepts/README.md b/python/samples/concepts/README.md index ac7e6350c714..0ef6120ad285 100644 --- a/python/samples/concepts/README.md +++ b/python/samples/concepts/README.md @@ -4,7 +4,7 @@ This section contains code snippets that demonstrate the usage of Semantic Kerne | Features | Description | | -------- | ----------- | -| Agents | Creating and using agents in Semantic Kernel | +| Agents | Creating and using [agents](../../semantic_kernel/agents/) in Semantic Kernel | | AutoFunctionCalling | Using `Auto Function Calling` to allow function call capable models to invoke Kernel Functions automatically | | ChatCompletion | Using [`ChatCompletion`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/connectors/ai/chat_completion_client_base.py) messaging capable service with models | | Filtering | Creating and using Filters | diff --git a/python/samples/concepts/agents/README.md b/python/samples/concepts/agents/README.md deleted file mode 100644 index b723dd4f8642..000000000000 --- a/python/samples/concepts/agents/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Semantic Kernel Agents - Getting Started - -This project contains a step by step guide to get started with _Semantic Kernel Agents_ in Python. - -#### PyPI: - -- For the use of agents, the minimum allowed Semantic Kernel pypi version is 1.3.0.
- -#### Source - -- [Semantic Kernel Agent Framework](../../../semantic_kernel/agents/) - -## Examples - -The getting started with agents examples include: - -| Example | Description | -| ------------------------------------------- | --------------------------------------- | -| [step1_agent](../agents/step1_agent.py) | How to create and use an agent. | -| [step2_plugins](../agents/step2_plugins.py) | How to associate plugins with an agent. | - -## Configuring the Kernel - -Similar to the Semantic Kernel Python concept samples, it is necessary to configure the secrets -and keys used by the kernel. See the follow "Configuring the Kernel" [guide](../README.md#configuring-the-kernel) for -more information. - -## Running Concept Samples - -Concept samples can be run in an IDE or via the command line. After setting up the required api key -for your AI connector, the samples run without any extra command line arguments. diff --git a/python/samples/concepts/agents/assistant_agent_chart_maker.py b/python/samples/concepts/agents/assistant_agent_chart_maker.py new file mode 100644 index 000000000000..95148734d39b --- /dev/null +++ b/python/samples/concepts/agents/assistant_agent_chart_maker.py @@ -0,0 +1,94 @@ +# Copyright (c) Microsoft. All rights reserved. +import asyncio + +from semantic_kernel.agents.open_ai import AzureAssistantAgent, OpenAIAssistantAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.file_reference_content import FileReferenceContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +##################################################################### +# The following sample demonstrates how to create an OpenAI # +# assistant using either Azure OpenAI or OpenAI and leverage the # +# assistant's code interpreter functionality to create charts. # +##################################################################### + +AGENT_NAME = "ChartMaker" +AGENT_INSTRUCTIONS = "Create charts as requested without explanation."
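+ +# The chart produced by the code interpreter comes back as a FileReferenceContent +# item carrying a file_id (see invoke_agent below). As a rough sketch only, assuming +# the OpenAI file-content response exposes the raw bytes via `.content` (this download +# step is not part of the sample itself), the image could be saved locally with: +# +#     response = await agent.client.files.content(item.file_id) +#     with open(f"{item.file_id}.png", "wb") as f: +#         f.write(response.content)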
+ +# Note: you may toggle this to switch between AzureOpenAI and OpenAI +use_azure_openai = True + + +# A helper method to invoke the agent with the user input +async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, input: str) -> None: + """Invoke the agent with the user input.""" + await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=input)) + + print(f"# {AuthorRole.USER}: '{input}'") + + async for message in agent.invoke(thread_id=thread_id): + if message.content: + print(f"# {message.role}: {message.content}") + + if len(message.items) > 0: + for item in message.items: + if isinstance(item, FileReferenceContent): + print(f"\n`{message.role}` => {item.file_id}") + + +async def main(): + # Create the instance of the Kernel + kernel = Kernel() + + # Define a service_id for the sample + service_id = "agent" + + # Create the agent configuration + if use_azure_openai: + agent = await AzureAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_code_interpreter=True, + ) + else: + agent = await OpenAIAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_code_interpreter=True, + ) + + # Define a thread and invoke the agent with the user input + thread_id = await agent.create_thread() + + try: + await invoke_agent( + agent, + thread_id=thread_id, + input=""" + Display this data using a bar-chart: + + Banding Brown Pink Yellow Sum + X00000 339 433 126 898 + X00300 48 421 222 691 + X12345 16 395 352 763 + Others 23 373 156 552 + Sum 426 1622 856 2904 + """, + ) + await invoke_agent( + agent, + thread_id=thread_id, + input="Can you regenerate this same chart using the category names as the bar colors?", + ) + finally: + await agent.delete_thread(thread_id) + await agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/concepts/agents/assistant_agent_file_manipulation.py b/python/samples/concepts/agents/assistant_agent_file_manipulation.py new file mode 100644 index 000000000000..ff13f38a5504 --- /dev/null +++ b/python/samples/concepts/agents/assistant_agent_file_manipulation.py @@ -0,0 +1,85 @@ +# Copyright (c) Microsoft. All rights reserved. +import asyncio +import os + +from semantic_kernel.agents.open_ai import OpenAIAssistantAgent +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.contents.annotation_content import AnnotationContent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +##################################################################### +# The following sample demonstrates how to create an OpenAI # +# assistant using either Azure OpenAI or OpenAI and leverage the # +# assistant's ability to have the code interpreter work with # +# uploaded files. # +##################################################################### + +AGENT_NAME = "FileManipulation" +AGENT_INSTRUCTIONS = "Find answers to the user's questions in the provided file." 
+
+
+# A helper method to invoke the agent with the user input
+async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, input: str) -> None:
+    """Invoke the agent with the user input."""
+    await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=input))
+
+    print(f"# {AuthorRole.USER}: '{input}'")
+
+    async for content in agent.invoke(thread_id=thread_id):
+        print(f"# {content.role}: {content.content}")
+
+        if len(content.items) > 0:
+            for item in content.items:
+                if isinstance(item, AnnotationContent):
+                    print(f"\n`{item.quote}` => {item.file_id}")
+                    response_content = await agent.client.files.content(item.file_id)
+                    print(response_content.text)
+
+
+async def main():
+    # Create the instance of the Kernel
+    kernel = Kernel()
+
+    # Define a service_id for the sample
+    service_id = "agent"
+
+    # Get the path to the sales.csv file
+    csv_file_path = os.path.join(
+        os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
+        "resources",
+        "agent_assistant_file_manipulation",
+        "sales.csv",
+    )
+
+    # Create the assistant agent
+    agent = await AzureAssistantAgent.create(
+        kernel=kernel,
+        service_id=service_id,
+        name=AGENT_NAME,
+        instructions=AGENT_INSTRUCTIONS,
+        enable_code_interpreter=True,
+        code_interpreter_filenames=[csv_file_path],
+    )
+
+    # Create a thread and specify the file to use for code interpretation
+    thread_id = await agent.create_thread()
+
+    try:
+        await invoke_agent(agent, thread_id=thread_id, input="Which segment had the most sales?")
+        await invoke_agent(agent, thread_id=thread_id, input="List the top 5 countries that generated the most profit.")
+        await invoke_agent(
+            agent,
+            thread_id=thread_id,
+            input="Create a tab delimited file report of profit by each country per month.",
+        )
+    finally:
+        if agent is not None:
+            # delete the uploaded files one by one (a plain loop is clearer than a side-effect list comprehension)
+            for file_id in agent.code_interpreter_file_ids:
+                await agent.delete_file(file_id)
+            await agent.delete_thread(thread_id)
+            await agent.delete()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/samples/concepts/agents/mixed_chat_agents.py b/python/samples/concepts/agents/mixed_chat_agents.py
new file mode 100644
index 000000000000..493f5e70f457
--- /dev/null
+++ b/python/samples/concepts/agents/mixed_chat_agents.py
@@ -0,0 +1,89 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import asyncio
+
+from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent
+from semantic_kernel.agents.open_ai import OpenAIAssistantAgent
+from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy
+from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
+from semantic_kernel.contents.chat_message_content import ChatMessageContent
+from semantic_kernel.contents.utils.author_role import AuthorRole
+from semantic_kernel.kernel import Kernel
+
+#####################################################################
+# The following sample demonstrates how to create an OpenAI         #
+# assistant using either Azure OpenAI or OpenAI, a chat completion  #
+# agent and have them participate in a group chat to work towards   #
+# the user's requirement.
# +##################################################################### + + +class ApprovalTerminationStrategy(TerminationStrategy): + """A strategy for determining when an agent should terminate.""" + + async def should_agent_terminate(self, agent, history): + """Check if the agent should terminate.""" + return "approved" in history[-1].content.lower() + + +REVIEWER_NAME = "ArtDirector" +REVIEWER_INSTRUCTIONS = """ +You are an art director who has opinions about copywriting born of a love for David Ogilvy. +The goal is to determine if the given copy is acceptable to print. +If so, state that it is approved. Only include the word "approved" if it is so. +If not, provide insight on how to refine suggested copy without example. +""" + +COPYWRITER_NAME = "CopyWriter" +COPYWRITER_INSTRUCTIONS = """ +You are a copywriter with ten years of experience and are known for brevity and a dry humor. +The goal is to refine and decide on the single best copy as an expert in the field. +Only provide a single proposal per response. +You're laser focused on the goal at hand. +Don't waste time with chit chat. +Consider suggestions when refining an idea. +""" + + +def _create_kernel_with_chat_completion(service_id: str) -> Kernel: + kernel = Kernel() + kernel.add_service(AzureChatCompletion(service_id=service_id)) + return kernel + + +async def main(): + try: + agent_reviewer = ChatCompletionAgent( + service_id="artdirector", + kernel=_create_kernel_with_chat_completion("artdirector"), + name=REVIEWER_NAME, + instructions=REVIEWER_INSTRUCTIONS, + ) + + agent_writer = await OpenAIAssistantAgent.create( + service_id="copywriter", + kernel=Kernel(), + name=COPYWRITER_NAME, + instructions=COPYWRITER_INSTRUCTIONS, + ) + + chat = AgentGroupChat( + agents=[agent_writer, agent_reviewer], + termination_strategy=ApprovalTerminationStrategy(agents=[agent_reviewer], maximum_iterations=10), + ) + + input = "a slogan for a new line of electric cars." + + await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=input)) + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in chat.invoke(): + print(f"# {content.role} - {content.name or '*'}: '{content.content}'") + + print(f"# IS COMPLETE: {chat.is_complete}") + finally: + await agent_writer.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/concepts/agents/mixed_chat_files.py b/python/samples/concepts/agents/mixed_chat_files.py new file mode 100644 index 000000000000..b97cce8dd593 --- /dev/null +++ b/python/samples/concepts/agents/mixed_chat_files.py @@ -0,0 +1,95 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import os + +from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent +from semantic_kernel.agents.open_ai import OpenAIAssistantAgent +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +##################################################################### +# The following sample demonstrates how to create an OpenAI # +# assistant using either Azure OpenAI or OpenAI, a chat completion # +# agent and have them participate in a group chat working on # +# an uploaded file. 
#
+#####################################################################
+
+
+class ApprovalTerminationStrategy(TerminationStrategy):
+    """A strategy for determining when an agent should terminate."""
+
+    async def should_agent_terminate(self, agent, history):
+        """Check if the agent should terminate."""
+        return "approved" in history[-1].content.lower()
+
+
+SUMMARY_INSTRUCTIONS = "Summarize the entire conversation for the user in natural language."
+
+
+def _create_kernel_with_chat_completion(service_id: str) -> Kernel:
+    kernel = Kernel()
+    kernel.add_service(AzureChatCompletion(service_id=service_id))
+    # kernel.add_service(OpenAIChatCompletion(service_id=service_id))
+    return kernel
+
+
+async def invoke_agent(
+    chat: AgentGroupChat, agent: ChatCompletionAgent | OpenAIAssistantAgent, input: str | None = None
+) -> None:
+    """Invoke the agent with the user input."""
+    if input:
+        await chat.add_chat_message(message=ChatMessageContent(role=AuthorRole.USER, content=input))
+        print(f"# {AuthorRole.USER}: '{input}'")
+
+    async for content in chat.invoke(agent=agent):
+        print(f"# {content.role} - {content.name or '*'}: '{content.content}'")
+
+
+async def main():
+    try:
+        file_path = os.path.join(
+            os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
+            "resources",
+            "mixed_chat_files",
+            "user-context.txt",
+        )
+
+        analyst_agent = await OpenAIAssistantAgent.create(
+            service_id="analyst",
+            kernel=Kernel(),
+            enable_code_interpreter=True,
+            code_interpreter_filenames=[file_path],
+            name="AnalystAgent",
+        )
+
+        service_id = "summary"
+        summary_agent = ChatCompletionAgent(
+            service_id=service_id,
+            kernel=_create_kernel_with_chat_completion(service_id=service_id),
+            instructions=SUMMARY_INSTRUCTIONS,
+            name="SummaryAgent",
+        )
+
+        chat = AgentGroupChat()
+
+        await invoke_agent(
+            chat=chat,
+            agent=analyst_agent,
+            input="""
+            Create a tab delimited file report of the ordered (descending) frequency distribution
+            of words in the file 'user-context.txt' for any words used more than once.
+            """,
+        )
+        await invoke_agent(chat=chat, agent=summary_agent)
+    finally:
+        if analyst_agent is not None:
+            # delete the uploaded files one by one (a plain loop is clearer than a side-effect list comprehension)
+            for file_id in analyst_agent.code_interpreter_file_ids:
+                await analyst_agent.delete_file(file_id=file_id)
+            await analyst_agent.delete()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/samples/concepts/agents/mixed_chat_reset.py b/python/samples/concepts/agents/mixed_chat_reset.py
new file mode 100644
index 000000000000..2480358ac4da
--- /dev/null
+++ b/python/samples/concepts/agents/mixed_chat_reset.py
@@ -0,0 +1,83 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import asyncio
+from typing import TYPE_CHECKING
+
+from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent
+from semantic_kernel.agents.open_ai import OpenAIAssistantAgent
+from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
+from semantic_kernel.contents.chat_message_content import ChatMessageContent
+from semantic_kernel.contents.utils.author_role import AuthorRole
+from semantic_kernel.kernel import Kernel
+
+if TYPE_CHECKING:
+    from semantic_kernel.agents.agent import Agent
+
+#####################################################################
+# The following sample demonstrates how to create an OpenAI         #
+# assistant using either Azure OpenAI or OpenAI, a chat completion  #
+# agent and have them participate in a group chat to work towards   #
+# the user's requirement.
It also demonstrates how the underlying # +# agent reset method is used to clear the current state of the chat # +##################################################################### + +INSTRUCTIONS = """ +The user may either provide information or query on information previously provided. +If the query does not correspond with information provided, inform the user that their query cannot be answered. +""" + + +def _create_kernel_with_chat_completion(service_id: str) -> Kernel: + kernel = Kernel() + kernel.add_service(AzureChatCompletion(service_id=service_id)) + return kernel + + +async def main(): + try: + assistant_agent = await OpenAIAssistantAgent.create( + service_id="copywriter", + kernel=Kernel(), + name=f"{OpenAIAssistantAgent.__name__}", + instructions=INSTRUCTIONS, + ) + + chat_agent = ChatCompletionAgent( + service_id="chat", + kernel=_create_kernel_with_chat_completion("chat"), + name=f"{ChatCompletionAgent.__name__}", + instructions=INSTRUCTIONS, + ) + + chat = AgentGroupChat() + + async def invoke_agent(agent: "Agent", input: str | None = None): + if input is not None: + await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=input)) + print(f"\n{AuthorRole.USER}: '{input}'") + + async for message in chat.invoke(agent=agent): + if message.content is not None: + print(f"\n# {message.role} - {message.name or '*'}: '{message.content}'") + + await invoke_agent(agent=assistant_agent, input="What is my favorite color?") + await invoke_agent(agent=chat_agent) + + await invoke_agent(agent=assistant_agent, input="I like green.") + await invoke_agent(agent=chat_agent) + + await invoke_agent(agent=assistant_agent, input="What is my favorite color?") + await invoke_agent(agent=chat_agent) + + print("\nResetting chat...") + await chat.reset() + + await invoke_agent(agent=assistant_agent, input="What is my favorite color?") + await invoke_agent(agent=chat_agent) + finally: + await chat.reset() + await assistant_agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/concepts/auto_function_calling/chat_gpt_api_function_calling.py b/python/samples/concepts/auto_function_calling/chat_gpt_api_function_calling.py index a8ab7d64e290..f0381c1048ac 100644 --- a/python/samples/concepts/auto_function_calling/chat_gpt_api_function_calling.py +++ b/python/samples/concepts/auto_function_calling/chat_gpt_api_function_calling.py @@ -6,6 +6,7 @@ from typing import TYPE_CHECKING from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAIChatPromptExecutionSettings from semantic_kernel.contents import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent @@ -82,7 +83,7 @@ max_tokens=2000, temperature=0.7, top_p=0.8, - function_choice_behavior="auto", + function_choice_behavior=FunctionChoiceBehavior.Auto(auto_invoke=True), ) history = ChatHistory() diff --git a/python/samples/concepts/chat_completion/chat_anthropic_api.py b/python/samples/concepts/chat_completion/chat_anthropic_api.py new file mode 100644 index 000000000000..4cfb4e277b1f --- /dev/null +++ b/python/samples/concepts/chat_completion/chat_anthropic_api.py @@ -0,0 +1,85 @@ +# Copyright (c) Microsoft. All rights reserved. 
+
+import asyncio
+
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai.anthropic import AnthropicChatCompletion
+from semantic_kernel.contents import ChatHistory
+
+system_message = """
+You are a chat bot. Your name is Mosscap and
+you have one goal: figure out what people need.
+Your full name, should you need to know it, is
+Splendid Speckled Mosscap. You communicate
+effectively, but you tend to answer with long
+flowery prose.
+"""
+
+kernel = Kernel()
+
+service_id = "anthropic-chat"
+kernel.add_service(AnthropicChatCompletion(service_id=service_id, ai_model_id="claude-3-opus-20240229"))
+
+settings = kernel.get_prompt_execution_settings_from_service_id(service_id)
+settings.max_tokens = 2000
+settings.temperature = 0.7
+settings.top_p = 0.8
+
+chat_function = kernel.add_function(
+    plugin_name="ChatBot",
+    function_name="Chat",
+    prompt="{{$chat_history}}{{$user_input}}",
+    template_format="semantic-kernel",
+    prompt_execution_settings=settings,
+)
+
+chat_history = ChatHistory(system_message=system_message)
+chat_history.add_user_message("Hi there, who are you?")
+chat_history.add_assistant_message("I am Mosscap, a chat bot. I'm trying to figure out what people need")
+
+
+async def chat() -> bool:
+    try:
+        user_input = input("User:> ")
+    except KeyboardInterrupt:
+        print("\n\nExiting chat...")
+        return False
+    except EOFError:
+        print("\n\nExiting chat...")
+        return False
+
+    if user_input == "exit":
+        print("\n\nExiting chat...")
+        return False
+
+    stream = True
+    if stream:
+        answer = kernel.invoke_stream(
+            chat_function,
+            user_input=user_input,
+            chat_history=chat_history,
+        )
+        print("Mosscap:> ", end="")
+        streamed_response = ""
+        async for message in answer:
+            streamed_response += str(message[0])
+            print(str(message[0]), end="")
+        print("\n")
+        # keep the conversation history up to date in the streaming path as well
+        chat_history.add_user_message(user_input)
+        chat_history.add_assistant_message(streamed_response)
+        return True
+    answer = await kernel.invoke(
+        chat_function,
+        user_input=user_input,
+        chat_history=chat_history,
+    )
+    print(f"Mosscap:> {answer}")
+    chat_history.add_user_message(user_input)
+    chat_history.add_assistant_message(str(answer))
+    return True
+
+
+async def main() -> None:
+    chatting = True
+    while chatting:
+        chatting = await chat()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/samples/concepts/filtering/auto_function_invoke_filters.py b/python/samples/concepts/filtering/auto_function_invoke_filters.py
index f25b9a305af9..bf5f7e358716 100644
--- a/python/samples/concepts/filtering/auto_function_invoke_filters.py
+++ b/python/samples/concepts/filtering/auto_function_invoke_filters.py
@@ -9,6 +9,7 @@
 from semantic_kernel.contents import ChatHistory
 from semantic_kernel.contents.chat_message_content import ChatMessageContent
 from semantic_kernel.contents.function_call_content import FunctionCallContent
+from semantic_kernel.contents.function_result_content import FunctionResultContent
 from semantic_kernel.core_plugins import MathPlugin, TimePlugin
 from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
     AutoFunctionInvocationContext,
@@ -93,13 +94,19 @@ async def auto_function_invocation_filter(context: AutoFunctionInvocationContext
     print(f"Number of function calls: {len(function_calls)}")
     # if we don't call next, it will skip this function, and go to the next one
     await next(context)
+    #############################
+    # Note: to return the function results unaltered, uncomment the `context.terminate = True` line below and
+    # comment out the block from `result = context.function_result` through the final `context.terminate = True`.
+ # context.terminate = True + ############################# result = context.function_result - for fc in function_calls: - if fc.plugin_name == "math": - context.function_result = FunctionResult( - function=result.function, value="Stop trying to ask me to do math, I don't like it!" - ) - context.terminate = True + if context.function.plugin_name == "math": + print("Altering the Math plugin") + context.function_result = FunctionResult( + function=result.function, + value="Stop trying to ask me to do math, I don't like it!", + ) + context.terminate = True def print_tool_calls(message: ChatMessageContent) -> None: @@ -139,16 +146,26 @@ async def chat() -> bool: result = await kernel.invoke(chat_function, arguments=arguments) - # If tools are used, and auto invoke tool calls is False, the response will be of type - # ChatMessageContent with information about the tool calls, which need to be sent - # back to the model to get the final response. - if isinstance(result.value[0].items[0], FunctionCallContent): - print_tool_calls(result.value[0]) - return True - history.add_user_message(user_input) - history.add_assistant_message(str(result)) - print(f"Mosscap:> {result}") + + # Check if any result.value is a FunctionResultContent + if any(isinstance(item, FunctionResultContent) for item in result.value[0].items): + for fr in result.value[0].items: + if isinstance(fr, FunctionResultContent): + print(f"Mosscap:> {fr.result} for function: {fr.name}") + history.add_assistant_message(str(fr.result)) + elif any(isinstance(item, FunctionCallContent) for item in result.value[0].items): + # If tools are used, and auto invoke tool calls is False, the response will be of type + # ChatMessageContent with information about the tool calls, which need to be sent + # back to the model to get the final response. + for fcc in result.value[0].items: + if isinstance(fcc, FunctionCallContent): + print_tool_calls(fcc) + history.add_assistant_message(str(result)) + else: + print(f"Mosscap:> {result}") + history.add_assistant_message(str(result)) + return True diff --git a/python/samples/concepts/images/image_generation.py b/python/samples/concepts/images/image_generation.py new file mode 100644 index 000000000000..f013c0598900 --- /dev/null +++ b/python/samples/concepts/images/image_generation.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft. All rights reserved. 
+
+import asyncio
+from urllib.request import urlopen
+
+try:
+    from PIL import Image
+
+    pil_available = True
+except ImportError:
+    pil_available = False
+
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAITextToImage
+from semantic_kernel.contents import ChatHistory, ChatMessageContent, ImageContent, TextContent
+from semantic_kernel.functions.kernel_arguments import KernelArguments
+
+
+async def main():
+    kernel = Kernel()
+    dalle3 = OpenAITextToImage()
+    kernel.add_service(dalle3)
+    kernel.add_service(OpenAIChatCompletion(service_id="default"))
+
+    image = await dalle3.generate_image(
+        description="a painting of a flower vase", width=1024, height=1024, quality="hd", style="vivid"
+    )
+    print(image)
+    if pil_available:
+        img = Image.open(urlopen(image))  # nosec
+        img.show()
+
+    result = await kernel.invoke_prompt(
+        prompt="{{$chat_history}}",
+        arguments=KernelArguments(
+            chat_history=ChatHistory(
+                messages=[
+                    ChatMessageContent(
+                        role="user",
+                        items=[
+                            TextContent(text="What is in this image?"),
+                            ImageContent(uri=image),
+                        ],
+                    )
+                ]
+            )
+        ),
+    )
+    print(result)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/python/samples/concepts/memory/data_models.py b/python/samples/concepts/memory/data_models.py
new file mode 100644
index 000000000000..0f6b176d99b0
--- /dev/null
+++ b/python/samples/concepts/memory/data_models.py
@@ -0,0 +1,160 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from dataclasses import dataclass, field
+from typing import Annotated, Any
+from uuid import uuid4
+
+from pandas import DataFrame
+from pydantic import Field
+
+from semantic_kernel.data import (
+    VectorStoreRecordDataField,
+    VectorStoreRecordDefinition,
+    VectorStoreRecordKeyField,
+    VectorStoreRecordVectorField,
+    vectorstoremodel,
+)
+from semantic_kernel.kernel_pydantic import KernelBaseModel
+
+# This concept shows the different ways you can create a vector store data model
+# using dataclasses, Pydantic, and Python classes,
+# as well as using types like pandas DataFrames.
+
+# There are a number of universal rules for these data models:
+# they must specify the type of each field through the annotation (or the definition),
+# there must be at least one field of type VectorStoreRecordKeyField,
+# if you set the embedding_property_name in a VectorStoreRecordDataField, that field must exist and be a vector field,
+# and an unannotated field is allowed but must have a default value.
+
+# The purpose of these models is to be what you pass to and get back from a vector store.
+# There may be limitations to the data types that a given vector store can handle,
+# so not every store will be able to handle exactly the same model.
+# For instance, some stores only allow a string as the key field, while others allow both str and int,
+# so defining the key as an int might make some stores unusable.
+
+# The decorator takes the class and pulls out the fields and annotations to create a definition
+# of type VectorStoreRecordDefinition.
+# This definition is what tells the vector store how to handle the data model.
+
+# You can also create the definition yourself and pass it to the vector stores together with a standard type,
+# like a dict or a list,
+# or you can use the definition in container mode with something like a pandas DataFrame.
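+
+# (Illustrative sketch, not part of the original sample set: the name
+# data_model_definition_dict is made up here, but the field classes are the
+# same ones imported at the top of this file. It shows the "definition plus a
+# standard type" approach mentioned above, where a record is a plain dict such
+# as {"key": "1", "content": "some text", "vector": [0.1, 0.2, 0.3]}.)
+data_model_definition_dict = VectorStoreRecordDefinition(
+    fields={
+        "key": VectorStoreRecordKeyField(property_type="str"),
+        "content": VectorStoreRecordDataField(
+            property_type="str", has_embedding=True, embedding_property_name="vector"
+        ),
+        "vector": VectorStoreRecordVectorField(property_type="list[float]"),
+    },
+)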
+
+
+# Data model using built-in Python dataclasses
+@vectorstoremodel
+@dataclass
+class DataModelDataclass:
+    vector: Annotated[list[float], VectorStoreRecordVectorField]
+    key: Annotated[str, VectorStoreRecordKeyField()] = field(default_factory=lambda: str(uuid4()))
+    content: Annotated[str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector")] = (
+        "content1"
+    )
+    other: str | None = None
+
+
+# Data model using Pydantic BaseModels
+@vectorstoremodel
+class DataModelPydantic(KernelBaseModel):
+    vector: Annotated[list[float], VectorStoreRecordVectorField]
+    key: Annotated[str, VectorStoreRecordKeyField()] = Field(default_factory=lambda: str(uuid4()))
+    content: Annotated[str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector")] = (
+        "content1"
+    )
+    other: str | None = None
+
+
+# Data model using Pydantic BaseModels with mixed annotations (from pydantic and SK)
+@vectorstoremodel
+class DataModelPydanticComplex(KernelBaseModel):
+    vector: Annotated[list[float], VectorStoreRecordVectorField]
+    key: Annotated[str, Field(default_factory=lambda: str(uuid4())), VectorStoreRecordKeyField()]
+    content: Annotated[str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector")] = (
+        "content1"
+    )
+    other: str | None = None
+
+
+# Data model using Python classes
+# This one includes custom serialize and deserialize methods
+@vectorstoremodel
+class DataModelPython:
+    def __init__(
+        self,
+        vector: Annotated[list[float], VectorStoreRecordVectorField],
+        key: Annotated[str, VectorStoreRecordKeyField] = None,
+        content: Annotated[
+            str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector")
+        ] = "content1",
+        other: str | None = None,
+    ):
+        self.vector = vector
+        self.other = other
+        self.key = key or str(uuid4())
+        self.content = content
+
+    def __str__(self) -> str:
+        return f"DataModelPython(vector={self.vector}, key={self.key}, content={self.content}, other={self.other})"
+
+    def serialize(self) -> dict[str, Any]:
+        return {
+            "vector": self.vector,
+            "key": self.key,
+            "content": self.content,
+        }
+
+    @classmethod
+    def deserialize(cls, obj: dict[str, Any]) -> "DataModelPython":
+        return cls(
+            vector=obj["vector"],
+            key=obj["key"],
+            content=obj["content"],
+        )
+
+
+# Data model definition for use with Pandas
+# Note the container_mode flag, which makes sure that returned records are in a container
+# even when requesting a batch of records.
+# There are also to_dict and from_dict methods, which convert the data model to and from a dict;
+# these should be specific to the type used. If dict is used as the type, they can be left off.
+data_model_definition_pandas = VectorStoreRecordDefinition( + fields={ + "vector": VectorStoreRecordVectorField(property_type="list[float]"), + "key": VectorStoreRecordKeyField(property_type="str"), + "content": VectorStoreRecordDataField( + property_type="str", has_embedding=True, embedding_property_name="vector" + ), + }, + container_mode=True, + to_dict=lambda record, **_: record.to_dict(orient="records"), + from_dict=lambda records, **_: DataFrame(records), +) + + +if __name__ == "__main__": + data_item1 = DataModelDataclass(content="Hello, world!", vector=[1.0, 2.0, 3.0], other=None) + data_item2 = DataModelPydantic(content="Hello, world!", vector=[1.0, 2.0, 3.0], other=None) + data_item3 = DataModelPydanticComplex(content="Hello, world!", vector=[1.0, 2.0, 3.0], other=None) + data_item4 = DataModelPython(content="Hello, world!", vector=[1.0, 2.0, 3.0], other=None) + print("Example records:") + print(f"DataClass:\n {data_item1}", end="\n\n") + print(f"Pydantic:\n {data_item2}", end="\n\n") + print(f"Pydantic with annotations:\n {data_item3}", end="\n\n") + print(f"Python:\n {data_item4}", end="\n\n") + + print("Item definitions:") + print(f"DataClass:\n {data_item1.__kernel_vectorstoremodel_definition__}", end="\n\n") + print(f"Pydantic:\n {data_item2.__kernel_vectorstoremodel_definition__}", end="\n\n") + print(f"Pydantic with annotations:\n {data_item3.__kernel_vectorstoremodel_definition__}", end="\n\n") + print(f"Python:\n {data_item4.__kernel_vectorstoremodel_definition__}", end="\n\n") + print(f"Definition for use with Pandas:\n {data_model_definition_pandas}", end="\n\n") + if ( + data_item1.__kernel_vectorstoremodel_definition__.fields + == data_item2.__kernel_vectorstoremodel_definition__.fields + == data_item3.__kernel_vectorstoremodel_definition__.fields + == data_item4.__kernel_vectorstoremodel_definition__.fields + == data_model_definition_pandas.fields + ): + print("All data models are the same") + else: + print("Data models are not the same") diff --git a/python/samples/concepts/memory/new_memory.py b/python/samples/concepts/memory/new_memory.py new file mode 100644 index 000000000000..7ab149fde00a --- /dev/null +++ b/python/samples/concepts/memory/new_memory.py @@ -0,0 +1,130 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ + +from dataclasses import dataclass, field +from typing import Annotated +from uuid import uuid4 + +import numpy as np + +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.open_ai import OpenAIEmbeddingPromptExecutionSettings, OpenAITextEmbedding +from semantic_kernel.connectors.memory.azure_ai_search import AzureAISearchCollection +from semantic_kernel.connectors.memory.qdrant import QdrantCollection +from semantic_kernel.connectors.memory.redis import RedisHashsetCollection, RedisJsonCollection +from semantic_kernel.connectors.memory.volatile import VolatileCollection +from semantic_kernel.data import ( + VectorStoreRecordCollection, + VectorStoreRecordDataField, + VectorStoreRecordKeyField, + VectorStoreRecordUtils, + VectorStoreRecordVectorField, + vectorstoremodel, +) + + +@vectorstoremodel +@dataclass +class MyDataModelArray: + vector: Annotated[ + np.ndarray | None, + VectorStoreRecordVectorField( + embedding_settings={"embedding": OpenAIEmbeddingPromptExecutionSettings(dimensions=1536)}, + index_kind="hnsw", + dimensions=1536, + distance_function="cosine", + property_type="float", + serialize_function=np.ndarray.tolist, + deserialize_function=np.array, + ), + ] = None + other: str | None = None + id: Annotated[str, VectorStoreRecordKeyField()] = field(default_factory=lambda: str(uuid4())) + content: Annotated[ + str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector", property_type="str") + ] = "content1" + + +@vectorstoremodel +@dataclass +class MyDataModelList: + vector: Annotated[ + list[float] | None, + VectorStoreRecordVectorField( + embedding_settings={"embedding": OpenAIEmbeddingPromptExecutionSettings(dimensions=1536)}, + index_kind="hnsw", + dimensions=1536, + distance_function="cosine", + property_type="float", + ), + ] = None + other: str | None = None + id: Annotated[str, VectorStoreRecordKeyField()] = field(default_factory=lambda: str(uuid4())) + content: Annotated[ + str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector", property_type="str") + ] = "content1" + + +# configuration +# specify which store (redis_json, redis_hash, qdrant, Azure AI Search or volatile) to use +# and which model (vectors as list or as numpy arrays) +store = "volatile" +collection_name = "test" +MyDataModel = MyDataModelArray + +stores: dict[str, VectorStoreRecordCollection] = { + "ai_search": AzureAISearchCollection[MyDataModel]( + data_model_type=MyDataModel, + ), + "redis_json": RedisJsonCollection[MyDataModel]( + data_model_type=MyDataModel, + collection_name=collection_name, + prefix_collection_name_to_key_names=True, + ), + "redis_hashset": RedisHashsetCollection[MyDataModel]( + data_model_type=MyDataModel, + collection_name=collection_name, + prefix_collection_name_to_key_names=True, + ), + "qdrant": QdrantCollection[MyDataModel]( + data_model_type=MyDataModel, collection_name=collection_name, prefer_grpc=True, named_vectors=False + ), + "volatile": VolatileCollection[MyDataModel]( + data_model_type=MyDataModel, + collection_name=collection_name, + ), +} + + +async def main(): + kernel = Kernel() + service_id = "embedding" + ai_model_id = "text-embedding-3-small" + kernel.add_service(OpenAITextEmbedding(service_id=service_id, ai_model_id=ai_model_id)) + async with stores[store] as record_store: + await record_store.create_collection_if_not_exists() + + record1 = MyDataModel(content="My text", id="e6103c03-487f-4d7d-9c23-4723651c17f4") + record2 = MyDataModel(content="My other text", 
id="09caec77-f7e1-466a-bcec-f1d51c5b15be") + + records = await VectorStoreRecordUtils(kernel).add_vector_to_records( + [record1, record2], data_model_type=MyDataModel + ) + keys = await record_store.upsert_batch(records) + print(f"upserted {keys=}") + + results = await record_store.get_batch([record1.id, record2.id]) + if results: + for result in results: + print(f"found {result.id=}") + print(f"{result.content=}") + if result.vector is not None: + print(f"{result.vector[:5]=}") + else: + print("not found") + + +if __name__ == "__main__": + import asyncio + + asyncio.run(main()) diff --git a/python/samples/concepts/memory/pandas_memory.py b/python/samples/concepts/memory/pandas_memory.py new file mode 100644 index 000000000000..e04a1ac3950f --- /dev/null +++ b/python/samples/concepts/memory/pandas_memory.py @@ -0,0 +1,75 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +from uuid import uuid4 + +import pandas as pd + +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.open_ai import ( + OpenAIEmbeddingPromptExecutionSettings, + OpenAITextEmbedding, +) +from semantic_kernel.connectors.memory.azure_ai_search import AzureAISearchCollection +from semantic_kernel.data import ( + VectorStoreRecordDataField, + VectorStoreRecordDefinition, + VectorStoreRecordKeyField, + VectorStoreRecordUtils, + VectorStoreRecordVectorField, +) + +model_fields = VectorStoreRecordDefinition( + container_mode=True, + fields={ + "content": VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector"), + "id": VectorStoreRecordKeyField(), + "vector": VectorStoreRecordVectorField( + embedding_settings={"embedding": OpenAIEmbeddingPromptExecutionSettings(dimensions=1536)} + ), + }, + to_dict=lambda record, **_: record.to_dict(orient="records"), + from_dict=lambda records, **_: pd.DataFrame(records), +) + + +async def main(): + # setup the kernel + kernel = Kernel() + kernel.add_service(OpenAITextEmbedding(service_id="embedding", ai_model_id="text-embedding-3-small")) + + # create the record collection + record_collection = AzureAISearchCollection[pd.DataFrame]( + data_model_type=pd.DataFrame, + data_model_definition=model_fields, + ) + # create some records + records = [ + {"id": str(uuid4()), "content": "my dict text", "vector": None}, + {"id": str(uuid4()), "content": "my second text", "vector": None}, + ] + + # create the dataframe and add the embeddings + df = pd.DataFrame(records) + df = await VectorStoreRecordUtils(kernel).add_vector_to_records(df, None, data_model_definition=model_fields) + print("Records with embeddings:") + print(df.shape) + print(df.head(5)) + + # upsert the records (for a container, upsert and upsert_batch are equivalent) + await record_collection.upsert_batch(df) + + # retrieve a record + result = await record_collection.get(records[0]["id"]) + print("Retrieved records:") + print(result.shape) + print(result.head(5)) + + # explicit cleanup, usually not needed, but a script like this + # closes so fast that the async close triggered by delete may not finish on time + del record_collection + await asyncio.sleep(1) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/concepts/model_as_a_service/mmlu_model_eval.py b/python/samples/concepts/model_as_a_service/mmlu_model_eval.py index d11ed9a55bbd..733323111f49 100644 --- a/python/samples/concepts/model_as_a_service/mmlu_model_eval.py +++ b/python/samples/concepts/model_as_a_service/mmlu_model_eval.py @@ -25,6 +25,8 @@ def setup_kernel(): """Set up the kernel 
with AI services.""" kernel = Kernel() + + # Add multiple AI services to the kernel kernel.add_service( AzureAIInferenceChatCompletion( ai_model_id="Llama3-8b", @@ -47,14 +49,21 @@ def setup_kernel(): ) ) - # Add the plugin + # Add the plugin to the kernel kernel.add_plugin(MMLUPlugin(), "MMLUPlugin") return kernel def load_mmlu_dataset(subjects: list[str]) -> dict[str, Dataset]: - """Load a dataset.""" + """Load the MMLU dataset for given subjects. + + Args: + subjects (list[str]): List of subjects to load the dataset for. + + Returns: + dict[str, Dataset]: A dictionary with subjects as keys and corresponding datasets as values. + """ login() datasets = {} @@ -86,7 +95,7 @@ async def evaluate( """Run a sample and return if the answer was correct. Args: - sample (str): The sample containing the question and the correct answer. + sample (dict): The sample containing the question and the correct answer. subject (str): The subject of the sample. kernel (Kernel): The kernel. service_id (str): The service id. @@ -94,7 +103,10 @@ async def evaluate( Returns: bool: Whether the answer was correct. """ + # Initialize chat history with a system message chat_history = ChatHistory(system_message=formatted_system_message(subject)) + + # Add the user message to the chat history chat_history.add_user_message( formatted_question( sample["question"], @@ -105,7 +117,10 @@ async def evaluate( ), ) + # Determine the correct answer correct_answer = expected_answer_to_letter(sample["answer"]) + + # Get the chat response from the AI service response = await kernel.get_service(service_id).get_chat_message_content( chat_history, settings=kernel.get_prompt_execution_settings_from_service_id(service_id), @@ -114,10 +129,12 @@ async def evaluate( if not response: return False + # Compare the AI response with the correct answer return response.content.strip() == correct_answer async def main(): + # Load the MMLU dataset for specified subjects datasets = load_mmlu_dataset( [ "college_computer_science", @@ -129,17 +146,21 @@ async def main(): # See here for a full list of subjects: https://huggingface.co/datasets/cais/mmlu/viewer ] ) + + # Set up the kernel with AI services kernel = setup_kernel() ai_services = kernel.get_services_by_type(ChatCompletionClientBase).keys() - # Total number of samples + # Calculate total number of samples totals = sum([datasets[subject].num_rows for subject in datasets]) - # Total number of correct answers by each AI service + + # Initialize counters for correct answers by each AI service total_corrects = {ai_service: 0.0 for ai_service in ai_services} for subject in datasets: - # Number of correct answers by each AI service for this subject corrects = {ai_service: 0.0 for ai_service in ai_services} print(f"Evaluating {subject}...") + + # Evaluate each sample in the dataset for sample in tqdm(datasets[subject]): for ai_service in ai_services: kernel_arguments = KernelArguments( @@ -156,10 +177,13 @@ async def main(): corrects[ai_service] += 1 print(f"Finished evaluating {subject}.") + + # Print accuracy for each AI service for ai_service in ai_services: total_corrects[ai_service] += corrects[ai_service] print(f"Accuracy of {ai_service}: {corrects[ai_service] / datasets[subject].num_rows * 100:.2f}%.") + # Print overall results print("Overall results:") for ai_service in ai_services: print(f"Overall Accuracy of {ai_service}: {total_corrects[ai_service] / totals * 100:.2f}%.") diff --git a/python/samples/concepts/resources/agent_assistant_file_manipulation/sales.csv 
b/python/samples/concepts/resources/agent_assistant_file_manipulation/sales.csv new file mode 100644 index 000000000000..da217c62db3e --- /dev/null +++ b/python/samples/concepts/resources/agent_assistant_file_manipulation/sales.csv @@ -0,0 +1,701 @@ +Segment,Country,Product,Units Sold,Sale Price,Gross Sales,Discounts,Sales,COGS,Profit,Date,Month Number,Month Name,Year +Government,Canada,Carretera,1618.5,20.00,32370.00,0.00,32370.00,16185.00,16185.00,1/1/2014,1,January,2014 +Government,Germany,Carretera,1321,20.00,26420.00,0.00,26420.00,13210.00,13210.00,1/1/2014,1,January,2014 +Midmarket,France,Carretera,2178,15.00,32670.00,0.00,32670.00,21780.00,10890.00,6/1/2014,6,June,2014 +Midmarket,Germany,Carretera,888,15.00,13320.00,0.00,13320.00,8880.00,4440.00,6/1/2014,6,June,2014 +Midmarket,Mexico,Carretera,2470,15.00,37050.00,0.00,37050.00,24700.00,12350.00,6/1/2014,6,June,2014 +Government,Germany,Carretera,1513,350.00,529550.00,0.00,529550.00,393380.00,136170.00,12/1/2014,12,December,2014 +Midmarket,Germany,Montana,921,15.00,13815.00,0.00,13815.00,9210.00,4605.00,3/1/2014,3,March,2014 +Channel Partners,Canada,Montana,2518,12.00,30216.00,0.00,30216.00,7554.00,22662.00,6/1/2014,6,June,2014 +Government,France,Montana,1899,20.00,37980.00,0.00,37980.00,18990.00,18990.00,6/1/2014,6,June,2014 +Channel Partners,Germany,Montana,1545,12.00,18540.00,0.00,18540.00,4635.00,13905.00,6/1/2014,6,June,2014 +Midmarket,Mexico,Montana,2470,15.00,37050.00,0.00,37050.00,24700.00,12350.00,6/1/2014,6,June,2014 +Enterprise,Canada,Montana,2665.5,125.00,333187.50,0.00,333187.50,319860.00,13327.50,7/1/2014,7,July,2014 +Small Business,Mexico,Montana,958,300.00,287400.00,0.00,287400.00,239500.00,47900.00,8/1/2014,8,August,2014 +Government,Germany,Montana,2146,7.00,15022.00,0.00,15022.00,10730.00,4292.00,9/1/2014,9,September,2014 +Enterprise,Canada,Montana,345,125.00,43125.00,0.00,43125.00,41400.00,1725.00,10/1/2013,10,October,2013 +Midmarket,United States of America,Montana,615,15.00,9225.00,0.00,9225.00,6150.00,3075.00,12/1/2014,12,December,2014 +Government,Canada,Paseo,292,20.00,5840.00,0.00,5840.00,2920.00,2920.00,2/1/2014,2,February,2014 +Midmarket,Mexico,Paseo,974,15.00,14610.00,0.00,14610.00,9740.00,4870.00,2/1/2014,2,February,2014 +Channel Partners,Canada,Paseo,2518,12.00,30216.00,0.00,30216.00,7554.00,22662.00,6/1/2014,6,June,2014 +Government,Germany,Paseo,1006,350.00,352100.00,0.00,352100.00,261560.00,90540.00,6/1/2014,6,June,2014 +Channel Partners,Germany,Paseo,367,12.00,4404.00,0.00,4404.00,1101.00,3303.00,7/1/2014,7,July,2014 +Government,Mexico,Paseo,883,7.00,6181.00,0.00,6181.00,4415.00,1766.00,8/1/2014,8,August,2014 +Midmarket,France,Paseo,549,15.00,8235.00,0.00,8235.00,5490.00,2745.00,9/1/2013,9,September,2013 +Small Business,Mexico,Paseo,788,300.00,236400.00,0.00,236400.00,197000.00,39400.00,9/1/2013,9,September,2013 +Midmarket,Mexico,Paseo,2472,15.00,37080.00,0.00,37080.00,24720.00,12360.00,9/1/2014,9,September,2014 +Government,United States of America,Paseo,1143,7.00,8001.00,0.00,8001.00,5715.00,2286.00,10/1/2014,10,October,2014 +Government,Canada,Paseo,1725,350.00,603750.00,0.00,603750.00,448500.00,155250.00,11/1/2013,11,November,2013 +Channel Partners,United States of America,Paseo,912,12.00,10944.00,0.00,10944.00,2736.00,8208.00,11/1/2013,11,November,2013 +Midmarket,Canada,Paseo,2152,15.00,32280.00,0.00,32280.00,21520.00,10760.00,12/1/2013,12,December,2013 +Government,Canada,Paseo,1817,20.00,36340.00,0.00,36340.00,18170.00,18170.00,12/1/2014,12,December,2014 
+Government,Germany,Paseo,1513,350.00,529550.00,0.00,529550.00,393380.00,136170.00,12/1/2014,12,December,2014 +Government,Mexico,Velo,1493,7.00,10451.00,0.00,10451.00,7465.00,2986.00,1/1/2014,1,January,2014 +Enterprise,France,Velo,1804,125.00,225500.00,0.00,225500.00,216480.00,9020.00,2/1/2014,2,February,2014 +Channel Partners,Germany,Velo,2161,12.00,25932.00,0.00,25932.00,6483.00,19449.00,3/1/2014,3,March,2014 +Government,Germany,Velo,1006,350.00,352100.00,0.00,352100.00,261560.00,90540.00,6/1/2014,6,June,2014 +Channel Partners,Germany,Velo,1545,12.00,18540.00,0.00,18540.00,4635.00,13905.00,6/1/2014,6,June,2014 +Enterprise,United States of America,Velo,2821,125.00,352625.00,0.00,352625.00,338520.00,14105.00,8/1/2014,8,August,2014 +Enterprise,Canada,Velo,345,125.00,43125.00,0.00,43125.00,41400.00,1725.00,10/1/2013,10,October,2013 +Small Business,Canada,VTT,2001,300.00,600300.00,0.00,600300.00,500250.00,100050.00,2/1/2014,2,February,2014 +Channel Partners,Germany,VTT,2838,12.00,34056.00,0.00,34056.00,8514.00,25542.00,4/1/2014,4,April,2014 +Midmarket,France,VTT,2178,15.00,32670.00,0.00,32670.00,21780.00,10890.00,6/1/2014,6,June,2014 +Midmarket,Germany,VTT,888,15.00,13320.00,0.00,13320.00,8880.00,4440.00,6/1/2014,6,June,2014 +Government,France,VTT,1527,350.00,534450.00,0.00,534450.00,397020.00,137430.00,9/1/2013,9,September,2013 +Small Business,France,VTT,2151,300.00,645300.00,0.00,645300.00,537750.00,107550.00,9/1/2014,9,September,2014 +Government,Canada,VTT,1817,20.00,36340.00,0.00,36340.00,18170.00,18170.00,12/1/2014,12,December,2014 +Government,France,Amarilla,2750,350.00,962500.00,0.00,962500.00,715000.00,247500.00,2/1/2014,2,February,2014 +Channel Partners,United States of America,Amarilla,1953,12.00,23436.00,0.00,23436.00,5859.00,17577.00,4/1/2014,4,April,2014 +Enterprise,Germany,Amarilla,4219.5,125.00,527437.50,0.00,527437.50,506340.00,21097.50,4/1/2014,4,April,2014 +Government,France,Amarilla,1899,20.00,37980.00,0.00,37980.00,18990.00,18990.00,6/1/2014,6,June,2014 +Government,Germany,Amarilla,1686,7.00,11802.00,0.00,11802.00,8430.00,3372.00,7/1/2014,7,July,2014 +Channel Partners,United States of America,Amarilla,2141,12.00,25692.00,0.00,25692.00,6423.00,19269.00,8/1/2014,8,August,2014 +Government,United States of America,Amarilla,1143,7.00,8001.00,0.00,8001.00,5715.00,2286.00,10/1/2014,10,October,2014 +Midmarket,United States of America,Amarilla,615,15.00,9225.00,0.00,9225.00,6150.00,3075.00,12/1/2014,12,December,2014 +Government,France,Paseo,3945,7.00,27615.00,276.15,27338.85,19725.00,7613.85,1/1/2014,1,January,2014 +Midmarket,France,Paseo,2296,15.00,34440.00,344.40,34095.60,22960.00,11135.60,2/1/2014,2,February,2014 +Government,France,Paseo,1030,7.00,7210.00,72.10,7137.90,5150.00,1987.90,5/1/2014,5,May,2014 +Government,France,Velo,639,7.00,4473.00,44.73,4428.27,3195.00,1233.27,11/1/2014,11,November,2014 +Government,Canada,VTT,1326,7.00,9282.00,92.82,9189.18,6630.00,2559.18,3/1/2014,3,March,2014 +Channel Partners,United States of America,Carretera,1858,12.00,22296.00,222.96,22073.04,5574.00,16499.04,2/1/2014,2,February,2014 +Government,Mexico,Carretera,1210,350.00,423500.00,4235.00,419265.00,314600.00,104665.00,3/1/2014,3,March,2014 +Government,United States of America,Carretera,2529,7.00,17703.00,177.03,17525.97,12645.00,4880.97,7/1/2014,7,July,2014 +Channel Partners,Canada,Carretera,1445,12.00,17340.00,173.40,17166.60,4335.00,12831.60,9/1/2014,9,September,2014 +Enterprise,United States of 
America,Carretera,330,125.00,41250.00,412.50,40837.50,39600.00,1237.50,9/1/2013,9,September,2013 +Channel Partners,France,Carretera,2671,12.00,32052.00,320.52,31731.48,8013.00,23718.48,9/1/2014,9,September,2014 +Channel Partners,Germany,Carretera,766,12.00,9192.00,91.92,9100.08,2298.00,6802.08,10/1/2013,10,October,2013 +Small Business,Mexico,Carretera,494,300.00,148200.00,1482.00,146718.00,123500.00,23218.00,10/1/2013,10,October,2013 +Government,Mexico,Carretera,1397,350.00,488950.00,4889.50,484060.50,363220.00,120840.50,10/1/2014,10,October,2014 +Government,France,Carretera,2155,350.00,754250.00,7542.50,746707.50,560300.00,186407.50,12/1/2014,12,December,2014 +Midmarket,Mexico,Montana,2214,15.00,33210.00,332.10,32877.90,22140.00,10737.90,3/1/2014,3,March,2014 +Small Business,United States of America,Montana,2301,300.00,690300.00,6903.00,683397.00,575250.00,108147.00,4/1/2014,4,April,2014 +Government,France,Montana,1375.5,20.00,27510.00,275.10,27234.90,13755.00,13479.90,7/1/2014,7,July,2014 +Government,Canada,Montana,1830,7.00,12810.00,128.10,12681.90,9150.00,3531.90,8/1/2014,8,August,2014 +Small Business,United States of America,Montana,2498,300.00,749400.00,7494.00,741906.00,624500.00,117406.00,9/1/2013,9,September,2013 +Enterprise,United States of America,Montana,663,125.00,82875.00,828.75,82046.25,79560.00,2486.25,10/1/2013,10,October,2013 +Midmarket,United States of America,Paseo,1514,15.00,22710.00,227.10,22482.90,15140.00,7342.90,2/1/2014,2,February,2014 +Government,United States of America,Paseo,4492.5,7.00,31447.50,314.48,31133.03,22462.50,8670.53,4/1/2014,4,April,2014 +Enterprise,United States of America,Paseo,727,125.00,90875.00,908.75,89966.25,87240.00,2726.25,6/1/2014,6,June,2014 +Enterprise,France,Paseo,787,125.00,98375.00,983.75,97391.25,94440.00,2951.25,6/1/2014,6,June,2014 +Enterprise,Mexico,Paseo,1823,125.00,227875.00,2278.75,225596.25,218760.00,6836.25,7/1/2014,7,July,2014 +Midmarket,Germany,Paseo,747,15.00,11205.00,112.05,11092.95,7470.00,3622.95,9/1/2014,9,September,2014 +Channel Partners,Germany,Paseo,766,12.00,9192.00,91.92,9100.08,2298.00,6802.08,10/1/2013,10,October,2013 +Small Business,United States of America,Paseo,2905,300.00,871500.00,8715.00,862785.00,726250.00,136535.00,11/1/2014,11,November,2014 +Government,France,Paseo,2155,350.00,754250.00,7542.50,746707.50,560300.00,186407.50,12/1/2014,12,December,2014 +Government,France,Velo,3864,20.00,77280.00,772.80,76507.20,38640.00,37867.20,4/1/2014,4,April,2014 +Government,Mexico,Velo,362,7.00,2534.00,25.34,2508.66,1810.00,698.66,5/1/2014,5,May,2014 +Enterprise,Canada,Velo,923,125.00,115375.00,1153.75,114221.25,110760.00,3461.25,8/1/2014,8,August,2014 +Enterprise,United States of America,Velo,663,125.00,82875.00,828.75,82046.25,79560.00,2486.25,10/1/2013,10,October,2013 +Government,Canada,Velo,2092,7.00,14644.00,146.44,14497.56,10460.00,4037.56,11/1/2013,11,November,2013 +Government,Germany,VTT,263,7.00,1841.00,18.41,1822.59,1315.00,507.59,3/1/2014,3,March,2014 +Government,Canada,VTT,943.5,350.00,330225.00,3302.25,326922.75,245310.00,81612.75,4/1/2014,4,April,2014 +Enterprise,United States of America,VTT,727,125.00,90875.00,908.75,89966.25,87240.00,2726.25,6/1/2014,6,June,2014 +Enterprise,France,VTT,787,125.00,98375.00,983.75,97391.25,94440.00,2951.25,6/1/2014,6,June,2014 +Small Business,Germany,VTT,986,300.00,295800.00,2958.00,292842.00,246500.00,46342.00,9/1/2014,9,September,2014 +Small Business,Mexico,VTT,494,300.00,148200.00,1482.00,146718.00,123500.00,23218.00,10/1/2013,10,October,2013 
+Government,Mexico,VTT,1397,350.00,488950.00,4889.50,484060.50,363220.00,120840.50,10/1/2014,10,October,2014 +Enterprise,France,VTT,1744,125.00,218000.00,2180.00,215820.00,209280.00,6540.00,11/1/2014,11,November,2014 +Channel Partners,United States of America,Amarilla,1989,12.00,23868.00,238.68,23629.32,5967.00,17662.32,9/1/2013,9,September,2013 +Midmarket,France,Amarilla,321,15.00,4815.00,48.15,4766.85,3210.00,1556.85,11/1/2013,11,November,2013 +Enterprise,Canada,Carretera,742.5,125.00,92812.50,1856.25,90956.25,89100.00,1856.25,4/1/2014,4,April,2014 +Channel Partners,Canada,Carretera,1295,12.00,15540.00,310.80,15229.20,3885.00,11344.20,10/1/2014,10,October,2014 +Small Business,Germany,Carretera,214,300.00,64200.00,1284.00,62916.00,53500.00,9416.00,10/1/2013,10,October,2013 +Government,France,Carretera,2145,7.00,15015.00,300.30,14714.70,10725.00,3989.70,11/1/2013,11,November,2013 +Government,Canada,Carretera,2852,350.00,998200.00,19964.00,978236.00,741520.00,236716.00,12/1/2014,12,December,2014 +Channel Partners,United States of America,Montana,1142,12.00,13704.00,274.08,13429.92,3426.00,10003.92,6/1/2014,6,June,2014 +Government,United States of America,Montana,1566,20.00,31320.00,626.40,30693.60,15660.00,15033.60,10/1/2014,10,October,2014 +Channel Partners,Mexico,Montana,690,12.00,8280.00,165.60,8114.40,2070.00,6044.40,11/1/2014,11,November,2014 +Enterprise,Mexico,Montana,1660,125.00,207500.00,4150.00,203350.00,199200.00,4150.00,11/1/2013,11,November,2013 +Midmarket,Canada,Paseo,2363,15.00,35445.00,708.90,34736.10,23630.00,11106.10,2/1/2014,2,February,2014 +Small Business,France,Paseo,918,300.00,275400.00,5508.00,269892.00,229500.00,40392.00,5/1/2014,5,May,2014 +Small Business,Germany,Paseo,1728,300.00,518400.00,10368.00,508032.00,432000.00,76032.00,5/1/2014,5,May,2014 +Channel Partners,United States of America,Paseo,1142,12.00,13704.00,274.08,13429.92,3426.00,10003.92,6/1/2014,6,June,2014 +Enterprise,Mexico,Paseo,662,125.00,82750.00,1655.00,81095.00,79440.00,1655.00,6/1/2014,6,June,2014 +Channel Partners,Canada,Paseo,1295,12.00,15540.00,310.80,15229.20,3885.00,11344.20,10/1/2014,10,October,2014 +Enterprise,Germany,Paseo,809,125.00,101125.00,2022.50,99102.50,97080.00,2022.50,10/1/2013,10,October,2013 +Enterprise,Mexico,Paseo,2145,125.00,268125.00,5362.50,262762.50,257400.00,5362.50,10/1/2013,10,October,2013 +Channel Partners,France,Paseo,1785,12.00,21420.00,428.40,20991.60,5355.00,15636.60,11/1/2013,11,November,2013 +Small Business,Canada,Paseo,1916,300.00,574800.00,11496.00,563304.00,479000.00,84304.00,12/1/2014,12,December,2014 +Government,Canada,Paseo,2852,350.00,998200.00,19964.00,978236.00,741520.00,236716.00,12/1/2014,12,December,2014 +Enterprise,Canada,Paseo,2729,125.00,341125.00,6822.50,334302.50,327480.00,6822.50,12/1/2014,12,December,2014 +Midmarket,United States of America,Paseo,1925,15.00,28875.00,577.50,28297.50,19250.00,9047.50,12/1/2013,12,December,2013 +Government,United States of America,Paseo,2013,7.00,14091.00,281.82,13809.18,10065.00,3744.18,12/1/2013,12,December,2013 +Channel Partners,France,Paseo,1055,12.00,12660.00,253.20,12406.80,3165.00,9241.80,12/1/2014,12,December,2014 +Channel Partners,Mexico,Paseo,1084,12.00,13008.00,260.16,12747.84,3252.00,9495.84,12/1/2014,12,December,2014 +Government,United States of America,Velo,1566,20.00,31320.00,626.40,30693.60,15660.00,15033.60,10/1/2014,10,October,2014 +Government,Germany,Velo,2966,350.00,1038100.00,20762.00,1017338.00,771160.00,246178.00,10/1/2013,10,October,2013 
+Government,Germany,Velo,2877,350.00,1006950.00,20139.00,986811.00,748020.00,238791.00,10/1/2014,10,October,2014 +Enterprise,Germany,Velo,809,125.00,101125.00,2022.50,99102.50,97080.00,2022.50,10/1/2013,10,October,2013 +Enterprise,Mexico,Velo,2145,125.00,268125.00,5362.50,262762.50,257400.00,5362.50,10/1/2013,10,October,2013 +Channel Partners,France,Velo,1055,12.00,12660.00,253.20,12406.80,3165.00,9241.80,12/1/2014,12,December,2014 +Government,Mexico,Velo,544,20.00,10880.00,217.60,10662.40,5440.00,5222.40,12/1/2013,12,December,2013 +Channel Partners,Mexico,Velo,1084,12.00,13008.00,260.16,12747.84,3252.00,9495.84,12/1/2014,12,December,2014 +Enterprise,Mexico,VTT,662,125.00,82750.00,1655.00,81095.00,79440.00,1655.00,6/1/2014,6,June,2014 +Small Business,Germany,VTT,214,300.00,64200.00,1284.00,62916.00,53500.00,9416.00,10/1/2013,10,October,2013 +Government,Germany,VTT,2877,350.00,1006950.00,20139.00,986811.00,748020.00,238791.00,10/1/2014,10,October,2014 +Enterprise,Canada,VTT,2729,125.00,341125.00,6822.50,334302.50,327480.00,6822.50,12/1/2014,12,December,2014 +Government,United States of America,VTT,266,350.00,93100.00,1862.00,91238.00,69160.00,22078.00,12/1/2013,12,December,2013 +Government,Mexico,VTT,1940,350.00,679000.00,13580.00,665420.00,504400.00,161020.00,12/1/2013,12,December,2013 +Small Business,Germany,Amarilla,259,300.00,77700.00,1554.00,76146.00,64750.00,11396.00,3/1/2014,3,March,2014 +Small Business,Mexico,Amarilla,1101,300.00,330300.00,6606.00,323694.00,275250.00,48444.00,3/1/2014,3,March,2014 +Enterprise,Germany,Amarilla,2276,125.00,284500.00,5690.00,278810.00,273120.00,5690.00,5/1/2014,5,May,2014 +Government,Germany,Amarilla,2966,350.00,1038100.00,20762.00,1017338.00,771160.00,246178.00,10/1/2013,10,October,2013 +Government,United States of America,Amarilla,1236,20.00,24720.00,494.40,24225.60,12360.00,11865.60,11/1/2014,11,November,2014 +Government,France,Amarilla,941,20.00,18820.00,376.40,18443.60,9410.00,9033.60,11/1/2014,11,November,2014 +Small Business,Canada,Amarilla,1916,300.00,574800.00,11496.00,563304.00,479000.00,84304.00,12/1/2014,12,December,2014 +Enterprise,France,Carretera,4243.5,125.00,530437.50,15913.13,514524.38,509220.00,5304.38,4/1/2014,4,April,2014 +Government,Germany,Carretera,2580,20.00,51600.00,1548.00,50052.00,25800.00,24252.00,4/1/2014,4,April,2014 +Small Business,Germany,Carretera,689,300.00,206700.00,6201.00,200499.00,172250.00,28249.00,6/1/2014,6,June,2014 +Channel Partners,United States of America,Carretera,1947,12.00,23364.00,700.92,22663.08,5841.00,16822.08,9/1/2014,9,September,2014 +Channel Partners,Canada,Carretera,908,12.00,10896.00,326.88,10569.12,2724.00,7845.12,12/1/2013,12,December,2013 +Government,Germany,Montana,1958,7.00,13706.00,411.18,13294.82,9790.00,3504.82,2/1/2014,2,February,2014 +Channel Partners,France,Montana,1901,12.00,22812.00,684.36,22127.64,5703.00,16424.64,6/1/2014,6,June,2014 +Government,France,Montana,544,7.00,3808.00,114.24,3693.76,2720.00,973.76,9/1/2014,9,September,2014 +Government,Germany,Montana,1797,350.00,628950.00,18868.50,610081.50,467220.00,142861.50,9/1/2013,9,September,2013 +Enterprise,France,Montana,1287,125.00,160875.00,4826.25,156048.75,154440.00,1608.75,12/1/2014,12,December,2014 +Enterprise,Germany,Montana,1706,125.00,213250.00,6397.50,206852.50,204720.00,2132.50,12/1/2014,12,December,2014 +Small Business,France,Paseo,2434.5,300.00,730350.00,21910.50,708439.50,608625.00,99814.50,1/1/2014,1,January,2014 +Enterprise,Canada,Paseo,1774,125.00,221750.00,6652.50,215097.50,212880.00,2217.50,3/1/2014,3,March,2014 
+Channel Partners,France,Paseo,1901,12.00,22812.00,684.36,22127.64,5703.00,16424.64,6/1/2014,6,June,2014 +Small Business,Germany,Paseo,689,300.00,206700.00,6201.00,200499.00,172250.00,28249.00,6/1/2014,6,June,2014 +Enterprise,Germany,Paseo,1570,125.00,196250.00,5887.50,190362.50,188400.00,1962.50,6/1/2014,6,June,2014 +Channel Partners,United States of America,Paseo,1369.5,12.00,16434.00,493.02,15940.98,4108.50,11832.48,7/1/2014,7,July,2014 +Enterprise,Canada,Paseo,2009,125.00,251125.00,7533.75,243591.25,241080.00,2511.25,10/1/2014,10,October,2014 +Midmarket,Germany,Paseo,1945,15.00,29175.00,875.25,28299.75,19450.00,8849.75,10/1/2013,10,October,2013 +Enterprise,France,Paseo,1287,125.00,160875.00,4826.25,156048.75,154440.00,1608.75,12/1/2014,12,December,2014 +Enterprise,Germany,Paseo,1706,125.00,213250.00,6397.50,206852.50,204720.00,2132.50,12/1/2014,12,December,2014 +Enterprise,Canada,Velo,2009,125.00,251125.00,7533.75,243591.25,241080.00,2511.25,10/1/2014,10,October,2014 +Small Business,United States of America,VTT,2844,300.00,853200.00,25596.00,827604.00,711000.00,116604.00,2/1/2014,2,February,2014 +Channel Partners,Mexico,VTT,1916,12.00,22992.00,689.76,22302.24,5748.00,16554.24,4/1/2014,4,April,2014 +Enterprise,Germany,VTT,1570,125.00,196250.00,5887.50,190362.50,188400.00,1962.50,6/1/2014,6,June,2014 +Small Business,Canada,VTT,1874,300.00,562200.00,16866.00,545334.00,468500.00,76834.00,8/1/2014,8,August,2014 +Government,Mexico,VTT,1642,350.00,574700.00,17241.00,557459.00,426920.00,130539.00,8/1/2014,8,August,2014 +Midmarket,Germany,VTT,1945,15.00,29175.00,875.25,28299.75,19450.00,8849.75,10/1/2013,10,October,2013 +Government,Canada,Carretera,831,20.00,16620.00,498.60,16121.40,8310.00,7811.40,5/1/2014,5,May,2014 +Government,Mexico,Paseo,1760,7.00,12320.00,369.60,11950.40,8800.00,3150.40,9/1/2013,9,September,2013 +Government,Canada,Velo,3850.5,20.00,77010.00,2310.30,74699.70,38505.00,36194.70,4/1/2014,4,April,2014 +Channel Partners,Germany,VTT,2479,12.00,29748.00,892.44,28855.56,7437.00,21418.56,1/1/2014,1,January,2014 +Midmarket,Mexico,Montana,2031,15.00,30465.00,1218.60,29246.40,20310.00,8936.40,10/1/2014,10,October,2014 +Midmarket,Mexico,Paseo,2031,15.00,30465.00,1218.60,29246.40,20310.00,8936.40,10/1/2014,10,October,2014 +Midmarket,France,Paseo,2261,15.00,33915.00,1356.60,32558.40,22610.00,9948.40,12/1/2013,12,December,2013 +Government,United States of America,Velo,736,20.00,14720.00,588.80,14131.20,7360.00,6771.20,9/1/2013,9,September,2013 +Government,Canada,Carretera,2851,7.00,19957.00,798.28,19158.72,14255.00,4903.72,10/1/2013,10,October,2013 +Small Business,Germany,Carretera,2021,300.00,606300.00,24252.00,582048.00,505250.00,76798.00,10/1/2014,10,October,2014 +Government,United States of America,Carretera,274,350.00,95900.00,3836.00,92064.00,71240.00,20824.00,12/1/2014,12,December,2014 +Midmarket,Canada,Montana,1967,15.00,29505.00,1180.20,28324.80,19670.00,8654.80,3/1/2014,3,March,2014 +Small Business,Germany,Montana,1859,300.00,557700.00,22308.00,535392.00,464750.00,70642.00,8/1/2014,8,August,2014 +Government,Canada,Montana,2851,7.00,19957.00,798.28,19158.72,14255.00,4903.72,10/1/2013,10,October,2013 +Small Business,Germany,Montana,2021,300.00,606300.00,24252.00,582048.00,505250.00,76798.00,10/1/2014,10,October,2014 +Enterprise,Mexico,Montana,1138,125.00,142250.00,5690.00,136560.00,136560.00,0.00,12/1/2014,12,December,2014 +Government,Canada,Paseo,4251,7.00,29757.00,1190.28,28566.72,21255.00,7311.72,1/1/2014,1,January,2014 
+Enterprise,Germany,Paseo,795,125.00,99375.00,3975.00,95400.00,95400.00,0.00,3/1/2014,3,March,2014 +Small Business,Germany,Paseo,1414.5,300.00,424350.00,16974.00,407376.00,353625.00,53751.00,4/1/2014,4,April,2014 +Small Business,United States of America,Paseo,2918,300.00,875400.00,35016.00,840384.00,729500.00,110884.00,5/1/2014,5,May,2014 +Government,United States of America,Paseo,3450,350.00,1207500.00,48300.00,1159200.00,897000.00,262200.00,7/1/2014,7,July,2014 +Enterprise,France,Paseo,2988,125.00,373500.00,14940.00,358560.00,358560.00,0.00,7/1/2014,7,July,2014 +Midmarket,Canada,Paseo,218,15.00,3270.00,130.80,3139.20,2180.00,959.20,9/1/2014,9,September,2014 +Government,Canada,Paseo,2074,20.00,41480.00,1659.20,39820.80,20740.00,19080.80,9/1/2014,9,September,2014 +Government,United States of America,Paseo,1056,20.00,21120.00,844.80,20275.20,10560.00,9715.20,9/1/2014,9,September,2014 +Midmarket,United States of America,Paseo,671,15.00,10065.00,402.60,9662.40,6710.00,2952.40,10/1/2013,10,October,2013 +Midmarket,Mexico,Paseo,1514,15.00,22710.00,908.40,21801.60,15140.00,6661.60,10/1/2013,10,October,2013 +Government,United States of America,Paseo,274,350.00,95900.00,3836.00,92064.00,71240.00,20824.00,12/1/2014,12,December,2014 +Enterprise,Mexico,Paseo,1138,125.00,142250.00,5690.00,136560.00,136560.00,0.00,12/1/2014,12,December,2014 +Channel Partners,United States of America,Velo,1465,12.00,17580.00,703.20,16876.80,4395.00,12481.80,3/1/2014,3,March,2014 +Government,Canada,Velo,2646,20.00,52920.00,2116.80,50803.20,26460.00,24343.20,9/1/2013,9,September,2013 +Government,France,Velo,2177,350.00,761950.00,30478.00,731472.00,566020.00,165452.00,10/1/2014,10,October,2014 +Channel Partners,France,VTT,866,12.00,10392.00,415.68,9976.32,2598.00,7378.32,5/1/2014,5,May,2014 +Government,United States of America,VTT,349,350.00,122150.00,4886.00,117264.00,90740.00,26524.00,9/1/2013,9,September,2013 +Government,France,VTT,2177,350.00,761950.00,30478.00,731472.00,566020.00,165452.00,10/1/2014,10,October,2014 +Midmarket,Mexico,VTT,1514,15.00,22710.00,908.40,21801.60,15140.00,6661.60,10/1/2013,10,October,2013 +Government,Mexico,Amarilla,1865,350.00,652750.00,26110.00,626640.00,484900.00,141740.00,2/1/2014,2,February,2014 +Enterprise,Mexico,Amarilla,1074,125.00,134250.00,5370.00,128880.00,128880.00,0.00,4/1/2014,4,April,2014 +Government,Germany,Amarilla,1907,350.00,667450.00,26698.00,640752.00,495820.00,144932.00,9/1/2014,9,September,2014 +Midmarket,United States of America,Amarilla,671,15.00,10065.00,402.60,9662.40,6710.00,2952.40,10/1/2013,10,October,2013 +Government,Canada,Amarilla,1778,350.00,622300.00,24892.00,597408.00,462280.00,135128.00,12/1/2013,12,December,2013 +Government,Germany,Montana,1159,7.00,8113.00,405.65,7707.35,5795.00,1912.35,10/1/2013,10,October,2013 +Government,Germany,Paseo,1372,7.00,9604.00,480.20,9123.80,6860.00,2263.80,1/1/2014,1,January,2014 +Government,Canada,Paseo,2349,7.00,16443.00,822.15,15620.85,11745.00,3875.85,9/1/2013,9,September,2013 +Government,Mexico,Paseo,2689,7.00,18823.00,941.15,17881.85,13445.00,4436.85,10/1/2014,10,October,2014 +Channel Partners,Canada,Paseo,2431,12.00,29172.00,1458.60,27713.40,7293.00,20420.40,12/1/2014,12,December,2014 +Channel Partners,Canada,Velo,2431,12.00,29172.00,1458.60,27713.40,7293.00,20420.40,12/1/2014,12,December,2014 +Government,Mexico,VTT,2689,7.00,18823.00,941.15,17881.85,13445.00,4436.85,10/1/2014,10,October,2014 +Government,Mexico,Amarilla,1683,7.00,11781.00,589.05,11191.95,8415.00,2776.95,7/1/2014,7,July,2014 +Channel 
Partners,Mexico,Amarilla,1123,12.00,13476.00,673.80,12802.20,3369.00,9433.20,8/1/2014,8,August,2014 +Government,Germany,Amarilla,1159,7.00,8113.00,405.65,7707.35,5795.00,1912.35,10/1/2013,10,October,2013 +Channel Partners,France,Carretera,1865,12.00,22380.00,1119.00,21261.00,5595.00,15666.00,2/1/2014,2,February,2014 +Channel Partners,Germany,Carretera,1116,12.00,13392.00,669.60,12722.40,3348.00,9374.40,2/1/2014,2,February,2014 +Government,France,Carretera,1563,20.00,31260.00,1563.00,29697.00,15630.00,14067.00,5/1/2014,5,May,2014 +Small Business,United States of America,Carretera,991,300.00,297300.00,14865.00,282435.00,247750.00,34685.00,6/1/2014,6,June,2014 +Government,Germany,Carretera,1016,7.00,7112.00,355.60,6756.40,5080.00,1676.40,11/1/2013,11,November,2013 +Midmarket,Mexico,Carretera,2791,15.00,41865.00,2093.25,39771.75,27910.00,11861.75,11/1/2014,11,November,2014 +Government,United States of America,Carretera,570,7.00,3990.00,199.50,3790.50,2850.00,940.50,12/1/2014,12,December,2014 +Government,France,Carretera,2487,7.00,17409.00,870.45,16538.55,12435.00,4103.55,12/1/2014,12,December,2014 +Government,France,Montana,1384.5,350.00,484575.00,24228.75,460346.25,359970.00,100376.25,1/1/2014,1,January,2014 +Enterprise,United States of America,Montana,3627,125.00,453375.00,22668.75,430706.25,435240.00,-4533.75,7/1/2014,7,July,2014 +Government,Mexico,Montana,720,350.00,252000.00,12600.00,239400.00,187200.00,52200.00,9/1/2013,9,September,2013 +Channel Partners,Germany,Montana,2342,12.00,28104.00,1405.20,26698.80,7026.00,19672.80,11/1/2014,11,November,2014 +Small Business,Mexico,Montana,1100,300.00,330000.00,16500.00,313500.00,275000.00,38500.00,12/1/2013,12,December,2013 +Government,France,Paseo,1303,20.00,26060.00,1303.00,24757.00,13030.00,11727.00,2/1/2014,2,February,2014 +Enterprise,United States of America,Paseo,2992,125.00,374000.00,18700.00,355300.00,359040.00,-3740.00,3/1/2014,3,March,2014 +Enterprise,France,Paseo,2385,125.00,298125.00,14906.25,283218.75,286200.00,-2981.25,3/1/2014,3,March,2014 +Small Business,Mexico,Paseo,1607,300.00,482100.00,24105.00,457995.00,401750.00,56245.00,4/1/2014,4,April,2014 +Government,United States of America,Paseo,2327,7.00,16289.00,814.45,15474.55,11635.00,3839.55,5/1/2014,5,May,2014 +Small Business,United States of America,Paseo,991,300.00,297300.00,14865.00,282435.00,247750.00,34685.00,6/1/2014,6,June,2014 +Government,United States of America,Paseo,602,350.00,210700.00,10535.00,200165.00,156520.00,43645.00,6/1/2014,6,June,2014 +Midmarket,France,Paseo,2620,15.00,39300.00,1965.00,37335.00,26200.00,11135.00,9/1/2014,9,September,2014 +Government,Canada,Paseo,1228,350.00,429800.00,21490.00,408310.00,319280.00,89030.00,10/1/2013,10,October,2013 +Government,Canada,Paseo,1389,20.00,27780.00,1389.00,26391.00,13890.00,12501.00,10/1/2013,10,October,2013 +Enterprise,United States of America,Paseo,861,125.00,107625.00,5381.25,102243.75,103320.00,-1076.25,10/1/2014,10,October,2014 +Enterprise,France,Paseo,704,125.00,88000.00,4400.00,83600.00,84480.00,-880.00,10/1/2013,10,October,2013 +Government,Canada,Paseo,1802,20.00,36040.00,1802.00,34238.00,18020.00,16218.00,12/1/2013,12,December,2013 +Government,United States of America,Paseo,2663,20.00,53260.00,2663.00,50597.00,26630.00,23967.00,12/1/2014,12,December,2014 +Government,France,Paseo,2136,7.00,14952.00,747.60,14204.40,10680.00,3524.40,12/1/2013,12,December,2013 +Midmarket,Germany,Paseo,2116,15.00,31740.00,1587.00,30153.00,21160.00,8993.00,12/1/2013,12,December,2013 +Midmarket,United States of 
America,Velo,555,15.00,8325.00,416.25,7908.75,5550.00,2358.75,1/1/2014,1,January,2014 +Midmarket,Mexico,Velo,2861,15.00,42915.00,2145.75,40769.25,28610.00,12159.25,1/1/2014,1,January,2014 +Enterprise,Germany,Velo,807,125.00,100875.00,5043.75,95831.25,96840.00,-1008.75,2/1/2014,2,February,2014 +Government,United States of America,Velo,602,350.00,210700.00,10535.00,200165.00,156520.00,43645.00,6/1/2014,6,June,2014 +Government,United States of America,Velo,2832,20.00,56640.00,2832.00,53808.00,28320.00,25488.00,8/1/2014,8,August,2014 +Government,France,Velo,1579,20.00,31580.00,1579.00,30001.00,15790.00,14211.00,8/1/2014,8,August,2014 +Enterprise,United States of America,Velo,861,125.00,107625.00,5381.25,102243.75,103320.00,-1076.25,10/1/2014,10,October,2014 +Enterprise,France,Velo,704,125.00,88000.00,4400.00,83600.00,84480.00,-880.00,10/1/2013,10,October,2013 +Government,France,Velo,1033,20.00,20660.00,1033.00,19627.00,10330.00,9297.00,12/1/2013,12,December,2013 +Small Business,Germany,Velo,1250,300.00,375000.00,18750.00,356250.00,312500.00,43750.00,12/1/2014,12,December,2014 +Government,Canada,VTT,1389,20.00,27780.00,1389.00,26391.00,13890.00,12501.00,10/1/2013,10,October,2013 +Government,United States of America,VTT,1265,20.00,25300.00,1265.00,24035.00,12650.00,11385.00,11/1/2013,11,November,2013 +Government,Germany,VTT,2297,20.00,45940.00,2297.00,43643.00,22970.00,20673.00,11/1/2013,11,November,2013 +Government,United States of America,VTT,2663,20.00,53260.00,2663.00,50597.00,26630.00,23967.00,12/1/2014,12,December,2014 +Government,United States of America,VTT,570,7.00,3990.00,199.50,3790.50,2850.00,940.50,12/1/2014,12,December,2014 +Government,France,VTT,2487,7.00,17409.00,870.45,16538.55,12435.00,4103.55,12/1/2014,12,December,2014 +Government,Germany,Amarilla,1350,350.00,472500.00,23625.00,448875.00,351000.00,97875.00,2/1/2014,2,February,2014 +Government,Canada,Amarilla,552,350.00,193200.00,9660.00,183540.00,143520.00,40020.00,8/1/2014,8,August,2014 +Government,Canada,Amarilla,1228,350.00,429800.00,21490.00,408310.00,319280.00,89030.00,10/1/2013,10,October,2013 +Small Business,Germany,Amarilla,1250,300.00,375000.00,18750.00,356250.00,312500.00,43750.00,12/1/2014,12,December,2014 +Midmarket,France,Paseo,3801,15.00,57015.00,3420.90,53594.10,38010.00,15584.10,4/1/2014,4,April,2014 +Government,United States of America,Carretera,1117.5,20.00,22350.00,1341.00,21009.00,11175.00,9834.00,1/1/2014,1,January,2014 +Midmarket,Canada,Carretera,2844,15.00,42660.00,2559.60,40100.40,28440.00,11660.40,6/1/2014,6,June,2014 +Channel Partners,Mexico,Carretera,562,12.00,6744.00,404.64,6339.36,1686.00,4653.36,9/1/2014,9,September,2014 +Channel Partners,Canada,Carretera,2299,12.00,27588.00,1655.28,25932.72,6897.00,19035.72,10/1/2013,10,October,2013 +Midmarket,United States of America,Carretera,2030,15.00,30450.00,1827.00,28623.00,20300.00,8323.00,11/1/2014,11,November,2014 +Government,United States of America,Carretera,263,7.00,1841.00,110.46,1730.54,1315.00,415.54,11/1/2013,11,November,2013 +Enterprise,Germany,Carretera,887,125.00,110875.00,6652.50,104222.50,106440.00,-2217.50,12/1/2013,12,December,2013 +Government,Mexico,Montana,980,350.00,343000.00,20580.00,322420.00,254800.00,67620.00,4/1/2014,4,April,2014 +Government,Germany,Montana,1460,350.00,511000.00,30660.00,480340.00,379600.00,100740.00,5/1/2014,5,May,2014 +Government,France,Montana,1403,7.00,9821.00,589.26,9231.74,7015.00,2216.74,10/1/2013,10,October,2013 +Channel Partners,United States of 
America,Montana,2723,12.00,32676.00,1960.56,30715.44,8169.00,22546.44,11/1/2014,11,November,2014 +Government,France,Paseo,1496,350.00,523600.00,31416.00,492184.00,388960.00,103224.00,6/1/2014,6,June,2014 +Channel Partners,Canada,Paseo,2299,12.00,27588.00,1655.28,25932.72,6897.00,19035.72,10/1/2013,10,October,2013 +Government,United States of America,Paseo,727,350.00,254450.00,15267.00,239183.00,189020.00,50163.00,10/1/2013,10,October,2013 +Enterprise,Canada,Velo,952,125.00,119000.00,7140.00,111860.00,114240.00,-2380.00,2/1/2014,2,February,2014 +Enterprise,United States of America,Velo,2755,125.00,344375.00,20662.50,323712.50,330600.00,-6887.50,2/1/2014,2,February,2014 +Midmarket,Germany,Velo,1530,15.00,22950.00,1377.00,21573.00,15300.00,6273.00,5/1/2014,5,May,2014 +Government,France,Velo,1496,350.00,523600.00,31416.00,492184.00,388960.00,103224.00,6/1/2014,6,June,2014 +Government,Mexico,Velo,1498,7.00,10486.00,629.16,9856.84,7490.00,2366.84,6/1/2014,6,June,2014 +Small Business,France,Velo,1221,300.00,366300.00,21978.00,344322.00,305250.00,39072.00,10/1/2013,10,October,2013 +Government,France,Velo,2076,350.00,726600.00,43596.00,683004.00,539760.00,143244.00,10/1/2013,10,October,2013 +Midmarket,Canada,VTT,2844,15.00,42660.00,2559.60,40100.40,28440.00,11660.40,6/1/2014,6,June,2014 +Government,Mexico,VTT,1498,7.00,10486.00,629.16,9856.84,7490.00,2366.84,6/1/2014,6,June,2014 +Small Business,France,VTT,1221,300.00,366300.00,21978.00,344322.00,305250.00,39072.00,10/1/2013,10,October,2013 +Government,Mexico,VTT,1123,20.00,22460.00,1347.60,21112.40,11230.00,9882.40,11/1/2013,11,November,2013 +Small Business,Canada,VTT,2436,300.00,730800.00,43848.00,686952.00,609000.00,77952.00,12/1/2013,12,December,2013 +Enterprise,France,Amarilla,1987.5,125.00,248437.50,14906.25,233531.25,238500.00,-4968.75,1/1/2014,1,January,2014 +Government,Mexico,Amarilla,1679,350.00,587650.00,35259.00,552391.00,436540.00,115851.00,9/1/2014,9,September,2014 +Government,United States of America,Amarilla,727,350.00,254450.00,15267.00,239183.00,189020.00,50163.00,10/1/2013,10,October,2013 +Government,France,Amarilla,1403,7.00,9821.00,589.26,9231.74,7015.00,2216.74,10/1/2013,10,October,2013 +Government,France,Amarilla,2076,350.00,726600.00,43596.00,683004.00,539760.00,143244.00,10/1/2013,10,October,2013 +Government,France,Montana,1757,20.00,35140.00,2108.40,33031.60,17570.00,15461.60,10/1/2013,10,October,2013 +Midmarket,United States of America,Paseo,2198,15.00,32970.00,1978.20,30991.80,21980.00,9011.80,8/1/2014,8,August,2014 +Midmarket,Germany,Paseo,1743,15.00,26145.00,1568.70,24576.30,17430.00,7146.30,8/1/2014,8,August,2014 +Midmarket,United States of America,Paseo,1153,15.00,17295.00,1037.70,16257.30,11530.00,4727.30,10/1/2014,10,October,2014 +Government,France,Paseo,1757,20.00,35140.00,2108.40,33031.60,17570.00,15461.60,10/1/2013,10,October,2013 +Government,Germany,Velo,1001,20.00,20020.00,1201.20,18818.80,10010.00,8808.80,8/1/2014,8,August,2014 +Government,Mexico,Velo,1333,7.00,9331.00,559.86,8771.14,6665.00,2106.14,11/1/2014,11,November,2014 +Midmarket,United States of America,VTT,1153,15.00,17295.00,1037.70,16257.30,11530.00,4727.30,10/1/2014,10,October,2014 +Channel Partners,Mexico,Carretera,727,12.00,8724.00,610.68,8113.32,2181.00,5932.32,2/1/2014,2,February,2014 +Channel Partners,Canada,Carretera,1884,12.00,22608.00,1582.56,21025.44,5652.00,15373.44,8/1/2014,8,August,2014 +Government,Mexico,Carretera,1834,20.00,36680.00,2567.60,34112.40,18340.00,15772.40,9/1/2013,9,September,2013 +Channel 
Partners,Mexico,Montana,2340,12.00,28080.00,1965.60,26114.40,7020.00,19094.40,1/1/2014,1,January,2014 +Channel Partners,France,Montana,2342,12.00,28104.00,1967.28,26136.72,7026.00,19110.72,11/1/2014,11,November,2014 +Government,France,Paseo,1031,7.00,7217.00,505.19,6711.81,5155.00,1556.81,9/1/2013,9,September,2013 +Midmarket,Canada,Velo,1262,15.00,18930.00,1325.10,17604.90,12620.00,4984.90,5/1/2014,5,May,2014 +Government,Canada,Velo,1135,7.00,7945.00,556.15,7388.85,5675.00,1713.85,6/1/2014,6,June,2014 +Government,United States of America,Velo,547,7.00,3829.00,268.03,3560.97,2735.00,825.97,11/1/2014,11,November,2014 +Government,Canada,Velo,1582,7.00,11074.00,775.18,10298.82,7910.00,2388.82,12/1/2014,12,December,2014 +Channel Partners,France,VTT,1738.5,12.00,20862.00,1460.34,19401.66,5215.50,14186.16,4/1/2014,4,April,2014 +Channel Partners,Germany,VTT,2215,12.00,26580.00,1860.60,24719.40,6645.00,18074.40,9/1/2013,9,September,2013 +Government,Canada,VTT,1582,7.00,11074.00,775.18,10298.82,7910.00,2388.82,12/1/2014,12,December,2014 +Government,Canada,Amarilla,1135,7.00,7945.00,556.15,7388.85,5675.00,1713.85,6/1/2014,6,June,2014 +Government,United States of America,Carretera,1761,350.00,616350.00,43144.50,573205.50,457860.00,115345.50,3/1/2014,3,March,2014 +Small Business,France,Carretera,448,300.00,134400.00,9408.00,124992.00,112000.00,12992.00,6/1/2014,6,June,2014 +Small Business,France,Carretera,2181,300.00,654300.00,45801.00,608499.00,545250.00,63249.00,10/1/2014,10,October,2014 +Government,France,Montana,1976,20.00,39520.00,2766.40,36753.60,19760.00,16993.60,10/1/2014,10,October,2014 +Small Business,France,Montana,2181,300.00,654300.00,45801.00,608499.00,545250.00,63249.00,10/1/2014,10,October,2014 +Enterprise,Germany,Montana,2500,125.00,312500.00,21875.00,290625.00,300000.00,-9375.00,11/1/2013,11,November,2013 +Small Business,Canada,Paseo,1702,300.00,510600.00,35742.00,474858.00,425500.00,49358.00,5/1/2014,5,May,2014 +Small Business,France,Paseo,448,300.00,134400.00,9408.00,124992.00,112000.00,12992.00,6/1/2014,6,June,2014 +Enterprise,Germany,Paseo,3513,125.00,439125.00,30738.75,408386.25,421560.00,-13173.75,7/1/2014,7,July,2014 +Midmarket,France,Paseo,2101,15.00,31515.00,2206.05,29308.95,21010.00,8298.95,8/1/2014,8,August,2014 +Midmarket,United States of America,Paseo,2931,15.00,43965.00,3077.55,40887.45,29310.00,11577.45,9/1/2013,9,September,2013 +Government,France,Paseo,1535,20.00,30700.00,2149.00,28551.00,15350.00,13201.00,9/1/2014,9,September,2014 +Small Business,Germany,Paseo,1123,300.00,336900.00,23583.00,313317.00,280750.00,32567.00,9/1/2013,9,September,2013 +Small Business,Canada,Paseo,1404,300.00,421200.00,29484.00,391716.00,351000.00,40716.00,11/1/2013,11,November,2013 +Channel Partners,Mexico,Paseo,2763,12.00,33156.00,2320.92,30835.08,8289.00,22546.08,11/1/2013,11,November,2013 +Government,Germany,Paseo,2125,7.00,14875.00,1041.25,13833.75,10625.00,3208.75,12/1/2013,12,December,2013 +Small Business,France,Velo,1659,300.00,497700.00,34839.00,462861.00,414750.00,48111.00,7/1/2014,7,July,2014 +Government,Mexico,Velo,609,20.00,12180.00,852.60,11327.40,6090.00,5237.40,8/1/2014,8,August,2014 +Enterprise,Germany,Velo,2087,125.00,260875.00,18261.25,242613.75,250440.00,-7826.25,9/1/2014,9,September,2014 +Government,France,Velo,1976,20.00,39520.00,2766.40,36753.60,19760.00,16993.60,10/1/2014,10,October,2014 +Government,United States of America,Velo,1421,20.00,28420.00,1989.40,26430.60,14210.00,12220.60,12/1/2013,12,December,2013 +Small Business,United States of 
America,Velo,1372,300.00,411600.00,28812.00,382788.00,343000.00,39788.00,12/1/2014,12,December,2014 +Government,Germany,Velo,588,20.00,11760.00,823.20,10936.80,5880.00,5056.80,12/1/2013,12,December,2013 +Channel Partners,Canada,VTT,3244.5,12.00,38934.00,2725.38,36208.62,9733.50,26475.12,1/1/2014,1,January,2014 +Small Business,France,VTT,959,300.00,287700.00,20139.00,267561.00,239750.00,27811.00,2/1/2014,2,February,2014 +Small Business,Mexico,VTT,2747,300.00,824100.00,57687.00,766413.00,686750.00,79663.00,2/1/2014,2,February,2014 +Enterprise,Canada,Amarilla,1645,125.00,205625.00,14393.75,191231.25,197400.00,-6168.75,5/1/2014,5,May,2014 +Government,France,Amarilla,2876,350.00,1006600.00,70462.00,936138.00,747760.00,188378.00,9/1/2014,9,September,2014 +Enterprise,Germany,Amarilla,994,125.00,124250.00,8697.50,115552.50,119280.00,-3727.50,9/1/2013,9,September,2013 +Government,Canada,Amarilla,1118,20.00,22360.00,1565.20,20794.80,11180.00,9614.80,11/1/2014,11,November,2014 +Small Business,United States of America,Amarilla,1372,300.00,411600.00,28812.00,382788.00,343000.00,39788.00,12/1/2014,12,December,2014 +Government,Canada,Montana,488,7.00,3416.00,273.28,3142.72,2440.00,702.72,2/1/2014,2,February,2014 +Government,United States of America,Montana,1282,20.00,25640.00,2051.20,23588.80,12820.00,10768.80,6/1/2014,6,June,2014 +Government,Canada,Paseo,257,7.00,1799.00,143.92,1655.08,1285.00,370.08,5/1/2014,5,May,2014 +Government,United States of America,Amarilla,1282,20.00,25640.00,2051.20,23588.80,12820.00,10768.80,6/1/2014,6,June,2014 +Enterprise,Mexico,Carretera,1540,125.00,192500.00,15400.00,177100.00,184800.00,-7700.00,8/1/2014,8,August,2014 +Midmarket,France,Carretera,490,15.00,7350.00,588.00,6762.00,4900.00,1862.00,11/1/2014,11,November,2014 +Government,Mexico,Carretera,1362,350.00,476700.00,38136.00,438564.00,354120.00,84444.00,12/1/2014,12,December,2014 +Midmarket,France,Montana,2501,15.00,37515.00,3001.20,34513.80,25010.00,9503.80,3/1/2014,3,March,2014 +Government,Canada,Montana,708,20.00,14160.00,1132.80,13027.20,7080.00,5947.20,6/1/2014,6,June,2014 +Government,Germany,Montana,645,20.00,12900.00,1032.00,11868.00,6450.00,5418.00,7/1/2014,7,July,2014 +Small Business,France,Montana,1562,300.00,468600.00,37488.00,431112.00,390500.00,40612.00,8/1/2014,8,August,2014 +Small Business,Canada,Montana,1283,300.00,384900.00,30792.00,354108.00,320750.00,33358.00,9/1/2013,9,September,2013 +Midmarket,Germany,Montana,711,15.00,10665.00,853.20,9811.80,7110.00,2701.80,12/1/2014,12,December,2014 +Enterprise,Mexico,Paseo,1114,125.00,139250.00,11140.00,128110.00,133680.00,-5570.00,3/1/2014,3,March,2014 +Government,Germany,Paseo,1259,7.00,8813.00,705.04,8107.96,6295.00,1812.96,4/1/2014,4,April,2014 +Government,Germany,Paseo,1095,7.00,7665.00,613.20,7051.80,5475.00,1576.80,5/1/2014,5,May,2014 +Government,Germany,Paseo,1366,20.00,27320.00,2185.60,25134.40,13660.00,11474.40,6/1/2014,6,June,2014 +Small Business,Mexico,Paseo,2460,300.00,738000.00,59040.00,678960.00,615000.00,63960.00,6/1/2014,6,June,2014 +Government,United States of America,Paseo,678,7.00,4746.00,379.68,4366.32,3390.00,976.32,8/1/2014,8,August,2014 +Government,Germany,Paseo,1598,7.00,11186.00,894.88,10291.12,7990.00,2301.12,8/1/2014,8,August,2014 +Government,Germany,Paseo,2409,7.00,16863.00,1349.04,15513.96,12045.00,3468.96,9/1/2013,9,September,2013 +Government,Germany,Paseo,1934,20.00,38680.00,3094.40,35585.60,19340.00,16245.60,9/1/2014,9,September,2014 
+Government,Mexico,Paseo,2993,20.00,59860.00,4788.80,55071.20,29930.00,25141.20,9/1/2014,9,September,2014 +Government,Germany,Paseo,2146,350.00,751100.00,60088.00,691012.00,557960.00,133052.00,11/1/2013,11,November,2013 +Government,Mexico,Paseo,1946,7.00,13622.00,1089.76,12532.24,9730.00,2802.24,12/1/2013,12,December,2013 +Government,Mexico,Paseo,1362,350.00,476700.00,38136.00,438564.00,354120.00,84444.00,12/1/2014,12,December,2014 +Channel Partners,Canada,Velo,598,12.00,7176.00,574.08,6601.92,1794.00,4807.92,3/1/2014,3,March,2014 +Government,United States of America,Velo,2907,7.00,20349.00,1627.92,18721.08,14535.00,4186.08,6/1/2014,6,June,2014 +Government,Germany,Velo,2338,7.00,16366.00,1309.28,15056.72,11690.00,3366.72,6/1/2014,6,June,2014 +Small Business,France,Velo,386,300.00,115800.00,9264.00,106536.00,96500.00,10036.00,11/1/2013,11,November,2013 +Small Business,Mexico,Velo,635,300.00,190500.00,15240.00,175260.00,158750.00,16510.00,12/1/2014,12,December,2014 +Government,France,VTT,574.5,350.00,201075.00,16086.00,184989.00,149370.00,35619.00,4/1/2014,4,April,2014 +Government,Germany,VTT,2338,7.00,16366.00,1309.28,15056.72,11690.00,3366.72,6/1/2014,6,June,2014 +Government,France,VTT,381,350.00,133350.00,10668.00,122682.00,99060.00,23622.00,8/1/2014,8,August,2014 +Government,Germany,VTT,422,350.00,147700.00,11816.00,135884.00,109720.00,26164.00,8/1/2014,8,August,2014 +Small Business,Canada,VTT,2134,300.00,640200.00,51216.00,588984.00,533500.00,55484.00,9/1/2014,9,September,2014 +Small Business,United States of America,VTT,808,300.00,242400.00,19392.00,223008.00,202000.00,21008.00,12/1/2013,12,December,2013 +Government,Canada,Amarilla,708,20.00,14160.00,1132.80,13027.20,7080.00,5947.20,6/1/2014,6,June,2014 +Government,United States of America,Amarilla,2907,7.00,20349.00,1627.92,18721.08,14535.00,4186.08,6/1/2014,6,June,2014 +Government,Germany,Amarilla,1366,20.00,27320.00,2185.60,25134.40,13660.00,11474.40,6/1/2014,6,June,2014 +Small Business,Mexico,Amarilla,2460,300.00,738000.00,59040.00,678960.00,615000.00,63960.00,6/1/2014,6,June,2014 +Government,Germany,Amarilla,1520,20.00,30400.00,2432.00,27968.00,15200.00,12768.00,11/1/2014,11,November,2014 +Midmarket,Germany,Amarilla,711,15.00,10665.00,853.20,9811.80,7110.00,2701.80,12/1/2014,12,December,2014 +Channel Partners,Mexico,Amarilla,1375,12.00,16500.00,1320.00,15180.00,4125.00,11055.00,12/1/2013,12,December,2013 +Small Business,Mexico,Amarilla,635,300.00,190500.00,15240.00,175260.00,158750.00,16510.00,12/1/2014,12,December,2014 +Government,United States of America,VTT,436.5,20.00,8730.00,698.40,8031.60,4365.00,3666.60,7/1/2014,7,July,2014 +Small Business,Canada,Carretera,1094,300.00,328200.00,29538.00,298662.00,273500.00,25162.00,6/1/2014,6,June,2014 +Channel Partners,Mexico,Carretera,367,12.00,4404.00,396.36,4007.64,1101.00,2906.64,10/1/2013,10,October,2013 +Small Business,Canada,Montana,3802.5,300.00,1140750.00,102667.50,1038082.50,950625.00,87457.50,4/1/2014,4,April,2014 +Government,France,Montana,1666,350.00,583100.00,52479.00,530621.00,433160.00,97461.00,5/1/2014,5,May,2014 +Small Business,France,Montana,322,300.00,96600.00,8694.00,87906.00,80500.00,7406.00,9/1/2013,9,September,2013 +Channel Partners,Canada,Montana,2321,12.00,27852.00,2506.68,25345.32,6963.00,18382.32,11/1/2014,11,November,2014 +Enterprise,France,Montana,1857,125.00,232125.00,20891.25,211233.75,222840.00,-11606.25,11/1/2013,11,November,2013 +Government,Canada,Montana,1611,7.00,11277.00,1014.93,10262.07,8055.00,2207.07,12/1/2013,12,December,2013 +Enterprise,United 
States of America,Montana,2797,125.00,349625.00,31466.25,318158.75,335640.00,-17481.25,12/1/2014,12,December,2014 +Small Business,Germany,Montana,334,300.00,100200.00,9018.00,91182.00,83500.00,7682.00,12/1/2013,12,December,2013 +Small Business,Mexico,Paseo,2565,300.00,769500.00,69255.00,700245.00,641250.00,58995.00,1/1/2014,1,January,2014 +Government,Mexico,Paseo,2417,350.00,845950.00,76135.50,769814.50,628420.00,141394.50,1/1/2014,1,January,2014 +Midmarket,United States of America,Paseo,3675,15.00,55125.00,4961.25,50163.75,36750.00,13413.75,4/1/2014,4,April,2014 +Small Business,Canada,Paseo,1094,300.00,328200.00,29538.00,298662.00,273500.00,25162.00,6/1/2014,6,June,2014 +Midmarket,France,Paseo,1227,15.00,18405.00,1656.45,16748.55,12270.00,4478.55,10/1/2014,10,October,2014 +Channel Partners,Mexico,Paseo,367,12.00,4404.00,396.36,4007.64,1101.00,2906.64,10/1/2013,10,October,2013 +Small Business,France,Paseo,1324,300.00,397200.00,35748.00,361452.00,331000.00,30452.00,11/1/2014,11,November,2014 +Channel Partners,Germany,Paseo,1775,12.00,21300.00,1917.00,19383.00,5325.00,14058.00,11/1/2013,11,November,2013 +Enterprise,United States of America,Paseo,2797,125.00,349625.00,31466.25,318158.75,335640.00,-17481.25,12/1/2014,12,December,2014 +Midmarket,Mexico,Velo,245,15.00,3675.00,330.75,3344.25,2450.00,894.25,5/1/2014,5,May,2014 +Small Business,Canada,Velo,3793.5,300.00,1138050.00,102424.50,1035625.50,948375.00,87250.50,7/1/2014,7,July,2014 +Government,Germany,Velo,1307,350.00,457450.00,41170.50,416279.50,339820.00,76459.50,7/1/2014,7,July,2014 +Enterprise,Canada,Velo,567,125.00,70875.00,6378.75,64496.25,68040.00,-3543.75,9/1/2014,9,September,2014 +Enterprise,Mexico,Velo,2110,125.00,263750.00,23737.50,240012.50,253200.00,-13187.50,9/1/2014,9,September,2014 +Government,Canada,Velo,1269,350.00,444150.00,39973.50,404176.50,329940.00,74236.50,10/1/2014,10,October,2014 +Channel Partners,United States of America,VTT,1956,12.00,23472.00,2112.48,21359.52,5868.00,15491.52,1/1/2014,1,January,2014 +Small Business,Germany,VTT,2659,300.00,797700.00,71793.00,725907.00,664750.00,61157.00,2/1/2014,2,February,2014 +Government,United States of America,VTT,1351.5,350.00,473025.00,42572.25,430452.75,351390.00,79062.75,4/1/2014,4,April,2014 +Channel Partners,Germany,VTT,880,12.00,10560.00,950.40,9609.60,2640.00,6969.60,5/1/2014,5,May,2014 +Small Business,United States of America,VTT,1867,300.00,560100.00,50409.00,509691.00,466750.00,42941.00,9/1/2014,9,September,2014 +Channel Partners,France,VTT,2234,12.00,26808.00,2412.72,24395.28,6702.00,17693.28,9/1/2013,9,September,2013 +Midmarket,France,VTT,1227,15.00,18405.00,1656.45,16748.55,12270.00,4478.55,10/1/2014,10,October,2014 +Enterprise,Mexico,VTT,877,125.00,109625.00,9866.25,99758.75,105240.00,-5481.25,11/1/2014,11,November,2014 +Government,United States of America,Amarilla,2071,350.00,724850.00,65236.50,659613.50,538460.00,121153.50,9/1/2014,9,September,2014 +Government,Canada,Amarilla,1269,350.00,444150.00,39973.50,404176.50,329940.00,74236.50,10/1/2014,10,October,2014 +Midmarket,Germany,Amarilla,970,15.00,14550.00,1309.50,13240.50,9700.00,3540.50,11/1/2013,11,November,2013 +Government,Mexico,Amarilla,1694,20.00,33880.00,3049.20,30830.80,16940.00,13890.80,11/1/2014,11,November,2014 +Government,Germany,Carretera,663,20.00,13260.00,1193.40,12066.60,6630.00,5436.60,5/1/2014,5,May,2014 +Government,Canada,Carretera,819,7.00,5733.00,515.97,5217.03,4095.00,1122.03,7/1/2014,7,July,2014 +Channel 
Partners,Germany,Carretera,1580,12.00,18960.00,1706.40,17253.60,4740.00,12513.60,9/1/2014,9,September,2014 +Government,Mexico,Carretera,521,7.00,3647.00,328.23,3318.77,2605.00,713.77,12/1/2014,12,December,2014 +Government,United States of America,Paseo,973,20.00,19460.00,1751.40,17708.60,9730.00,7978.60,3/1/2014,3,March,2014 +Government,Mexico,Paseo,1038,20.00,20760.00,1868.40,18891.60,10380.00,8511.60,6/1/2014,6,June,2014 +Government,Germany,Paseo,360,7.00,2520.00,226.80,2293.20,1800.00,493.20,10/1/2014,10,October,2014 +Channel Partners,France,Velo,1967,12.00,23604.00,2124.36,21479.64,5901.00,15578.64,3/1/2014,3,March,2014 +Midmarket,Mexico,Velo,2628,15.00,39420.00,3547.80,35872.20,26280.00,9592.20,4/1/2014,4,April,2014 +Government,Germany,VTT,360,7.00,2520.00,226.80,2293.20,1800.00,493.20,10/1/2014,10,October,2014 +Government,France,VTT,2682,20.00,53640.00,4827.60,48812.40,26820.00,21992.40,11/1/2013,11,November,2013 +Government,Mexico,VTT,521,7.00,3647.00,328.23,3318.77,2605.00,713.77,12/1/2014,12,December,2014 +Government,Mexico,Amarilla,1038,20.00,20760.00,1868.40,18891.60,10380.00,8511.60,6/1/2014,6,June,2014 +Midmarket,Canada,Amarilla,1630.5,15.00,24457.50,2201.18,22256.33,16305.00,5951.33,7/1/2014,7,July,2014 +Channel Partners,France,Amarilla,306,12.00,3672.00,330.48,3341.52,918.00,2423.52,12/1/2013,12,December,2013 +Channel Partners,United States of America,Carretera,386,12.00,4632.00,463.20,4168.80,1158.00,3010.80,10/1/2013,10,October,2013 +Government,United States of America,Montana,2328,7.00,16296.00,1629.60,14666.40,11640.00,3026.40,9/1/2014,9,September,2014 +Channel Partners,United States of America,Paseo,386,12.00,4632.00,463.20,4168.80,1158.00,3010.80,10/1/2013,10,October,2013 +Enterprise,United States of America,Carretera,3445.5,125.00,430687.50,43068.75,387618.75,413460.00,-25841.25,4/1/2014,4,April,2014 +Enterprise,France,Carretera,1482,125.00,185250.00,18525.00,166725.00,177840.00,-11115.00,12/1/2013,12,December,2013 +Government,United States of America,Montana,2313,350.00,809550.00,80955.00,728595.00,601380.00,127215.00,5/1/2014,5,May,2014 +Enterprise,United States of America,Montana,1804,125.00,225500.00,22550.00,202950.00,216480.00,-13530.00,11/1/2013,11,November,2013 +Midmarket,France,Montana,2072,15.00,31080.00,3108.00,27972.00,20720.00,7252.00,12/1/2014,12,December,2014 +Government,France,Paseo,1954,20.00,39080.00,3908.00,35172.00,19540.00,15632.00,3/1/2014,3,March,2014 +Small Business,Mexico,Paseo,591,300.00,177300.00,17730.00,159570.00,147750.00,11820.00,5/1/2014,5,May,2014 +Midmarket,France,Paseo,2167,15.00,32505.00,3250.50,29254.50,21670.00,7584.50,10/1/2013,10,October,2013 +Government,Germany,Paseo,241,20.00,4820.00,482.00,4338.00,2410.00,1928.00,10/1/2014,10,October,2014 +Midmarket,Germany,Velo,681,15.00,10215.00,1021.50,9193.50,6810.00,2383.50,1/1/2014,1,January,2014 +Midmarket,Germany,Velo,510,15.00,7650.00,765.00,6885.00,5100.00,1785.00,4/1/2014,4,April,2014 +Midmarket,United States of America,Velo,790,15.00,11850.00,1185.00,10665.00,7900.00,2765.00,5/1/2014,5,May,2014 +Government,France,Velo,639,350.00,223650.00,22365.00,201285.00,166140.00,35145.00,7/1/2014,7,July,2014 +Enterprise,United States of America,Velo,1596,125.00,199500.00,19950.00,179550.00,191520.00,-11970.00,9/1/2014,9,September,2014 +Small Business,United States of America,Velo,2294,300.00,688200.00,68820.00,619380.00,573500.00,45880.00,10/1/2013,10,October,2013 +Government,Germany,Velo,241,20.00,4820.00,482.00,4338.00,2410.00,1928.00,10/1/2014,10,October,2014 
+Government,Germany,Velo,2665,7.00,18655.00,1865.50,16789.50,13325.00,3464.50,11/1/2014,11,November,2014 +Enterprise,Canada,Velo,1916,125.00,239500.00,23950.00,215550.00,229920.00,-14370.00,12/1/2013,12,December,2013 +Small Business,France,Velo,853,300.00,255900.00,25590.00,230310.00,213250.00,17060.00,12/1/2014,12,December,2014 +Enterprise,Mexico,VTT,341,125.00,42625.00,4262.50,38362.50,40920.00,-2557.50,5/1/2014,5,May,2014 +Midmarket,Mexico,VTT,641,15.00,9615.00,961.50,8653.50,6410.00,2243.50,7/1/2014,7,July,2014 +Government,United States of America,VTT,2807,350.00,982450.00,98245.00,884205.00,729820.00,154385.00,8/1/2014,8,August,2014 +Small Business,Mexico,VTT,432,300.00,129600.00,12960.00,116640.00,108000.00,8640.00,9/1/2014,9,September,2014 +Small Business,United States of America,VTT,2294,300.00,688200.00,68820.00,619380.00,573500.00,45880.00,10/1/2013,10,October,2013 +Midmarket,France,VTT,2167,15.00,32505.00,3250.50,29254.50,21670.00,7584.50,10/1/2013,10,October,2013 +Enterprise,Canada,VTT,2529,125.00,316125.00,31612.50,284512.50,303480.00,-18967.50,11/1/2014,11,November,2014 +Government,Germany,VTT,1870,350.00,654500.00,65450.00,589050.00,486200.00,102850.00,12/1/2013,12,December,2013 +Enterprise,United States of America,Amarilla,579,125.00,72375.00,7237.50,65137.50,69480.00,-4342.50,1/1/2014,1,January,2014 +Government,Canada,Amarilla,2240,350.00,784000.00,78400.00,705600.00,582400.00,123200.00,2/1/2014,2,February,2014 +Small Business,United States of America,Amarilla,2993,300.00,897900.00,89790.00,808110.00,748250.00,59860.00,3/1/2014,3,March,2014 +Channel Partners,Canada,Amarilla,3520.5,12.00,42246.00,4224.60,38021.40,10561.50,27459.90,4/1/2014,4,April,2014 +Government,Mexico,Amarilla,2039,20.00,40780.00,4078.00,36702.00,20390.00,16312.00,5/1/2014,5,May,2014 +Channel Partners,Germany,Amarilla,2574,12.00,30888.00,3088.80,27799.20,7722.00,20077.20,8/1/2014,8,August,2014 +Government,Canada,Amarilla,707,350.00,247450.00,24745.00,222705.00,183820.00,38885.00,9/1/2014,9,September,2014 +Midmarket,France,Amarilla,2072,15.00,31080.00,3108.00,27972.00,20720.00,7252.00,12/1/2014,12,December,2014 +Small Business,France,Amarilla,853,300.00,255900.00,25590.00,230310.00,213250.00,17060.00,12/1/2014,12,December,2014 +Channel Partners,France,Carretera,1198,12.00,14376.00,1581.36,12794.64,3594.00,9200.64,10/1/2013,10,October,2013 +Government,France,Paseo,2532,7.00,17724.00,1949.64,15774.36,12660.00,3114.36,4/1/2014,4,April,2014 +Channel Partners,France,Paseo,1198,12.00,14376.00,1581.36,12794.64,3594.00,9200.64,10/1/2013,10,October,2013 +Midmarket,Canada,Velo,384,15.00,5760.00,633.60,5126.40,3840.00,1286.40,1/1/2014,1,January,2014 +Channel Partners,Germany,Velo,472,12.00,5664.00,623.04,5040.96,1416.00,3624.96,10/1/2014,10,October,2014 +Government,United States of America,VTT,1579,7.00,11053.00,1215.83,9837.17,7895.00,1942.17,3/1/2014,3,March,2014 +Channel Partners,Mexico,VTT,1005,12.00,12060.00,1326.60,10733.40,3015.00,7718.40,9/1/2013,9,September,2013 +Midmarket,United States of America,Amarilla,3199.5,15.00,47992.50,5279.18,42713.33,31995.00,10718.33,7/1/2014,7,July,2014 +Channel Partners,Germany,Amarilla,472,12.00,5664.00,623.04,5040.96,1416.00,3624.96,10/1/2014,10,October,2014 +Channel Partners,Canada,Carretera,1937,12.00,23244.00,2556.84,20687.16,5811.00,14876.16,2/1/2014,2,February,2014 +Government,Germany,Carretera,792,350.00,277200.00,30492.00,246708.00,205920.00,40788.00,3/1/2014,3,March,2014 +Small 
Business,Germany,Carretera,2811,300.00,843300.00,92763.00,750537.00,702750.00,47787.00,7/1/2014,7,July,2014 +Enterprise,France,Carretera,2441,125.00,305125.00,33563.75,271561.25,292920.00,-21358.75,10/1/2014,10,October,2014 +Midmarket,Canada,Carretera,1560,15.00,23400.00,2574.00,20826.00,15600.00,5226.00,11/1/2013,11,November,2013 +Government,Mexico,Carretera,2706,7.00,18942.00,2083.62,16858.38,13530.00,3328.38,11/1/2013,11,November,2013 +Government,Germany,Montana,766,350.00,268100.00,29491.00,238609.00,199160.00,39449.00,1/1/2014,1,January,2014 +Government,Germany,Montana,2992,20.00,59840.00,6582.40,53257.60,29920.00,23337.60,10/1/2013,10,October,2013 +Midmarket,Mexico,Montana,2157,15.00,32355.00,3559.05,28795.95,21570.00,7225.95,12/1/2014,12,December,2014 +Small Business,Canada,Paseo,873,300.00,261900.00,28809.00,233091.00,218250.00,14841.00,1/1/2014,1,January,2014 +Government,Mexico,Paseo,1122,20.00,22440.00,2468.40,19971.60,11220.00,8751.60,3/1/2014,3,March,2014 +Government,Canada,Paseo,2104.5,350.00,736575.00,81023.25,655551.75,547170.00,108381.75,7/1/2014,7,July,2014 +Channel Partners,Canada,Paseo,4026,12.00,48312.00,5314.32,42997.68,12078.00,30919.68,7/1/2014,7,July,2014 +Channel Partners,France,Paseo,2425.5,12.00,29106.00,3201.66,25904.34,7276.50,18627.84,7/1/2014,7,July,2014 +Government,Canada,Paseo,2394,20.00,47880.00,5266.80,42613.20,23940.00,18673.20,8/1/2014,8,August,2014 +Midmarket,Mexico,Paseo,1984,15.00,29760.00,3273.60,26486.40,19840.00,6646.40,8/1/2014,8,August,2014 +Enterprise,France,Paseo,2441,125.00,305125.00,33563.75,271561.25,292920.00,-21358.75,10/1/2014,10,October,2014 +Government,Germany,Paseo,2992,20.00,59840.00,6582.40,53257.60,29920.00,23337.60,10/1/2013,10,October,2013 +Small Business,Canada,Paseo,1366,300.00,409800.00,45078.00,364722.00,341500.00,23222.00,11/1/2014,11,November,2014 +Government,France,Velo,2805,20.00,56100.00,6171.00,49929.00,28050.00,21879.00,9/1/2013,9,September,2013 +Midmarket,Mexico,Velo,655,15.00,9825.00,1080.75,8744.25,6550.00,2194.25,9/1/2013,9,September,2013 +Government,Mexico,Velo,344,350.00,120400.00,13244.00,107156.00,89440.00,17716.00,10/1/2013,10,October,2013 +Government,Canada,Velo,1808,7.00,12656.00,1392.16,11263.84,9040.00,2223.84,11/1/2014,11,November,2014 +Channel Partners,France,VTT,1734,12.00,20808.00,2288.88,18519.12,5202.00,13317.12,1/1/2014,1,January,2014 +Enterprise,Mexico,VTT,554,125.00,69250.00,7617.50,61632.50,66480.00,-4847.50,1/1/2014,1,January,2014 +Government,Canada,VTT,2935,20.00,58700.00,6457.00,52243.00,29350.00,22893.00,11/1/2013,11,November,2013 +Enterprise,Germany,Amarilla,3165,125.00,395625.00,43518.75,352106.25,379800.00,-27693.75,1/1/2014,1,January,2014 +Government,Mexico,Amarilla,2629,20.00,52580.00,5783.80,46796.20,26290.00,20506.20,1/1/2014,1,January,2014 +Enterprise,France,Amarilla,1433,125.00,179125.00,19703.75,159421.25,171960.00,-12538.75,5/1/2014,5,May,2014 +Enterprise,Mexico,Amarilla,947,125.00,118375.00,13021.25,105353.75,113640.00,-8286.25,9/1/2013,9,September,2013 +Government,Mexico,Amarilla,344,350.00,120400.00,13244.00,107156.00,89440.00,17716.00,10/1/2013,10,October,2013 +Midmarket,Mexico,Amarilla,2157,15.00,32355.00,3559.05,28795.95,21570.00,7225.95,12/1/2014,12,December,2014 +Government,United States of America,Paseo,380,7.00,2660.00,292.60,2367.40,1900.00,467.40,9/1/2013,9,September,2013 +Government,Mexico,Carretera,886,350.00,310100.00,37212.00,272888.00,230360.00,42528.00,6/1/2014,6,June,2014 
+Enterprise,Canada,Carretera,2416,125.00,302000.00,36240.00,265760.00,289920.00,-24160.00,9/1/2013,9,September,2013 +Enterprise,Mexico,Carretera,2156,125.00,269500.00,32340.00,237160.00,258720.00,-21560.00,10/1/2014,10,October,2014 +Midmarket,Canada,Carretera,2689,15.00,40335.00,4840.20,35494.80,26890.00,8604.80,11/1/2014,11,November,2014 +Midmarket,United States of America,Montana,677,15.00,10155.00,1218.60,8936.40,6770.00,2166.40,3/1/2014,3,March,2014 +Small Business,France,Montana,1773,300.00,531900.00,63828.00,468072.00,443250.00,24822.00,4/1/2014,4,April,2014 +Government,Mexico,Montana,2420,7.00,16940.00,2032.80,14907.20,12100.00,2807.20,9/1/2014,9,September,2014 +Government,Canada,Montana,2734,7.00,19138.00,2296.56,16841.44,13670.00,3171.44,10/1/2014,10,October,2014 +Government,Mexico,Montana,1715,20.00,34300.00,4116.00,30184.00,17150.00,13034.00,10/1/2013,10,October,2013 +Small Business,France,Montana,1186,300.00,355800.00,42696.00,313104.00,296500.00,16604.00,12/1/2013,12,December,2013 +Small Business,United States of America,Paseo,3495,300.00,1048500.00,125820.00,922680.00,873750.00,48930.00,1/1/2014,1,January,2014 +Government,Mexico,Paseo,886,350.00,310100.00,37212.00,272888.00,230360.00,42528.00,6/1/2014,6,June,2014 +Enterprise,Mexico,Paseo,2156,125.00,269500.00,32340.00,237160.00,258720.00,-21560.00,10/1/2014,10,October,2014 +Government,Mexico,Paseo,905,20.00,18100.00,2172.00,15928.00,9050.00,6878.00,10/1/2014,10,October,2014 +Government,Mexico,Paseo,1715,20.00,34300.00,4116.00,30184.00,17150.00,13034.00,10/1/2013,10,October,2013 +Government,France,Paseo,1594,350.00,557900.00,66948.00,490952.00,414440.00,76512.00,11/1/2014,11,November,2014 +Small Business,Germany,Paseo,1359,300.00,407700.00,48924.00,358776.00,339750.00,19026.00,11/1/2014,11,November,2014 +Small Business,Mexico,Paseo,2150,300.00,645000.00,77400.00,567600.00,537500.00,30100.00,11/1/2014,11,November,2014 +Government,Mexico,Paseo,1197,350.00,418950.00,50274.00,368676.00,311220.00,57456.00,11/1/2014,11,November,2014 +Midmarket,Mexico,Paseo,380,15.00,5700.00,684.00,5016.00,3800.00,1216.00,12/1/2013,12,December,2013 +Government,Mexico,Paseo,1233,20.00,24660.00,2959.20,21700.80,12330.00,9370.80,12/1/2014,12,December,2014 +Government,Mexico,Velo,1395,350.00,488250.00,58590.00,429660.00,362700.00,66960.00,7/1/2014,7,July,2014 +Government,United States of America,Velo,986,350.00,345100.00,41412.00,303688.00,256360.00,47328.00,10/1/2014,10,October,2014 +Government,Mexico,Velo,905,20.00,18100.00,2172.00,15928.00,9050.00,6878.00,10/1/2014,10,October,2014 +Channel Partners,Canada,VTT,2109,12.00,25308.00,3036.96,22271.04,6327.00,15944.04,5/1/2014,5,May,2014 +Midmarket,France,VTT,3874.5,15.00,58117.50,6974.10,51143.40,38745.00,12398.40,7/1/2014,7,July,2014 +Government,Canada,VTT,623,350.00,218050.00,26166.00,191884.00,161980.00,29904.00,9/1/2013,9,September,2013 +Government,United States of America,VTT,986,350.00,345100.00,41412.00,303688.00,256360.00,47328.00,10/1/2014,10,October,2014 +Enterprise,United States of America,VTT,2387,125.00,298375.00,35805.00,262570.00,286440.00,-23870.00,11/1/2014,11,November,2014 +Government,Mexico,VTT,1233,20.00,24660.00,2959.20,21700.80,12330.00,9370.80,12/1/2014,12,December,2014 +Government,United States of America,Amarilla,270,350.00,94500.00,11340.00,83160.00,70200.00,12960.00,2/1/2014,2,February,2014 +Government,France,Amarilla,3421.5,7.00,23950.50,2874.06,21076.44,17107.50,3968.94,7/1/2014,7,July,2014 
+Government,Canada,Amarilla,2734,7.00,19138.00,2296.56,16841.44,13670.00,3171.44,10/1/2014,10,October,2014 +Midmarket,United States of America,Amarilla,2548,15.00,38220.00,4586.40,33633.60,25480.00,8153.60,11/1/2013,11,November,2013 +Government,France,Carretera,2521.5,20.00,50430.00,6051.60,44378.40,25215.00,19163.40,1/1/2014,1,January,2014 +Channel Partners,Mexico,Montana,2661,12.00,31932.00,3831.84,28100.16,7983.00,20117.16,5/1/2014,5,May,2014 +Government,Germany,Paseo,1531,20.00,30620.00,3674.40,26945.60,15310.00,11635.60,12/1/2014,12,December,2014 +Government,France,VTT,1491,7.00,10437.00,1252.44,9184.56,7455.00,1729.56,3/1/2014,3,March,2014 +Government,Germany,VTT,1531,20.00,30620.00,3674.40,26945.60,15310.00,11635.60,12/1/2014,12,December,2014 +Channel Partners,Canada,Amarilla,2761,12.00,33132.00,3975.84,29156.16,8283.00,20873.16,9/1/2013,9,September,2013 +Midmarket,United States of America,Carretera,2567,15.00,38505.00,5005.65,33499.35,25670.00,7829.35,6/1/2014,6,June,2014 +Midmarket,United States of America,VTT,2567,15.00,38505.00,5005.65,33499.35,25670.00,7829.35,6/1/2014,6,June,2014 +Government,Canada,Carretera,923,350.00,323050.00,41996.50,281053.50,239980.00,41073.50,3/1/2014,3,March,2014 +Government,France,Carretera,1790,350.00,626500.00,81445.00,545055.00,465400.00,79655.00,3/1/2014,3,March,2014 +Government,Germany,Carretera,442,20.00,8840.00,1149.20,7690.80,4420.00,3270.80,9/1/2013,9,September,2013 +Government,United States of America,Montana,982.5,350.00,343875.00,44703.75,299171.25,255450.00,43721.25,1/1/2014,1,January,2014 +Government,United States of America,Montana,1298,7.00,9086.00,1181.18,7904.82,6490.00,1414.82,2/1/2014,2,February,2014 +Channel Partners,Mexico,Montana,604,12.00,7248.00,942.24,6305.76,1812.00,4493.76,6/1/2014,6,June,2014 +Government,Mexico,Montana,2255,20.00,45100.00,5863.00,39237.00,22550.00,16687.00,7/1/2014,7,July,2014 +Government,Canada,Montana,1249,20.00,24980.00,3247.40,21732.60,12490.00,9242.60,10/1/2014,10,October,2014 +Government,United States of America,Paseo,1438.5,7.00,10069.50,1309.04,8760.47,7192.50,1567.97,1/1/2014,1,January,2014 +Small Business,Germany,Paseo,807,300.00,242100.00,31473.00,210627.00,201750.00,8877.00,1/1/2014,1,January,2014 +Government,United States of America,Paseo,2641,20.00,52820.00,6866.60,45953.40,26410.00,19543.40,2/1/2014,2,February,2014 +Government,Germany,Paseo,2708,20.00,54160.00,7040.80,47119.20,27080.00,20039.20,2/1/2014,2,February,2014 +Government,Canada,Paseo,2632,350.00,921200.00,119756.00,801444.00,684320.00,117124.00,6/1/2014,6,June,2014 +Enterprise,Canada,Paseo,1583,125.00,197875.00,25723.75,172151.25,189960.00,-17808.75,6/1/2014,6,June,2014 +Channel Partners,Mexico,Paseo,571,12.00,6852.00,890.76,5961.24,1713.00,4248.24,7/1/2014,7,July,2014 +Government,France,Paseo,2696,7.00,18872.00,2453.36,16418.64,13480.00,2938.64,8/1/2014,8,August,2014 +Midmarket,Canada,Paseo,1565,15.00,23475.00,3051.75,20423.25,15650.00,4773.25,10/1/2014,10,October,2014 +Government,Canada,Paseo,1249,20.00,24980.00,3247.40,21732.60,12490.00,9242.60,10/1/2014,10,October,2014 +Government,Germany,Paseo,357,350.00,124950.00,16243.50,108706.50,92820.00,15886.50,11/1/2014,11,November,2014 +Channel Partners,Germany,Paseo,1013,12.00,12156.00,1580.28,10575.72,3039.00,7536.72,12/1/2014,12,December,2014 +Midmarket,France,Velo,3997.5,15.00,59962.50,7795.13,52167.38,39975.00,12192.38,1/1/2014,1,January,2014 +Government,Canada,Velo,2632,350.00,921200.00,119756.00,801444.00,684320.00,117124.00,6/1/2014,6,June,2014 
+Government,France,Velo,1190,7.00,8330.00,1082.90,7247.10,5950.00,1297.10,6/1/2014,6,June,2014 +Channel Partners,Mexico,Velo,604,12.00,7248.00,942.24,6305.76,1812.00,4493.76,6/1/2014,6,June,2014 +Midmarket,Germany,Velo,660,15.00,9900.00,1287.00,8613.00,6600.00,2013.00,9/1/2013,9,September,2013 +Channel Partners,Mexico,Velo,410,12.00,4920.00,639.60,4280.40,1230.00,3050.40,10/1/2014,10,October,2014 +Small Business,Mexico,Velo,2605,300.00,781500.00,101595.00,679905.00,651250.00,28655.00,11/1/2013,11,November,2013 +Channel Partners,Germany,Velo,1013,12.00,12156.00,1580.28,10575.72,3039.00,7536.72,12/1/2014,12,December,2014 +Enterprise,Canada,VTT,1583,125.00,197875.00,25723.75,172151.25,189960.00,-17808.75,6/1/2014,6,June,2014 +Midmarket,Canada,VTT,1565,15.00,23475.00,3051.75,20423.25,15650.00,4773.25,10/1/2014,10,October,2014 +Enterprise,Canada,Amarilla,1659,125.00,207375.00,26958.75,180416.25,199080.00,-18663.75,1/1/2014,1,January,2014 +Government,France,Amarilla,1190,7.00,8330.00,1082.90,7247.10,5950.00,1297.10,6/1/2014,6,June,2014 +Channel Partners,Mexico,Amarilla,410,12.00,4920.00,639.60,4280.40,1230.00,3050.40,10/1/2014,10,October,2014 +Channel Partners,Germany,Amarilla,1770,12.00,21240.00,2761.20,18478.80,5310.00,13168.80,12/1/2013,12,December,2013 +Government,Mexico,Carretera,2579,20.00,51580.00,7221.20,44358.80,25790.00,18568.80,4/1/2014,4,April,2014 +Government,United States of America,Carretera,1743,20.00,34860.00,4880.40,29979.60,17430.00,12549.60,5/1/2014,5,May,2014 +Government,United States of America,Carretera,2996,7.00,20972.00,2936.08,18035.92,14980.00,3055.92,10/1/2013,10,October,2013 +Government,Germany,Carretera,280,7.00,1960.00,274.40,1685.60,1400.00,285.60,12/1/2014,12,December,2014 +Government,France,Montana,293,7.00,2051.00,287.14,1763.86,1465.00,298.86,2/1/2014,2,February,2014 +Government,United States of America,Montana,2996,7.00,20972.00,2936.08,18035.92,14980.00,3055.92,10/1/2013,10,October,2013 +Midmarket,Germany,Paseo,278,15.00,4170.00,583.80,3586.20,2780.00,806.20,2/1/2014,2,February,2014 +Government,Canada,Paseo,2428,20.00,48560.00,6798.40,41761.60,24280.00,17481.60,3/1/2014,3,March,2014 +Midmarket,United States of America,Paseo,1767,15.00,26505.00,3710.70,22794.30,17670.00,5124.30,9/1/2014,9,September,2014 +Channel Partners,France,Paseo,1393,12.00,16716.00,2340.24,14375.76,4179.00,10196.76,10/1/2014,10,October,2014 +Government,Germany,VTT,280,7.00,1960.00,274.40,1685.60,1400.00,285.60,12/1/2014,12,December,2014 +Channel Partners,France,Amarilla,1393,12.00,16716.00,2340.24,14375.76,4179.00,10196.76,10/1/2014,10,October,2014 +Channel Partners,United States of America,Amarilla,2015,12.00,24180.00,3385.20,20794.80,6045.00,14749.80,12/1/2013,12,December,2013 +Small Business,Mexico,Carretera,801,300.00,240300.00,33642.00,206658.00,200250.00,6408.00,7/1/2014,7,July,2014 +Enterprise,France,Carretera,1023,125.00,127875.00,17902.50,109972.50,122760.00,-12787.50,9/1/2013,9,September,2013 +Small Business,Canada,Carretera,1496,300.00,448800.00,62832.00,385968.00,374000.00,11968.00,10/1/2014,10,October,2014 +Small Business,United States of America,Carretera,1010,300.00,303000.00,42420.00,260580.00,252500.00,8080.00,10/1/2014,10,October,2014 +Midmarket,Germany,Carretera,1513,15.00,22695.00,3177.30,19517.70,15130.00,4387.70,11/1/2014,11,November,2014 +Midmarket,Canada,Carretera,2300,15.00,34500.00,4830.00,29670.00,23000.00,6670.00,12/1/2014,12,December,2014 +Enterprise,Mexico,Carretera,2821,125.00,352625.00,49367.50,303257.50,338520.00,-35262.50,12/1/2013,12,December,2013 
+Government,Canada,Montana,2227.5,350.00,779625.00,109147.50,670477.50,579150.00,91327.50,1/1/2014,1,January,2014 +Government,Germany,Montana,1199,350.00,419650.00,58751.00,360899.00,311740.00,49159.00,4/1/2014,4,April,2014 +Government,Canada,Montana,200,350.00,70000.00,9800.00,60200.00,52000.00,8200.00,5/1/2014,5,May,2014 +Government,Canada,Montana,388,7.00,2716.00,380.24,2335.76,1940.00,395.76,9/1/2014,9,September,2014 +Government,Mexico,Montana,1727,7.00,12089.00,1692.46,10396.54,8635.00,1761.54,10/1/2013,10,October,2013 +Midmarket,Canada,Montana,2300,15.00,34500.00,4830.00,29670.00,23000.00,6670.00,12/1/2014,12,December,2014 +Government,Mexico,Paseo,260,20.00,5200.00,728.00,4472.00,2600.00,1872.00,2/1/2014,2,February,2014 +Midmarket,Canada,Paseo,2470,15.00,37050.00,5187.00,31863.00,24700.00,7163.00,9/1/2013,9,September,2013 +Midmarket,Canada,Paseo,1743,15.00,26145.00,3660.30,22484.70,17430.00,5054.70,10/1/2013,10,October,2013 +Channel Partners,United States of America,Paseo,2914,12.00,34968.00,4895.52,30072.48,8742.00,21330.48,10/1/2014,10,October,2014 +Government,France,Paseo,1731,7.00,12117.00,1696.38,10420.62,8655.00,1765.62,10/1/2014,10,October,2014 +Government,Canada,Paseo,700,350.00,245000.00,34300.00,210700.00,182000.00,28700.00,11/1/2014,11,November,2014 +Channel Partners,Canada,Paseo,2222,12.00,26664.00,3732.96,22931.04,6666.00,16265.04,11/1/2013,11,November,2013 +Government,United States of America,Paseo,1177,350.00,411950.00,57673.00,354277.00,306020.00,48257.00,11/1/2014,11,November,2014 +Government,France,Paseo,1922,350.00,672700.00,94178.00,578522.00,499720.00,78802.00,11/1/2013,11,November,2013 +Enterprise,Mexico,Velo,1575,125.00,196875.00,27562.50,169312.50,189000.00,-19687.50,2/1/2014,2,February,2014 +Government,United States of America,Velo,606,20.00,12120.00,1696.80,10423.20,6060.00,4363.20,4/1/2014,4,April,2014 +Small Business,United States of America,Velo,2460,300.00,738000.00,103320.00,634680.00,615000.00,19680.00,7/1/2014,7,July,2014 +Small Business,Canada,Velo,269,300.00,80700.00,11298.00,69402.00,67250.00,2152.00,10/1/2013,10,October,2013 +Small Business,Germany,Velo,2536,300.00,760800.00,106512.00,654288.00,634000.00,20288.00,11/1/2013,11,November,2013 +Government,Mexico,VTT,2903,7.00,20321.00,2844.94,17476.06,14515.00,2961.06,3/1/2014,3,March,2014 +Small Business,United States of America,VTT,2541,300.00,762300.00,106722.00,655578.00,635250.00,20328.00,8/1/2014,8,August,2014 +Small Business,Canada,VTT,269,300.00,80700.00,11298.00,69402.00,67250.00,2152.00,10/1/2013,10,October,2013 +Small Business,Canada,VTT,1496,300.00,448800.00,62832.00,385968.00,374000.00,11968.00,10/1/2014,10,October,2014 +Small Business,United States of America,VTT,1010,300.00,303000.00,42420.00,260580.00,252500.00,8080.00,10/1/2014,10,October,2014 +Government,France,VTT,1281,350.00,448350.00,62769.00,385581.00,333060.00,52521.00,12/1/2013,12,December,2013 +Small Business,Canada,Amarilla,888,300.00,266400.00,37296.00,229104.00,222000.00,7104.00,3/1/2014,3,March,2014 +Enterprise,United States of America,Amarilla,2844,125.00,355500.00,49770.00,305730.00,341280.00,-35550.00,5/1/2014,5,May,2014 +Channel Partners,France,Amarilla,2475,12.00,29700.00,4158.00,25542.00,7425.00,18117.00,8/1/2014,8,August,2014 +Midmarket,Canada,Amarilla,1743,15.00,26145.00,3660.30,22484.70,17430.00,5054.70,10/1/2013,10,October,2013 +Channel Partners,United States of America,Amarilla,2914,12.00,34968.00,4895.52,30072.48,8742.00,21330.48,10/1/2014,10,October,2014 
+Government,France,Amarilla,1731,7.00,12117.00,1696.38,10420.62,8655.00,1765.62,10/1/2014,10,October,2014 +Government,Mexico,Amarilla,1727,7.00,12089.00,1692.46,10396.54,8635.00,1761.54,10/1/2013,10,October,2013 +Midmarket,Mexico,Amarilla,1870,15.00,28050.00,3927.00,24123.00,18700.00,5423.00,11/1/2013,11,November,2013 +Enterprise,France,Carretera,1174,125.00,146750.00,22012.50,124737.50,140880.00,-16142.50,8/1/2014,8,August,2014 +Enterprise,Germany,Carretera,2767,125.00,345875.00,51881.25,293993.75,332040.00,-38046.25,8/1/2014,8,August,2014 +Enterprise,Germany,Carretera,1085,125.00,135625.00,20343.75,115281.25,130200.00,-14918.75,10/1/2014,10,October,2014 +Small Business,Mexico,Montana,546,300.00,163800.00,24570.00,139230.00,136500.00,2730.00,10/1/2014,10,October,2014 +Government,Germany,Paseo,1158,20.00,23160.00,3474.00,19686.00,11580.00,8106.00,3/1/2014,3,March,2014 +Midmarket,Canada,Paseo,1614,15.00,24210.00,3631.50,20578.50,16140.00,4438.50,4/1/2014,4,April,2014 +Government,Mexico,Paseo,2535,7.00,17745.00,2661.75,15083.25,12675.00,2408.25,4/1/2014,4,April,2014 +Government,Mexico,Paseo,2851,350.00,997850.00,149677.50,848172.50,741260.00,106912.50,5/1/2014,5,May,2014 +Midmarket,Canada,Paseo,2559,15.00,38385.00,5757.75,32627.25,25590.00,7037.25,8/1/2014,8,August,2014 +Government,United States of America,Paseo,267,20.00,5340.00,801.00,4539.00,2670.00,1869.00,10/1/2013,10,October,2013 +Enterprise,Germany,Paseo,1085,125.00,135625.00,20343.75,115281.25,130200.00,-14918.75,10/1/2014,10,October,2014 +Midmarket,Germany,Paseo,1175,15.00,17625.00,2643.75,14981.25,11750.00,3231.25,10/1/2014,10,October,2014 +Government,United States of America,Paseo,2007,350.00,702450.00,105367.50,597082.50,521820.00,75262.50,11/1/2013,11,November,2013 +Government,Mexico,Paseo,2151,350.00,752850.00,112927.50,639922.50,559260.00,80662.50,11/1/2013,11,November,2013 +Channel Partners,United States of America,Paseo,914,12.00,10968.00,1645.20,9322.80,2742.00,6580.80,12/1/2014,12,December,2014 +Government,France,Paseo,293,20.00,5860.00,879.00,4981.00,2930.00,2051.00,12/1/2014,12,December,2014 +Channel Partners,Mexico,Velo,500,12.00,6000.00,900.00,5100.00,1500.00,3600.00,3/1/2014,3,March,2014 +Midmarket,France,Velo,2826,15.00,42390.00,6358.50,36031.50,28260.00,7771.50,5/1/2014,5,May,2014 +Enterprise,France,Velo,663,125.00,82875.00,12431.25,70443.75,79560.00,-9116.25,9/1/2014,9,September,2014 +Small Business,United States of America,Velo,2574,300.00,772200.00,115830.00,656370.00,643500.00,12870.00,11/1/2013,11,November,2013 +Enterprise,United States of America,Velo,2438,125.00,304750.00,45712.50,259037.50,292560.00,-33522.50,12/1/2013,12,December,2013 +Channel Partners,United States of America,Velo,914,12.00,10968.00,1645.20,9322.80,2742.00,6580.80,12/1/2014,12,December,2014 +Government,Canada,VTT,865.5,20.00,17310.00,2596.50,14713.50,8655.00,6058.50,7/1/2014,7,July,2014 +Midmarket,Germany,VTT,492,15.00,7380.00,1107.00,6273.00,4920.00,1353.00,7/1/2014,7,July,2014 +Government,United States of America,VTT,267,20.00,5340.00,801.00,4539.00,2670.00,1869.00,10/1/2013,10,October,2013 +Midmarket,Germany,VTT,1175,15.00,17625.00,2643.75,14981.25,11750.00,3231.25,10/1/2014,10,October,2014 +Enterprise,Canada,VTT,2954,125.00,369250.00,55387.50,313862.50,354480.00,-40617.50,11/1/2013,11,November,2013 +Enterprise,Germany,VTT,552,125.00,69000.00,10350.00,58650.00,66240.00,-7590.00,11/1/2014,11,November,2014 +Government,France,VTT,293,20.00,5860.00,879.00,4981.00,2930.00,2051.00,12/1/2014,12,December,2014 +Small 
Business,France,Amarilla,2475,300.00,742500.00,111375.00,631125.00,618750.00,12375.00,3/1/2014,3,March,2014 +Small Business,Mexico,Amarilla,546,300.00,163800.00,24570.00,139230.00,136500.00,2730.00,10/1/2014,10,October,2014 +Government,Mexico,Montana,1368,7.00,9576.00,1436.40,8139.60,6840.00,1299.60,2/1/2014,2,February,2014 +Government,Canada,Paseo,723,7.00,5061.00,759.15,4301.85,3615.00,686.85,4/1/2014,4,April,2014 +Channel Partners,United States of America,VTT,1806,12.00,21672.00,3250.80,18421.20,5418.00,13003.20,5/1/2014,5,May,2014 \ No newline at end of file diff --git a/python/samples/concepts/resources/mixed_chat_files/user-context.txt b/python/samples/concepts/resources/mixed_chat_files/user-context.txt new file mode 100644 index 000000000000..dec71acb4b1a --- /dev/null +++ b/python/samples/concepts/resources/mixed_chat_files/user-context.txt @@ -0,0 +1 @@ +The central Sahara is hyperarid, with sparse vegetation. The northern and southern reaches of the desert, along with the highlands, have areas of sparse grassland and desert shrub, with trees and taller shrubs in wadis, where moisture collects. In the central, hyperarid region, there are many subdivisions of the great desert: Tanezrouft, the Ténéré, the Libyan Desert, the Eastern Desert, the Nubian Desert and others. These extremely arid areas often receive no rain for years. \ No newline at end of file diff --git a/python/samples/demos/assistants_group_chat/group_chat.py b/python/samples/demos/assistants_group_chat/group_chat.py new file mode 100644 index 000000000000..8342d9078295 --- /dev/null +++ b/python/samples/demos/assistants_group_chat/group_chat.py @@ -0,0 +1,153 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import os +import re + +from semantic_kernel.agents import AgentGroupChat +from semantic_kernel.agents.open_ai import OpenAIAssistantAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole + +##################################################################### +# The following sample demonstrates how to create a Semantic Kernel # +# OpenAIAssistantAgent, and leverage the assistant's # +# code interpreter or file search capabilities. The user interacts # +# with the AI assistant by uploading files and chatting. # +##################################################################### + + +# region Helper Functions + + +def display_intro_message(): + print( + """ + Chat with an AI assistant backed by a Semantic Kernel OpenAIAssistantAgent. + + To start: you can upload files to the assistant using the command (brackets included): + + [upload code_interpreter | file_search file_path] + + where `code_interpreter` or `file_search` is the purpose of the file and + `file_path` is the path to the file. For example: + + [upload code_interpreter file.txt] + + This will upload file.txt to the assistant for use with the code interpreter tool. + + Type "exit" to exit the chat. 
+ """ + ) + + +def parse_upload_command(user_input: str): + """Parse the user input for an upload command.""" + match = re.search(r"\[upload\s+(code_interpreter|file_search)\s+(.+)\]", user_input) + if match: + return match.group(1), match.group(2) + return None, None + + +async def handle_file_upload(assistant_agent: OpenAIAssistantAgent, purpose: str, file_path: str): + """Handle the file upload command.""" + if not os.path.exists(file_path): + raise FileNotFoundError(f"File not found: {file_path}") + + file_id = await assistant_agent.add_file(file_path, purpose="assistants") + print(f"File uploaded: {file_id}") + + if purpose == "code_interpreter": + await enable_code_interpreter(assistant_agent, file_id) + elif purpose == "file_search": + await enable_file_search(assistant_agent, file_id) + + +async def enable_code_interpreter(assistant_agent: OpenAIAssistantAgent, file_id: str): + """Enable the file for code interpreter.""" + assistant_agent.code_interpreter_file_ids.append(file_id) + tools = [{"type": "file_search"}, {"type": "code_interpreter"}] + tool_resources = {"code_interpreter": {"file_ids": assistant_agent.code_interpreter_file_ids}} + await assistant_agent.modify_assistant( + assistant_id=assistant_agent.assistant.id, tools=tools, tool_resources=tool_resources + ) + print("File enabled for code interpreter.") + + +async def enable_file_search(assistant_agent: OpenAIAssistantAgent, file_id: str): + """Enable the file for file search.""" + if assistant_agent.vector_store_id is not None: + await assistant_agent.client.beta.vector_stores.files.create( + vector_store_id=assistant_agent.vector_store_id, file_id=file_id + ) + assistant_agent.file_search_file_ids.append(file_id) + else: + vector_store = await assistant_agent.create_vector_store(file_ids=file_id) + assistant_agent.file_search_file_ids.append(file_id) + assistant_agent.vector_store_id = vector_store.id + tools = [{"type": "file_search"}, {"type": "code_interpreter"}] + tool_resources = {"file_search": {"vector_store_ids": [vector_store.id]}} + await assistant_agent.modify_assistant( + assistant_id=assistant_agent.assistant.id, tools=tools, tool_resources=tool_resources + ) + print("File enabled for file search.") + + +async def cleanup_resources(assistant_agent: OpenAIAssistantAgent): + """Cleanup the resources used by the assistant.""" + if assistant_agent.vector_store_id: + await assistant_agent.delete_vector_store(assistant_agent.vector_store_id) + for file_id in assistant_agent.code_interpreter_file_ids: + await assistant_agent.delete_file(file_id) + for file_id in assistant_agent.file_search_file_ids: + await assistant_agent.delete_file(file_id) + await assistant_agent.delete() + + +# endregion + + +async def main(): + assistant_agent = None + try: + display_intro_message() + + # Create the OpenAI Assistant Agent + assistant_agent = await OpenAIAssistantAgent.create( + service_id="AIAssistant", + description="An AI assistant that helps with everyday tasks.", + instructions="Help the user with their task.", + enable_code_interpreter=True, + enable_file_search=True, + ) + + # Define an agent group chat, which drives the conversation + # We add messages to the chat and then invoke the agent to respond. 
+ chat = AgentGroupChat() + + while True: + try: + user_input = input("User:> ") + except (KeyboardInterrupt, EOFError): + print("\n\nExiting chat...") + break + + if user_input.strip().lower() == "exit": + print("\n\nExiting chat...") + break + + purpose, file_path = parse_upload_command(user_input) + if purpose and file_path: + await handle_file_upload(assistant_agent, purpose, file_path) + continue + + await chat.add_chat_message(message=ChatMessageContent(role=AuthorRole.USER, content=user_input)) + async for content in chat.invoke(agent=assistant_agent): + print(f"Assistant:> # {content.role} - {content.name or '*'}: '{content.content}'") + finally: + if assistant_agent: + await cleanup_resources(assistant_agent) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started/.env.example b/python/samples/getting_started/.env.example index 5334bd632329..60ad7fd8db16 100644 --- a/python/samples/getting_started/.env.example +++ b/python/samples/getting_started/.env.example @@ -1,8 +1,8 @@ GLOBAL_LLM_SERVICE="" OPENAI_API_KEY="" -OPEN_AI_CHAT_MODEL_ID="" -OPEN_AI_TEXT_MODEL_ID="" -OPEN_AI_EMBEDDING_MODEL_ID="" +OPENAI_CHAT_MODEL_ID="" +OPENAI_TEXT_MODEL_ID="" +OPENAI_EMBEDDING_MODEL_ID="" OPENAI_ORG_ID="" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="" AZURE_OPENAI_TEXT_DEPLOYMENT_NAME="" diff --git a/python/samples/getting_started/00-getting-started.ipynb b/python/samples/getting_started/00-getting-started.ipynb index 595e0fef6e6b..f2239967c7a3 100644 --- a/python/samples/getting_started/00-getting-started.ipynb +++ b/python/samples/getting_started/00-getting-started.ipynb @@ -17,7 +17,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0" + "%pip install semantic-kernel==1.6.0" ] }, { diff --git a/python/samples/getting_started/01-basic-loading-the-kernel.ipynb b/python/samples/getting_started/01-basic-loading-the-kernel.ipynb index ad672d181268..0b7991f02ae4 100644 --- a/python/samples/getting_started/01-basic-loading-the-kernel.ipynb +++ b/python/samples/getting_started/01-basic-loading-the-kernel.ipynb @@ -24,7 +24,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0" + "%pip install semantic-kernel==1.6.0" ] }, { diff --git a/python/samples/getting_started/02-running-prompts-from-file.ipynb b/python/samples/getting_started/02-running-prompts-from-file.ipynb index dc6c90095c85..12b2d658c068 100644 --- a/python/samples/getting_started/02-running-prompts-from-file.ipynb +++ b/python/samples/getting_started/02-running-prompts-from-file.ipynb @@ -35,7 +35,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0" + "%pip install semantic-kernel==1.6.0" ] }, { diff --git a/python/samples/getting_started/03-prompt-function-inline.ipynb b/python/samples/getting_started/03-prompt-function-inline.ipynb index fbed7a9d75a0..a4ea1707f42e 100644 --- a/python/samples/getting_started/03-prompt-function-inline.ipynb +++ b/python/samples/getting_started/03-prompt-function-inline.ipynb @@ -25,7 +25,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0" + "%pip install semantic-kernel==1.6.0" ] }, { @@ -83,7 +83,7 @@ "\n", "#### Option 2: using Azure OpenAI\n", "\n", - "Add your [Azure Open AI Service 
key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "Add your [Azure OpenAI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", "\n", "```\n", "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", diff --git a/python/samples/getting_started/04-kernel-arguments-chat.ipynb b/python/samples/getting_started/04-kernel-arguments-chat.ipynb index ac87b4def822..01ed946274bb 100644 --- a/python/samples/getting_started/04-kernel-arguments-chat.ipynb +++ b/python/samples/getting_started/04-kernel-arguments-chat.ipynb @@ -27,7 +27,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0" + "%pip install semantic-kernel==1.6.0" ] }, { diff --git a/python/samples/getting_started/05-using-the-planner.ipynb b/python/samples/getting_started/05-using-the-planner.ipynb index d6a6caa3f407..e7a0f371f19f 100644 --- a/python/samples/getting_started/05-using-the-planner.ipynb +++ b/python/samples/getting_started/05-using-the-planner.ipynb @@ -32,7 +32,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0" + "%pip install semantic-kernel==1.6.0" ] }, { diff --git a/python/samples/getting_started/06-memory-and-embeddings.ipynb b/python/samples/getting_started/06-memory-and-embeddings.ipynb index ea736d178a0e..fcece19a6223 100644 --- a/python/samples/getting_started/06-memory-and-embeddings.ipynb +++ b/python/samples/getting_started/06-memory-and-embeddings.ipynb @@ -37,7 +37,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0\n", + "%pip install semantic-kernel==1.6.0\n", "%pip install azure-core==1.30.1\n", "%pip install azure-search-documents==11.6.0b4" ] @@ -180,7 +180,6 @@ " azure_chat_service = AzureChatCompletion(\n", " service_id=chat_service_id,\n", " )\n", - " # next line assumes embeddings deployment name is \"text-embedding\", adjust the deployment name to the value of your chat model if needed\n", " embedding_gen = AzureTextEmbedding(\n", " service_id=\"embedding\",\n", " )\n", diff --git a/python/samples/getting_started/07-hugging-face-for-plugins.ipynb b/python/samples/getting_started/07-hugging-face-for-plugins.ipynb index 27248b795041..a99b0294cd18 100644 --- a/python/samples/getting_started/07-hugging-face-for-plugins.ipynb +++ b/python/samples/getting_started/07-hugging-face-for-plugins.ipynb @@ -21,7 +21,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel[hugging_face]==1.3.0" + "%pip install semantic-kernel[hugging_face]==1.6.0" ] }, { diff --git a/python/samples/getting_started/08-native-function-inline.ipynb b/python/samples/getting_started/08-native-function-inline.ipynb index 4bdfacce7cc6..f414a8424bfc 100644 --- a/python/samples/getting_started/08-native-function-inline.ipynb +++ b/python/samples/getting_started/08-native-function-inline.ipynb @@ -55,7 +55,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0" + "%pip install semantic-kernel==1.6.0" ] }, { diff --git a/python/samples/getting_started/09-groundedness-checking.ipynb 
b/python/samples/getting_started/09-groundedness-checking.ipynb index 412f69a20bdd..f603b615da19 100644 --- a/python/samples/getting_started/09-groundedness-checking.ipynb +++ b/python/samples/getting_started/09-groundedness-checking.ipynb @@ -36,7 +36,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0" + "%pip install semantic-kernel==1.6.0" ] }, { diff --git a/python/samples/getting_started/10-multiple-results-per-prompt.ipynb b/python/samples/getting_started/10-multiple-results-per-prompt.ipynb index 3452af397183..f4accdc8a8cb 100644 --- a/python/samples/getting_started/10-multiple-results-per-prompt.ipynb +++ b/python/samples/getting_started/10-multiple-results-per-prompt.ipynb @@ -34,7 +34,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0" + "%pip install semantic-kernel==1.6.0" ] }, { diff --git a/python/samples/getting_started/11-streaming-completions.ipynb b/python/samples/getting_started/11-streaming-completions.ipynb index f1d15491b88f..50e2d8684ec4 100644 --- a/python/samples/getting_started/11-streaming-completions.ipynb +++ b/python/samples/getting_started/11-streaming-completions.ipynb @@ -27,7 +27,7 @@ "outputs": [], "source": [ "# Note: if using a Poetry virtual environment, do not run this cell\n", - "%pip install semantic-kernel==1.3.0" + "%pip install semantic-kernel==1.6.0" ] }, { diff --git a/python/samples/getting_started/third_party/.env.example b/python/samples/getting_started/third_party/.env.example index 413f1a63cb58..9a1b4b6cfb38 100644 --- a/python/samples/getting_started/third_party/.env.example +++ b/python/samples/getting_started/third_party/.env.example @@ -1,8 +1,8 @@ GLOBAL_LLM_SERVICE="" OPENAI_API_KEY="" -OPEN_AI_CHAT_MODEL_ID="" -OPEN_AI_TEXT_MODEL_ID="" -OPEN_AI_EMBEDDING_MODEL_ID="" +OPENAI_CHAT_MODEL_ID="" +OPENAI_TEXT_MODEL_ID="" +OPENAI_EMBEDDING_MODEL_ID="" OPENAI_ORG_ID="" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="" AZURE_OPENAI_TEXT_DEPLOYMENT_NAME="" diff --git a/python/samples/getting_started/third_party/weaviate-persistent-memory.ipynb b/python/samples/getting_started/third_party/weaviate-persistent-memory.ipynb index d453f10c39b3..ed15f5ab82cc 100644 --- a/python/samples/getting_started/third_party/weaviate-persistent-memory.ipynb +++ b/python/samples/getting_started/third_party/weaviate-persistent-memory.ipynb @@ -156,7 +156,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install semantic-kernel[weaviate]==1.3.0" + "%pip install semantic-kernel[weaviate]==1.6.0" ] }, { diff --git a/python/samples/getting_started_with_agents/README.md b/python/samples/getting_started_with_agents/README.md new file mode 100644 index 000000000000..3d8f471c7967 --- /dev/null +++ b/python/samples/getting_started_with_agents/README.md @@ -0,0 +1,42 @@ +# Semantic Kernel Agents - Getting Started + +This project contains a step-by-step guide to get started with _Semantic Kernel Agents_ in Python. + +#### PyPI: +- For the use of Chat Completion agents, the minimum allowed Semantic Kernel PyPI version is 1.3.0. +- For the use of OpenAI Assistant agents, the minimum allowed Semantic Kernel PyPI version is 1.4.0. +- For the use of Agent Group Chat, the minimum allowed Semantic Kernel PyPI version is 1.6.0 (see the install sketch after this list).
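+ +A minimal install sketch for the newest of those constraints, assuming a plain `pip` environment rather than this repo's Poetry setup: + +``` +pip install "semantic-kernel>=1.6.0" +```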
+ +#### Source + +- [Semantic Kernel Agent Framework](../../semantic_kernel/agents/) + +## Examples + +The getting started with agents examples include: + +Example|Description +---|--- +[step1_agent](../getting_started_with_agents/step1_agent.py)|How to create and use an agent. +[step2_plugins](../getting_started_with_agents/step2_plugins.py)|How to associate plugins with an agent. +[step3_chat](../getting_started_with_agents/step3_chat.py)|How to create a conversation between agents. +[step4_kernel_function_strategies](../getting_started_with_agents/step4_kernel_function_strategies.py)|How to utilize a `KernelFunction` as a chat strategy. +[step5_json_result](../getting_started_with_agents/step5_json_result.py)|How to have an agent produce JSON. +[step6_logging](../getting_started_with_agents/step6_logging.py)|How to enable logging for agents. +[step7_assistant](../getting_started_with_agents/step7_assistant.py)|How to create and use an OpenAI Assistant agent. +[step8_assistant_vision](../getting_started_with_agents/step8_assistant_vision.py)|How to provide an image as input to an OpenAI Assistant agent. +[step9_assistant_tool_code_interpreter](../getting_started_with_agents/step9_assistant_tool_code_interpreter.py)|How to use the code-interpreter tool for an OpenAI Assistant agent. +[step10_assistant_tool_file_search](../getting_started_with_agents/step10_assistant_tool_file_search.py)|How to use the file-search tool for an OpenAI Assistant agent. + +*Note: As we strive for parity with .NET, more getting_started_with_agents samples will be added. The current steps and names may be revised to further align with our .NET counterpart.* + +## Configuring the Kernel + +Similar to the Semantic Kernel Python concept samples, it is necessary to configure the secrets +and keys used by the kernel. See the following "Configuring the Kernel" [guide](../concepts/README.md#configuring-the-kernel) for +more information. + +## Running Samples + +The samples can be run in an IDE or via the command line. After setting up the required API key +for your AI connector, the samples run without any extra command line arguments. diff --git a/python/samples/getting_started_with_agents/resources/cat.jpg b/python/samples/getting_started_with_agents/resources/cat.jpg new file mode 100644 index 000000000000..1e9f26de48fc Binary files /dev/null and b/python/samples/getting_started_with_agents/resources/cat.jpg differ diff --git a/python/samples/getting_started_with_agents/resources/employees.pdf b/python/samples/getting_started_with_agents/resources/employees.pdf new file mode 100644 index 000000000000..bba45f80a90b Binary files /dev/null and b/python/samples/getting_started_with_agents/resources/employees.pdf differ diff --git a/python/samples/getting_started_with_agents/step10_assistant_tool_file_search.py b/python/samples/getting_started_with_agents/step10_assistant_tool_file_search.py new file mode 100644 index 000000000000..3ac413f92400 --- /dev/null +++ b/python/samples/getting_started_with_agents/step10_assistant_tool_file_search.py @@ -0,0 +1,81 @@ +# Copyright (c) Microsoft. All rights reserved.
+import asyncio +import os + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +##################################################################### +# The following sample demonstrates how to create an OpenAI # +# assistant using either Azure OpenAI or OpenAI and leverage the # +# assistant's file search functionality. # +##################################################################### + + +AGENT_NAME = "FileSearch" +AGENT_INSTRUCTIONS = "Find answers to the user's questions in the provided file." + +# Note: you may toggle this to switch between AzureOpenAI and OpenAI +use_azure_openai = True + + +# A helper method to invoke the agent with the user input +async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, input: str) -> None: + """Invoke the agent with the user input.""" + await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=input)) + + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in agent.invoke(thread_id=thread_id): + if content.role != AuthorRole.TOOL: + print(f"# {content.role}: {content.content}") + + +async def main(): + # Create the instance of the Kernel + kernel = Kernel() + + # Define a service_id for the sample + service_id = "agent" + + # Get the path to the employees.pdf file + pdf_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources", "employees.pdf") + + # Create the agent configuration + if use_azure_openai: + agent = await AzureAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_file_search=True, + vector_store_filenames=[pdf_file_path], + ) + else: + agent = await OpenAIAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_file_search=True, + vector_store_filenames=[pdf_file_path], + ) + + # Define a thread and invoke the agent with the user input + thread_id = await agent.create_thread() + + try: + await invoke_agent(agent, thread_id=thread_id, input="Who is the youngest employee?") + await invoke_agent(agent, thread_id=thread_id, input="Who works in sales?") + await invoke_agent(agent, thread_id=thread_id, input="I have a customer request, who can help me?") + finally: + for file_id in agent.file_search_file_ids: + await agent.delete_file(file_id) + await agent.delete_thread(thread_id) + await agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/concepts/agents/step1_agent.py b/python/samples/getting_started_with_agents/step1_agent.py similarity index 96% rename from python/samples/concepts/agents/step1_agent.py rename to python/samples/getting_started_with_agents/step1_agent.py index 08e6fdeda8f0..e8d1e93882a1 100644 --- a/python/samples/concepts/agents/step1_agent.py +++ b/python/samples/getting_started_with_agents/step1_agent.py @@ -3,7 +3,7 @@ import asyncio from functools import reduce -from semantic_kernel.agents.chat_completion_agent import ChatCompletionAgent +from semantic_kernel.agents import ChatCompletionAgent from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion from semantic_kernel.contents.chat_history import 
ChatHistory from semantic_kernel.contents.utils.author_role import AuthorRole diff --git a/python/samples/concepts/agents/step2_plugins.py b/python/samples/getting_started_with_agents/step2_plugins.py similarity index 97% rename from python/samples/concepts/agents/step2_plugins.py rename to python/samples/getting_started_with_agents/step2_plugins.py index 46111da6100a..134e85f1ffd2 100644 --- a/python/samples/concepts/agents/step2_plugins.py +++ b/python/samples/getting_started_with_agents/step2_plugins.py @@ -3,7 +3,7 @@ import asyncio from typing import Annotated -from semantic_kernel.agents.chat_completion_agent import ChatCompletionAgent +from semantic_kernel.agents import ChatCompletionAgent from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion from semantic_kernel.contents.chat_history import ChatHistory diff --git a/python/samples/getting_started_with_agents/step3_chat.py b/python/samples/getting_started_with_agents/step3_chat.py new file mode 100644 index 000000000000..e81c5d0c516c --- /dev/null +++ b/python/samples/getting_started_with_agents/step3_chat.py @@ -0,0 +1,85 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +################################################################### +# The following sample demonstrates how to create a simple # +# agent group chat that utilizes an Art Director Chat Completion # +# Agent along with a Copy Writer Chat Completion Agent to # +# complete a task. # +################################################################### + + +class ApprovalTerminationStrategy(TerminationStrategy): + """A strategy for determining when an agent should terminate.""" + + async def should_agent_terminate(self, agent, history): + """Check if the agent should terminate.""" + return "approved" in history[-1].content.lower() + + +REVIEWER_NAME = "ArtDirector" +REVIEWER_INSTRUCTIONS = """ +You are an art director who has opinions about copywriting born of a love for David Ogilvy. +The goal is to determine if the given copy is acceptable to print. +If so, state that it is approved. +If not, provide insight on how to refine suggested copy without example. +""" + +COPYWRITER_NAME = "CopyWriter" +COPYWRITER_INSTRUCTIONS = """ +You are a copywriter with ten years of experience and are known for brevity and a dry humor. +The goal is to refine and decide on the single best copy as an expert in the field. +Only provide a single proposal per response. +You're laser focused on the goal at hand. +Don't waste time with chit chat. +Consider suggestions when refining an idea. 
+""" + + +def _create_kernel_with_chat_completion(service_id: str) -> Kernel: + kernel = Kernel() + kernel.add_service(AzureChatCompletion(service_id=service_id)) + return kernel + + +async def main(): + agent_reviewer = ChatCompletionAgent( + service_id="artdirector", + kernel=_create_kernel_with_chat_completion("artdirector"), + name=REVIEWER_NAME, + instructions=REVIEWER_INSTRUCTIONS, + ) + + agent_writer = ChatCompletionAgent( + service_id="copywriter", + kernel=_create_kernel_with_chat_completion("copywriter"), + name=COPYWRITER_NAME, + instructions=COPYWRITER_INSTRUCTIONS, + ) + + chat = AgentGroupChat( + agents=[agent_writer, agent_reviewer], + termination_strategy=ApprovalTerminationStrategy(agents=[agent_reviewer], maximum_iterations=10), + ) + + input = "a slogan for a new line of electric cars." + + await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=input)) + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in chat.invoke(): + print(f"# {content.role} - {content.name or '*'}: '{content.content}'") + + print(f"# IS COMPLETE: {chat.is_complete}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started_with_agents/step4_kernel_function_strategies.py b/python/samples/getting_started_with_agents/step4_kernel_function_strategies.py new file mode 100644 index 000000000000..9ad6a9d361bf --- /dev/null +++ b/python/samples/getting_started_with_agents/step4_kernel_function_strategies.py @@ -0,0 +1,128 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio + +from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent +from semantic_kernel.agents.strategies import ( + KernelFunctionSelectionStrategy, + KernelFunctionTerminationStrategy, +) +from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt +from semantic_kernel.kernel import Kernel + +################################################################### +# The following sample demonstrates how to create a simple, # +# agent group chat that utilizes An Art Director Chat Completion # +# Agent along with a Copy Writer Chat Completion Agent to # +# complete a task. The sample also shows how to specify a Kernel # +# Function termination and selection strategy to determine when # +# to end the chat or how to select the next agent to take a turn # +# in the conversation. # +################################################################### + +REVIEWER_NAME = "ArtDirector" +REVIEWER_INSTRUCTIONS = """ +You are an art director who has opinions about copywriting born of a love for David Ogilvy. +The goal is to determine if the given copy is acceptable to print. +If so, state that it is approved. +If not, provide insight on how to refine suggested copy without example. +""" + +COPYWRITER_NAME = "CopyWriter" +COPYWRITER_INSTRUCTIONS = """ +You are a copywriter with ten years of experience and are known for brevity and a dry humor. +The goal is to refine and decide on the single best copy as an expert in the field. +Only provide a single proposal per response. +You're laser focused on the goal at hand. +Don't waste time with chit chat. +Consider suggestions when refining an idea. 
+""" + + +def _create_kernel_with_chat_completion(service_id: str) -> Kernel: + kernel = Kernel() + kernel.add_service(AzureChatCompletion(service_id=service_id)) + return kernel + + +async def main(): + agent_reviewer = ChatCompletionAgent( + service_id="artdirector", + kernel=_create_kernel_with_chat_completion("artdirector"), + name=REVIEWER_NAME, + instructions=REVIEWER_INSTRUCTIONS, + ) + + agent_writer = ChatCompletionAgent( + service_id="copywriter", + kernel=_create_kernel_with_chat_completion("copywriter"), + name=COPYWRITER_NAME, + instructions=COPYWRITER_INSTRUCTIONS, + ) + + termination_function = KernelFunctionFromPrompt( + function_name="termination", + prompt=""" + Determine if the copy has been approved. If so, respond with a single word: yes + + History: + {{$history}} + """, + ) + + selection_function = KernelFunctionFromPrompt( + function_name="selection", + prompt=f""" + Determine which participant takes the next turn in a conversation based on the the most recent participant. + State only the name of the participant to take the next turn. + No participant should take more than one turn in a row. + + Choose only from these participants: + - {REVIEWER_NAME} + - {COPYWRITER_NAME} + + Always follow these rules when selecting the next participant: + - After user input, it is {COPYWRITER_NAME}'s turn. + - After {COPYWRITER_NAME} replies, it is {REVIEWER_NAME}'s turn. + - After {REVIEWER_NAME} provides feedback, it is {COPYWRITER_NAME}'s turn. + + History: + {{{{$history}}}} + """, + ) + + chat = AgentGroupChat( + agents=[agent_writer, agent_reviewer], + termination_strategy=KernelFunctionTerminationStrategy( + agents=[agent_reviewer], + function=termination_function, + kernel=_create_kernel_with_chat_completion("termination"), + result_parser=lambda result: str(result.value[0]).lower() == "yes", + history_variable_name="history", + maximum_iterations=10, + ), + selection_strategy=KernelFunctionSelectionStrategy( + function=selection_function, + kernel=_create_kernel_with_chat_completion("selection"), + result_parser=lambda result: str(result.value[0]) if result.value is not None else COPYWRITER_NAME, + agent_variable_name="agents", + history_variable_name="history", + ), + ) + + input = "a slogan for a new line of electric cars." + + await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=input)) + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in chat.invoke(): + print(f"# {content.role} - {content.name or '*'}: '{content.content}'") + + print(f"# IS COMPLETE: {chat.is_complete}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started_with_agents/step5_json_result.py b/python/samples/getting_started_with_agents/step5_json_result.py new file mode 100644 index 000000000000..10edc9f2198f --- /dev/null +++ b/python/samples/getting_started_with_agents/step5_json_result.py @@ -0,0 +1,106 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio + +from pydantic import ValidationError + +from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel +from semantic_kernel.kernel_pydantic import KernelBaseModel + +################################################################### +# The following sample demonstrates how to configure an Agent # +# Group Chat, and invoke an agent with only a single turn. # +# A custom termination strategy is provided where the model is # +# to rate the user input on creativity and expressiveness # +# and end the chat when a score of 70 or higher is provided. # +################################################################### + + +SCORE_COMPLETED_THRESHOLD = 70 +TUTOR_NAME = "Tutor" +TUTOR_INSTRUCTIONS = """ +Think step-by-step and rate the user input on creativity and expressiveness from 1-100. + +Respond in JSON format with the following JSON schema: + +{ + "score": "integer (1-100)", + "notes": "the reason for your score" +} +""" + + +class InputScore(KernelBaseModel): + """A model for the input score.""" + + score: int + notes: str + + +def translate_json(json_string: str) -> InputScore | None: + try: + if json_string is None: + return None + return InputScore.model_validate_json(json_string) + except ValidationError: + return None + + +class ThresholdTerminationStrategy(TerminationStrategy): + """A strategy for determining when an agent should terminate.""" + + async def should_agent_terminate(self, agent, history): + """Check if the agent should terminate.""" + last_message_content = history[-1].content or "" + result = translate_json(last_message_content) + return result.score >= SCORE_COMPLETED_THRESHOLD if result else False + + +def _create_kernel_with_chat_completion(service_id: str) -> Kernel: + kernel = Kernel() + kernel.add_service(AzureChatCompletion(service_id=service_id)) + return kernel + + +async def invoke_agent(agent: ChatCompletionAgent, input: str, chat: AgentGroupChat): + """Invoke the agent with the user input.""" + await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=input)) + + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in chat.invoke_single_turn(agent): + print(f"# {content.role} - {content.name or '*'}: '{content.content}'") + print(f"# IS COMPLETE: {chat.is_complete}") + + +async def main(): + service_id = "tutor" + agent = ChatCompletionAgent( + service_id=service_id, + kernel=_create_kernel_with_chat_completion(service_id=service_id), + name=TUTOR_NAME, + instructions=TUTOR_INSTRUCTIONS, + ) + + # Here a TerminationStrategy subclass is used that will terminate when + # the response includes a score that is greater than or equal to 70. 
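+ # For example, a reply of '{"score": 85, "notes": "vivid imagery"}' parses to a score of 85, which meets the threshold and ends the chat.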
+ termination_strategy = ThresholdTerminationStrategy(maximum_iterations=10) + + chat = AgentGroupChat(termination_strategy=termination_strategy) + + await invoke_agent(agent=agent, input="The sunset is very colorful.", chat=chat) + await invoke_agent(agent=agent, input="The sunset is setting over the mountains.", chat=chat) + await invoke_agent( + agent=agent, + input="The sunset is setting over the mountains and filled the sky with a deep red flame, setting the clouds ablaze.", # noqa: E501 + chat=chat, + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started_with_agents/step6_logging.py b/python/samples/getting_started_with_agents/step6_logging.py new file mode 100644 index 000000000000..197bcd72ab8e --- /dev/null +++ b/python/samples/getting_started_with_agents/step6_logging.py @@ -0,0 +1,93 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import logging + +from semantic_kernel.agents import AgentGroupChat +from semantic_kernel.agents.chat_completion.chat_completion_agent import ChatCompletionAgent +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +################################################################### +# The following sample demonstrates how to create a simple # +# agent group chat that utilizes an Art Director Chat Completion # +# Agent along with a Copy Writer Chat Completion Agent to # +# complete a task. The main point of this sample is to note how # +# to enable logging to view all interactions between the agents # +# and the model. # +################################################################### + + +# NOTE: This is all that is required to enable logging +logging.basicConfig(level=logging.DEBUG) + + +class ApprovalTerminationStrategy(TerminationStrategy): + """A strategy for determining when an agent should terminate.""" + + async def should_agent_terminate(self, agent, history): + """Check if the agent should terminate.""" + return "approved" in history[-1].content.lower() + + +REVIEWER_NAME = "ArtDirector" +REVIEWER_INSTRUCTIONS = """ +You are an art director who has opinions about copywriting born of a love for David Ogilvy. +The goal is to determine if the given copy is acceptable to print. +If so, state that it is approved. +If not, provide insight on how to refine suggested copy without example. +""" + +COPYWRITER_NAME = "CopyWriter" +COPYWRITER_INSTRUCTIONS = """ +You are a copywriter with ten years of experience and are known for brevity and a dry humor. +The goal is to refine and decide on the single best copy as an expert in the field. +Only provide a single proposal per response. +You're laser focused on the goal at hand. +Don't waste time with chit chat. +Consider suggestions when refining an idea. 
+""" + + +def _create_kernel_with_chat_completion(service_id: str) -> Kernel: + kernel = Kernel() + kernel.add_service(AzureChatCompletion(service_id=service_id)) + return kernel + + +async def main(): + agent_reviewer = ChatCompletionAgent( + service_id="artdirector", + kernel=_create_kernel_with_chat_completion("artdirector"), + name=REVIEWER_NAME, + instructions=REVIEWER_INSTRUCTIONS, + ) + + agent_writer = ChatCompletionAgent( + service_id="copywriter", + kernel=_create_kernel_with_chat_completion("copywriter"), + name=COPYWRITER_NAME, + instructions=COPYWRITER_INSTRUCTIONS, + ) + + chat = AgentGroupChat( + agents=[agent_writer, agent_reviewer], + termination_strategy=ApprovalTerminationStrategy(agents=[agent_reviewer], maximum_iterations=10), + ) + + input = "a slogan for a new line of electric cars." + + await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content=input)) + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in chat.invoke(): + print(f"# {content.role} - {content.name or '*'}: '{content.content}'") + + print(f"# IS COMPLETE: {chat.is_complete}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started_with_agents/step7_assistant.py b/python/samples/getting_started_with_agents/step7_assistant.py new file mode 100644 index 000000000000..6f1242f62ccc --- /dev/null +++ b/python/samples/getting_started_with_agents/step7_assistant.py @@ -0,0 +1,88 @@ +# Copyright (c) Microsoft. All rights reserved. +import asyncio +from typing import Annotated + +from semantic_kernel.agents.open_ai import AzureAssistantAgent, OpenAIAssistantAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.functions.kernel_function_decorator import kernel_function +from semantic_kernel.kernel import Kernel + +##################################################################### +# The following sample demonstrates how to create an OpenAI # +# assistant using either Azure OpenAI or OpenAI. OpenAI Assistants # +# allow for function calling, the use of file search and a # +# code interpreter. Assistant Threads are used to manage the # +# conversation state, similar to a Semantic Kernel Chat History. # +##################################################################### + +HOST_NAME = "Host" +HOST_INSTRUCTIONS = "Answer questions about the menu." 
+ +# Note: you may toggle this to switch between AzureOpenAI and OpenAI +use_azure_openai = True + + +# Define a sample plugin for the sample +class MenuPlugin: + """A sample Menu Plugin used for the concept sample.""" + + @kernel_function(description="Provides a list of specials from the menu.") + def get_specials(self) -> Annotated[str, "Returns the specials from the menu."]: + return """ + Special Soup: Clam Chowder + Special Salad: Cobb Salad + Special Drink: Chai Tea + """ + + @kernel_function(description="Provides the price of the requested menu item.") + def get_item_price( + self, menu_item: Annotated[str, "The name of the menu item."] + ) -> Annotated[str, "Returns the price of the menu item."]: + return "$9.99" + + +# A helper method to invoke the agent with the user input +async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, input: str) -> None: + """Invoke the agent with the user input.""" + await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=input)) + + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in agent.invoke(thread_id=thread_id): + if content.role != AuthorRole.TOOL: + print(f"# {content.role}: {content.content}") + + +async def main(): + # Create the instance of the Kernel + kernel = Kernel() + + # Add the sample plugin to the kernel + kernel.add_plugin(plugin=MenuPlugin(), plugin_name="menu") + + # Create the OpenAI Assistant Agent + service_id = "agent" + if use_azure_openai: + agent = await AzureAssistantAgent.create( + kernel=kernel, service_id=service_id, name=HOST_NAME, instructions=HOST_INSTRUCTIONS + ) + else: + agent = await OpenAIAssistantAgent.create( + kernel=kernel, service_id=service_id, name=HOST_NAME, instructions=HOST_INSTRUCTIONS + ) + + thread_id = await agent.create_thread() + + try: + await invoke_agent(agent, thread_id=thread_id, input="Hello") + await invoke_agent(agent, thread_id=thread_id, input="What is the special soup?") + await invoke_agent(agent, thread_id=thread_id, input="What is the special drink?") + await invoke_agent(agent, thread_id=thread_id, input="Thank you") + finally: + await agent.delete_thread(thread_id) + await agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started_with_agents/step8_assistant_vision.py b/python/samples/getting_started_with_agents/step8_assistant_vision.py new file mode 100644 index 000000000000..62a796cd9f52 --- /dev/null +++ b/python/samples/getting_started_with_agents/step8_assistant_vision.py @@ -0,0 +1,102 @@ +# Copyright (c) Microsoft. All rights reserved. +import asyncio +import os + +from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.file_reference_content import FileReferenceContent +from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +##################################################################### +# The following sample demonstrates how to create an OpenAI # +# assistant using either Azure OpenAI or OpenAI and leverage the # +# multi-modal content types to have the assistant describe images # +# and answer questions about them. 
# +##################################################################### + +HOST_NAME = "Host" +HOST_INSTRUCTIONS = "Answer questions about the menu." + + +def create_message_with_image_url(input: str, url: str) -> ChatMessageContent: + return ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text=input), ImageContent(uri=url)], + ) + + +def create_message_with_image_reference(input: str, file_id: str) -> ChatMessageContent: + return ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text=input), FileReferenceContent(file_id=file_id)], + ) + + +# A helper method to invoke the agent with the user input +async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, message: ChatMessageContent) -> None: + """Invoke the agent with the user input.""" + await agent.add_chat_message(thread_id=thread_id, message=message) + + print(f"# {AuthorRole.USER}: '{message.items[0].text}'") + + async for content in agent.invoke(thread_id=thread_id): + if content.role != AuthorRole.TOOL: + print(f"# {content.role}: {content.content}") + + +async def main(): + # Create the instance of the Kernel + kernel = Kernel() + + service_id = "agent" + + # Create the Assistant Agent + agent = await OpenAIAssistantAgent.create( + kernel=kernel, service_id=service_id, name=HOST_NAME, instructions=HOST_INSTRUCTIONS + ) + + cat_image_file_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), + "resources", + "cat.jpg", + ) + + # Upload the file for use with the assistant + file_id = await agent.add_file(cat_image_file_path, purpose="vision") + + # Create a thread for the conversation + thread_id = await agent.create_thread() + + try: + await invoke_agent( + agent, + thread_id=thread_id, + message=create_message_with_image_url( + "Describe this image.", + "https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/New_york_times_square-terabass.jpg/1200px-New_york_times_square-terabass.jpg", + ), + ) + await invoke_agent( + agent, + thread_id=thread_id, + message=create_message_with_image_url( + "What is the main color in this image?", + "https://upload.wikimedia.org/wikipedia/commons/5/56/White_shark.jpg", + ), + ) + await invoke_agent( + agent, + thread_id=thread_id, + message=create_message_with_image_reference("Is there an animal in this image?", file_id), + ) + finally: + await agent.delete_file(file_id) + await agent.delete_thread(thread_id) + await agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/getting_started_with_agents/step9_assistant_tool_code_interpreter.py b/python/samples/getting_started_with_agents/step9_assistant_tool_code_interpreter.py new file mode 100644 index 000000000000..11c2deff8e7c --- /dev/null +++ b/python/samples/getting_started_with_agents/step9_assistant_tool_code_interpreter.py @@ -0,0 +1,76 @@ +# Copyright (c) Microsoft. All rights reserved. 
+import asyncio + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel + +##################################################################### +# The following sample demonstrates how to create an OpenAI # +# assistant using either Azure OpenAI or OpenAI and leverage the # +# assistant's code interpreter functionality to have it write # +# Python code to print Fibonacci numbers. # +##################################################################### + + +AGENT_NAME = "CodeRunner" +AGENT_INSTRUCTIONS = "Run the provided code file and return the result." + +# Note: you may toggle this to switch between AzureOpenAI and OpenAI +use_azure_openai = True + + +# A helper method to invoke the agent with the user input
async def invoke_agent(agent: OpenAIAssistantAgent, thread_id: str, input: str) -> None: + """Invoke the agent with the user input.""" + await agent.add_chat_message(thread_id=thread_id, message=ChatMessageContent(role=AuthorRole.USER, content=input)) + + print(f"# {AuthorRole.USER}: '{input}'") + + async for content in agent.invoke(thread_id=thread_id): + if content.role != AuthorRole.TOOL: + print(f"# {content.role}: {content.content}") + + +async def main(): + # Create the instance of the Kernel + kernel = Kernel() + + # Define a service_id for the sample + service_id = "agent" + + # Create the agent + if use_azure_openai: + agent = await AzureAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_code_interpreter=True, + ) + else: + agent = await OpenAIAssistantAgent.create( + kernel=kernel, + service_id=service_id, + name=AGENT_NAME, + instructions=AGENT_INSTRUCTIONS, + enable_code_interpreter=True, + ) + + thread_id = await agent.create_thread() + + try: + await invoke_agent( + agent, + thread_id=thread_id, + input="Use code to determine the values in the Fibonacci sequence that are less than the value of 101.", # noqa: E501 + ) + finally: + await agent.delete_thread(thread_id) + await agent.delete() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/learn_resources/.env.example b/python/samples/learn_resources/.env.example index 4a524a2c16bd..91f4c1e4f992 100644 --- a/python/samples/learn_resources/.env.example +++ b/python/samples/learn_resources/.env.example @@ -1,7 +1,7 @@ OPENAI_API_KEY="" -OPEN_AI_CHAT_MODEL_ID="" -OPEN_AI_TEXT_MODEL_ID="" -OPEN_AI_EMBEDDING_MODEL_ID="" +OPENAI_CHAT_MODEL_ID="" +OPENAI_TEXT_MODEL_ID="" +OPENAI_EMBEDDING_MODEL_ID="" OPENAI_ORG_ID="" AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="" AZURE_OPENAI_TEXT_DEPLOYMENT_NAME="" diff --git a/python/samples/learn_resources/README.md b/python/samples/learn_resources/README.md index f36b03bca2b3..283d3f0d01c4 100644 --- a/python/samples/learn_resources/README.md +++ b/python/samples/learn_resources/README.md @@ -23,13 +23,13 @@ Copy the `.env.example` file to a new file named `.env`. 
Then, copy those keys i ``` GLOBAL_LLM_SERVICE="OpenAI" # Toggle between "OpenAI" or "AzureOpenAI" -OPEN_AI_CHAT_MODEL_ID="gpt-3.5-turbo-0125" -OPEN_AI_TEXT_MODEL_ID="gpt-3.5-turbo-instruct" +OPENAI_CHAT_MODEL_ID="gpt-3.5-turbo-0125" +OPENAI_TEXT_MODEL_ID="gpt-3.5-turbo-instruct" OPENAI_API_KEY="" OPENAI_ORG_ID="" -AZURE_OPEN_AI_CHAT_DEPLOYMENT_NAME="gpt-35-turbo" -AZURE_OPEN_AI_TEXT_DEPLOYMENT_NAME="gpt-35-turbo-instruct" +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="gpt-35-turbo" +AZURE_OPENAI_TEXT_DEPLOYMENT_NAME="gpt-35-turbo-instruct" AZURE_OPENAI_ENDPOINT="" AZURE_OPENAI_API_KEY="" AZURE_OPENAI_API_VERSION="" diff --git a/python/samples/learn_resources/evaluate_with_prompt_flow.py b/python/samples/learn_resources/evaluate_with_prompt_flow.py index edb42782ff15..60fcd3bf9385 100644 --- a/python/samples/learn_resources/evaluate_with_prompt_flow.py +++ b/python/samples/learn_resources/evaluate_with_prompt_flow.py @@ -11,11 +11,11 @@ # Load the configuration from the .env file config = dotenv_values(".env") -deployment_type = config.get("AZURE_OPEN_AI_DEPLOYMENT_TYPE", None) +deployment_type = config.get("AZURE_OPENAI_DEPLOYMENT_TYPE", None) if deployment_type == "chat-completion": - deployment_name = config.get("AZURE_OPEN_AI_CHAT_COMPLETION_DEPLOYMENT_NAME", None) + deployment_name = config.get("AZURE_OPENAI_CHAT_COMPLETION_DEPLOYMENT_NAME", None) elif deployment_type == "text-completion": - deployment_name = config.get("AZURE_OPEN_AI_TEXT_COMPLETION_DEPLOYMENT_NAME", None) + deployment_name = config.get("AZURE_OPENAI_TEXT_COMPLETION_DEPLOYMENT_NAME", None) # Define the inputs of the flow inputs = { @@ -28,8 +28,8 @@ connection = AzureOpenAIConnection( name="AzureOpenAIConnection", type="Custom", - api_key=config.get("AZURE_OPEN_AI__API_KEY", None), - api_base=config.get("AZURE_OPEN_AI__ENDPOINT", None), + api_key=config.get("AZURE_OPENAI_API_KEY", None), + api_base=config.get("AZURE_OPENAI_ENDPOINT", None), api_version="2023-03-15-preview", ) diff --git a/python/semantic_kernel/agents/__init__.py b/python/semantic_kernel/agents/__init__.py index 376202f33570..34ad271709a0 100644 --- a/python/semantic_kernel/agents/__init__.py +++ b/python/semantic_kernel/agents/__init__.py @@ -1,7 +1,13 @@ # Copyright (c) Microsoft. All rights reserved. -from semantic_kernel.agents.chat_completion_agent import ChatCompletionAgent +from semantic_kernel.agents.agent import Agent +from semantic_kernel.agents.chat_completion.chat_completion_agent import ChatCompletionAgent +from semantic_kernel.agents.group_chat.agent_chat import AgentChat +from semantic_kernel.agents.group_chat.agent_group_chat import AgentGroupChat __all__ = [ + "Agent", + "AgentChat", + "AgentGroupChat", "ChatCompletionAgent", ] diff --git a/python/semantic_kernel/agents/agent.py b/python/semantic_kernel/agents/agent.py index 73ffcba0240e..a6c84c43dc43 100644 --- a/python/semantic_kernel/agents/agent.py +++ b/python/semantic_kernel/agents/agent.py @@ -1,19 +1,20 @@ # Copyright (c) Microsoft. All rights reserved. 
import uuid -from abc import ABC +from collections.abc import Iterable from typing import ClassVar from pydantic import Field -from semantic_kernel.agents.agent_channel import AgentChannel +from semantic_kernel.agents.channels.agent_channel import AgentChannel from semantic_kernel.kernel import Kernel from semantic_kernel.kernel_pydantic import KernelBaseModel from semantic_kernel.utils.experimental_decorator import experimental_class +from semantic_kernel.utils.naming import generate_random_ascii_name @experimental_class -class Agent(ABC, KernelBaseModel): +class Agent(KernelBaseModel): """Base abstraction for all Semantic Kernel agents. An agent instance may participate in one or more conversations. @@ -31,12 +32,12 @@ class Agent(ABC, KernelBaseModel): id: str = Field(default_factory=lambda: str(uuid.uuid4())) description: str | None = None - name: str | None = None + name: str = Field(default_factory=lambda: f"agent_{generate_random_ascii_name()}") instructions: str | None = None kernel: Kernel = Field(default_factory=Kernel) channel_type: ClassVar[type[AgentChannel] | None] = None - def get_channel_keys(self) -> list[str]: + def get_channel_keys(self) -> Iterable[str]: """Get the channel keys. Returns: @@ -46,7 +47,7 @@ def get_channel_keys(self) -> list[str]: raise NotImplementedError("Unable to get channel keys. Channel type not configured.") return [self.channel_type.__name__] - def create_channel(self) -> AgentChannel: + async def create_channel(self) -> AgentChannel: """Create a channel. Returns: @@ -55,3 +56,19 @@ def create_channel(self) -> AgentChannel: if not self.channel_type: raise NotImplementedError("Unable to create channel. Channel type not configured.") return self.channel_type() + + def __eq__(self, other): + """Check if two agents are equal.""" + if isinstance(other, Agent): + return ( + self.id == other.id + and self.name == other.name + and self.description == other.description + and self.instructions == other.instructions + and self.channel_type == other.channel_type + ) + return False + + def __hash__(self): + """Get the hash of the agent.""" + return hash((self.id, self.name, self.description, self.instructions, self.channel_type)) diff --git a/python/semantic_kernel/agents/channels/__init__.py b/python/semantic_kernel/agents/channels/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/agents/agent_channel.py b/python/semantic_kernel/agents/channels/agent_channel.py similarity index 85% rename from python/semantic_kernel/agents/agent_channel.py rename to python/semantic_kernel/agents/channels/agent_channel.py index ea834950e88e..8d8c0342dfd3 100644 --- a/python/semantic_kernel/agents/agent_channel.py +++ b/python/semantic_kernel/agents/channels/agent_channel.py @@ -36,14 +36,14 @@ async def receive( def invoke( self, agent: "Agent", - ) -> AsyncIterable["ChatMessageContent"]: + ) -> AsyncIterable[tuple[bool, "ChatMessageContent"]]: """Perform a discrete incremental interaction between a single Agent and AgentChat. Args: agent: The agent to interact with. Returns: - An async iterable of ChatMessageContent. + An async iterable of (bool, ChatMessageContent) tuples. """ ... @@ -57,3 +57,8 @@ def get_history( An async iterable of ChatMessageContent. """ ... + + @abstractmethod + async def reset(self) -> None: + """Reset any persistent state associated with the channel.""" + ... 
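For code outside this diff that maintains its own conversation store, the reshaped `AgentChannel` contract above implies a channel shaped like the following minimal sketch. This is illustrative only and not part of the change set: the `ListBackedChannel` name is hypothetical, and it assumes the bound agent exposes an `invoke(history)` async generator, as `ChatCompletionAgent` does through `ChatHistoryChannel`.

```python
from collections.abc import AsyncIterable
from typing import TYPE_CHECKING

from semantic_kernel.agents.channels.agent_channel import AgentChannel
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.contents.chat_message_content import ChatMessageContent

if TYPE_CHECKING:
    from semantic_kernel.agents.agent import Agent


class ListBackedChannel(AgentChannel):
    """Hypothetical channel that keeps conversation state in a ChatHistory."""

    def __init__(self) -> None:
        self.history = ChatHistory()

    async def receive(self, history: list[ChatMessageContent]) -> None:
        # Absorb messages broadcast to this channel by the chat.
        for message in history:
            self.history.add_message(message)

    async def invoke(self, agent: "Agent") -> AsyncIterable[tuple[bool, ChatMessageContent]]:
        # Per the revised contract, yield (is_visible, message) tuples;
        # this sketch marks every message visible.
        async for message in agent.invoke(self.history):  # type: ignore[attr-defined]
            self.history.add_message(message)
            yield True, message

    async def get_history(self) -> AsyncIterable[ChatMessageContent]:
        # Newest-first, matching ChatHistoryChannel.get_history.
        for message in reversed(self.history.messages):
            yield message

    async def reset(self) -> None:
        # Required by the new abstract `reset`: drop all persistent state.
        self.history.messages.clear()
```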
diff --git a/python/semantic_kernel/agents/chat_history_channel.py b/python/semantic_kernel/agents/channels/chat_history_channel.py similarity index 52% rename from python/semantic_kernel/agents/chat_history_channel.py rename to python/semantic_kernel/agents/channels/chat_history_channel.py index dc4a1b231b1d..7b170a60ca41 100644 --- a/python/semantic_kernel/agents/chat_history_channel.py +++ b/python/semantic_kernel/agents/channels/chat_history_channel.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. import sys +from collections import deque from collections.abc import AsyncIterable if sys.version_info >= (3, 12): @@ -9,16 +10,18 @@ from typing_extensions import override # pragma: no cover from abc import abstractmethod -from typing import TYPE_CHECKING, Protocol, runtime_checkable +from typing import TYPE_CHECKING, Deque, Protocol, runtime_checkable -from semantic_kernel.agents.agent import Agent -from semantic_kernel.agents.agent_channel import AgentChannel +from semantic_kernel.agents.channels.agent_channel import AgentChannel from semantic_kernel.contents import ChatMessageContent from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent from semantic_kernel.exceptions import ServiceInvalidTypeError from semantic_kernel.utils.experimental_decorator import experimental_class if TYPE_CHECKING: + from semantic_kernel.agents.agent import Agent from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent @@ -47,8 +50,8 @@ class ChatHistoryChannel(AgentChannel, ChatHistory): @override async def invoke( self, - agent: Agent, - ) -> AsyncIterable[ChatMessageContent]: + agent: "Agent", + ) -> AsyncIterable[tuple[bool, ChatMessageContent]]: """Perform a discrete incremental interaction between a single Agent and AgentChat. Args: @@ -63,9 +66,46 @@ async def invoke( f"Invalid channel binding for agent with id: `{id}` with name: ({type(agent).__name__})" ) - async for message in agent.invoke(self): - self.messages.append(message) - yield message + message_count = len(self.messages) + mutated_history = set() + message_queue: Deque[ChatMessageContent] = deque() + + async for response_message in agent.invoke(self): + # Capture all messages that have been included in the mutated history. + for message_index in range(message_count, len(self.messages)): + mutated_message = self.messages[message_index] + mutated_history.add(mutated_message) + message_queue.append(mutated_message) + + # Update the message count pointer to reflect the current history. + message_count = len(self.messages) + + # Avoid duplicating any message included in the mutated history and also returned by the enumeration result. + if response_message not in mutated_history: + self.messages.append(response_message) + message_queue.append(response_message) + + # Dequeue the next message to yield. + yield_message = message_queue.popleft() + yield ( + self._is_message_visible(message=yield_message, message_queue_count=len(message_queue)), + yield_message, + ) + + # Dequeue any remaining messages to yield. 
+ while message_queue: + yield_message = message_queue.popleft() + yield ( + self._is_message_visible(message=yield_message, message_queue_count=len(message_queue)), + yield_message, + ) + + def _is_message_visible(self, message: ChatMessageContent, message_queue_count: int) -> bool: + """Determine if a message is visible to the user.""" + return ( + not any(isinstance(item, (FunctionCallContent, FunctionResultContent)) for item in message.items) + or message_queue_count == 0 + ) @override async def receive( @@ -90,3 +130,8 @@ async def get_history( # type: ignore """ for message in reversed(self.messages): yield message + + @override + async def reset(self) -> None: + """Reset the channel state.""" + self.messages.clear() diff --git a/python/semantic_kernel/agents/channels/open_ai_assistant_channel.py b/python/semantic_kernel/agents/channels/open_ai_assistant_channel.py new file mode 100644 index 000000000000..1d37b68dcc5b --- /dev/null +++ b/python/semantic_kernel/agents/channels/open_ai_assistant_channel.py @@ -0,0 +1,93 @@ +# Copyright (c) Microsoft. All rights reserved. + +import sys +from collections.abc import AsyncIterable +from typing import TYPE_CHECKING, Any + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + +from openai import AsyncOpenAI + +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.open_ai.assistant_content_generation import create_chat_message, generate_message_content +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.exceptions.agent_exceptions import AgentChatException + +if TYPE_CHECKING: + from semantic_kernel.agents.agent import Agent + + +class OpenAIAssistantChannel(AgentChannel): + """OpenAI Assistant Channel.""" + + def __init__(self, client: AsyncOpenAI, thread_id: str) -> None: + """Initialize the OpenAI Assistant Channel.""" + self.client = client + self.thread_id = thread_id + + @override + async def receive(self, history: list["ChatMessageContent"]) -> None: + """Receive the conversation messages. + + Args: + history: The conversation messages. + """ + for message in history: + await create_chat_message(self.client, self.thread_id, message) + + @override + async def invoke(self, agent: "Agent") -> AsyncIterable[tuple[bool, "ChatMessageContent"]]: + """Invoke the agent. + + Args: + agent: The agent to invoke. + + Yields: + tuple[bool, ChatMessageContent]: The conversation messages. + """ + from semantic_kernel.agents.open_ai.open_ai_assistant_base import OpenAIAssistantBase + + if not isinstance(agent, OpenAIAssistantBase): + raise AgentChatException(f"Agent is not of the expected type {type(OpenAIAssistantBase)}.") + + if agent._is_deleted: + raise AgentChatException("Agent is deleted.") + + async for is_visible, message in agent._invoke_internal(thread_id=self.thread_id): + yield is_visible, message + + @override + async def get_history(self) -> AsyncIterable["ChatMessageContent"]: + """Get the conversation history. + + Yields: + ChatMessageContent: The conversation history. 
+ """ + agent_names: dict[str, Any] = {} + + thread_messages = await self.client.beta.threads.messages.list( + thread_id=self.thread_id, limit=100, order="desc" + ) + for message in thread_messages.data: + assistant_name = None + if message.assistant_id and message.assistant_id not in agent_names: + agent = await self.client.beta.assistants.retrieve(message.assistant_id) + if agent.name: + agent_names[message.assistant_id] = agent.name + assistant_name = agent_names.get(message.assistant_id) if message.assistant_id else message.assistant_id + + content: ChatMessageContent = generate_message_content(str(assistant_name), message) + + if len(content.items) > 0: + yield content + + @override + async def reset(self) -> None: + """Reset the agent's thread.""" + try: + await self.client.beta.threads.delete(thread_id=self.thread_id) + except Exception as e: + raise AgentChatException(f"Failed to delete thread: {e}") diff --git a/python/semantic_kernel/agents/chat_completion/__init__.py b/python/semantic_kernel/agents/chat_completion/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/agents/chat_completion_agent.py b/python/semantic_kernel/agents/chat_completion/chat_completion_agent.py similarity index 96% rename from python/semantic_kernel/agents/chat_completion_agent.py rename to python/semantic_kernel/agents/chat_completion/chat_completion_agent.py index 44cf48f94722..6e07bd06d899 100644 --- a/python/semantic_kernel/agents/chat_completion_agent.py +++ b/python/semantic_kernel/agents/chat_completion/chat_completion_agent.py @@ -4,9 +4,9 @@ from collections.abc import AsyncGenerator, AsyncIterable from typing import TYPE_CHECKING, Any, ClassVar -from semantic_kernel.agents.agent import Agent -from semantic_kernel.agents.agent_channel import AgentChannel -from semantic_kernel.agents.chat_history_channel import ChatHistoryChannel +from semantic_kernel.agents import Agent +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.channels.chat_history_channel import ChatHistoryChannel from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.const import DEFAULT_SERVICE_NAME @@ -64,11 +64,12 @@ def __init__( args: dict[str, Any] = { "service_id": service_id, - "name": name, "description": description, "instructions": instructions, "execution_settings": execution_settings, } + if name is not None: + args["name"] = name if id is not None: args["id"] = id if kernel is not None: diff --git a/python/semantic_kernel/agents/group_chat/__init__.py b/python/semantic_kernel/agents/group_chat/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/agents/group_chat/agent_chat.py b/python/semantic_kernel/agents/group_chat/agent_chat.py new file mode 100644 index 000000000000..dcd44b2bed6b --- /dev/null +++ b/python/semantic_kernel/agents/group_chat/agent_chat.py @@ -0,0 +1,180 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +import logging +import threading +from abc import abstractmethod +from collections.abc import AsyncGenerator, AsyncIterable +from typing import Protocol, runtime_checkable + +from pydantic import Field, PrivateAttr + +from semantic_kernel.agents import Agent +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.group_chat.agent_chat_utils import KeyEncoder +from semantic_kernel.agents.group_chat.broadcast_queue import BroadcastQueue, ChannelReference +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.agent_exceptions import AgentChatException +from semantic_kernel.kernel_pydantic import KernelBaseModel +from semantic_kernel.utils.experimental_decorator import experimental_class + +logger: logging.Logger = logging.getLogger(__name__) + + +@experimental_class +@runtime_checkable +class AgentChatProtocol(Protocol): + """A protocol for agent chat.""" + + @abstractmethod + async def invoke(self, agent: Agent) -> AsyncIterable[ChatMessageContent]: + """Invoke an agent asynchronously.""" + ... + + +@experimental_class +class AgentChat(KernelBaseModel): + """A base class chat interface for agents.""" + + broadcast_queue: BroadcastQueue = Field(default_factory=BroadcastQueue) + agent_channels: dict[str, AgentChannel] = Field(default_factory=dict) + channel_map: dict[Agent, str] = Field(default_factory=dict) + history: ChatHistory = Field(default_factory=ChatHistory) + + _lock: threading.Lock = PrivateAttr(default_factory=threading.Lock) + _is_active: bool = False + + @property + def is_active(self) -> bool: + """Indicates whether the agent is currently active.""" + return self._is_active + + def set_activity_or_throw(self): + """Set the activity signal or throw an exception if another agent is active.""" + with self._lock: + if self._is_active: + raise Exception("Unable to proceed while another agent is active.") + self._is_active = True + + def clear_activity_signal(self): + """Clear the activity signal.""" + with self._lock: + self._is_active = False + + def invoke(self, agent: Agent | None = None, is_joining: bool = True) -> AsyncIterable[ChatMessageContent]: + """Invoke the agent asynchronously.""" + raise NotImplementedError("Subclasses should implement this method") + + async def get_messages_in_descending_order(self): + """Get messages in descending order asynchronously.""" + for index in range(len(self.history.messages) - 1, -1, -1): + yield self.history.messages[index] + await asyncio.sleep(0) # Yield control to the event loop + + async def get_chat_messages(self, agent: "Agent | None" = None) -> AsyncGenerator[ChatMessageContent, None]: + """Get chat messages asynchronously.""" + self.set_activity_or_throw() + + logger.info("Getting chat messages") + try: + if agent is None: + messages = self.get_messages_in_descending_order() + else: + channel_key = self._get_agent_hash(agent) + channel = await self._synchronize_channel(channel_key) + if channel is not None: + messages = channel.get_history() + if messages is not None: + async for message in messages: + yield message + finally: + self.clear_activity_signal() + + async def _synchronize_channel(self, channel_key: str) -> AgentChannel | None: + """Synchronize a channel.""" + channel = self.agent_channels.get(channel_key, None) + if channel: + await 
self.broadcast_queue.ensure_synchronized(ChannelReference(channel=channel, hash=channel_key)) + return channel + + def _get_agent_hash(self, agent: Agent): + """Get the hash of an agent.""" + hash_value = self.channel_map.get(agent, None) + if hash_value is None: + hash_value = KeyEncoder.generate_hash(agent.get_channel_keys()) + self.channel_map[agent] = hash_value + + return hash_value + + async def add_chat_message(self, message: ChatMessageContent) -> None: + """Add a chat message.""" + await self.add_chat_messages([message]) + + async def add_chat_messages(self, messages: list[ChatMessageContent]) -> None: + """Add chat messages.""" + self.set_activity_or_throw() + + for message in messages: + if message.role == AuthorRole.SYSTEM: + error_message = "System messages cannot be added to the chat history." + logger.error(error_message) + raise AgentChatException(error_message) + + logger.info(f"Adding `{len(messages)}` agent chat messages") + + try: + self.history.messages.extend(messages) + + # Broadcast message to other channels (in parallel) + # Note: Able to queue messages without synchronizing channels. + channel_refs = [ChannelReference(channel=channel, hash=key) for key, channel in self.agent_channels.items()] + await self.broadcast_queue.enqueue(channel_refs, messages) + finally: + self.clear_activity_signal() + + async def _get_or_create_channel(self, agent: Agent) -> AgentChannel: + """Get or create a channel.""" + channel_key = self._get_agent_hash(agent) + channel = await self._synchronize_channel(channel_key) + if channel is None: + channel = await agent.create_channel() + self.agent_channels[channel_key] = channel + + if len(self.history.messages) > 0: + await channel.receive(self.history.messages) + return channel + + async def invoke_agent(self, agent: Agent) -> AsyncIterable[ChatMessageContent]: + """Invoke an agent asynchronously.""" + self.set_activity_or_throw() + logger.info(f"Invoking agent {agent.name}") + try: + channel: AgentChannel = await self._get_or_create_channel(agent) + messages: list[ChatMessageContent] = [] + + async for is_visible, message in channel.invoke(agent): + messages.append(message) + self.history.messages.append(message) + if is_visible: + yield message + + # Broadcast message to other channels (in parallel) + # Note: Able to queue messages without synchronizing channels. + channel_refs = [ChannelReference(channel=channel, hash=key) for key, channel in self.agent_channels.items()] + await self.broadcast_queue.enqueue(channel_refs, messages) + finally: + self.clear_activity_signal() + + async def reset(self) -> None: + """Reset the agent chat.""" + self.set_activity_or_throw() + + try: + await asyncio.gather(*(channel.reset() for channel in self.agent_channels.values())) + self.agent_channels.clear() + self.channel_map.clear() + self.history.messages.clear() + finally: + self.clear_activity_signal() diff --git a/python/semantic_kernel/agents/group_chat/agent_chat_utils.py b/python/semantic_kernel/agents/group_chat/agent_chat_utils.py new file mode 100644 index 000000000000..0162bb94fe33 --- /dev/null +++ b/python/semantic_kernel/agents/group_chat/agent_chat_utils.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft. All rights reserved. 
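Editor's note: a hedged usage sketch of the `AgentChat` bookkeeping surface defined above; `chat` stands in for a concrete subclass instance such as the `AgentGroupChat` introduced later in this PR.

```python
# Sketch: exercising AgentChat's shared-history API. `chat` is assumed to be
# a concrete AgentChat subclass instance (e.g. AgentGroupChat).
from semantic_kernel.contents.chat_message_content import ChatMessageContent
from semantic_kernel.contents.utils.author_role import AuthorRole


async def seed_and_inspect(chat) -> None:
    # USER/ASSISTANT messages are accepted; SYSTEM messages raise AgentChatException.
    await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content="Hello"))
    # History is yielded newest-first.
    async for message in chat.get_chat_messages():
        print(message.role, message.content)
    # reset() clears per-agent channel state and the shared history.
    await chat.reset()
```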
+ +import base64 +import hashlib +from collections.abc import Iterable + +from semantic_kernel.exceptions.agent_exceptions import AgentExecutionException +from semantic_kernel.utils.experimental_decorator import experimental_class + + +@experimental_class +class KeyEncoder: + """A class for encoding keys.""" + + @staticmethod + def generate_hash(keys: Iterable[str]) -> str: + """Generate a hash from a list of keys. + + Args: + keys: A list of keys to generate the hash from. + + Returns: + str: The generated hash. + + Raises: + AgentExecutionException: If the keys are empty + """ + if not keys: + raise AgentExecutionException("Channel Keys must not be empty. Unable to generate channel hash.") + joined_keys = ":".join(keys) + buffer = joined_keys.encode("utf-8") + sha256_hash = hashlib.sha256(buffer).digest() + return base64.b64encode(sha256_hash).decode("utf-8") diff --git a/python/semantic_kernel/agents/group_chat/agent_group_chat.py b/python/semantic_kernel/agents/group_chat/agent_group_chat.py new file mode 100644 index 000000000000..6aa60242de52 --- /dev/null +++ b/python/semantic_kernel/agents/group_chat/agent_group_chat.py @@ -0,0 +1,135 @@ +# Copyright (c) Microsoft. All rights reserved. + +import logging +from collections.abc import AsyncIterable +from typing import Any + +from pydantic import Field + +from semantic_kernel.agents import Agent, AgentChat +from semantic_kernel.agents.strategies import ( + DefaultTerminationStrategy, + SequentialSelectionStrategy, +) +from semantic_kernel.agents.strategies.selection.selection_strategy import SelectionStrategy +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.agent_exceptions import AgentChatException + +logger: logging.Logger = logging.getLogger(__name__) + + +class AgentGroupChat(AgentChat): + """An agent chat that supports multi-turn interactions.""" + + agent_ids: set[str] + agents: list[Agent] = Field(default_factory=list) + + is_complete: bool = False + termination_strategy: TerminationStrategy = Field(default_factory=DefaultTerminationStrategy) + selection_strategy: SelectionStrategy = Field(default_factory=SequentialSelectionStrategy) + + def __init__( + self, + agents: list[Agent] | None = None, + termination_strategy: TerminationStrategy | None = None, + selection_strategy: SelectionStrategy | None = None, + ) -> None: + """Initialize a new instance of AgentGroupChat. + + Args: + agents: The agents to add to the group chat. + termination_strategy: The termination strategy to use. + selection_strategy: The selection strategy + """ + agent_ids = {agent.id for agent in agents} if agents else set() + + if agents is None: + agents = [] + + args: dict[str, Any] = { + "agents": agents, + "agent_ids": agent_ids, + } + + if termination_strategy is not None: + args["termination_strategy"] = termination_strategy + if selection_strategy is not None: + args["selection_strategy"] = selection_strategy + + super().__init__(**args) + + def add_agent(self, agent: Agent) -> None: + """Add an agent to the group chat. + + Args: + agent: The agent to add. + """ + if agent.id not in self.agent_ids: + self.agent_ids.add(agent.id) + self.agents.append(agent) + + async def invoke_single_turn(self, agent: Agent) -> AsyncIterable[ChatMessageContent]: + """Invoke the agent chat for a single turn. 
+ + Args: + agent: The agent to invoke. + + Yields: + The chat message. + """ + async for message in self.invoke(agent, is_joining=True): + if message.role == AuthorRole.ASSISTANT: + task = self.termination_strategy.should_terminate(agent, self.history.messages) + self.is_complete = await task + yield message + + async def invoke(self, agent: Agent | None = None, is_joining: bool = True) -> AsyncIterable[ChatMessageContent]: + """Invoke the agent chat asynchronously. + + Handles both group interactions and single agent interactions based on the provided arguments. + + Args: + agent: The agent to invoke. If not provided, the method processes all agents in the chat. + is_joining: Controls whether the agent joins the chat. Defaults to True. + + Yields: + The chat message. + """ + if agent is not None: + if is_joining: + self.add_agent(agent) + + async for message in super().invoke_agent(agent): + if message.role == AuthorRole.ASSISTANT: + task = self.termination_strategy.should_terminate(agent, self.history.messages) + self.is_complete = await task + yield message + + return + + if self.agents is None: + raise AgentChatException("No agents are available") + + if self.is_complete: + if not self.termination_strategy.automatic_reset: + raise AgentChatException("Chat is already complete") + + self.is_complete = False + + for _ in range(self.termination_strategy.maximum_iterations): + try: + selected_agent = await self.selection_strategy.next(self.agents, self.history.messages) + except Exception as ex: + logger.error(f"Failed to select agent: {ex}") + raise AgentChatException("Failed to select agent") from ex + + async for message in super().invoke_agent(selected_agent): + if message.role == AuthorRole.ASSISTANT: + task = self.termination_strategy.should_terminate(selected_agent, self.history.messages) + self.is_complete = await task + yield message + + if self.is_complete: + break diff --git a/python/semantic_kernel/agents/group_chat/broadcast_queue.py b/python/semantic_kernel/agents/group_chat/broadcast_queue.py new file mode 100644 index 000000000000..e303a53c8404 --- /dev/null +++ b/python/semantic_kernel/agents/group_chat/broadcast_queue.py @@ -0,0 +1,127 @@ +# Copyright (c) Microsoft. All rights reserved. 
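Editor's note: pulling the group-chat pieces together, a hedged end-to-end sketch. The kernel/service wiring, the agents' instructions, and the `maximum_iterations` keyword on `DefaultTerminationStrategy` are assumptions for illustration, not part of this diff.

```python
# Sketch: two chat-completion agents alternating under a turn cap. Assumes
# `kernel` has a chat-completion service registered and that
# DefaultTerminationStrategy accepts a maximum_iterations field.
import asyncio

from semantic_kernel.agents import AgentGroupChat, ChatCompletionAgent
from semantic_kernel.agents.strategies import DefaultTerminationStrategy
from semantic_kernel.contents.chat_message_content import ChatMessageContent
from semantic_kernel.contents.utils.author_role import AuthorRole
from semantic_kernel.kernel import Kernel


async def main() -> None:
    kernel = Kernel()  # assumed: a chat service has been added to this kernel
    writer = ChatCompletionAgent(kernel=kernel, name="writer", instructions="Draft a tagline.")
    critic = ChatCompletionAgent(kernel=kernel, name="critic", instructions="Critique the draft.")

    chat = AgentGroupChat(
        agents=[writer, critic],
        termination_strategy=DefaultTerminationStrategy(maximum_iterations=4),
    )
    await chat.add_chat_message(ChatMessageContent(role=AuthorRole.USER, content="Tagline for a coffee shop?"))

    # Selection defaults to SequentialSelectionStrategy; the loop ends when the
    # termination strategy fires or maximum_iterations is exhausted.
    async for message in chat.invoke():
        print(f"{message.name}: {message.content}")


asyncio.run(main())
```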
+ + +import asyncio +from collections import deque +from dataclasses import dataclass, field + +from pydantic import Field, SkipValidation, ValidationError, model_validator + +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.kernel_pydantic import KernelBaseModel +from semantic_kernel.utils.experimental_decorator import experimental_class + + +@experimental_class +class QueueReference(KernelBaseModel): + """Utility class to associate a queue with its specific lock.""" + + queue: deque = Field(default_factory=deque) + queue_lock: SkipValidation[asyncio.Lock] = Field(default_factory=asyncio.Lock, exclude=True) + receive_task: SkipValidation[asyncio.Task | None] = None + receive_failure: Exception | None = None + + @property + def is_empty(self): + """Check if the queue is empty.""" + return len(self.queue) == 0 + + @model_validator(mode="before") + def validate_receive_task(cls, values): + """Validate the receive task.""" + receive_task = values.get("receive_task") + if receive_task is not None and not isinstance(receive_task, asyncio.Task): + raise ValidationError("receive_task must be an instance of asyncio.Task or None") + return values + + +@experimental_class +@dataclass +class ChannelReference: + """Tracks a channel along with its hashed key.""" + + hash: str + channel: AgentChannel = field(default_factory=AgentChannel) + + +@experimental_class +class BroadcastQueue(KernelBaseModel): + """A queue for broadcasting messages to listeners.""" + + queues: dict[str, QueueReference] = Field(default_factory=dict) + block_duration: float = 0.1 + + async def enqueue(self, channel_refs: list[ChannelReference], messages: list[ChatMessageContent]) -> None: + """Enqueue a set of messages for a given channel. + + Args: + channel_refs: The channel references. + messages: The messages to broadcast. + """ + for channel_ref in channel_refs: + if channel_ref.hash not in self.queues: + self.queues[channel_ref.hash] = QueueReference() + + queue_ref = self.queues[channel_ref.hash] + + async with queue_ref.queue_lock: + queue_ref.queue.append(messages) + + if not queue_ref.receive_task or queue_ref.receive_task.done(): + queue_ref.receive_task = asyncio.create_task(self.receive(channel_ref, queue_ref)) + + async def ensure_synchronized(self, channel_ref: ChannelReference) -> None: + """Blocks until a channel-queue is not in a receive state to ensure that channel history is complete. + + Args: + channel_ref: The channel reference. + """ + if channel_ref.hash not in self.queues: + return + + queue_ref = self.queues[channel_ref.hash] + + while True: + async with queue_ref.queue_lock: + is_empty = queue_ref.is_empty + + if queue_ref.receive_failure is not None: + failure = queue_ref.receive_failure + queue_ref.receive_failure = None + raise Exception( + f"Unexpected failure broadcasting to channel: {type(channel_ref.channel)}, failure: {failure}" + ) from failure + + if not is_empty and (not queue_ref.receive_task or queue_ref.receive_task.done()): + queue_ref.receive_task = asyncio.create_task(self.receive(channel_ref, queue_ref)) + + if is_empty: + break + + await asyncio.sleep(self.block_duration) + + async def receive(self, channel_ref: ChannelReference, queue_ref: QueueReference) -> None: + """Processes the specified queue with the provided channel, until the queue is empty. + + Args: + channel_ref: The channel reference. + queue_ref: The queue reference. 
+ """ + while True: + async with queue_ref.queue_lock: + if queue_ref.is_empty: + break + + messages = queue_ref.queue[0] + try: + await channel_ref.channel.receive(messages) + except Exception as e: + queue_ref.receive_failure = e + + async with queue_ref.queue_lock: + if not queue_ref.is_empty: + queue_ref.queue.popleft() + + if queue_ref.receive_failure is not None or queue_ref.is_empty: + break diff --git a/python/semantic_kernel/agents/open_ai/__init__.py b/python/semantic_kernel/agents/open_ai/__init__.py new file mode 100644 index 000000000000..e9cbddaf2d6a --- /dev/null +++ b/python/semantic_kernel/agents/open_ai/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft. All rights reserved. + +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_agent import OpenAIAssistantAgent + +__all__ = ["AzureAssistantAgent", "OpenAIAssistantAgent"] diff --git a/python/semantic_kernel/agents/open_ai/assistant_content_generation.py b/python/semantic_kernel/agents/open_ai/assistant_content_generation.py new file mode 100644 index 000000000000..858c17817ae0 --- /dev/null +++ b/python/semantic_kernel/agents/open_ai/assistant_content_generation.py @@ -0,0 +1,196 @@ +# Copyright (c) Microsoft. All rights reserved. + +from typing import TYPE_CHECKING, Any + +from openai import AsyncOpenAI +from openai.types.beta.threads.image_file_content_block import ImageFileContentBlock +from openai.types.beta.threads.text_content_block import TextContentBlock + +from semantic_kernel.contents.annotation_content import AnnotationContent +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.file_reference_content import FileReferenceContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.agent_exceptions import AgentExecutionException + +if TYPE_CHECKING: + from openai.resources.beta.threads.messages import Message + from openai.resources.beta.threads.runs.runs import Run + from openai.types.beta.threads.annotation import Annotation + from openai.types.beta.threads.runs.tool_call import ToolCall + + +################################################################### +# The methods in this file are used with OpenAIAssistantAgent # +# related code. They are used to create chat messages, or # +# generate message content. # +################################################################### + + +async def create_chat_message( + client: AsyncOpenAI, + thread_id: str, + message: "ChatMessageContent", + allowed_message_roles: list[str] = [AuthorRole.USER, AuthorRole.ASSISTANT], +) -> "Message": + """Class method to add a chat message, callable from class or instance. + + Args: + client: The client to use for creating the message. + thread_id: The thread id. + message: The chat message. + allowed_message_roles: The allowed message roles. + + Returns: + Message: The message. + """ + if message.role.value not in allowed_message_roles: + raise AgentExecutionException( + f"Invalid message role `{message.role.value}`. Allowed roles are {allowed_message_roles}." 
+ ) + + message_contents: list[dict[str, Any]] = get_message_contents(message=message) + + return await client.beta.threads.messages.create( + thread_id=thread_id, + role=message.role.value, # type: ignore + content=message_contents, # type: ignore + ) + + +def get_message_contents(message: "ChatMessageContent") -> list[dict[str, Any]]: + """Get the message contents. + + Args: + message: The message. + """ + contents: list[dict[str, Any]] = [] + for content in message.items: + if isinstance(content, TextContent): + contents.append({"type": "text", "text": content.text}) + elif isinstance(content, ImageContent) and content.uri: + contents.append(content.to_dict()) + elif isinstance(content, FileReferenceContent): + contents.append({ + "type": "image_file", + "image_file": {"file_id": content.file_id}, + }) + return contents + + +def generate_message_content(assistant_name: str, message: "Message") -> ChatMessageContent: + """Generate message content.""" + role = AuthorRole(message.role) + + content: ChatMessageContent = ChatMessageContent(role=role, name=assistant_name) # type: ignore + + for item_content in message.content: + if item_content.type == "text": + assert isinstance(item_content, TextContentBlock) # nosec + content.items.append( + TextContent( + text=item_content.text.value, + ) + ) + for annotation in item_content.text.annotations: + content.items.append(generate_annotation_content(annotation)) + elif item_content.type == "image_file": + assert isinstance(item_content, ImageFileContentBlock) # nosec + content.items.append( + FileReferenceContent( + file_id=item_content.image_file.file_id, + ) + ) + return content + + +def generate_function_call_content(agent_name: str, fccs: list[FunctionCallContent]) -> ChatMessageContent: + """Generate function call content. + + Args: + agent_name: The agent name. + fccs: The function call contents. + + Returns: + ChatMessageContent: The chat message content containing the function call content as the items. + """ + return ChatMessageContent(role=AuthorRole.TOOL, name=agent_name, items=fccs) # type: ignore + + +def generate_function_result_content( + agent_name: str, function_step: FunctionCallContent, tool_call: "ToolCall" +) -> ChatMessageContent: + """Generate function result content.""" + function_call_content: ChatMessageContent = ChatMessageContent(role=AuthorRole.TOOL, name=agent_name) # type: ignore + function_call_content.items.append( + FunctionResultContent( + function_name=function_step.function_name, + plugin_name=function_step.plugin_name, + id=function_step.id, + result=tool_call.function.output, # type: ignore + ) + ) + return function_call_content + + +def get_function_call_contents(run: "Run", function_steps: dict[str, FunctionCallContent]) -> list[FunctionCallContent]: + """Extract function call contents from the run. + + Args: + run: The run. + function_steps: The function steps + + Returns: + The list of function call contents. 
+ """ + function_call_contents: list[FunctionCallContent] = [] + required_action = getattr(run, "required_action", None) + if not required_action or not getattr(required_action, "submit_tool_outputs", False): + return function_call_contents + for tool in required_action.submit_tool_outputs.tool_calls: + fcc = FunctionCallContent( + id=tool.id, + index=getattr(tool, "index", None), + name=tool.function.name, + arguments=tool.function.arguments, + ) + function_call_contents.append(fcc) + function_steps[tool.id] = fcc + return function_call_contents + + +def generate_code_interpreter_content(agent_name: str, code: str) -> "ChatMessageContent": + """Generate code interpreter content. + + Args: + agent_name: The agent name. + code: The code. + + Returns: + ChatMessageContent: The chat message content. + """ + return ChatMessageContent( + role=AuthorRole.ASSISTANT, + content=code, + name=agent_name, + metadata={"code": True}, + ) + + +def generate_annotation_content(annotation: "Annotation") -> AnnotationContent: + """Generate annotation content.""" + file_id = None + if hasattr(annotation, "file_path"): + file_id = annotation.file_path.file_id + elif hasattr(annotation, "file_citation"): + file_id = annotation.file_citation.file_id + + return AnnotationContent( + file_id=file_id, + quote=annotation.text, + start_index=annotation.start_index, + end_index=annotation.end_index, + ) diff --git a/python/semantic_kernel/agents/open_ai/azure_assistant_agent.py b/python/semantic_kernel/agents/open_ai/azure_assistant_agent.py new file mode 100644 index 000000000000..fe6e48bd0356 --- /dev/null +++ b/python/semantic_kernel/agents/open_ai/azure_assistant_agent.py @@ -0,0 +1,444 @@ +# Copyright (c) Microsoft. All rights reserved. + +import logging +from collections.abc import AsyncIterable, Awaitable, Callable +from copy import copy +from typing import TYPE_CHECKING, Any + +from openai import AsyncAzureOpenAI +from pydantic import ValidationError + +from semantic_kernel.agents.open_ai.open_ai_assistant_base import OpenAIAssistantBase +from semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings import AzureOpenAISettings +from semantic_kernel.const import DEFAULT_SERVICE_NAME +from semantic_kernel.exceptions.agent_exceptions import AgentInitializationException +from semantic_kernel.kernel_pydantic import HttpsUrl +from semantic_kernel.utils.experimental_decorator import experimental_class +from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent + +if TYPE_CHECKING: + from semantic_kernel.kernel import Kernel + + +logger: logging.Logger = logging.getLogger(__name__) + + +@experimental_class +class AzureAssistantAgent(OpenAIAssistantBase): + """Azure OpenAI Assistant Agent class. + + Provides the ability to interact with Azure OpenAI Assistants. 
+ """ + + # region Agent Initialization + + def __init__( + self, + kernel: "Kernel | None" = None, + service_id: str | None = None, + deployment_name: str | None = None, + api_key: str | None = None, + endpoint: HttpsUrl | None = None, + api_version: str | None = None, + ad_token: str | None = None, + ad_token_provider: Callable[[], str | Awaitable[str]] | None = None, + client: AsyncAzureOpenAI | None = None, + default_headers: dict[str, str] | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + description: str | None = None, + id: str | None = None, + instructions: str | None = None, + name: str | None = None, + enable_code_interpreter: bool | None = None, + enable_file_search: bool | None = None, + enable_json_response: bool | None = None, + file_ids: list[str] | None = [], + temperature: float | None = None, + top_p: float | None = None, + vector_store_id: str | None = None, + metadata: dict[str, Any] | None = {}, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + **kwargs: Any, + ) -> None: + """Initialize an Azure OpenAI Assistant Agent. + + Args: + kernel: The Kernel instance. (optional) + service_id: The service ID. (optional) + deployment_name: The deployment name. (optional) + api_key: The Azure OpenAI API key. (optional) + endpoint: The Azure OpenAI endpoint. (optional) + api_version: The Azure OpenAI API version. (optional) + ad_token: The Azure AD token. (optional) + ad_token_provider: The Azure AD token provider. (optional) + client: The Azure OpenAI client. (optional) + default_headers: The default headers. (optional) + env_file_path: The environment file path. (optional) + env_file_encoding: The environment file encoding. (optional) + description: The description. (optional) + id: The Agent ID. (optional) + instructions: The Agent instructions. (optional) + name: The Agent name. (optional) + enable_code_interpreter: Enable the code interpreter. (optional) + enable_file_search: Enable the file search. (optional) + enable_json_response: Enable the JSON response. (optional) + file_ids: The file IDs. (optional) + temperature: The temperature. (optional) + top_p: The top p. (optional) + vector_store_id: The vector store ID. (optional) + metadata: The metadata. (optional) + max_completion_tokens: The maximum completion tokens. (optional) + max_prompt_tokens: The maximum prompt tokens. (optional) + parallel_tool_calls_enabled: Enable parallel tool calls. (optional) + truncation_message_count: The truncation message count. (optional) + **kwargs: Additional keyword arguments. + + Raises: + AgentInitializationError: If the api_key is not provided in the configuration. 
+ """ + azure_openai_settings = AzureAssistantAgent._create_azure_openai_settings( + api_key=api_key, + endpoint=endpoint, + deployment_name=deployment_name, + api_version=api_version, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + + if not azure_openai_settings.chat_deployment_name: + raise AgentInitializationException("The Azure OpenAI chat_deployment_name is required.") + + if not azure_openai_settings.api_key and not ad_token and not ad_token_provider: + raise AgentInitializationException("Please provide either api_key, ad_token or ad_token_provider.") + + client = self._create_client( + api_key=azure_openai_settings.api_key.get_secret_value() if azure_openai_settings.api_key else None, + endpoint=azure_openai_settings.endpoint, + api_version=azure_openai_settings.api_version, + ad_token=ad_token, + ad_token_provider=ad_token_provider, + default_headers=default_headers, + ) + service_id = service_id if service_id else DEFAULT_SERVICE_NAME + + args: dict[str, Any] = { + "kernel": kernel, + "ai_model_id": azure_openai_settings.chat_deployment_name, + "service_id": service_id, + "client": client, + "name": name, + "description": description, + "instructions": instructions, + "enable_code_interpreter": enable_code_interpreter, + "enable_file_search": enable_file_search, + "enable_json_response": enable_json_response, + "file_ids": file_ids, + "temperature": temperature, + "top_p": top_p, + "vector_store_id": vector_store_id, + "metadata": metadata, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "parallel_tool_calls_enabled": parallel_tool_calls_enabled, + "truncation_message_count": truncation_message_count, + } + + if id is not None: + args["id"] = id + if kernel is not None: + args["kernel"] = kernel + if kwargs: + args.update(kwargs) + super().__init__(**args) + + @classmethod + async def create( + cls, + *, + kernel: "Kernel | None" = None, + service_id: str | None = None, + deployment_name: str | None = None, + api_key: str | None = None, + endpoint: HttpsUrl | None = None, + api_version: str | None = None, + ad_token: str | None = None, + ad_token_provider: Callable[[], str | Awaitable[str]] | None = None, + client: AsyncAzureOpenAI | None = None, + default_headers: dict[str, str] | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + description: str | None = None, + id: str | None = None, + instructions: str | None = None, + name: str | None = None, + enable_code_interpreter: bool | None = None, + code_interpreter_filenames: list[str] | None = None, + enable_file_search: bool | None = None, + vector_store_filenames: list[str] | None = None, + enable_json_response: bool | None = None, + temperature: float | None = None, + top_p: float | None = None, + vector_store_id: str | None = None, + metadata: dict[str, Any] | None = {}, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + **kwargs: Any, + ) -> "AzureAssistantAgent": + """Asynchronous class method used to create the OpenAI Assistant Agent. + + Args: + kernel: The Kernel instance. (optional) + service_id: The service ID. (optional) + deployment_name: The deployment name. (optional) + api_key: The Azure OpenAI API key. (optional) + endpoint: The Azure OpenAI endpoint. (optional) + api_version: The Azure OpenAI API version. (optional) + ad_token: The Azure AD token. 
(optional) + ad_token_provider: The Azure AD token provider. (optional) + client: The Azure OpenAI client. (optional) + default_headers: The default headers. (optional) + env_file_path: The environment file path. (optional) + env_file_encoding: The environment file encoding. (optional) + description: The description. (optional) + id: The Agent ID. (optional) + instructions: The Agent instructions. (optional) + name: The Agent name. (optional) + enable_code_interpreter: Enable the code interpreter. (optional) + code_interpreter_filenames: The filenames/paths to use with the code interpreter. (optional) + enable_file_search: Enable the file search. (optional) + vector_store_filenames: The filenames/paths for files to use with file search. (optional) + enable_json_response: Enable the JSON response. (optional) + temperature: The temperature. (optional) + top_p: The top p. (optional) + vector_store_id: The vector store ID. (optional) + metadata: The metadata. (optional) + max_completion_tokens: The maximum completion tokens. (optional) + max_prompt_tokens: The maximum prompt tokens. (optional) + parallel_tool_calls_enabled: Enable parallel tool calls. (optional) + truncation_message_count: The truncation message count. (optional) + **kwargs: Additional keyword arguments. + + Returns: + An instance of the AzureOpenAIAssistantAgent + """ + agent = cls( + kernel=kernel, + service_id=service_id, + deployment_name=deployment_name, + api_key=api_key, + endpoint=endpoint, + api_version=api_version, + ad_token=ad_token, + ad_token_provider=ad_token_provider, + client=client, + default_headers=default_headers, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + description=description, + id=id, + instructions=instructions, + name=name, + enable_code_interpreter=enable_code_interpreter, + enable_file_search=enable_file_search, + enable_json_response=enable_json_response, + temperature=temperature, + top_p=top_p, + vector_store_id=vector_store_id, + metadata=metadata, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + parallel_tool_calls_enabled=parallel_tool_calls_enabled, + truncation_message_count=truncation_message_count, + **kwargs, + ) + + assistant_create_kwargs: dict[str, Any] = {} + + if code_interpreter_filenames is not None: + code_interpreter_file_ids: list[str] = [] + for file_path in code_interpreter_filenames: + try: + file_id = await agent.add_file(file_path=file_path, purpose="assistants") + code_interpreter_file_ids.append(file_id) + except FileNotFoundError as ex: + logger.error( + f"Failed to upload code interpreter file with path: `{file_path}` with exception: {ex}" + ) + raise AgentInitializationException("Failed to upload code interpreter files.", ex) from ex + agent.code_interpreter_file_ids = code_interpreter_file_ids + assistant_create_kwargs["code_interpreter_file_ids"] = code_interpreter_file_ids + + if vector_store_filenames is not None: + file_search_file_ids: list[str] = [] + for file_path in vector_store_filenames: + try: + file_id = await agent.add_file(file_path=file_path, purpose="assistants") + file_search_file_ids.append(file_id) + except FileNotFoundError as ex: + logger.error(f"Failed to upload file search file with path: `{file_path}` with exception: {ex}") + raise AgentInitializationException("Failed to upload file search files.", ex) from ex + + if enable_file_search or agent.enable_file_search: + vector_store_id = await agent.create_vector_store(file_ids=file_search_file_ids) + agent.file_search_file_ids = 
file_search_file_ids + agent.vector_store_id = vector_store_id + assistant_create_kwargs["vector_store_id"] = vector_store_id + + agent.assistant = await agent.create_assistant(**assistant_create_kwargs) + return agent + + @staticmethod + def _create_client( + api_key: str | None = None, + endpoint: HttpsUrl | None = None, + api_version: str | None = None, + ad_token: str | None = None, + ad_token_provider: Callable[[], str | Awaitable[str]] | None = None, + default_headers: dict[str, str] | None = None, + ) -> AsyncAzureOpenAI: + """Create the OpenAI client from configuration. + + Args: + api_key: The OpenAI API key. + endpoint: The OpenAI endpoint. + api_version: The OpenAI API version. + ad_token: The Azure AD token. + ad_token_provider: The Azure AD token provider. + default_headers: The default headers. + + Returns: + An AsyncAzureOpenAI client instance. + """ + merged_headers = dict(copy(default_headers)) if default_headers else {} + if APP_INFO: + merged_headers.update(APP_INFO) + merged_headers = prepend_semantic_kernel_to_user_agent(merged_headers) + + if not api_key and not ad_token and not ad_token_provider: + raise AgentInitializationException( + "Please provide either AzureOpenAI api_key, an ad_token or an ad_token_provider or a client." + ) + if not endpoint: + raise AgentInitializationException("Please provide an AzureOpenAI endpoint.") + return AsyncAzureOpenAI( + azure_endpoint=str(endpoint), + api_version=api_version, + api_key=api_key, + azure_ad_token=ad_token, + azure_ad_token_provider=ad_token_provider, + default_headers=merged_headers, + ) + + @staticmethod + def _create_azure_openai_settings( + api_key: str | None = None, + endpoint: HttpsUrl | None = None, + deployment_name: str | None = None, + api_version: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> AzureOpenAISettings: + """Create the Azure OpenAI settings. + + Args: + api_key: The Azure OpenAI API key. + endpoint: The Azure OpenAI endpoint. + deployment_name: The Azure OpenAI chat deployment name. + api_version: The Azure OpenAI API version. + env_file_path: The environment file path. + env_file_encoding: The environment file encoding. + + Returns: + An instance of the AzureOpenAISettings. + """ + try: + azure_openai_settings = AzureOpenAISettings.create( + api_key=api_key, + endpoint=endpoint, + chat_deployment_name=deployment_name, + api_version=api_version, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as ex: + raise AgentInitializationException("Failed to create Azure OpenAI settings.", ex) from ex + + return azure_openai_settings + + async def list_definitions(self) -> AsyncIterable[dict[str, Any]]: + """List the assistant definitions. + + Yields: + An AsyncIterable of dictionaries representing the OpenAIAssistantDefinition. 
+ """ + assistants = await self.client.beta.assistants.list(order="desc") + for assistant in assistants.data: + yield self._create_open_ai_assistant_definition(assistant) + + @classmethod + async def retrieve( + cls, + *, + id: str, + api_key: str | None = None, + endpoint: HttpsUrl | None = None, + api_version: str | None = None, + ad_token: str | None = None, + ad_token_provider: Callable[[], str | Awaitable[str]] | None = None, + client: AsyncAzureOpenAI | None = None, + kernel: "Kernel | None" = None, + default_headers: dict[str, str] | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> "AzureAssistantAgent": + """Retrieve an assistant by ID. + + Args: + id: The assistant ID. + api_key: The Azure OpenAI API + endpoint: The Azure OpenAI endpoint. (optional) + api_version: The Azure OpenAI API version. (optional) + ad_token: The Azure AD token. (optional) + ad_token_provider: The Azure AD token provider. (optional) + client: The Azure OpenAI client. (optional) + kernel: The Kernel instance. (optional) + default_headers: The default headers. (optional) + env_file_path: The environment file path. (optional) + env_file_encoding: The environment file encoding. (optional) + + Returns: + An AzureAssistantAgent instance. + """ + azure_openai_settings = AzureAssistantAgent._create_azure_openai_settings( + api_key=api_key, + endpoint=endpoint, + api_version=api_version, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + + if not azure_openai_settings.chat_deployment_name: + raise AgentInitializationException("The Azure OpenAI chat_deployment_name is required.") + if not azure_openai_settings.api_key and not ad_token and not ad_token_provider: + raise AgentInitializationException("Please provide either api_key, ad_token or ad_token_provider.") + + if not client: + client = AzureAssistantAgent._create_client( + api_key=api_key, + endpoint=endpoint, + api_version=api_version, + ad_token=ad_token, + ad_token_provider=ad_token_provider, + default_headers=default_headers, + ) + assistant = await client.beta.assistants.retrieve(id) + assistant_definition = OpenAIAssistantBase._create_open_ai_assistant_definition(assistant) + return AzureAssistantAgent(kernel=kernel, **assistant_definition) + + # endregion diff --git a/python/semantic_kernel/agents/open_ai/open_ai_assistant_agent.py b/python/semantic_kernel/agents/open_ai/open_ai_assistant_agent.py new file mode 100644 index 000000000000..ad68fface2aa --- /dev/null +++ b/python/semantic_kernel/agents/open_ai/open_ai_assistant_agent.py @@ -0,0 +1,402 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import logging +from collections.abc import AsyncIterable +from copy import copy +from typing import TYPE_CHECKING, Any + +from openai import AsyncOpenAI +from pydantic import ValidationError + +from semantic_kernel.agents.open_ai.open_ai_assistant_base import OpenAIAssistantBase +from semantic_kernel.connectors.ai.open_ai.settings.open_ai_settings import OpenAISettings +from semantic_kernel.const import DEFAULT_SERVICE_NAME +from semantic_kernel.exceptions.agent_exceptions import AgentInitializationException +from semantic_kernel.utils.experimental_decorator import experimental_class +from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent + +if TYPE_CHECKING: + from semantic_kernel.kernel import Kernel + + +logger: logging.Logger = logging.getLogger(__name__) + + +@experimental_class +class OpenAIAssistantAgent(OpenAIAssistantBase): + """OpenAI Assistant Agent class. + + Provides the ability to interact with OpenAI Assistants. + """ + + # region Agent Initialization + + def __init__( + self, + *, + kernel: "Kernel | None" = None, + service_id: str | None = None, + ai_model_id: str | None = None, + api_key: str | None = None, + org_id: str | None = None, + client: AsyncOpenAI | None = None, + default_headers: dict[str, str] | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + description: str | None = None, + id: str | None = None, + instructions: str | None = None, + name: str | None = None, + enable_code_interpreter: bool | None = None, + enable_file_search: bool | None = None, + enable_json_response: bool | None = None, + code_interpreter_file_ids: list[str] | None = [], + temperature: float | None = None, + top_p: float | None = None, + vector_store_id: str | None = None, + metadata: dict[str, Any] | None = {}, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + **kwargs: Any, + ) -> None: + """Initialize an OpenAIAssistant service. + + Args: + kernel: The Kernel instance. (optional) + service_id: The service ID. (optional) If not provided the default service name (default) is used. + ai_model_id: The AI model ID. (optional) + api_key: The OpenAI API key. (optional) + org_id: The OpenAI organization ID. (optional) + client: The OpenAI client. (optional) + default_headers: The default headers. (optional) + env_file_path: The environment file path. (optional) + env_file_encoding: The environment file encoding. (optional) + description: The assistant description. (optional) + id: The assistant ID. (optional) + instructions: The assistant instructions. (optional) + name: The assistant name. (optional) + enable_code_interpreter: Enable code interpreter. (optional) + enable_file_search: Enable file search. (optional) + enable_json_response: Enable JSON response. (optional) + code_interpreter_file_ids: The file IDs. (optional) + temperature: The temperature. (optional) + top_p: The top p. (optional) + vector_store_id: The vector store ID. (optional) + metadata: The assistant metadata. (optional) + max_completion_tokens: The max completion tokens. (optional) + max_prompt_tokens: The max prompt tokens. (optional) + parallel_tool_calls_enabled: Enable parallel tool calls. (optional) + truncation_message_count: The truncation message count. (optional) + kwargs: Additional keyword arguments. 
+ + Raises: + AgentInitializationException: If the api_key is not provided in the configuration. + """ + openai_settings = OpenAIAssistantAgent._create_open_ai_settings( + api_key=api_key, + org_id=org_id, + ai_model_id=ai_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + + if not client and not openai_settings.api_key: + raise AgentInitializationException("The OpenAI API key is required, if a client is not provided.") + if not openai_settings.chat_model_id: + raise AgentInitializationException("The OpenAI chat model ID is required.") + + if not client: + client = self._create_client( + api_key=openai_settings.api_key.get_secret_value() if openai_settings.api_key else None, + org_id=openai_settings.org_id, + default_headers=default_headers, + ) + + service_id = service_id if service_id else DEFAULT_SERVICE_NAME + + args: dict[str, Any] = { + "ai_model_id": openai_settings.chat_model_id, + "service_id": service_id, + "client": client, + "description": description, + "instructions": instructions, + "enable_code_interpreter": enable_code_interpreter, + "enable_file_search": enable_file_search, + "enable_json_response": enable_json_response, + "code_interpreter_file_ids": code_interpreter_file_ids, + "temperature": temperature, + "top_p": top_p, + "vector_store_id": vector_store_id, + "metadata": metadata, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "parallel_tool_calls_enabled": parallel_tool_calls_enabled, + "truncation_message_count": truncation_message_count, + } + + if name is not None: + args["name"] = name + if id is not None: + args["id"] = id + if kernel is not None: + args["kernel"] = kernel + if kwargs: + args.update(kwargs) + super().__init__(**args) + + @classmethod + async def create( + cls, + *, + kernel: "Kernel | None" = None, + service_id: str | None = None, + ai_model_id: str | None = None, + api_key: str | None = None, + org_id: str | None = None, + client: AsyncOpenAI | None = None, + default_headers: dict[str, str] | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + description: str | None = None, + id: str | None = None, + instructions: str | None = None, + name: str | None = None, + enable_code_interpreter: bool | None = None, + code_interpreter_filenames: list[str] | None = None, + enable_file_search: bool | None = None, + vector_store_filenames: list[str] | None = None, + enable_json_response: bool | None = None, + temperature: float | None = None, + top_p: float | None = None, + vector_store_id: str | None = None, + metadata: dict[str, Any] | None = {}, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + **kwargs: Any, + ) -> "OpenAIAssistantAgent": + """Asynchronous class method used to create the OpenAI Assistant Agent. + + Args: + kernel: The Kernel instance. (optional) + service_id: The service ID. (optional) If not provided the default service name (default) is used. + ai_model_id: The AI model ID. (optional) + api_key: The OpenAI API key. (optional) + org_id: The OpenAI organization ID. (optional) + client: The OpenAI client. (optional) + default_headers: The default headers. (optional) + env_file_path: The environment file path. (optional) + env_file_encoding: The environment file encoding. (optional) + description: The assistant description. (optional) + id: The assistant ID.
+            instructions: The assistant instructions. (optional)
+            name: The assistant name. (optional)
+            enable_code_interpreter: Enable code interpreter. (optional)
+            code_interpreter_filenames: The filenames/paths for files to use with the code interpreter. (optional)
+            enable_file_search: Enable file search. (optional)
+            vector_store_filenames: The list of file paths to upload and attach to the file search. (optional)
+            enable_json_response: Enable JSON response. (optional)
+            temperature: The temperature. (optional)
+            top_p: The top p. (optional)
+            vector_store_id: The vector store ID. (optional)
+            metadata: The assistant metadata. (optional)
+            max_completion_tokens: The max completion tokens. (optional)
+            max_prompt_tokens: The max prompt tokens. (optional)
+            parallel_tool_calls_enabled: Enable parallel tool calls. (optional)
+            truncation_message_count: The truncation message count. (optional)
+            kwargs: Additional keyword arguments.
+
+        Returns:
+            An OpenAIAssistantAgent instance.
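+
+        Example:
+            A minimal usage sketch; the model name and file path below are
+            illustrative placeholders, and the API key is assumed to come from
+            the environment settings:
+
+                agent = await OpenAIAssistantAgent.create(
+                    ai_model_id="gpt-4o",
+                    name="file-search-agent",
+                    instructions="Answer questions about the uploaded document.",
+                    enable_file_search=True,
+                    vector_store_filenames=["./docs/manual.pdf"],
+                )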
+        """
+        agent = cls(
+            kernel=kernel,
+            service_id=service_id,
+            ai_model_id=ai_model_id,
+            api_key=api_key,
+            org_id=org_id,
+            client=client,
+            default_headers=default_headers,
+            env_file_path=env_file_path,
+            env_file_encoding=env_file_encoding,
+            description=description,
+            id=id,
+            instructions=instructions,
+            name=name,
+            enable_code_interpreter=enable_code_interpreter,
+            enable_file_search=enable_file_search,
+            enable_json_response=enable_json_response,
+            temperature=temperature,
+            top_p=top_p,
+            vector_store_id=vector_store_id,
+            metadata=metadata,
+            max_completion_tokens=max_completion_tokens,
+            max_prompt_tokens=max_prompt_tokens,
+            parallel_tool_calls_enabled=parallel_tool_calls_enabled,
+            truncation_message_count=truncation_message_count,
+            **kwargs,
+        )
+
+        assistant_create_kwargs: dict[str, Any] = {}
+
+        if code_interpreter_filenames is not None:
+            code_interpreter_file_ids: list[str] = []
+            for file_path in code_interpreter_filenames:
+                try:
+                    file_id = await agent.add_file(file_path=file_path, purpose="assistants")
+                    code_interpreter_file_ids.append(file_id)
+                except FileNotFoundError as ex:
+                    logger.error(
+                        f"Failed to upload code interpreter file with path: `{file_path}` with exception: {ex}"
+                    )
+                    raise AgentInitializationException("Failed to upload code interpreter files.", ex) from ex
+            agent.code_interpreter_file_ids = code_interpreter_file_ids
+            assistant_create_kwargs["code_interpreter_file_ids"] = code_interpreter_file_ids
+
+        if vector_store_filenames is not None:
+            file_search_file_ids: list[str] = []
+            for file_path in vector_store_filenames:
+                try:
+                    file_id = await agent.add_file(file_path=file_path, purpose="assistants")
+                    file_search_file_ids.append(file_id)
+                except FileNotFoundError as ex:
+                    logger.error(f"Failed to upload file search file with path: `{file_path}` with exception: {ex}")
+                    raise AgentInitializationException("Failed to upload file search files.", ex) from ex
+
+            if enable_file_search or agent.enable_file_search:
+                vector_store_id = await agent.create_vector_store(file_ids=file_search_file_ids)
+                agent.file_search_file_ids = file_search_file_ids
+                agent.vector_store_id = vector_store_id
+                assistant_create_kwargs["vector_store_id"] = vector_store_id
+
+        agent.assistant = await agent.create_assistant(**assistant_create_kwargs)
+        return agent
+
+    @staticmethod
+    def _create_client(
+        api_key: str | None = None, org_id: str | None = None, default_headers: dict[str, str] | None = None
+    ) -> AsyncOpenAI:
+        """An internal method to create the OpenAI client from the provided arguments.
+
+        Args:
+            api_key: The OpenAI API key.
+            org_id: The OpenAI organization ID. (optional)
+            default_headers: The default headers. (optional)
+
+        Returns:
+            An OpenAI client instance.
+        """
+        # Copy once; the extra update with the same headers was redundant.
+        merged_headers = copy(default_headers) if default_headers else {}
+        if APP_INFO:
+            merged_headers.update(APP_INFO)
+        merged_headers = prepend_semantic_kernel_to_user_agent(merged_headers)
+
+        if not api_key:
+            raise AgentInitializationException("Please provide an OpenAI api_key")
+
+        return AsyncOpenAI(
+            api_key=api_key,
+            organization=org_id,
+            default_headers=merged_headers,
+        )
+
+    @staticmethod
+    def _create_open_ai_settings(
+        api_key: str | None = None,
+        org_id: str | None = None,
+        ai_model_id: str | None = None,
+        env_file_path: str | None = None,
+        env_file_encoding: str | None = None,
+    ) -> OpenAISettings:
+        """An internal method to create the OpenAI settings from the provided arguments.
+
+        Args:
+            api_key: The OpenAI API key.
+            org_id: The OpenAI organization ID. (optional)
+            ai_model_id: The AI model ID. (optional)
+            env_file_path: The environment file path. (optional)
+            env_file_encoding: The environment file encoding. (optional)
+
+        Returns:
+            An OpenAI settings instance.
+        """
+        try:
+            openai_settings = OpenAISettings.create(
+                api_key=api_key,
+                org_id=org_id,
+                chat_model_id=ai_model_id,
+                env_file_path=env_file_path,
+                env_file_encoding=env_file_encoding,
+            )
+        except ValidationError as ex:
+            raise AgentInitializationException("Failed to create OpenAI settings.", ex) from ex
+
+        return openai_settings
+
+    async def list_definitions(self) -> AsyncIterable[dict[str, Any]]:
+        """List the assistant definitions.
+
+        Yields:
+            An AsyncIterable of dictionaries representing the OpenAIAssistantDefinition.
+        """
+        assistants = await self.client.beta.assistants.list(order="desc")
+        for assistant in assistants.data:
+            yield self._create_open_ai_assistant_definition(assistant)
+
+    @classmethod
+    async def retrieve(
+        cls,
+        *,
+        id: str,
+        kernel: "Kernel | None" = None,
+        api_key: str | None = None,
+        org_id: str | None = None,
+        ai_model_id: str | None = None,
+        client: AsyncOpenAI | None = None,
+        default_headers: dict[str, str] | None = None,
+        env_file_path: str | None = None,
+        env_file_encoding: str | None = None,
+    ) -> "OpenAIAssistantAgent":
+        """Retrieve an assistant by ID.
+
+        Args:
+            id: The assistant ID.
+            kernel: The Kernel instance. (optional)
+            api_key: The OpenAI API key. (optional)
+            org_id: The OpenAI organization ID. (optional)
+            ai_model_id: The AI model ID. (optional)
+            client: The OpenAI client. (optional)
+            default_headers: The default headers. (optional)
+            env_file_path: The environment file path. (optional)
+            env_file_encoding: The environment file encoding. (optional)
+
+        Returns:
+            An OpenAIAssistantAgent instance.
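+
+        Example:
+            An illustrative sketch; the assistant ID is a placeholder and the
+            API key is assumed to be available via the environment settings:
+
+                agent = await OpenAIAssistantAgent.retrieve(id="asst_abc123")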
+ """ + openai_settings = OpenAIAssistantAgent._create_open_ai_settings( + api_key=api_key, + org_id=org_id, + ai_model_id=ai_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + if not client and not openai_settings.api_key: + raise AgentInitializationException("The OpenAI API key is required, if a client is not provided.") + if not openai_settings.chat_model_id: + raise AgentInitializationException("The OpenAI chat model ID is required.") + if not client: + client = OpenAIAssistantAgent._create_client( + api_key=openai_settings.api_key.get_secret_value() if openai_settings.api_key else None, + org_id=openai_settings.org_id, + default_headers=default_headers, + ) + assistant = await client.beta.assistants.retrieve(id) + assistant_definition = OpenAIAssistantBase._create_open_ai_assistant_definition(assistant) + return OpenAIAssistantAgent(kernel=kernel, **assistant_definition) + + # endregion diff --git a/python/semantic_kernel/agents/open_ai/open_ai_assistant_base.py b/python/semantic_kernel/agents/open_ai/open_ai_assistant_base.py new file mode 100644 index 000000000000..70df5943840e --- /dev/null +++ b/python/semantic_kernel/agents/open_ai/open_ai_assistant_base.py @@ -0,0 +1,1003 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import json +import logging +from collections.abc import AsyncIterable, Iterable +from typing import TYPE_CHECKING, Any, ClassVar, Literal + +from openai import AsyncOpenAI +from openai.resources.beta.assistants import Assistant +from openai.resources.beta.threads.messages import Message +from openai.resources.beta.threads.runs.runs import Run +from openai.types.beta.assistant_tool import CodeInterpreterTool, FileSearchTool +from openai.types.beta.threads.runs import RunStep +from pydantic import Field + +from semantic_kernel.agents import Agent +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.channels.open_ai_assistant_channel import OpenAIAssistantChannel +from semantic_kernel.agents.open_ai.assistant_content_generation import ( + create_chat_message, + generate_code_interpreter_content, + generate_function_call_content, + generate_function_result_content, + generate_message_content, + get_function_call_contents, + get_message_contents, +) +from semantic_kernel.agents.open_ai.run_polling_options import RunPollingOptions +from semantic_kernel.connectors.ai.function_calling_utils import kernel_function_metadata_to_function_call_format +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.agent_exceptions import ( + AgentExecutionException, + AgentFileNotFoundException, + AgentInitializationException, + AgentInvokeException, +) +from semantic_kernel.utils.experimental_decorator import experimental_class + +if TYPE_CHECKING: + from semantic_kernel.kernel import Kernel + +logger: logging.Logger = logging.getLogger(__name__) + + +@experimental_class +class OpenAIAssistantBase(Agent): + """OpenAI Assistant Base class. + + Manages the interaction with OpenAI Assistants. 
+ """ + + _options_metadata_key: ClassVar[str] = "__run_options" + + ai_model_id: str + client: AsyncOpenAI + assistant: Assistant | None = None + polling_options: RunPollingOptions = Field(default_factory=RunPollingOptions) + enable_code_interpreter: bool | None = Field(False) + enable_file_search: bool | None = Field(False) + enable_json_response: bool | None = Field(False) + code_interpreter_file_ids: list[str] | None = Field(default_factory=list, max_length=20) + file_search_file_ids: list[str] | None = Field(default_factory=list, max_length=20) + temperature: float | None = Field(None) + top_p: float | None = Field(None) + vector_store_id: str | None = None + metadata: dict[str, Any] | None = Field(default_factory=dict, max_length=16) + max_completion_tokens: int | None = Field(None) + max_prompt_tokens: int | None = Field(None) + parallel_tool_calls_enabled: bool | None = Field(True) + truncation_message_count: int | None = Field(None) + + allowed_message_roles: ClassVar[list[str]] = [AuthorRole.USER, AuthorRole.ASSISTANT] + polling_status: ClassVar[list[str]] = ["queued", "in_progress", "cancelling"] + error_message_states: ClassVar[list[str]] = ["failed", "canceled", "expired"] + + channel_type: ClassVar[type[AgentChannel]] = OpenAIAssistantChannel + + _is_deleted: bool = False + + # region Assistant Initialization + + def __init__( + self, + ai_model_id: str, + client: AsyncOpenAI, + service_id: str, + *, + kernel: "Kernel | None" = None, + id: str | None = None, + name: str | None = None, + description: str | None = None, + instructions: str | None = None, + enable_code_interpreter: bool | None = None, + enable_file_search: bool | None = None, + enable_json_response: bool | None = None, + code_interpreter_file_ids: list[str] | None = [], + temperature: float | None = None, + top_p: float | None = None, + vector_store_id: str | None = None, + metadata: dict[str, Any] | None = {}, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + **kwargs: Any, + ) -> None: + """Initialize an OpenAIAssistant Base. + + Args: + ai_model_id: The AI model id. Defaults to None. + client: The client, either AsyncOpenAI or AsyncAzureOpenAI. + service_id: The service id. + kernel: The kernel. (optional) + id: The id. Defaults to None. (optional) + name: The name. Defaults to None. (optional) + description: The description. Defaults to None. (optional) + default_headers: The default headers. Defaults to None. (optional) + instructions: The instructions. Defaults to None. (optional) + enable_code_interpreter: Enable code interpreter. Defaults to False. (optional) + enable_file_search: Enable file search. Defaults to False. (optional) + enable_json_response: Enable JSON response. Defaults to False. (optional) + code_interpreter_file_ids: The file ids. Defaults to []. (optional) + temperature: The temperature. Defaults to None. (optional) + top_p: The top p. Defaults to None. (optional) + vector_store_id: The vector store id. Defaults to None. (optional) + metadata: The metadata. Defaults to {}. (optional) + max_completion_tokens: The max completion tokens. Defaults to None. (optional) + max_prompt_tokens: The max prompt tokens. Defaults to None. (optional) + parallel_tool_calls_enabled: Enable parallel tool calls. Defaults to True. (optional) + truncation_message_count: The truncation message count. Defaults to None. (optional) + kwargs: The keyword arguments. 
+ """ + args: dict[str, Any] = {} + + args = { + "ai_model_id": ai_model_id, + "client": client, + "service_id": service_id, + "instructions": instructions, + "description": description, + "enable_code_interpreter": enable_code_interpreter, + "enable_file_search": enable_file_search, + "enable_json_response": enable_json_response, + "code_interpreter_file_ids": code_interpreter_file_ids, + "temperature": temperature, + "top_p": top_p, + "vector_store_id": vector_store_id, + "metadata": metadata, + "max_completion_tokens": max_completion_tokens, + "max_prompt_tokens": max_prompt_tokens, + "parallel_tool_calls_enabled": parallel_tool_calls_enabled, + "truncation_message_count": truncation_message_count, + } + + if name is not None: + args["name"] = name + if id is not None: + args["id"] = id + if kernel is not None: + args["kernel"] = kernel + if kwargs: + args.update(kwargs) + + super().__init__(**args) + + async def create_assistant( + self, + ai_model_id: str | None = None, + description: str | None = None, + instructions: str | None = None, + name: str | None = None, + enable_code_interpreter: bool | None = None, + code_interpreter_file_ids: list[str] | None = None, + enable_file_search: bool | None = None, + vector_store_id: str | None = None, + metadata: dict[str, str] | None = {}, + **kwargs: Any, + ) -> "Assistant": + """Create the assistant. + + Args: + ai_model_id: The AI model id. Defaults to None. (optional) + description: The description. Defaults to None. (optional) + instructions: The instructions. Defaults to None. (optional) + name: The name. Defaults to None. (optional) + enable_code_interpreter: Enable code interpreter. Defaults to None. (optional) + enable_file_search: Enable file search. Defaults to None. (optional) + code_interpreter_file_ids: The file ids. Defaults to None. (optional) + vector_store_id: The vector store id. Defaults to None. (optional) + metadata: The metadata. Defaults to {}. (optional) + kwargs: Extra keyword arguments. 
+ + Returns: + Assistant: The assistant + """ + create_assistant_kwargs: dict[str, Any] = {} + + if ai_model_id is not None: + create_assistant_kwargs["model"] = ai_model_id + elif self.ai_model_id: + create_assistant_kwargs["model"] = self.ai_model_id + + if description is not None: + create_assistant_kwargs["description"] = description + elif self.description: + create_assistant_kwargs["description"] = self.description + + if instructions is not None: + create_assistant_kwargs["instructions"] = instructions + elif self.instructions: + create_assistant_kwargs["instructions"] = self.instructions + + if name is not None: + create_assistant_kwargs["name"] = name + elif self.name: + create_assistant_kwargs["name"] = self.name + + tools = [] + if enable_code_interpreter is not None: + if enable_code_interpreter: + tools.append({"type": "code_interpreter"}) + elif self.enable_code_interpreter: + tools.append({"type": "code_interpreter"}) + + if enable_file_search is not None: + if enable_file_search: + tools.append({"type": "file_search"}) + elif self.enable_file_search: + tools.append({"type": "file_search"}) + + if tools: + create_assistant_kwargs["tools"] = tools + + tool_resources = {} + if code_interpreter_file_ids is not None: + tool_resources["code_interpreter"] = {"file_ids": code_interpreter_file_ids} + elif self.code_interpreter_file_ids: + tool_resources["code_interpreter"] = {"file_ids": self.code_interpreter_file_ids} + + if vector_store_id is not None: + tool_resources["file_search"] = {"vector_store_ids": [vector_store_id]} + elif self.vector_store_id: + tool_resources["file_search"] = {"vector_store_ids": [self.vector_store_id]} + + if tool_resources: + create_assistant_kwargs["tool_resources"] = tool_resources + + if metadata: + create_assistant_kwargs["metadata"] = metadata + elif self.metadata: + create_assistant_kwargs["metadata"] = self.metadata + + if kwargs: + create_assistant_kwargs.update(kwargs) + + execution_settings = {} + if self.max_completion_tokens: + execution_settings["max_completion_tokens"] = self.max_completion_tokens + + if self.max_prompt_tokens: + execution_settings["max_prompt_tokens"] = self.max_prompt_tokens + + if self.parallel_tool_calls_enabled: + execution_settings["parallel_tool_calls_enabled"] = self.parallel_tool_calls_enabled + + if self.truncation_message_count: + execution_settings["truncation_message_count"] = self.truncation_message_count + + if execution_settings: + if "metadata" not in create_assistant_kwargs: + create_assistant_kwargs["metadata"] = {} + if self._options_metadata_key not in create_assistant_kwargs["metadata"]: + create_assistant_kwargs["metadata"][self._options_metadata_key] = {} + create_assistant_kwargs["metadata"][self._options_metadata_key] = json.dumps(execution_settings) + + self.assistant = await self.client.beta.assistants.create( + **create_assistant_kwargs, + ) + + if self._is_deleted: + self._is_deleted = False + + return self.assistant + + async def modify_assistant(self, assistant_id: str, **kwargs: Any) -> Assistant: + """Modify the assistant. + + Args: + assistant_id: The assistant's current ID. + kwargs: Extra keyword arguments. + + Returns: + Assistant: The modified assistant. 
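+
+        Example:
+            An illustrative call; any field supported by the OpenAI assistants
+            update endpoint can be passed as a keyword argument:
+
+                assistant = await agent.modify_assistant(
+                    assistant_id=agent.assistant.id,
+                    instructions="Answer concisely.",
+                )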
+ """ + if self.assistant is None: + raise AgentInitializationException("The assistant has not been created.") + + modified_assistant = await self.client.beta.assistants.update(assistant_id=assistant_id, **kwargs) + self.assistant = modified_assistant + return self.assistant + + @classmethod + def _create_open_ai_assistant_definition(cls, assistant: "Assistant") -> dict[str, Any]: + """Create an OpenAI Assistant Definition from the provided assistant dictionary. + + Args: + assistant: The assistant dictionary. + + Returns: + An OpenAI Assistant Definition. + """ + execution_settings = {} + if isinstance(assistant.metadata, dict) and OpenAIAssistantBase._options_metadata_key in assistant.metadata: + settings_data = assistant.metadata[OpenAIAssistantBase._options_metadata_key] + if isinstance(settings_data, str): + settings_data = json.loads(settings_data) + assistant.metadata[OpenAIAssistantBase._options_metadata_key] = settings_data + execution_settings = {key: value for key, value in settings_data.items()} + + file_ids: list[str] = [] + vector_store_id = None + + tool_resources = getattr(assistant, "tool_resources", None) + if tool_resources: + if hasattr(tool_resources, "code_interpreter") and tool_resources.code_interpreter: + file_ids = getattr(tool_resources.code_interpreter, "code_interpreter_file_ids", []) + + if hasattr(tool_resources, "file_search") and tool_resources.file_search: + vector_store_ids = getattr(tool_resources.file_search, "vector_store_ids", []) + if vector_store_ids: + vector_store_id = vector_store_ids[0] + + enable_json_response = ( + hasattr(assistant, "response_format") + and assistant.response_format is not None + and getattr(assistant.response_format, "type", "") == "json_object" + ) + + enable_code_interpreter = any(isinstance(tool, CodeInterpreterTool) for tool in assistant.tools) + enable_file_search = any(isinstance(tool, FileSearchTool) for tool in assistant.tools) + + return { + "ai_model_id": assistant.model, + "description": assistant.description, + "id": assistant.id, + "instructions": assistant.instructions, + "name": assistant.name, + "enable_code_interpreter": enable_code_interpreter, + "enable_file_search": enable_file_search, + "enable_json_response": enable_json_response, + "code_interpreter_file_ids": file_ids, + "temperature": assistant.temperature, + "top_p": assistant.top_p, + "vector_store_id": vector_store_id if vector_store_id else None, + "metadata": assistant.metadata, + **execution_settings, + } + + # endregion + + # region Agent Properties + + @property + def tools(self) -> list[dict[str, str]]: + """The tools. + + Returns: + list[dict[str, str]]: The tools. + """ + if self.assistant is None: + raise AgentInitializationException("The assistant has not been created.") + return self._get_tools() + + # endregion + + # region Agent Channel Methods + + def get_channel_keys(self) -> Iterable[str]: + """Get the channel keys. + + Returns: + Iterable[str]: The channel keys. + """ + # Distinguish from other channel types. 
+ yield f"{OpenAIAssistantBase.__name__}" + + # Distinguish between different agent IDs + yield self.id + + # Distinguish between agent names + yield self.name + + # Distinguish between different API base URLs + yield str(self.client.base_url) + + async def create_channel(self) -> AgentChannel: + """Create a channel.""" + thread_id = await self.create_thread() + + return OpenAIAssistantChannel(client=self.client, thread_id=thread_id) + + # endregion + + # region Agent Methods + + async def create_thread( + self, + *, + code_interpreter_file_ids: list[str] | None = [], + messages: list[ChatMessageContent] | None = [], + vector_store_id: str | None = None, + metadata: dict[str, str] = {}, + ) -> str: + """Create a thread. + + Args: + code_interpreter_file_ids: The code interpreter file ids. Defaults to an empty list. (optional) + messages: The chat messages. Defaults to an empty list. (optional) + vector_store_id: The vector store id. Defaults to None. (optional) + metadata: The metadata. Defaults to an empty dictionary. (optional) + + Returns: + str: The thread id. + """ + create_thread_kwargs: dict[str, Any] = {} + + tool_resources = {} + + if code_interpreter_file_ids: + tool_resources["code_interpreter"] = {"file_ids": code_interpreter_file_ids} + + if vector_store_id: + tool_resources["file_search"] = {"vector_store_ids": [vector_store_id]} + + if tool_resources: + create_thread_kwargs["tool_resources"] = tool_resources + + if messages: + messages_to_add = [] + for message in messages: + if message.role.value not in self.allowed_message_roles: + raise AgentExecutionException( + f"Invalid message role `{message.role.value}`. Allowed roles are {self.allowed_message_roles}." + ) + message_contents = get_message_contents(message=message) + for content in message_contents: + messages_to_add.append({"role": message.role.value, "content": content}) + create_thread_kwargs["messages"] = messages_to_add + + if metadata: + create_thread_kwargs["metadata"] = metadata + + thread = await self.client.beta.threads.create(**create_thread_kwargs) + return thread.id + + async def delete_thread(self, thread_id: str) -> None: + """Delete a thread. + + Args: + thread_id: The thread id. + """ + await self.client.beta.threads.delete(thread_id) + + async def delete(self) -> bool: + """Delete the assistant. + + Returns: + bool: True if the assistant is deleted. + """ + if not self._is_deleted and self.assistant: + await self.client.beta.assistants.delete(self.assistant.id) + self._is_deleted = True + return self._is_deleted + + async def add_chat_message(self, thread_id: str, message: ChatMessageContent) -> "Message": + """Add a chat message. + + Args: + thread_id: The thread id. + message: The chat message. + + Returns: + Message: The message. + """ + return await create_chat_message(self.client, thread_id, message, self.allowed_message_roles) + + async def get_thread_messages(self, thread_id: str) -> AsyncIterable[ChatMessageContent]: + """Get the messages for the specified thread. + + Args: + thread_id: The thread id. + + Yields: + ChatMessageContent: The chat message. 
+ """ + agent_names: dict[str, Any] = {} + + thread_messages = await self.client.beta.threads.messages.list(thread_id=thread_id, limit=100, order="desc") + for message in thread_messages.data: + assistant_name = None + if message.assistant_id and message.assistant_id not in agent_names: + agent = await self.client.beta.assistants.retrieve(message.assistant_id) + if agent.name: + agent_names[message.assistant_id] = agent.name + assistant_name = agent_names.get(message.assistant_id) if message.assistant_id else message.assistant_id + assistant_name = assistant_name or message.assistant_id + + content: ChatMessageContent = generate_message_content(str(assistant_name), message) + + if len(content.items) > 0: + yield content + + async def add_file(self, file_path: str, purpose: Literal["assistants", "vision"]) -> str: + """Add a file for use with the Assistant. + + Args: + file_path: The file path. + purpose: The purpose. Can be "assistants" or "vision". + + Returns: + str: The file id. + + Raises: + AgentInitializationError: If the client has not been initialized or the file is not found. + """ + try: + with open(file_path, "rb") as file: + file = await self.client.files.create(file=file, purpose=purpose) # type: ignore + return file.id # type: ignore + except FileNotFoundError as ex: + raise AgentFileNotFoundException(f"File not found: {file_path}") from ex + + async def delete_file(self, file_id: str) -> None: + """Delete a file. + + Args: + file_id: The file id. + """ + try: + await self.client.files.delete(file_id) + except Exception as ex: + raise AgentExecutionException("Error deleting file.") from ex + + async def create_vector_store(self, file_ids: str | list[str]) -> str: + """Create a vector store. + + Args: + file_ids: The file ids either as a str of a single file ID or a list of strings of file IDs. + + Returns: + The vector store id. + + Raises: + AgentExecutionError: If there is an error creating the vector store. + """ + if isinstance(file_ids, str): + file_ids = [file_ids] + try: + vector_store = await self.client.beta.vector_stores.create(file_ids=file_ids) + return vector_store.id + except Exception as ex: + raise AgentExecutionException("Error creating vector store.") from ex + + async def delete_vector_store(self, vector_store_id: str) -> None: + """Delete a vector store. + + Args: + vector_store_id: The vector store id. + + Raises: + AgentExecutionError: If there is an error deleting the vector store. + """ + try: + await self.client.beta.vector_stores.delete(vector_store_id) + except Exception as ex: + raise AgentExecutionException("Error deleting vector store.") from ex + + # endregion + + # region Agent Invoke Methods + + async def invoke( + self, + thread_id: str, + *, + ai_model_id: str | None = None, + enable_code_interpreter: bool | None = False, + enable_file_search: bool | None = False, + enable_json_response: bool | None = None, + max_completion_tokens: int | None = None, + max_prompt_tokens: int | None = None, + parallel_tool_calls_enabled: bool | None = True, + truncation_message_count: int | None = None, + temperature: float | None = None, + top_p: float | None = None, + metadata: dict[str, str] | None = {}, + **kwargs: Any, + ) -> AsyncIterable[ChatMessageContent]: + """Invoke the chat assistant. + + The supplied arguments will take precedence over the specified assistant level attributes. + + Args: + thread_id: The thread id. + ai_model_id: The AI model id. Defaults to None. (optional) + enable_code_interpreter: Enable code interpreter. Defaults to False. 
+        """
+        async for is_visible, content in self._invoke_internal(
+            thread_id=thread_id,
+            ai_model_id=ai_model_id,
+            enable_code_interpreter=enable_code_interpreter,
+            enable_file_search=enable_file_search,
+            enable_json_response=enable_json_response,
+            max_completion_tokens=max_completion_tokens,
+            max_prompt_tokens=max_prompt_tokens,
+            parallel_tool_calls_enabled=parallel_tool_calls_enabled,
+            truncation_message_count=truncation_message_count,
+            temperature=temperature,
+            top_p=top_p,
+            metadata=metadata,
+            # Spread the extra keyword arguments instead of passing the dict as a
+            # single `kwargs=` keyword, which would nest it one level too deep.
+            **kwargs,
+        ):
+            if is_visible:
+                yield content
+
+    async def _invoke_internal(
+        self,
+        thread_id: str,
+        *,
+        ai_model_id: str | None = None,
+        enable_code_interpreter: bool | None = False,
+        enable_file_search: bool | None = False,
+        enable_json_response: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_prompt_tokens: int | None = None,
+        parallel_tool_calls_enabled: bool | None = True,
+        truncation_message_count: int | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        metadata: dict[str, str] | None = {},
+        **kwargs: Any,
+    ) -> AsyncIterable[tuple[bool, ChatMessageContent]]:
+        """Internal invoke method.
+
+        The supplied arguments will take precedence over the specified assistant level attributes.
+
+        Args:
+            thread_id: The thread id.
+            ai_model_id: The AI model id. Defaults to None. (optional)
+            enable_code_interpreter: Enable code interpreter. Defaults to False. (optional)
+            enable_file_search: Enable file search. Defaults to False. (optional)
+            enable_json_response: Enable JSON response. Defaults to None. (optional)
+            max_completion_tokens: The max completion tokens. Defaults to None. (optional)
+            max_prompt_tokens: The max prompt tokens. Defaults to None. (optional)
+            parallel_tool_calls_enabled: Enable parallel tool calls. Defaults to True. (optional)
+            truncation_message_count: The truncation message count. Defaults to None. (optional)
+            temperature: The temperature. Defaults to None. (optional)
+            top_p: The top p. Defaults to None. (optional)
+            metadata: The metadata. Defaults to {}. (optional)
+            kwargs: Extra keyword arguments.
+
+        Yields:
+            tuple[bool, ChatMessageContent]: A tuple of visibility and chat message content.
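+
+        Example:
+            Illustrative shape of the yielded values; function-call content is
+            yielded as not visible, while regular messages are visible:
+
+                async for is_visible, content in self._invoke_internal(thread_id=thread_id):
+                    ...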
+ """ + if not self.assistant: + raise AgentInitializationException("The assistant has not been created.") + + if self._is_deleted: + raise AgentInitializationException("The assistant has been deleted.") + + self._check_if_deleted() + tools = self._get_tools() + + run_options = self._generate_options( + ai_model_id=ai_model_id, + enable_code_interpreter=enable_code_interpreter, + enable_file_search=enable_file_search, + enable_json_response=enable_json_response, + max_completion_tokens=max_completion_tokens, + max_prompt_tokens=max_prompt_tokens, + parallel_tool_calls_enabled=parallel_tool_calls_enabled, + truncation_message_count=truncation_message_count, + temperature=temperature, + top_p=top_p, + metadata=metadata, + kwargs=kwargs, + ) + + # Filter out None values to avoid passing them as kwargs + run_options = {k: v for k, v in run_options.items() if v is not None} + + run = await self.client.beta.threads.runs.create( + assistant_id=self.assistant.id, + thread_id=thread_id, + instructions=self.assistant.instructions, + tools=tools, # type: ignore + **run_options, + ) + + processed_step_ids = set() + function_steps: dict[str, FunctionCallContent] = {} + + while run.status != "completed": + run = await self._poll_run_status(run=run, thread_id=thread_id) + + if run.status in self.error_message_states: + raise AgentInvokeException( + f"Run failed with status: `{run.status}` for agent `{self.name}` and thread `{thread_id}`" + ) + + # Check if function calling required + if run.status == "requires_action": + fccs = get_function_call_contents(run, function_steps) + if fccs: + yield False, generate_function_call_content(agent_name=self.name, fccs=fccs) + + chat_history = ChatHistory() + _ = await self._invoke_function_calls(fccs=fccs, chat_history=chat_history) + + tool_outputs = self._format_tool_outputs(chat_history) + await self.client.beta.threads.runs.submit_tool_outputs( + run_id=run.id, + thread_id=thread_id, + tool_outputs=tool_outputs, # type: ignore + ) + + steps_response = await self.client.beta.threads.runs.steps.list(run_id=run.id, thread_id=thread_id) + steps: list[RunStep] = steps_response.data + completed_steps_to_process: list[RunStep] = sorted( + [s for s in steps if s.completed_at is not None and s.id not in processed_step_ids], + key=lambda s: s.created_at, + ) + + message_count = 0 + for completed_step in completed_steps_to_process: + if completed_step.type == "tool_calls": + assert hasattr(completed_step.step_details, "tool_calls") # nosec + for tool_call in completed_step.step_details.tool_calls: + is_visible = False + content: ChatMessageContent | None = None + if tool_call.type == "code_interpreter": + content = generate_code_interpreter_content( + self.name, + tool_call.code_interpreter.input, # type: ignore + ) + is_visible = True + elif tool_call.type == "function": + function_step = function_steps.get(tool_call.id) + assert function_step is not None # nosec + content = generate_function_result_content( + agent_name=self.name, function_step=function_step, tool_call=tool_call + ) + + if content: + message_count += 1 + yield is_visible, content + elif completed_step.type == "message_creation": + message = await self._retrieve_message( + thread_id=thread_id, + message_id=completed_step.step_details.message_creation.message_id, # type: ignore + ) + if message: + content = generate_message_content(self.name, message) + if len(content.items) > 0: + message_count += 1 + yield True, content + processed_step_ids.add(completed_step.id) + + # endregion + + # region Agent 
+
+    def _generate_options(
+        self,
+        *,
+        ai_model_id: str | None = None,
+        enable_code_interpreter: bool | None = False,
+        enable_file_search: bool | None = False,
+        enable_json_response: bool | None = False,
+        max_completion_tokens: int | None = None,
+        max_prompt_tokens: int | None = None,
+        parallel_tool_calls_enabled: bool | None = True,
+        truncation_message_count: int | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        metadata: dict[str, str] | None = {},
+        kwargs: Any = {},
+    ) -> dict[str, Any]:
+        """Generate options for the assistant invocation."""
+        merged_options = self._merge_options(
+            ai_model_id=ai_model_id,
+            enable_code_interpreter=enable_code_interpreter,
+            enable_file_search=enable_file_search,
+            enable_json_response=enable_json_response,
+            max_completion_tokens=max_completion_tokens,
+            max_prompt_tokens=max_prompt_tokens,
+            parallel_tool_calls_enabled=parallel_tool_calls_enabled,
+            truncation_message_count=truncation_message_count,
+            temperature=temperature,
+            top_p=top_p,
+            metadata=metadata,
+            **kwargs,
+        )
+
+        truncation_message_count = merged_options.get("truncation_message_count")
+
+        return {
+            "max_completion_tokens": merged_options.get("max_completion_tokens"),
+            "max_prompt_tokens": merged_options.get("max_prompt_tokens"),
+            "model": merged_options.get("ai_model_id"),
+            "top_p": merged_options.get("top_p"),
+            # TODO(evmattso): Support `parallel_tool_calls` when it is ready
+            # The Runs API expects a response_format object rather than a bare string.
+            "response_format": {"type": "json_object"} if merged_options.get("enable_json_response") else None,
+            "temperature": merged_options.get("temperature"),
+            # The Runs API expects a truncation_strategy object rather than a raw message count.
+            "truncation_strategy": (
+                {"type": "last_messages", "last_messages": truncation_message_count}
+                if truncation_message_count
+                else None
+            ),
+            "metadata": merged_options.get("metadata", None),
+        }
+
+    def _merge_options(
+        self,
+        ai_model_id: str | None = None,
+        enable_code_interpreter: bool | None = None,
+        enable_file_search: bool | None = None,
+        enable_json_response: bool | None = None,
+        max_completion_tokens: int | None = None,
+        max_prompt_tokens: int | None = None,
+        parallel_tool_calls_enabled: bool | None = True,
+        truncation_message_count: int | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        metadata: dict[str, str] | None = {},
+        **kwargs: Any,
+    ) -> dict[str, Any]:
+        """Merge the run-time options with the agent level attribute options."""
+        merged_options = {
+            "ai_model_id": ai_model_id if ai_model_id is not None else self.ai_model_id,
+            "enable_code_interpreter": enable_code_interpreter
+            if enable_code_interpreter is not None
+            else self.enable_code_interpreter,
+            "enable_file_search": enable_file_search if enable_file_search is not None else self.enable_file_search,
+            "enable_json_response": enable_json_response
+            if enable_json_response is not None
+            else self.enable_json_response,
+            "max_completion_tokens": max_completion_tokens
+            if max_completion_tokens is not None
+            else self.max_completion_tokens,
+            "max_prompt_tokens": max_prompt_tokens if max_prompt_tokens is not None else self.max_prompt_tokens,
+            "parallel_tool_calls_enabled": parallel_tool_calls_enabled
+            if parallel_tool_calls_enabled is not None
+            else self.parallel_tool_calls_enabled,
+            "truncation_message_count": truncation_message_count
+            if truncation_message_count is not None
+            else self.truncation_message_count,
+            "temperature": temperature if temperature is not None else self.temperature,
+            "top_p": top_p if top_p is not None else self.top_p,
+            "metadata": metadata if metadata is not None else self.metadata,
+        }
+
+        # Update merged_options with any additional kwargs
+        merged_options.update(kwargs)
+        return merged_options
+
+    async def _poll_run_status(self, run: Run, thread_id: str) -> Run:
+        """Poll the run status.
+
+        Args:
+            run: The run.
+            thread_id: The thread id.
+
+        Returns:
+            The run.
+        """
+        logger.info(f"Polling run status: {run.id}, threadId: {thread_id}")
+
+        count = 0
+
+        while True:
+            # Reduce polling frequency after a couple attempts
+            await asyncio.sleep(self.polling_options.get_polling_interval(count).total_seconds())
+            count += 1
+
+            try:
+                run = await self.client.beta.threads.runs.retrieve(run.id, thread_id=thread_id)
+            except Exception as e:
+                logger.warning(f"Failed to retrieve run for run id: `{run.id}` and thread id: `{thread_id}`: {e}")
+                # Retry anyway
+
+            if run.status not in self.polling_status:
+                break
+
+        logger.info(f"Polled run status: {run.status}, {run.id}, threadId: {thread_id}")
+        return run
+
+    async def _retrieve_message(self, thread_id: str, message_id: str) -> Message | None:
+        """Retrieve a message from a thread.
+
+        Args:
+            thread_id: The thread id.
+            message_id: The message id.
+
+        Returns:
+            The message or None.
+        """
+        message: Message | None = None
+        count = 0
+        max_retries = 3
+
+        while count < max_retries:
+            try:
+                message = await self.client.beta.threads.messages.retrieve(message_id, thread_id=thread_id)
+                break
+            except Exception as ex:
+                logger.error(f"Failed to retrieve message {message_id} from thread {thread_id}: {ex}")
+                count += 1
+                if count >= max_retries:
+                    logger.error(
+                        f"Max retries reached. Unable to retrieve message {message_id} from thread {thread_id}."
+                    )
+                    break
+                backoff_time: float = self.polling_options.message_synchronization_delay.total_seconds() * (2**count)
+                await asyncio.sleep(backoff_time)
+
+        return message
+
+    def _check_if_deleted(self) -> None:
+        """Check if the assistant has been deleted."""
+        if self._is_deleted:
+            raise AgentInitializationException("The assistant has been deleted.")
+
+    def _get_tools(self) -> list[dict[str, str]]:
+        """Get the list of tools for the assistant.
+
+        Returns:
+            The list of tools.
+        """
+        tools = []
+        if self.assistant is None:
+            raise AgentInitializationException("The assistant has not been created.")
+
+        for tool in self.assistant.tools:
+            if isinstance(tool, CodeInterpreterTool):
+                tools.append({"type": "code_interpreter"})
+            elif isinstance(tool, FileSearchTool):
+                tools.append({"type": "file_search"})
+
+        funcs = self.kernel.get_full_list_of_function_metadata()
+        tools.extend([kernel_function_metadata_to_function_call_format(f) for f in funcs])
+
+        return tools
+
+    async def _invoke_function_calls(self, fccs: list[FunctionCallContent], chat_history: ChatHistory) -> list[Any]:
+        """Invoke function calls and store results in chat history.
+
+        Args:
+            fccs: The function call contents.
+            chat_history: The chat history.
+
+        Returns:
+            The results as a list.
+        """
+        tasks = [
+            self.kernel.invoke_function_call(function_call=function_call, chat_history=chat_history)
+            for function_call in fccs
+        ]
+        return await asyncio.gather(*tasks)
+
+    def _format_tool_outputs(self, chat_history: ChatHistory) -> list[dict[str, str]]:
+        """Format tool outputs from chat history for submission.
+
+        Args:
+            chat_history: The chat history.
+
+        Returns:
+            The formatted tool outputs as a list of dictionaries.
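+
+        Example:
+            Illustrative output shape (the ids and values are placeholders):
+
+                [{"tool_call_id": "call_abc123", "output": "42"}]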
+ """ + tool_outputs = [] + for tool_call in chat_history.messages[0].items: + if isinstance(tool_call, FunctionResultContent): + tool_outputs.append({ + "tool_call_id": tool_call.id, + "output": tool_call.result, + }) + return tool_outputs + + # endregion diff --git a/python/semantic_kernel/agents/open_ai/run_polling_options.py b/python/semantic_kernel/agents/open_ai/run_polling_options.py new file mode 100644 index 000000000000..9683f4cbea6b --- /dev/null +++ b/python/semantic_kernel/agents/open_ai/run_polling_options.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft. All rights reserved. + +from datetime import timedelta + +from pydantic import Field + +from semantic_kernel.kernel_pydantic import KernelBaseModel +from semantic_kernel.utils.experimental_decorator import experimental_class + + +@experimental_class +class RunPollingOptions(KernelBaseModel): + """Configuration and defaults associated with polling behavior for Assistant API requests.""" + + default_polling_interval: timedelta = Field(default=timedelta(milliseconds=250)) + default_polling_backoff: timedelta = Field(default=timedelta(seconds=1)) + default_polling_backoff_threshold: int = Field(default=2) + default_message_synchronization_delay: timedelta = Field(default=timedelta(milliseconds=250)) + run_polling_interval: timedelta = Field(default=timedelta(milliseconds=250)) + run_polling_backoff: timedelta = Field(default=timedelta(seconds=1)) + run_polling_backoff_threshold: int = Field(default=2) + message_synchronization_delay: timedelta = Field(default=timedelta(milliseconds=250)) + + def get_polling_interval(self, iteration_count: int) -> timedelta: + """Get the polling interval for the given iteration count.""" + return ( + self.run_polling_backoff + if iteration_count > self.run_polling_backoff_threshold + else self.run_polling_interval + ) diff --git a/python/semantic_kernel/agents/strategies/__init__.py b/python/semantic_kernel/agents/strategies/__init__.py new file mode 100644 index 000000000000..836604a9f632 --- /dev/null +++ b/python/semantic_kernel/agents/strategies/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft. All rights reserved. 
diff --git a/python/semantic_kernel/agents/strategies/__init__.py b/python/semantic_kernel/agents/strategies/__init__.py
new file mode 100644
index 000000000000..836604a9f632
--- /dev/null
+++ b/python/semantic_kernel/agents/strategies/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from semantic_kernel.agents.strategies.selection.kernel_function_selection_strategy import (
+    KernelFunctionSelectionStrategy,
+)
+from semantic_kernel.agents.strategies.selection.sequential_selection_strategy import SequentialSelectionStrategy
+from semantic_kernel.agents.strategies.termination.aggregator_termination_strategy import AggregatorTerminationStrategy
+from semantic_kernel.agents.strategies.termination.default_termination_strategy import DefaultTerminationStrategy
+from semantic_kernel.agents.strategies.termination.kernel_function_termination_strategy import (
+    KernelFunctionTerminationStrategy,
+)
+
+__all__ = [
+    "AggregatorTerminationStrategy",
+    "DefaultTerminationStrategy",
+    "KernelFunctionSelectionStrategy",
+    "KernelFunctionTerminationStrategy",
+    "SequentialSelectionStrategy",
+]
diff --git a/python/semantic_kernel/agents/strategies/selection/__init__.py b/python/semantic_kernel/agents/strategies/selection/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/python/semantic_kernel/agents/strategies/selection/kernel_function_selection_strategy.py b/python/semantic_kernel/agents/strategies/selection/kernel_function_selection_strategy.py
new file mode 100644
index 000000000000..3879fab95aca
--- /dev/null
+++ b/python/semantic_kernel/agents/strategies/selection/kernel_function_selection_strategy.py
@@ -0,0 +1,100 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import logging
+from collections.abc import Callable
+from inspect import isawaitable
+from typing import TYPE_CHECKING, ClassVar
+
+from pydantic import Field
+
+from semantic_kernel.agents.strategies.selection.selection_strategy import SelectionStrategy
+from semantic_kernel.contents.chat_message_content import ChatMessageContent
+from semantic_kernel.exceptions.agent_exceptions import AgentExecutionException
+from semantic_kernel.functions.kernel_arguments import KernelArguments
+from semantic_kernel.functions.kernel_function import KernelFunction
+from semantic_kernel.kernel import Kernel
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+if TYPE_CHECKING:
+    from semantic_kernel.agents import Agent
+
+logger: logging.Logger = logging.getLogger(__name__)
+
+
+@experimental_class
+class KernelFunctionSelectionStrategy(SelectionStrategy):
+    """Determines agent selection based on the evaluation of a Kernel Function."""
+
+    DEFAULT_AGENT_VARIABLE_NAME: ClassVar[str] = "_agent_"
+    DEFAULT_HISTORY_VARIABLE_NAME: ClassVar[str] = "_history_"
+
+    agent_variable_name: str | None = Field(default=DEFAULT_AGENT_VARIABLE_NAME)
+    history_variable_name: str | None = Field(default=DEFAULT_HISTORY_VARIABLE_NAME)
+    arguments: KernelArguments | None = None
+    function: KernelFunction
+    kernel: Kernel
+    result_parser: Callable[..., str] = Field(default_factory=lambda: (lambda: ""))
+
+    async def next(self, agents: list["Agent"], history: list[ChatMessageContent]) -> "Agent":
+        """Select the next agent to interact with.
+
+        Args:
+            agents: The list of agents to select from.
+            history: The history of messages in the conversation.
+
+        Returns:
+            The next agent to interact with.
+
+        Raises:
+            AgentExecutionException: If the strategy fails to execute the function or select the next agent.
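+
+        Example:
+            An illustrative setup sketch; `kernel`, `selection_function`, and the
+            agent list are assumed to be configured elsewhere:
+
+                strategy = KernelFunctionSelectionStrategy(
+                    kernel=kernel,
+                    function=selection_function,
+                    result_parser=lambda result: str(result),
+                )
+                next_agent = await strategy.next(agents, history)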
+        """
+        original_arguments = self.arguments or KernelArguments()
+        execution_settings = original_arguments.execution_settings or {}
+
+        messages = [message.to_dict(role_key="role", content_key="content") for message in history]
+
+        filtered_arguments = {
+            self.agent_variable_name: ",".join(agent.name for agent in agents),
+            self.history_variable_name: messages,
+        }
+
+        extracted_settings = {key: setting.model_dump() for key, setting in execution_settings.items()}
+
+        combined_arguments = {
+            **original_arguments,
+            **extracted_settings,
+            **filtered_arguments,
+        }
+
+        arguments = KernelArguments(
+            **combined_arguments,
+        )
+
+        logger.info(
+            f"Kernel Function Selection Strategy next method called, "
+            f"invoking function: {self.function.plugin_name}, {self.function.name}",
+        )
+
+        try:
+            result = await self.function.invoke(kernel=self.kernel, arguments=arguments)
+        except Exception as ex:
+            logger.error("Kernel Function Selection Strategy next method failed", exc_info=ex)
+            raise AgentExecutionException("Agent Failure - Strategy failed to execute function.") from ex
+
+        logger.info(
+            f"Kernel Function Selection Strategy next method completed: "
+            f"{self.function.plugin_name}, {self.function.name}, result: {result.value if result else None}",
+        )
+
+        agent_name = self.result_parser(result)
+        if isawaitable(agent_name):
+            agent_name = await agent_name
+
+        if agent_name is None:
+            raise AgentExecutionException("Agent Failure - Strategy unable to determine next agent.")
+
+        agent_turn = next((agent for agent in agents if agent.name == agent_name), None)
+        if agent_turn is None:
+            raise AgentExecutionException(f"Agent Failure - Strategy unable to select next agent: {agent_name}")
+
+        return agent_turn
diff --git a/python/semantic_kernel/agents/strategies/selection/selection_strategy.py b/python/semantic_kernel/agents/strategies/selection/selection_strategy.py
new file mode 100644
index 000000000000..cef5625432c9
--- /dev/null
+++ b/python/semantic_kernel/agents/strategies/selection/selection_strategy.py
@@ -0,0 +1,29 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING
+
+from semantic_kernel.kernel_pydantic import KernelBaseModel
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+if TYPE_CHECKING:
+    from semantic_kernel.agents import Agent
+    from semantic_kernel.contents.chat_message_content import ChatMessageContent
+
+
+@experimental_class
+class SelectionStrategy(KernelBaseModel, ABC):
+    """Contract for an agent selection strategy."""
+
+    @abstractmethod
+    async def next(self, agents: list["Agent"], history: list["ChatMessageContent"]) -> "Agent":
+        """Select the next agent to interact with.
+
+        Args:
+            agents: The list of agents to select from.
+            history: The history of messages in the conversation.
+
+        Returns:
+            The next agent to interact with.
+        """
+        ...
diff --git a/python/semantic_kernel/agents/strategies/selection/sequential_selection_strategy.py b/python/semantic_kernel/agents/strategies/selection/sequential_selection_strategy.py
new file mode 100644
index 000000000000..8304f405df7e
--- /dev/null
+++ b/python/semantic_kernel/agents/strategies/selection/sequential_selection_strategy.py
@@ -0,0 +1,45 @@
+# Copyright (c) Microsoft. All rights reserved.
+ +from typing import TYPE_CHECKING + +from pydantic import PrivateAttr + +from semantic_kernel.agents.strategies.selection.selection_strategy import SelectionStrategy +from semantic_kernel.utils.experimental_decorator import experimental_class + +if TYPE_CHECKING: + from semantic_kernel.agents import Agent + from semantic_kernel.contents.chat_message_content import ChatMessageContent + + +@experimental_class +class SequentialSelectionStrategy(SelectionStrategy): + """A selection strategy that selects agents in a sequential order.""" + + _index: int = PrivateAttr(default=0) + + def reset(self): + """Reset the index.""" + self._index = 0 + + async def next(self, agents: list["Agent"], history: list["ChatMessageContent"]) -> "Agent": + """Select the next agent to interact with. + + Args: + agents: The list of agents to select from. + history: The history of messages in the conversation. + + Returns: + The next agent to interact with. + """ + if len(agents) == 0: + raise ValueError("No agents to select from") + + if self._index >= len(agents): + self.reset() + + agent = agents[self._index] + + self._index = (self._index + 1) % len(agents) + + return agent diff --git a/python/semantic_kernel/agents/strategies/termination/__init__.py b/python/semantic_kernel/agents/strategies/termination/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/agents/strategies/termination/aggregator_termination_strategy.py b/python/semantic_kernel/agents/strategies/termination/aggregator_termination_strategy.py new file mode 100644 index 000000000000..9b102912299e --- /dev/null +++ b/python/semantic_kernel/agents/strategies/termination/aggregator_termination_strategy.py @@ -0,0 +1,52 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +from enum import Enum +from typing import TYPE_CHECKING + +from pydantic import Field + +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.kernel_pydantic import KernelBaseModel +from semantic_kernel.utils.experimental_decorator import experimental_class + +if TYPE_CHECKING: + from semantic_kernel.agents.agent import Agent + + +@experimental_class +class AggregateTerminationCondition(str, Enum): + """The condition for terminating the aggregation process.""" + + ALL = "All" + ANY = "Any" + + +@experimental_class +class AggregatorTerminationStrategy(KernelBaseModel): + """A strategy that aggregates multiple termination strategies.""" + + strategies: list[TerminationStrategy] + condition: AggregateTerminationCondition = Field(default=AggregateTerminationCondition.ALL) + + async def should_terminate_async( + self, + agent: "Agent", + history: list[ChatMessageContent], + ) -> bool: + """Check if the agent should terminate. + + Args: + agent: The agent to check. + history: The history of messages in the conversation. 
+ + Returns: + True if the agent should terminate, False otherwise + """ + strategy_execution = [strategy.should_terminate(agent, history) for strategy in self.strategies] + results = await asyncio.gather(*strategy_execution) + + if self.condition == AggregateTerminationCondition.ALL: + return all(results) + return any(results) diff --git a/python/semantic_kernel/agents/strategies/termination/default_termination_strategy.py b/python/semantic_kernel/agents/strategies/termination/default_termination_strategy.py new file mode 100644 index 000000000000..b1232d680097 --- /dev/null +++ b/python/semantic_kernel/agents/strategies/termination/default_termination_strategy.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft. All rights reserved. + +from typing import TYPE_CHECKING + +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.utils.experimental_decorator import experimental_class + +if TYPE_CHECKING: + from semantic_kernel.agents.agent import Agent + from semantic_kernel.contents.chat_message_content import ChatMessageContent + + +@experimental_class +class DefaultTerminationStrategy(TerminationStrategy): + """A default termination strategy that never terminates.""" + + maximum_iterations: int = 1 + + async def should_agent_terminate(self, agent: "Agent", history: list["ChatMessageContent"]) -> bool: + """Check if the agent should terminate. + + Args: + agent: The agent to check. + history: The history of messages in the conversation. + + Returns: + Defaults to False for the default strategy + """ + return False diff --git a/python/semantic_kernel/agents/strategies/termination/kernel_function_termination_strategy.py b/python/semantic_kernel/agents/strategies/termination/kernel_function_termination_strategy.py new file mode 100644 index 000000000000..92295488d559 --- /dev/null +++ b/python/semantic_kernel/agents/strategies/termination/kernel_function_termination_strategy.py @@ -0,0 +1,92 @@ +# Copyright (c) Microsoft. All rights reserved. + +import logging +from collections.abc import Callable +from inspect import isawaitable +from typing import TYPE_CHECKING, ClassVar + +from pydantic import Field + +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.functions.kernel_function import KernelFunction +from semantic_kernel.kernel import Kernel +from semantic_kernel.utils.experimental_decorator import experimental_class + +if TYPE_CHECKING: + from semantic_kernel.agents import Agent + +logger: logging.Logger = logging.getLogger(__name__) + + +@experimental_class +class KernelFunctionTerminationStrategy(TerminationStrategy): + """A termination strategy that uses a kernel function to determine termination.""" + + DEFAULT_AGENT_VARIABLE_NAME: ClassVar[str] = "_agent_" + DEFAULT_HISTORY_VARIABLE_NAME: ClassVar[str] = "_history_" + + agent_variable_name: str | None = Field(default=DEFAULT_AGENT_VARIABLE_NAME) + history_variable_name: str | None = Field(default=DEFAULT_HISTORY_VARIABLE_NAME) + arguments: KernelArguments | None = None + function: KernelFunction + kernel: Kernel + result_parser: Callable[..., bool] = Field(default_factory=lambda: (lambda: True)) + + async def should_agent_terminate( + self, + agent: "Agent", + history: list[ChatMessageContent], + ) -> bool: + """Check if the agent should terminate. 
+ + Args: + agent: The agent to check. + history: The history of messages in the conversation. + + Returns: + True if the agent should terminate, False otherwise + """ + original_arguments = self.arguments or KernelArguments() + execution_settings = original_arguments.execution_settings or {} + + messages = [message.to_dict(role_key="role", content_key="content") for message in history] + + filtered_arguments = { + self.agent_variable_name: agent.name or agent.id, + self.history_variable_name: messages, + } + + extracted_settings = {key: setting.model_dump() for key, setting in execution_settings.items()} + + combined_arguments = { + **original_arguments, + **extracted_settings, + **{k: v for k, v in filtered_arguments.items()}, + } + + arguments = KernelArguments( + **combined_arguments, + ) + + logger.info(f"should_agent_terminate, function invoking: `{self.function.fully_qualified_name}`") + + result = await self.function.invoke(kernel=self.kernel, arguments=arguments) + + if result is None: + logger.info( + f"should_agent_terminate, function `{self.function.fully_qualified_name}` " + f"invoked with result `None`", + ) + return False + + logger.info( + f"should_agent_terminate, function `{self.function.fully_qualified_name}` " + f"invoked with result `{result.value if result.value else None}`", + ) + + result_parsed = self.result_parser(result) + if isawaitable(result_parsed): + result_parsed = await result_parsed + return result_parsed diff --git a/python/semantic_kernel/agents/strategies/termination/termination_strategy.py b/python/semantic_kernel/agents/strategies/termination/termination_strategy.py new file mode 100644 index 000000000000..ba4d0f6c341f --- /dev/null +++ b/python/semantic_kernel/agents/strategies/termination/termination_strategy.py @@ -0,0 +1,57 @@ +# Copyright (c) Microsoft. All rights reserved. + +import logging +from typing import TYPE_CHECKING + +from pydantic import Field + +from semantic_kernel.agents.agent import Agent +from semantic_kernel.kernel_pydantic import KernelBaseModel +from semantic_kernel.utils.experimental_decorator import experimental_class + +if TYPE_CHECKING: + from semantic_kernel.contents.chat_message_content import ChatMessageContent + +logger: logging.Logger = logging.getLogger(__name__) + + +@experimental_class +class TerminationStrategy(KernelBaseModel): + """A strategy for determining when an agent should terminate.""" + + maximum_iterations: int = Field(default=99) + automatic_reset: bool = False + agents: list[Agent] = Field(default_factory=list) + + async def should_agent_terminate(self, agent: "Agent", history: list["ChatMessageContent"]) -> bool: + """Check if the agent should terminate. + + Args: + agent: The agent to check. + history: The history of messages in the conversation. + + Returns: + True if the agent should terminate, False otherwise + """ + raise NotImplementedError("Subclasses should implement this method") + + async def should_terminate(self, agent: "Agent", history: list["ChatMessageContent"]) -> bool: + """Check if the agent should terminate. + + Args: + agent: The agent to check. + history: The history of messages in the conversation. 
+ + Returns: + True if the agent should terminate, False otherwise + """ + logger.info(f"Evaluating termination criteria for {agent.id}") + + if self.agents and not any(a.id == agent.id for a in self.agents): + logger.info(f"Agent {agent.id} is out of scope") + return False + + should_terminate = await self.should_agent_terminate(agent, history) + + logger.info(f"Evaluated criteria for {agent.id}, should terminate: {should_terminate}") + return should_terminate diff --git a/python/semantic_kernel/connectors/ai/anthropic/__init__.py b/python/semantic_kernel/connectors/ai/anthropic/__init__.py new file mode 100644 index 000000000000..c5d96ddd147f --- /dev/null +++ b/python/semantic_kernel/connectors/ai/anthropic/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Microsoft. All rights reserved. + +from semantic_kernel.connectors.ai.anthropic.prompt_execution_settings.anthropic_prompt_execution_settings import ( + AnthropicChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.anthropic.services.anthropic_chat_completion import AnthropicChatCompletion + +__all__ = [ + "AnthropicChatCompletion", + "AnthropicChatPromptExecutionSettings", +] diff --git a/python/semantic_kernel/connectors/ai/anthropic/prompt_execution_settings/__init__.py b/python/semantic_kernel/connectors/ai/anthropic/prompt_execution_settings/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/connectors/ai/anthropic/prompt_execution_settings/anthropic_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/anthropic/prompt_execution_settings/anthropic_prompt_execution_settings.py new file mode 100644 index 000000000000..792afbe370dd --- /dev/null +++ b/python/semantic_kernel/connectors/ai/anthropic/prompt_execution_settings/anthropic_prompt_execution_settings.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft. All rights reserved. 
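Before continuing with the Anthropic connector files below, a minimal sketch (not part of this diff) of how the termination strategies introduced above compose. `my_agent` and `chat_history` are assumed to be an already constructed `Agent` and `ChatHistory`; the custom strategy and its "approved" keyword are illustrative only:

```python
# Illustrative sketch: composing the termination strategies defined above.
from semantic_kernel.agents.strategies.termination.aggregator_termination_strategy import (
    AggregateTerminationCondition,
    AggregatorTerminationStrategy,
)
from semantic_kernel.agents.strategies.termination.default_termination_strategy import (
    DefaultTerminationStrategy,
)
from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy


class ApprovalTerminationStrategy(TerminationStrategy):
    """Terminate as soon as any message in the history contains 'approved'."""

    async def should_agent_terminate(self, agent, history) -> bool:
        return any("approved" in (message.content or "") for message in history)


# should_terminate_async gathers each strategy's should_terminate concurrently,
# then reduces the results with all() (condition=ALL) or any() (condition=ANY).
aggregator = AggregatorTerminationStrategy(
    strategies=[ApprovalTerminationStrategy(), DefaultTerminationStrategy()],
    condition=AggregateTerminationCondition.ANY,
)
# terminated = await aggregator.should_terminate_async(my_agent, chat_history.messages)
```

Note that each wrapped strategy still applies its own agent-scope check from `TerminationStrategy.should_terminate`, so out-of-scope agents simply contribute `False` to the aggregation.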
+ +import logging +from typing import Any + +from pydantic import Field, model_validator + +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings + +logger = logging.getLogger(__name__) + + +class AnthropicPromptExecutionSettings(PromptExecutionSettings): + """Common request settings for Anthropic services.""" + + ai_model_id: str | None = Field(None, serialization_alias="model") + + +class AnthropicChatPromptExecutionSettings(AnthropicPromptExecutionSettings): + """Specific settings for the Chat Completion endpoint.""" + + messages: list[dict[str, Any]] | None = None + stream: bool | None = None + system: str | None = None + max_tokens: int | None = Field(None, gt=0) + temperature: float | None = Field(None, ge=0.0, le=2.0) + stop_sequences: list[str] | None = None + top_p: float | None = Field(None, ge=0.0, le=1.0) + top_k: int | None = Field(None, ge=0) + + @model_validator(mode="after") + def check_function_call_behavior(self) -> "AnthropicChatPromptExecutionSettings": + """Check if the user is requesting function call behavior.""" + if self.function_choice_behavior is not None: + raise NotImplementedError("Anthropic does not support function call behavior.") + + return self diff --git a/python/semantic_kernel/connectors/ai/anthropic/services/__init__.py b/python/semantic_kernel/connectors/ai/anthropic/services/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/connectors/ai/anthropic/services/anthropic_chat_completion.py b/python/semantic_kernel/connectors/ai/anthropic/services/anthropic_chat_completion.py new file mode 100644 index 000000000000..3b9a7de99182 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/anthropic/services/anthropic_chat_completion.py @@ -0,0 +1,249 @@ +# Copyright (c) Microsoft. All rights reserved. 
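A hedged usage sketch (not part of this diff) for the settings class defined above, which the chat completion service that follows consumes. The model id and stop sequence are example values only:

```python
# Illustrative sketch of AnthropicChatPromptExecutionSettings, exported from
# the package __init__ earlier in this diff.
from semantic_kernel.connectors.ai.anthropic import AnthropicChatPromptExecutionSettings

settings = AnthropicChatPromptExecutionSettings(
    ai_model_id="claude-3-5-sonnet-20240620",  # serialized under the "model" alias
    max_tokens=1024,                # validated as > 0
    temperature=0.7,                # validated against [0.0, 2.0]
    top_p=0.9,                      # validated against [0.0, 1.0]
    stop_sequences=["\n\nHuman:"],  # example stop sequence
)
# Setting function_choice_behavior raises NotImplementedError via the
# model validator, since Anthropic function calling is not wired up here.
# prepare_settings_dict() yields the kwargs that AnthropicChatCompletion
# below unpacks into async_client.messages.create(**...).
```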
+
+import logging
+from collections.abc import AsyncGenerator
+from typing import Any
+
+from anthropic import AsyncAnthropic
+from anthropic.types import (
+    ContentBlockStopEvent,
+    Message,
+    RawContentBlockDeltaEvent,
+    RawMessageDeltaEvent,
+    RawMessageStartEvent,
+    TextBlock,
+)
+from pydantic import ValidationError
+
+from semantic_kernel.connectors.ai.anthropic.prompt_execution_settings.anthropic_prompt_execution_settings import (
+    AnthropicChatPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.anthropic.settings.anthropic_settings import AnthropicSettings
+from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+from semantic_kernel.contents.chat_history import ChatHistory
+from semantic_kernel.contents.chat_message_content import ITEM_TYPES, ChatMessageContent
+from semantic_kernel.contents.streaming_chat_message_content import ITEM_TYPES as STREAMING_ITEM_TYPES
+from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent
+from semantic_kernel.contents.streaming_text_content import StreamingTextContent
+from semantic_kernel.contents.text_content import TextContent
+from semantic_kernel.contents.utils.author_role import AuthorRole
+from semantic_kernel.contents.utils.finish_reason import FinishReason as SemanticKernelFinishReason
+from semantic_kernel.exceptions.service_exceptions import (
+    ServiceInitializationError,
+    ServiceResponseException,
+)
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+# map finish reasons from Anthropic to Semantic Kernel
+ANTHROPIC_TO_SEMANTIC_KERNEL_FINISH_REASON_MAP = {
+    "end_turn": SemanticKernelFinishReason.STOP,
+    "max_tokens": SemanticKernelFinishReason.LENGTH,
+    "tool_use": SemanticKernelFinishReason.TOOL_CALLS,
+}
+
+logger: logging.Logger = logging.getLogger(__name__)
+
+
+@experimental_class
+class AnthropicChatCompletion(ChatCompletionClientBase):
+    """Anthropic ChatCompletion class."""
+
+    async_client: AsyncAnthropic
+
+    def __init__(
+        self,
+        ai_model_id: str | None = None,
+        service_id: str | None = None,
+        api_key: str | None = None,
+        async_client: AsyncAnthropic | None = None,
+        env_file_path: str | None = None,
+        env_file_encoding: str | None = None,
+    ) -> None:
+        """Initialize an AnthropicChatCompletion service.
+
+        Args:
+            ai_model_id: Anthropic model name, see
+                https://docs.anthropic.com/en/docs/about-claude/models#model-names
+            service_id: Service ID tied to the execution settings.
+            api_key: The optional API key to use. If provided, it will override
+                the env vars or .env file value.
+            async_client: An existing client to use.
+            env_file_path: Use the environment settings file as a fallback
+                to environment variables.
+            env_file_encoding: The encoding of the environment settings file.
+ """ + try: + anthropic_settings = AnthropicSettings.create( + api_key=api_key, + chat_model_id=ai_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as ex: + raise ServiceInitializationError("Failed to create Anthropic settings.", ex) from ex + + if not anthropic_settings.chat_model_id: + raise ServiceInitializationError("The Anthropic chat model ID is required.") + + if not async_client: + async_client = AsyncAnthropic( + api_key=anthropic_settings.api_key.get_secret_value(), + ) + + super().__init__( + async_client=async_client, + service_id=service_id or anthropic_settings.chat_model_id, + ai_model_id=anthropic_settings.chat_model_id, + ) + + async def get_chat_message_contents( + self, + chat_history: "ChatHistory", + settings: "PromptExecutionSettings", + **kwargs: Any, + ) -> list["ChatMessageContent"]: + """Executes a chat completion request and returns the result. + + Args: + chat_history: The chat history to use for the chat completion. + settings: The settings to use for the chat completion request. + kwargs: The optional arguments. + + Returns: + The completion result(s). + """ + if not isinstance(settings, AnthropicChatPromptExecutionSettings): + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, AnthropicChatPromptExecutionSettings) # nosec + + if not settings.ai_model_id: + settings.ai_model_id = self.ai_model_id + + settings.messages = self._prepare_chat_history_for_request(chat_history) + try: + response = await self.async_client.messages.create(**settings.prepare_settings_dict()) + except Exception as ex: + raise ServiceResponseException( + f"{type(self)} service failed to complete the prompt", + ex, + ) from ex + + metadata: dict[str, Any] = {"id": response.id} + # Check if usage exists and has a value, then add it to the metadata + if hasattr(response, "usage") and response.usage is not None: + metadata["usage"] = response.usage + + return [self._create_chat_message_content(response, content_block, metadata) + for content_block in response.content] + + async def get_streaming_chat_message_contents( + self, + chat_history: ChatHistory, + settings: PromptExecutionSettings, + **kwargs: Any, + ) -> AsyncGenerator[list[StreamingChatMessageContent], Any]: + """Executes a streaming chat completion request and returns the result. + + Args: + chat_history: The chat history to use for the chat completion. + settings: The settings to use for the chat completion request. + kwargs: The optional arguments. + + Yields: + A stream of StreamingChatMessageContent. 
+ """ + if not isinstance(settings, AnthropicChatPromptExecutionSettings): + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, AnthropicChatPromptExecutionSettings) # nosec + + if not settings.ai_model_id: + settings.ai_model_id = self.ai_model_id + + settings.messages = self._prepare_chat_history_for_request(chat_history) + try: + async with self.async_client.messages.stream(**settings.prepare_settings_dict()) as stream: + author_role = None + metadata: dict[str, Any] = {"usage": {}, "id": None} + content_block_idx = 0 + + async for stream_event in stream: + if isinstance(stream_event, RawMessageStartEvent): + author_role = stream_event.message.role + metadata["usage"]["input_tokens"] = stream_event.message.usage.input_tokens + metadata["id"] = stream_event.message.id + elif isinstance(stream_event, (RawContentBlockDeltaEvent, RawMessageDeltaEvent)): + yield [self._create_streaming_chat_message_content(stream_event, + content_block_idx, + author_role, + metadata)] + elif isinstance(stream_event, ContentBlockStopEvent): + content_block_idx += 1 + + except Exception as ex: + raise ServiceResponseException( + f"{type(self)} service failed to complete the request", + ex, + ) from ex + + def _create_chat_message_content( + self, + response: Message, + content: TextBlock, + response_metadata: dict[str, Any] + ) -> "ChatMessageContent": + """Create a chat message content object.""" + items: list[ITEM_TYPES] = [] + + if content.text: + items.append(TextContent(text=content.text)) + + finish_reason = None + if response.stop_reason: + finish_reason = ANTHROPIC_TO_SEMANTIC_KERNEL_FINISH_REASON_MAP[response.stop_reason] + + return ChatMessageContent( + inner_content=response, + ai_model_id=self.ai_model_id, + metadata=response_metadata, + role=AuthorRole(response.role), + items=items, + finish_reason=finish_reason, + ) + + def _create_streaming_chat_message_content( + self, + stream_event: RawContentBlockDeltaEvent | RawMessageDeltaEvent, + content_block_idx: int, + role: str | None = None, + metadata: dict[str, Any] = {} + ) -> StreamingChatMessageContent: + """Create a streaming chat message content object from a choice.""" + text_content = "" + + if stream_event.delta and hasattr(stream_event.delta, "text"): + text_content = stream_event.delta.text + + items: list[STREAMING_ITEM_TYPES] = [StreamingTextContent(choice_index=content_block_idx, text=text_content)] + + finish_reason = None + if isinstance(stream_event, RawMessageDeltaEvent): + if stream_event.delta.stop_reason: + finish_reason = ANTHROPIC_TO_SEMANTIC_KERNEL_FINISH_REASON_MAP[stream_event.delta.stop_reason] + + metadata["usage"]["output_tokens"] = stream_event.usage.output_tokens + + return StreamingChatMessageContent( + choice_index=content_block_idx, + inner_content=stream_event, + ai_model_id=self.ai_model_id, + metadata=metadata, + role=AuthorRole(role) if role else AuthorRole.ASSISTANT, + finish_reason=finish_reason, + items=items, + ) + + def get_prompt_execution_settings_class(self) -> "type[AnthropicChatPromptExecutionSettings]": + """Create a request settings object.""" + return AnthropicChatPromptExecutionSettings + diff --git a/python/semantic_kernel/connectors/ai/anthropic/settings/__init__.py b/python/semantic_kernel/connectors/ai/anthropic/settings/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/connectors/ai/anthropic/settings/anthropic_settings.py 
b/python/semantic_kernel/connectors/ai/anthropic/settings/anthropic_settings.py
new file mode 100644
index 000000000000..4c4b01a352ee
--- /dev/null
+++ b/python/semantic_kernel/connectors/ai/anthropic/settings/anthropic_settings.py
@@ -0,0 +1,29 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from typing import ClassVar
+
+from pydantic import SecretStr
+
+from semantic_kernel.kernel_pydantic import KernelBaseSettings
+
+
+class AnthropicSettings(KernelBaseSettings):
+    """Anthropic model settings.
+
+    The settings are first loaded from environment variables with the prefix 'ANTHROPIC_'. If the
+    environment variables are not found, the settings can be loaded from a .env file with the
+    encoding 'utf-8'. If the settings are not found in the .env file, the settings are ignored;
+    however, validation will fail, alerting you that the settings are missing.
+
+    Optional settings for prefix 'ANTHROPIC_' are:
+    - api_key: Anthropic API key, see https://console.anthropic.com/settings/keys
+        (Env var ANTHROPIC_API_KEY)
+    - chat_model_id: The Anthropic chat model ID to use, see
+        https://docs.anthropic.com/en/docs/about-claude/models#model-names
+        (Env var ANTHROPIC_CHAT_MODEL_ID)
+    - env_file_path: if provided, the .env settings are read from this file path location
+    """
+
+    env_prefix: ClassVar[str] = "ANTHROPIC_"
+
+    api_key: SecretStr
+    chat_model_id: str | None = None
diff --git a/python/semantic_kernel/connectors/ai/azure_ai_inference/azure_ai_inference_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/azure_ai_inference/azure_ai_inference_prompt_execution_settings.py
index 5e81ad8b76e4..9f0d8bba851d 100644
--- a/python/semantic_kernel/connectors/ai/azure_ai_inference/azure_ai_inference_prompt_execution_settings.py
+++ b/python/semantic_kernel/connectors/ai/azure_ai_inference/azure_ai_inference_prompt_execution_settings.py
@@ -30,8 +30,15 @@ class AzureAIInferencePromptExecutionSettings(PromptExecutionSettings):
 class AzureAIInferenceChatPromptExecutionSettings(AzureAIInferencePromptExecutionSettings):
     """Azure AI Inference Chat Prompt Execution Settings."""
 
-    tools: list[dict[str, Any]] | None = Field(None, max_length=64)
-    tool_choice: str | None = None
+    tools: list[dict[str, Any]] | None = Field(
+        None,
+        max_length=64,
+        description="Do not set this manually. It is set by the service based on the function choice configuration.",
+    )
+    tool_choice: str | None = Field(
+        None,
+        description="Do not set this manually.
It is set by the service based on the function choice configuration.", + ) @experimental_class diff --git a/python/semantic_kernel/connectors/ai/azure_ai_inference/services/azure_ai_inference_chat_completion.py b/python/semantic_kernel/connectors/ai/azure_ai_inference/services/azure_ai_inference_chat_completion.py index f8a1adf295db..bcccd2c28d7f 100644 --- a/python/semantic_kernel/connectors/ai/azure_ai_inference/services/azure_ai_inference_chat_completion.py +++ b/python/semantic_kernel/connectors/ai/azure_ai_inference/services/azure_ai_inference_chat_completion.py @@ -135,19 +135,20 @@ async def get_chat_message_contents( settings = self.get_prompt_execution_settings_from_settings(settings) assert isinstance(settings, AzureAIInferenceChatPromptExecutionSettings) # nosec + kernel = kwargs.get("kernel") + if settings.function_choice_behavior is not None and (not kernel or not isinstance(kernel, Kernel)): + raise ServiceInvalidExecutionSettingsError("Kernel is required for auto invoking functions.") + + if kernel and settings.function_choice_behavior: + self._verify_function_choice_behavior(settings) + self._configure_function_choice_behavior(settings, kernel) + if ( settings.function_choice_behavior is None or not settings.function_choice_behavior.auto_invoke_kernel_functions ): return await self._send_chat_request(chat_history, settings) - kernel = kwargs.get("kernel") - if not isinstance(kernel, Kernel): - raise ServiceInvalidExecutionSettingsError("Kernel is required for auto invoking functions.") - - self._verify_function_choice_behavior(settings) - self._configure_function_choice_behavior(settings, kernel) - for request_index in range(settings.function_choice_behavior.maximum_auto_invoke_attempts): completions = await self._send_chat_request(chat_history, settings) chat_history.add_message(message=completions[0]) @@ -158,7 +159,7 @@ async def get_chat_message_contents( results = await self._invoke_function_calls( function_calls=function_calls, chat_history=chat_history, - kernel=kernel, + kernel=kernel, # type: ignore arguments=kwargs.get("arguments", None), function_call_count=fc_count, request_index=request_index, @@ -248,6 +249,14 @@ async def get_streaming_chat_message_contents( settings = self.get_prompt_execution_settings_from_settings(settings) assert isinstance(settings, AzureAIInferenceChatPromptExecutionSettings) # nosec + kernel = kwargs.get("kernel") + if settings.function_choice_behavior is not None and (not kernel or not isinstance(kernel, Kernel)): + raise ServiceInvalidExecutionSettingsError("Kernel is required for auto invoking functions.") + + if kernel and settings.function_choice_behavior: + self._verify_function_choice_behavior(settings) + self._configure_function_choice_behavior(settings, kernel) + if ( settings.function_choice_behavior is None or not settings.function_choice_behavior.auto_invoke_kernel_functions @@ -256,25 +265,24 @@ async def get_streaming_chat_message_contents( async_generator = self._send_chat_streaming_request(chat_history, settings) else: # Auto invoke is required. 
- async_generator = self._get_streaming_chat_message_contents_auto_invoke(chat_history, settings, **kwargs) + async_generator = self._get_streaming_chat_message_contents_auto_invoke( + kernel, # type: ignore + kwargs.get("arguments"), + chat_history, + settings, + ) async for messages in async_generator: yield messages async def _get_streaming_chat_message_contents_auto_invoke( self, + kernel: Kernel, + arguments: KernelArguments | None, chat_history: ChatHistory, settings: AzureAIInferenceChatPromptExecutionSettings, - **kwargs: Any, ) -> AsyncGenerator[list[StreamingChatMessageContent], Any]: """Get streaming chat message contents from the Azure AI Inference service with auto invoking functions.""" - kernel = kwargs.get("kernel") - if not isinstance(kernel, Kernel): - raise ServiceInvalidExecutionSettingsError("Kernel is required for auto invoking functions.") - - self._verify_function_choice_behavior(settings) - self._configure_function_choice_behavior(settings, kernel) - # mypy doesn't recognize the settings.function_choice_behavior is not None by the check above request_attempts = settings.function_choice_behavior.maximum_auto_invoke_attempts # type: ignore @@ -301,7 +309,7 @@ async def _get_streaming_chat_message_contents_auto_invoke( function_calls=function_calls, chat_history=chat_history, kernel=kernel, - arguments=kwargs.get("arguments", None), + arguments=arguments, function_call_count=len(function_calls), request_index=request_index, # mypy doesn't recognize the settings.function_choice_behavior is not None by the check above @@ -412,8 +420,6 @@ def _get_metadata_from_response(self, response: ChatCompletions | StreamingChatC def _verify_function_choice_behavior(self, settings: AzureAIInferenceChatPromptExecutionSettings): """Verify the function choice behavior.""" - if not settings.function_choice_behavior: - raise ServiceInvalidExecutionSettingsError("Function choice behavior is required for tool calls.") if settings.extra_parameters is not None and settings.extra_parameters.get("n", 1) > 1: # Currently only OpenAI models allow multiple completions but the Azure AI Inference service # does not expose the functionality directly. If users want to have more than 1 responses, they @@ -427,7 +433,7 @@ def _configure_function_choice_behavior( ): """Configure the function choice behavior to include the kernel functions.""" if not settings.function_choice_behavior: - raise ServiceInvalidExecutionSettingsError("Function choice behavior is required for tool calls.") + return settings.function_choice_behavior.configure( kernel=kernel, update_settings_callback=update_settings_from_function_call_configuration, settings=settings diff --git a/python/semantic_kernel/connectors/ai/chat_completion_client_base.py b/python/semantic_kernel/connectors/ai/chat_completion_client_base.py index ae24136838c1..4fca8ae2906f 100644 --- a/python/semantic_kernel/connectors/ai/chat_completion_client_base.py +++ b/python/semantic_kernel/connectors/ai/chat_completion_client_base.py @@ -4,6 +4,8 @@ from collections.abc import AsyncGenerator from typing import TYPE_CHECKING, Any +from semantic_kernel.contents.annotation_content import AnnotationContent +from semantic_kernel.contents.file_reference_content import FileReferenceContent from semantic_kernel.services.ai_service_client_base import AIServiceClientBase if TYPE_CHECKING: @@ -126,4 +128,8 @@ def _prepare_chat_history_for_request( Returns: prepared_chat_history (Any): The prepared chat history for a request. 
""" - return [message.to_dict(role_key=role_key, content_key=content_key) for message in chat_history.messages] + return [ + message.to_dict(role_key=role_key, content_key=content_key) + for message in chat_history.messages + if not isinstance(message, (AnnotationContent, FileReferenceContent)) + ] diff --git a/python/semantic_kernel/connectors/ai/function_call_choice_configuration.py b/python/semantic_kernel/connectors/ai/function_call_choice_configuration.py new file mode 100644 index 000000000000..d44fb946af65 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/function_call_choice_configuration.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft. All rights reserved. + + +from pydantic.dataclasses import dataclass + +from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata +from semantic_kernel.utils.experimental_decorator import experimental_class + + +@experimental_class +@dataclass +class FunctionCallChoiceConfiguration: + """Configuration for function call choice.""" + + available_functions: list[KernelFunctionMetadata] | None = None diff --git a/python/semantic_kernel/connectors/ai/function_calling_utils.py b/python/semantic_kernel/connectors/ai/function_calling_utils.py index 354fd0397a56..70240b45710f 100644 --- a/python/semantic_kernel/connectors/ai/function_calling_utils.py +++ b/python/semantic_kernel/connectors/ai/function_calling_utils.py @@ -1,15 +1,19 @@ # Copyright (c) Microsoft. All rights reserved. -from typing import Any +from collections import OrderedDict +from typing import TYPE_CHECKING, Any -from semantic_kernel.connectors.ai.function_choice_behavior import FunctionCallChoiceConfiguration -from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings -from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError + +if TYPE_CHECKING: + from semantic_kernel.connectors.ai.function_choice_behavior import FunctionCallChoiceConfiguration + from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings + from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata def update_settings_from_function_call_configuration( - function_choice_configuration: FunctionCallChoiceConfiguration, - settings: PromptExecutionSettings, + function_choice_configuration: "FunctionCallChoiceConfiguration", + settings: "PromptExecutionSettings", type: str, ) -> None: """Update the settings from a FunctionChoiceConfiguration.""" @@ -26,7 +30,7 @@ def update_settings_from_function_call_configuration( def kernel_function_metadata_to_function_call_format( - metadata: KernelFunctionMetadata, + metadata: "KernelFunctionMetadata", ) -> dict[str, Any]: """Convert the kernel function metadata to function calling format.""" return { @@ -41,3 +45,26 @@ def kernel_function_metadata_to_function_call_format( }, }, } + + +def _combine_filter_dicts(*dicts: dict[str, list[str]]) -> dict: + """Combine multiple filter dictionaries with list values into one dictionary. + + This method is ensuring unique values while preserving order. 
+ """ + combined_filters = {} + + keys = set().union(*(d.keys() for d in dicts)) + + for key in keys: + combined_functions: OrderedDict[str, None] = OrderedDict() + for d in dicts: + if key in d: + if isinstance(d[key], list): + for item in d[key]: + combined_functions[item] = None + else: + raise ServiceInitializationError(f"Values for filter key '{key}' are not lists.") + combined_filters[key] = list(combined_functions.keys()) + + return combined_filters diff --git a/python/semantic_kernel/connectors/ai/function_choice_behavior.py b/python/semantic_kernel/connectors/ai/function_choice_behavior.py index 13a918ff315b..759274d632f2 100644 --- a/python/semantic_kernel/connectors/ai/function_choice_behavior.py +++ b/python/semantic_kernel/connectors/ai/function_choice_behavior.py @@ -1,29 +1,32 @@ # Copyright (c) Microsoft. All rights reserved. import logging -from collections import OrderedDict from collections.abc import Callable from enum import Enum -from typing import TYPE_CHECKING, Literal +from typing import TYPE_CHECKING, Literal, TypeVar -from pydantic.dataclasses import dataclass from typing_extensions import deprecated +from semantic_kernel.connectors.ai.function_calling_utils import _combine_filter_dicts from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError -from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata from semantic_kernel.kernel_pydantic import KernelBaseModel from semantic_kernel.utils.experimental_decorator import experimental_class if TYPE_CHECKING: from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior + from semantic_kernel.connectors.ai.function_call_choice_configuration import FunctionCallChoiceConfiguration from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.kernel import Kernel + DEFAULT_MAX_AUTO_INVOKE_ATTEMPTS = 5 logger = logging.getLogger(__name__) +_T = TypeVar("_T", bound="FunctionChoiceBehavior") + + @experimental_class class FunctionChoiceType(Enum): """The type of function choice behavior.""" @@ -33,37 +36,6 @@ class FunctionChoiceType(Enum): REQUIRED = "required" -@experimental_class -@dataclass -class FunctionCallChoiceConfiguration: - """Configuration for function call choice.""" - - available_functions: list["KernelFunctionMetadata"] | None = None - - -def _combine_filter_dicts(*dicts: dict[str, list[str]]) -> dict: - """Combine multiple filter dictionaries with list values into one dictionary. - - This method is ensuring unique values while preserving order. - """ - combined_filters = {} - - keys = set().union(*(d.keys() for d in dicts)) - - for key in keys: - combined_functions: OrderedDict[str, None] = OrderedDict() - for d in dicts: - if key in d: - if isinstance(d[key], list): - for item in d[key]: - combined_functions[item] = None - else: - raise ServiceInitializationError(f"Values for filter key '{key}' are not lists.") - combined_filters[key] = list(combined_functions.keys()) - - return combined_filters - - @experimental_class class FunctionChoiceBehavior(KernelBaseModel): """Class that controls function choice behavior. @@ -73,7 +45,7 @@ class FunctionChoiceBehavior(KernelBaseModel): max_auto_invoke_attempts: The maximum number of auto invoke attempts. filters: Filters for the function choice behavior. Available options are: excluded_plugins, included_plugins, excluded_functions, or included_functions. - type: The type of function choice behavior. 
+ type_: The type of function choice behavior. Properties: auto_invoke_kernel_functions: Check if the kernel functions should be auto-invoked. @@ -101,11 +73,11 @@ class FunctionChoiceBehavior(KernelBaseModel): dict[Literal["excluded_plugins", "included_plugins", "excluded_functions", "included_functions"], list[str]] | None ) = None - type: FunctionChoiceType | None = None + type_: FunctionChoiceType | None = None @classmethod @deprecated("The `FunctionCallBehavior` class is deprecated; use `FunctionChoiceBehavior` instead.") - def from_function_call_behavior(cls, behavior: "FunctionCallBehavior") -> "FunctionChoiceBehavior": + def from_function_call_behavior(cls: type[_T], behavior: "FunctionCallBehavior") -> _T: """Create a FunctionChoiceBehavior from a FunctionCallBehavior.""" from semantic_kernel.connectors.ai.function_call_behavior import ( EnabledFunctions, @@ -145,8 +117,10 @@ def _check_and_get_config( Literal["excluded_plugins", "included_plugins", "excluded_functions", "included_functions"], list[str] ] | None = {}, - ) -> FunctionCallChoiceConfiguration: + ) -> "FunctionCallChoiceConfiguration": """Check for missing functions and get the function call choice configuration.""" + from semantic_kernel.connectors.ai.function_call_choice_configuration import FunctionCallChoiceConfiguration + if filters: return FunctionCallChoiceConfiguration(available_functions=kernel.get_list_of_function_metadata(filters)) return FunctionCallChoiceConfiguration(available_functions=kernel.get_full_list_of_function_metadata()) @@ -164,15 +138,15 @@ def configure( config = self.get_config(kernel) if config: - update_settings_callback(config, settings, self.type) + update_settings_callback(config, settings, self.type_) - def get_config(self, kernel: "Kernel") -> FunctionCallChoiceConfiguration: + def get_config(self, kernel: "Kernel") -> "FunctionCallChoiceConfiguration": """Get the function call choice configuration based on the type.""" return self._check_and_get_config(kernel, self.filters) @classmethod def Auto( - cls, + cls: type[_T], auto_invoke: bool = True, *, filters: dict[ @@ -180,7 +154,7 @@ def Auto( ] | None = None, **kwargs, - ) -> "FunctionChoiceBehavior": + ) -> _T: """Creates a FunctionChoiceBehavior with type AUTO. Returns FunctionChoiceBehavior class with auto_invoke enabled, and the desired functions @@ -189,21 +163,21 @@ def Auto( """ kwargs.setdefault("maximum_auto_invoke_attempts", DEFAULT_MAX_AUTO_INVOKE_ATTEMPTS if auto_invoke else 0) return cls( - type=FunctionChoiceType.AUTO, + type_=FunctionChoiceType.AUTO, filters=filters, **kwargs, ) @classmethod def NoneInvoke( - cls, + cls: type[_T], *, filters: dict[ Literal["excluded_plugins", "included_plugins", "excluded_functions", "included_functions"], list[str] ] | None = None, **kwargs, - ) -> "FunctionChoiceBehavior": + ) -> _T: """Creates a FunctionChoiceBehavior with type NONE. Returns FunctionChoiceBehavior class with auto_invoke disabled, and the desired functions @@ -212,14 +186,14 @@ def NoneInvoke( """ kwargs.setdefault("maximum_auto_invoke_attempts", 0) return cls( - type=FunctionChoiceType.NONE, + type_=FunctionChoiceType.NONE, filters=filters, **kwargs, ) @classmethod def Required( - cls, + cls: type[_T], auto_invoke: bool = True, *, filters: dict[ @@ -227,7 +201,7 @@ def Required( ] | None = None, **kwargs, - ) -> "FunctionChoiceBehavior": + ) -> _T: """Creates a FunctionChoiceBehavior with type REQUIRED. 
Returns FunctionChoiceBehavior class with auto_invoke enabled, and the desired functions @@ -236,13 +210,13 @@ def Required( """ kwargs.setdefault("maximum_auto_invoke_attempts", 1 if auto_invoke else 0) return cls( - type=FunctionChoiceType.REQUIRED, + type_=FunctionChoiceType.REQUIRED, filters=filters, **kwargs, ) @classmethod - def from_dict(cls, data: dict) -> "FunctionChoiceBehavior": + def from_dict(cls: type[_T], data: dict) -> _T: """Create a FunctionChoiceBehavior from a dictionary.""" type_map = { "auto": cls.Auto, @@ -268,7 +242,7 @@ def from_dict(cls, data: dict) -> "FunctionChoiceBehavior": ) @classmethod - def from_string(cls, data: str) -> "FunctionChoiceBehavior": + def from_string(cls: type[_T], data: str) -> _T: """Create a FunctionChoiceBehavior from a string. This method converts the provided string to a FunctionChoiceBehavior object @@ -276,11 +250,11 @@ def from_string(cls, data: str) -> "FunctionChoiceBehavior": """ type_value = data.lower() if type_value == "auto": - return FunctionChoiceBehavior.Auto() + return cls.Auto() if type_value == "none": - return FunctionChoiceBehavior.NoneInvoke() + return cls.NoneInvoke() if type_value == "required": - return FunctionChoiceBehavior.Required() + return cls.Required() raise ServiceInitializationError( f"The specified type `{type_value}` is not supported. Allowed types are: `auto`, `none`, `required`." ) diff --git a/python/semantic_kernel/connectors/ai/google/README.md b/python/semantic_kernel/connectors/ai/google/README.md new file mode 100644 index 000000000000..03f132cd518c --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/README.md @@ -0,0 +1,54 @@ +# Google - Gemini + +Gemini models are Google's large language models. Semantic Kernel provides two connectors to access these models from Google Cloud. + +## Google AI + +You can access the Gemini API from Google AI Studio. This mode of access is for quick prototyping as it relies on API keys. + +Follow [these instructions](https://cloud.google.com/docs/authentication/api-keys) to create an API key. + +Once you have an API key, you can start using Gemini models in SK using the `google_ai` connector. Example: + +```Python +kernel = Kernel() +kernel.add_service( + GoogleAIChatCompletion( + gemini_model_id="gemini-1.5-flash", + api_key="...", + ) +) +... +``` + +> Alternatively, you can use an .env file to store the model id and api key. + +## Vertex AI + +Google also offers access to Gemini through its Vertex AI platform. Vertex AI provides a more complete solution to build your enterprise AI applications end-to-end. You can read more about it [here](https://cloud.google.com/vertex-ai/generative-ai/docs/migrate/migrate-google-ai). + +This mode of access requires a Google Cloud service account. Follow these [instructions](https://cloud.google.com/vertex-ai/generative-ai/docs/migrate/migrate-google-ai) to create a Google Cloud project if you don't have one already. Remember the `project id` as it is required to access the models. + +Follow the steps below to set up your environment to use the Vertex AI API: + +- [Install the gcloud CLI](https://cloud.google.com/sdk/docs/install) +- [Initialize the gcloud CLI](https://cloud.google.com/sdk/docs/initializing) + +Once you have your project and your environment is set up, you can start using Gemini models in SK using the `vertex_ai` connector. Example: + +```Python +kernel = Kernel() +kernel.add_service( + VertexAIChatCompletion( + project_id="...", + gemini_model_id="gemini-1.5-flash", + ) +) +... 
+```
+
+> Alternatively, you can use an .env file to store the model id and project id.
+
+## Why does the code in the two connectors look almost identical?
+
+The two connectors have very similar implementations, including the utils files. However, they are fundamentally different because they depend on different packages from Google. Although many of the type names are identical, they are different types.
\ No newline at end of file
diff --git a/python/semantic_kernel/connectors/ai/google/google_ai/google_ai_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/google/google_ai/google_ai_prompt_execution_settings.py
index 94c0cc9c17cf..a1f0ce927e61 100644
--- a/python/semantic_kernel/connectors/ai/google/google_ai/google_ai_prompt_execution_settings.py
+++ b/python/semantic_kernel/connectors/ai/google/google_ai/google_ai_prompt_execution_settings.py
@@ -35,8 +35,15 @@ class GoogleAITextPromptExecutionSettings(GoogleAIPromptExecutionSettings):
 class GoogleAIChatPromptExecutionSettings(GoogleAIPromptExecutionSettings):
     """Google AI Chat Prompt Execution Settings."""
 
-    tools: list[dict[str, Any]] | None = Field(None, max_length=64)
-    tool_choice: str | None = None
+    tools: list[dict[str, Any]] | None = Field(
+        None,
+        max_length=64,
+        description="Do not set this manually. It is set by the service based on the function choice configuration.",
+    )
+    tool_config: dict[str, Any] | None = Field(
+        None,
+        description="Do not set this manually. It is set by the service based on the function choice configuration.",
+    )
 
     @override
     def prepare_settings_dict(self, **kwargs) -> dict[str, Any]:
@@ -47,7 +54,7 @@ def prepare_settings_dict(self, **kwargs) -> dict[str, Any]:
         """
         settings_dict = super().prepare_settings_dict(**kwargs)
         settings_dict.pop("tools", None)
-        settings_dict.pop("tool_choice", None)
+        settings_dict.pop("tool_config", None)
         return settings_dict
diff --git a/python/semantic_kernel/connectors/ai/google/google_ai/services/google_ai_chat_completion.py b/python/semantic_kernel/connectors/ai/google/google_ai/services/google_ai_chat_completion.py
index 8f928e05059a..8b72915b1b82 100644
--- a/python/semantic_kernel/connectors/ai/google/google_ai/services/google_ai_chat_completion.py
+++ b/python/semantic_kernel/connectors/ai/google/google_ai/services/google_ai_chat_completion.py
@@ -1,8 +1,10 @@
 # Copyright (c) Microsoft. All rights reserved.
 
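A hedged sketch (not part of this diff) of the auto function calling flow that the hunks below add to `GoogleAIChatCompletion`. Plugin registration on the kernel is elided; the model id and question are placeholder values:

```python
# Illustrative sketch of the new auto function calling path.
import asyncio

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.google.google_ai.google_ai_prompt_execution_settings import (
    GoogleAIChatPromptExecutionSettings,
)
from semantic_kernel.connectors.ai.google.google_ai.services.google_ai_chat_completion import (
    GoogleAIChatCompletion,
)
from semantic_kernel.contents.chat_history import ChatHistory


async def main() -> None:
    kernel = Kernel()  # kernel functions added here become Gemini tool declarations
    service = GoogleAIChatCompletion(gemini_model_id="gemini-1.5-flash", api_key="...")

    settings = GoogleAIChatPromptExecutionSettings(
        function_choice_behavior=FunctionChoiceBehavior.Auto(),
    )
    history = ChatHistory()
    history.add_user_message("What is the weather like today?")

    # Passing kernel= is mandatory once a function choice behavior is set;
    # otherwise the service raises ServiceInvalidExecutionSettingsError.
    responses = await service.get_chat_message_contents(
        chat_history=history, settings=settings, kernel=kernel
    )
    print(responses[0].content)


asyncio.run(main())
```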
+import logging import sys from collections.abc import AsyncGenerator +from functools import reduce from typing import TYPE_CHECKING, Any import google.generativeai as genai @@ -16,14 +18,27 @@ ) from semantic_kernel.connectors.ai.google.google_ai.services.google_ai_base import GoogleAIBase from semantic_kernel.connectors.ai.google.google_ai.services.utils import ( - filter_system_message, finish_reason_from_google_ai_to_semantic_kernel, format_assistant_message, + format_tool_message, format_user_message, + update_settings_from_function_choice_configuration, +) +from semantic_kernel.connectors.ai.google.shared_utils import ( + configure_function_choice_behavior, + filter_system_message, + format_gemini_function_name_to_kernel_function_fully_qualified_name, + invoke_function_calls, ) +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.streaming_chat_message_content import ITEM_TYPES as STREAMING_ITEM_TYPES from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent +from semantic_kernel.contents.streaming_text_content import StreamingTextContent +from semantic_kernel.contents.text_content import TextContent from semantic_kernel.contents.utils.author_role import AuthorRole from semantic_kernel.contents.utils.finish_reason import FinishReason +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.kernel import Kernel if sys.version_info >= (3, 12): from typing import override # pragma: no cover @@ -33,12 +48,17 @@ from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase from semantic_kernel.connectors.ai.google.google_ai.google_ai_settings import GoogleAISettings from semantic_kernel.contents.chat_history import ChatHistory -from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError +from semantic_kernel.contents.chat_message_content import ITEM_TYPES, ChatMessageContent +from semantic_kernel.exceptions.service_exceptions import ( + ServiceInitializationError, + ServiceInvalidExecutionSettingsError, +) if TYPE_CHECKING: from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +logger: logging.Logger = logging.getLogger(__name__) + class GoogleAIChatCompletion(GoogleAIBase, ChatCompletionClientBase): """Google AI Chat Completion Client.""" @@ -97,7 +117,41 @@ async def get_chat_message_contents( settings = self.get_prompt_execution_settings_from_settings(settings) assert isinstance(settings, GoogleAIChatPromptExecutionSettings) # nosec - return await self._send_chat_request(chat_history, settings) + kernel = kwargs.get("kernel") + if settings.function_choice_behavior is not None and (not kernel or not isinstance(kernel, Kernel)): + raise ServiceInvalidExecutionSettingsError("Kernel is required for auto invoking functions.") + + if kernel and settings.function_choice_behavior: + configure_function_choice_behavior(settings, kernel, update_settings_from_function_choice_configuration) + + if ( + settings.function_choice_behavior is None + or not settings.function_choice_behavior.auto_invoke_kernel_functions + ): + return await self._send_chat_request(chat_history, settings) + + for request_index in range(settings.function_choice_behavior.maximum_auto_invoke_attempts): + completions = await self._send_chat_request(chat_history, settings) + chat_history.add_message(message=completions[0]) + 
function_calls = [item for item in chat_history.messages[-1].items if isinstance(item, FunctionCallContent)] + if (fc_count := len(function_calls)) == 0: + return completions + + results = await invoke_function_calls( + function_calls=function_calls, + chat_history=chat_history, + kernel=kernel, # type: ignore + arguments=kwargs.get("arguments", None), + function_call_count=fc_count, + request_index=request_index, + function_behavior=settings.function_choice_behavior, + ) + + if any(result.terminate for result in results if result is not None): + return completions + else: + # do a final call without auto function calling + return await self._send_chat_request(chat_history, settings) async def _send_chat_request( self, chat_history: ChatHistory, settings: GoogleAIChatPromptExecutionSettings @@ -112,6 +166,8 @@ async def _send_chat_request( response: AsyncGenerateContentResponse = await model.generate_content_async( contents=self._prepare_chat_history_for_request(chat_history), generation_config=GenerationConfig(**settings.prepare_settings_dict()), + tools=settings.tools, + tool_config=settings.tool_config, ) return [self._create_chat_message_content(response, candidate) for candidate in response.candidates] @@ -133,10 +189,25 @@ def _create_chat_message_content( response_metadata = self._get_metadata_from_response(response) response_metadata.update(self._get_metadata_from_candidate(candidate)) + items: list[ITEM_TYPES] = [] + for idx, part in enumerate(candidate.content.parts): + if part.text: + items.append(TextContent(text=part.text, inner_content=response, metadata=response_metadata)) + elif part.function_call: + items.append( + FunctionCallContent( + id=f"{part.function_call.name}_{idx!s}", + name=format_gemini_function_name_to_kernel_function_fully_qualified_name( + part.function_call.name + ), + arguments={k: v for k, v in part.function_call.args.items()}, + ) + ) + return ChatMessageContent( ai_model_id=self.ai_model_id, role=AuthorRole.ASSISTANT, - content=candidate.content.parts[0].text, + items=items, inner_content=response, finish_reason=finish_reason, metadata=response_metadata, @@ -155,11 +226,76 @@ async def get_streaming_chat_message_contents( settings = self.get_prompt_execution_settings_from_settings(settings) assert isinstance(settings, GoogleAIChatPromptExecutionSettings) # nosec - async_generator = self._send_chat_streaming_request(chat_history, settings) + kernel = kwargs.get("kernel") + if settings.function_choice_behavior is not None and (not kernel or not isinstance(kernel, Kernel)): + raise ServiceInvalidExecutionSettingsError("Kernel is required for auto invoking functions.") + + if kernel and settings.function_choice_behavior: + configure_function_choice_behavior(settings, kernel, update_settings_from_function_choice_configuration) + + if ( + settings.function_choice_behavior is None + or not settings.function_choice_behavior.auto_invoke_kernel_functions + ): + # No auto invoke is required. + async_generator = self._send_chat_streaming_request(chat_history, settings) + else: + # Auto invoke is required. 
+ async_generator = self._get_streaming_chat_message_contents_auto_invoke( + kernel, # type: ignore + kwargs.get("arguments"), + chat_history, + settings, + ) async for messages in async_generator: yield messages + async def _get_streaming_chat_message_contents_auto_invoke( + self, + kernel: Kernel, + arguments: KernelArguments | None, + chat_history: ChatHistory, + settings: GoogleAIChatPromptExecutionSettings, + ) -> AsyncGenerator[list[StreamingChatMessageContent], Any]: + """Get streaming chat message contents from the Google AI service with auto invoking functions.""" + if not settings.function_choice_behavior: + raise ServiceInvalidExecutionSettingsError( + "Function choice behavior is required for auto invoking functions." + ) + + for request_index in range(settings.function_choice_behavior.maximum_auto_invoke_attempts): + all_messages: list[StreamingChatMessageContent] = [] + function_call_returned = False + async for messages in self._send_chat_streaming_request(chat_history, settings): + for message in messages: + if message: + all_messages.append(message) + if any(isinstance(item, FunctionCallContent) for item in message.items): + function_call_returned = True + yield messages + + if not function_call_returned: + # Response doesn't contain any function calls. No need to proceed to the next request. + return + + full_completion: StreamingChatMessageContent = reduce(lambda x, y: x + y, all_messages) + function_calls = [item for item in full_completion.items if isinstance(item, FunctionCallContent)] + chat_history.add_message(message=full_completion) + + results = await invoke_function_calls( + function_calls=function_calls, + chat_history=chat_history, + kernel=kernel, + arguments=arguments, + function_call_count=len(function_calls), + request_index=request_index, + function_behavior=settings.function_choice_behavior, + ) + + if any(result.terminate for result in results if result is not None): + return + async def _send_chat_streaming_request( self, chat_history: ChatHistory, @@ -175,6 +311,8 @@ async def _send_chat_streaming_request( response: AsyncGenerateContentResponse = await model.generate_content_async( contents=self._prepare_chat_history_for_request(chat_history), generation_config=GenerationConfig(**settings.prepare_settings_dict()), + tools=settings.tools, + tool_config=settings.tool_config, stream=True, ) @@ -200,11 +338,33 @@ def _create_streaming_chat_message_content( response_metadata = self._get_metadata_from_response(chunk) response_metadata.update(self._get_metadata_from_candidate(candidate)) + items: list[STREAMING_ITEM_TYPES] = [] + for idx, part in enumerate(candidate.content.parts): + if part.text: + items.append( + StreamingTextContent( + choice_index=candidate.index, + text=part.text, + inner_content=chunk, + metadata=response_metadata, + ) + ) + elif part.function_call: + items.append( + FunctionCallContent( + id=f"{part.function_call.name}_{idx!s}", + name=format_gemini_function_name_to_kernel_function_fully_qualified_name( + part.function_call.name + ), + arguments={k: v for k, v in part.function_call.args.items()}, + ) + ) + return StreamingChatMessageContent( ai_model_id=self.ai_model_id, role=AuthorRole.ASSISTANT, choice_index=candidate.index, - content=candidate.content.parts[0].text, + items=items, inner_content=chunk, finish_reason=finish_reason, metadata=response_metadata, @@ -230,8 +390,8 @@ def _prepare_chat_history_for_request( chat_request_messages.append(Content(role="user", parts=format_user_message(message))) elif message.role == 
AuthorRole.ASSISTANT: chat_request_messages.append(Content(role="model", parts=format_assistant_message(message))) - else: - raise ValueError(f"Unsupported role: {message.role}") + elif message.role == AuthorRole.TOOL: + chat_request_messages.append(Content(role="function", parts=format_tool_message(message))) return chat_request_messages diff --git a/python/semantic_kernel/connectors/ai/google/google_ai/services/utils.py b/python/semantic_kernel/connectors/ai/google/google_ai/services/utils.py index b1eb6aa1bc57..abbd5bf1281d 100644 --- a/python/semantic_kernel/connectors/ai/google/google_ai/services/utils.py +++ b/python/semantic_kernel/connectors/ai/google/google_ai/services/utils.py @@ -1,16 +1,29 @@ # Copyright (c) Microsoft. All rights reserved. +import json import logging - -from google.generativeai.protos import Blob, Candidate, Part - -from semantic_kernel.contents.chat_history import ChatHistory +from typing import Any + +from google.generativeai.protos import Blob, Candidate, FunctionCall, FunctionResponse, Part + +from semantic_kernel.connectors.ai.function_call_choice_configuration import FunctionCallChoiceConfiguration +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceType +from semantic_kernel.connectors.ai.google.google_ai.google_ai_prompt_execution_settings import ( + GoogleAIChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.google.shared_utils import ( + FUNCTION_CHOICE_TYPE_TO_GOOGLE_FUNCTION_CALLING_MODE, + format_function_result_content_name_to_gemini_function_name, + format_kernel_function_fully_qualified_name_to_gemini_function_name, +) from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent from semantic_kernel.contents.image_content import ImageContent from semantic_kernel.contents.text_content import TextContent -from semantic_kernel.contents.utils.author_role import AuthorRole from semantic_kernel.contents.utils.finish_reason import FinishReason as SemanticKernelFinishReason from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError +from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata logger: logging.Logger = logging.getLogger(__name__) @@ -34,24 +47,6 @@ def finish_reason_from_google_ai_to_semantic_kernel( return None -def filter_system_message(chat_history: ChatHistory) -> str | None: - """Filter the first system message from the chat history. - - If there are multiple system messages, raise an error. - If there are no system messages, return None. - """ - if len([message for message in chat_history if message.role == AuthorRole.SYSTEM]) > 1: - raise ServiceInvalidRequestError( - "Multiple system messages in chat history. Only one system message is expected." - ) - - for message in chat_history: - if message.role == AuthorRole.SYSTEM: - return message.content - - return None - - def format_user_message(message: ChatMessageContent) -> list[Part]: """Format a user message to the expected object for the client. @@ -61,22 +56,12 @@ def format_user_message(message: ChatMessageContent) -> list[Part]: Returns: The formatted user message as a list of parts. 
""" - if not any(isinstance(item, (ImageContent)) for item in message.items): - return [Part(text=message.content)] - parts: list[Part] = [] for item in message.items: if isinstance(item, TextContent): parts.append(Part(text=message.content)) elif isinstance(item, ImageContent): - if item.data_uri: - parts.append(Part(inline_data=Blob(mime_type=item.mime_type, data=item.data))) - else: - # The Google AI API doesn't support image from an arbitrary URI: - # https://github.com/google-gemini/generative-ai-python/issues/357 - raise ServiceInvalidRequestError( - "ImageContent without data_uri in User message while formatting chat history for Google AI" - ) + parts.append(_create_image_part(item)) else: raise ServiceInvalidRequestError( "Unsupported item type in User message while formatting chat history for Google AI" @@ -95,4 +80,101 @@ def format_assistant_message(message: ChatMessageContent) -> list[Part]: Returns: The formatted assistant message as a list of parts. """ - return [Part(text=message.content)] + parts: list[Part] = [] + for item in message.items: + if isinstance(item, TextContent): + if item.text: + parts.append(Part(text=item.text)) + elif isinstance(item, FunctionCallContent): + parts.append( + Part( + function_call=FunctionCall( + name=item.name, + # Convert the arguments to a dictionary if it is a string + args=json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments, + ) + ) + ) + elif isinstance(item, ImageContent): + parts.append(_create_image_part(item)) + else: + raise ServiceInvalidRequestError( + "Unsupported item type in Assistant message while formatting chat history for Vertex AI" + f" Inference: {type(item)}" + ) + + return parts + + +def format_tool_message(message: ChatMessageContent) -> list[Part]: + """Format a tool message to the expected object for the client. + + Args: + message: The tool message. + + Returns: + The formatted tool message. 
+ """ + parts: list[Part] = [] + for item in message.items: + if isinstance(item, FunctionResultContent): + gemini_function_name = format_function_result_content_name_to_gemini_function_name(item) + parts.append( + Part( + function_response=FunctionResponse( + name=gemini_function_name, + response={ + "name": gemini_function_name, + "content": item.result, + }, + ) + ) + ) + + return parts + + +def kernel_function_metadata_to_google_ai_function_call_format(metadata: KernelFunctionMetadata) -> dict[str, Any]: + """Convert the kernel function metadata to function calling format.""" + return { + "name": format_kernel_function_fully_qualified_name_to_gemini_function_name(metadata), + "description": metadata.description or "", + "parameters": { + "type": "object", + "properties": {param.name: param.schema_data for param in metadata.parameters}, + "required": [p.name for p in metadata.parameters if p.is_required], + }, + } + + +def update_settings_from_function_choice_configuration( + function_choice_configuration: FunctionCallChoiceConfiguration, + settings: GoogleAIChatPromptExecutionSettings, + type: FunctionChoiceType, +) -> None: + """Update the settings from a FunctionChoiceConfiguration.""" + if function_choice_configuration.available_functions: + settings.tool_config = { + "function_calling_config": { + "mode": FUNCTION_CHOICE_TYPE_TO_GOOGLE_FUNCTION_CALLING_MODE[type], + } + } + settings.tools = [ + { + "function_declarations": [ + kernel_function_metadata_to_google_ai_function_call_format(f) + for f in function_choice_configuration.available_functions + ] + } + ] + + +def _create_image_part(image_content: ImageContent) -> Part: + if image_content.data_uri: + return Part(inline_data=Blob(mime_type=image_content.mime_type, data=image_content.data)) + + # The Google AI API doesn't support images from arbitrary URIs: + # https://github.com/google-gemini/generative-ai-python/issues/357 + raise ServiceInvalidRequestError( + "ImageContent without data_uri in User message while formatting chat history for Google AI" + ) diff --git a/python/semantic_kernel/connectors/ai/google/shared_utils.py b/python/semantic_kernel/connectors/ai/google/shared_utils.py new file mode 100644 index 000000000000..5e8686d4397f --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/shared_utils.py @@ -0,0 +1,121 @@ +# Copyright (c) Microsoft. All rights reserved. 
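Illustrative only: the payload shapes that the Google AI utils above write into the execution settings for an AUTO function choice. The plugin name "weather", the function "get_forecast", and the parameter schema (emitted from each parameter's `schema_data`) are hypothetical; the mode strings come from the mapping defined in the shared utils that follow:

```python
# Hypothetical example of the dicts produced by
# update_settings_from_function_choice_configuration above.
tool_config = {
    "function_calling_config": {
        "mode": "AUTO",  # NONE maps to "NONE", REQUIRED maps to "ANY"
    }
}
tools = [
    {
        "function_declarations": [
            {
                "name": "weather_get_forecast",  # "-" replaced by "_" for Gemini
                "description": "Get the forecast for a location.",
                "parameters": {
                    "type": "object",
                    "properties": {"location": {"type": "string"}},
                    "required": ["location"],
                },
            }
        ]
    }
]
```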
+ +import asyncio +import logging +from collections.abc import Callable +from typing import TYPE_CHECKING + +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior, FunctionChoiceType +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata +from semantic_kernel.kernel import Kernel + +if TYPE_CHECKING: + from semantic_kernel.connectors.ai.google.google_ai.google_ai_prompt_execution_settings import ( + GoogleAIChatPromptExecutionSettings, + ) + from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAIChatPromptExecutionSettings, + ) + +logger: logging.Logger = logging.getLogger(__name__) + + +def filter_system_message(chat_history: ChatHistory) -> str | None: + """Filter the first system message from the chat history. + + If there are multiple system messages, raise an error. + If there are no system messages, return None. + """ + if len([message for message in chat_history if message.role == AuthorRole.SYSTEM]) > 1: + raise ServiceInvalidRequestError( + "Multiple system messages in chat history. Only one system message is expected." + ) + + for message in chat_history: + if message.role == AuthorRole.SYSTEM: + return message.content + + return None + + +async def invoke_function_calls( + function_calls: list[FunctionCallContent], + chat_history: ChatHistory, + kernel: Kernel, + arguments: KernelArguments | None, + function_call_count: int, + request_index: int, + function_behavior: FunctionChoiceBehavior, +): + """Invoke function calls.""" + logger.info(f"processing {function_call_count} tool calls in parallel.") + + return await asyncio.gather( + *[ + kernel.invoke_function_call( + function_call=function_call, + chat_history=chat_history, + arguments=arguments, + function_call_count=function_call_count, + request_index=request_index, + function_behavior=function_behavior, + ) + for function_call in function_calls + ], + ) + + +FUNCTION_CHOICE_TYPE_TO_GOOGLE_FUNCTION_CALLING_MODE = { + FunctionChoiceType.AUTO: "AUTO", + FunctionChoiceType.NONE: "NONE", + FunctionChoiceType.REQUIRED: "ANY", +} + +# The separator used in the fully qualified name of the function instead of the default "-" separator. +# This is required since Gemini doesn't work well with "-" in the function name. 
+# https://ai.google.dev/gemini-api/docs/function-calling#function_declarations +GEMINI_FUNCTION_NAME_SEPARATOR = "_" + + +def format_function_result_content_name_to_gemini_function_name(function_result_content: FunctionResultContent) -> str: + """Format the function result content name to the Gemini function name.""" + return ( + f"{function_result_content.plugin_name}{GEMINI_FUNCTION_NAME_SEPARATOR}{function_result_content.function_name}" + if function_result_content.plugin_name + else function_result_content.function_name + ) + + +def format_kernel_function_fully_qualified_name_to_gemini_function_name(metadata: KernelFunctionMetadata) -> str: + """Format the kernel function fully qualified name to the Gemini function name.""" + return ( + f"{metadata.plugin_name}{GEMINI_FUNCTION_NAME_SEPARATOR}{metadata.name}" + if metadata.plugin_name + else metadata.name + ) + + +def format_gemini_function_name_to_kernel_function_fully_qualified_name(gemini_function_name: str) -> str: + """Format the Gemini function name to the kernel function fully qualified name.""" + if GEMINI_FUNCTION_NAME_SEPARATOR in gemini_function_name: + plugin_name, function_name = gemini_function_name.split(GEMINI_FUNCTION_NAME_SEPARATOR, 1) + return f"{plugin_name}-{function_name}" + return gemini_function_name + + +def configure_function_choice_behavior( + settings: "GoogleAIChatPromptExecutionSettings | VertexAIChatPromptExecutionSettings", + kernel: Kernel, + callback: Callable[..., None], +): + """Configure the function choice behavior to include the kernel functions.""" + if not settings.function_choice_behavior: + return + + settings.function_choice_behavior.configure(kernel=kernel, update_settings_callback=callback, settings=settings) diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/__init__.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/__init__.py new file mode 100644 index 000000000000..c524ab06ecb1 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft. All rights reserved. + +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_chat_completion import VertexAIChatCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_completion import VertexAITextCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_embedding import VertexAITextEmbedding +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAIChatPromptExecutionSettings, + VertexAIEmbeddingPromptExecutionSettings, + VertexAIPromptExecutionSettings, +) + +__all__ = [ + "VertexAIChatCompletion", + "VertexAIChatPromptExecutionSettings", + "VertexAIEmbeddingPromptExecutionSettings", + "VertexAIPromptExecutionSettings", + "VertexAITextCompletion", + "VertexAITextEmbedding", +] diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/__init__.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/utils.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/utils.py new file mode 100644 index 000000000000..400329688331 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/utils.py @@ -0,0 +1,182 @@ +# Copyright (c) Microsoft. All rights reserved. 
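A quick sanity check of the separator round-trip implemented above (the names are invented); only the first "_" is treated as the plugin separator when mapping a Gemini function name back to a Semantic Kernel fully qualified name:

    from semantic_kernel.connectors.ai.google.shared_utils import (
        format_gemini_function_name_to_kernel_function_fully_qualified_name,
    )

    # "Weather_get_weather" -> plugin "Weather" + function "get_weather",
    # rejoined with Semantic Kernel's "-" separator.
    name = format_gemini_function_name_to_kernel_function_fully_qualified_name("Weather_get_weather")
    assert name == "Weather-get_weather"

    # A name without the separator is returned unchanged.
    assert format_gemini_function_name_to_kernel_function_fully_qualified_name("ping") == "ping"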
+ +import json +import logging +from typing import Any + +from google.cloud.aiplatform_v1beta1.types.content import Blob, Candidate, Part +from google.cloud.aiplatform_v1beta1.types.tool import FunctionCall, FunctionResponse +from vertexai.generative_models import FunctionDeclaration, Tool, ToolConfig + +from semantic_kernel.connectors.ai.function_call_choice_configuration import FunctionCallChoiceConfiguration +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceType +from semantic_kernel.connectors.ai.google.shared_utils import ( + FUNCTION_CHOICE_TYPE_TO_GOOGLE_FUNCTION_CALLING_MODE, + format_function_result_content_name_to_gemini_function_name, + format_kernel_function_fully_qualified_name_to_gemini_function_name, +) +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAIChatPromptExecutionSettings, +) +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.finish_reason import FinishReason as SemanticKernelFinishReason +from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError +from semantic_kernel.functions.kernel_function_metadata import KernelFunctionMetadata + +logger: logging.Logger = logging.getLogger(__name__) + + +def finish_reason_from_vertex_ai_to_semantic_kernel( + finish_reason: Candidate.FinishReason, +) -> SemanticKernelFinishReason | None: + """Convert a Vertex AI FinishReason to a Semantic Kernel FinishReason. + + This is best effort and may not cover all cases as the enums are not identical. + """ + if finish_reason == Candidate.FinishReason.STOP: + return SemanticKernelFinishReason.STOP + + if finish_reason == Candidate.FinishReason.MAX_TOKENS: + return SemanticKernelFinishReason.LENGTH + + if finish_reason == Candidate.FinishReason.SAFETY: + return SemanticKernelFinishReason.CONTENT_FILTER + + return None + + +def format_user_message(message: ChatMessageContent) -> list[Part]: + """Format a user message to the expected object for the client. + + Args: + message: The user message. + + Returns: + The formatted user message as a list of parts. + """ + parts: list[Part] = [] + for item in message.items: + if isinstance(item, TextContent): + parts.append(Part(text=item.text)) + elif isinstance(item, ImageContent): + parts.append(_create_image_part(item)) + else: + raise ServiceInvalidRequestError( + "Unsupported item type in User message while formatting chat history for Vertex AI" + f" Inference: {type(item)}" + ) + + return parts + + +def format_assistant_message(message: ChatMessageContent) -> list[Part]: + """Format an assistant message to the expected object for the client. + + Args: + message: The assistant message. + + Returns: + The formatted assistant message as a list of parts.
+ """ + parts: list[Part] = [] + for item in message.items: + if isinstance(item, TextContent): + if item.text: + parts.append(Part(text=item.text)) + elif isinstance(item, FunctionCallContent): + parts.append( + Part( + function_call=FunctionCall( + name=item.name, + # Convert the arguments to a dictionary if it is a string + args=json.loads(item.arguments) if isinstance(item.arguments, str) else item.arguments, + ) + ) + ) + elif isinstance(item, ImageContent): + parts.append(_create_image_part(item)) + else: + raise ServiceInvalidRequestError( + "Unsupported item type in Assistant message while formatting chat history for Vertex AI" + f" Inference: {type(item)}" + ) + + return parts + + +def format_tool_message(message: ChatMessageContent) -> list[Part]: + """Format a tool message to the expected object for the client. + + Args: + message: The tool message. + + Returns: + The formatted tool message. + """ + parts: list[Part] = [] + for item in message.items: + if isinstance(item, FunctionResultContent): + gemini_function_name = format_function_result_content_name_to_gemini_function_name(item) + parts.append( + Part( + function_response=FunctionResponse( + name=gemini_function_name, + response={ + "name": gemini_function_name, + "content": item.result, + }, + ) + ) + ) + + return parts + + +def kernel_function_metadata_to_vertex_ai_function_call_format(metadata: KernelFunctionMetadata) -> dict[str, Any]: + """Convert the kernel function metadata to function calling format.""" + return FunctionDeclaration( + name=format_kernel_function_fully_qualified_name_to_gemini_function_name(metadata), + description=metadata.description or "", + parameters={ + "type": "object", + "properties": {param.name: param.schema_data for param in metadata.parameters}, + "required": [p.name for p in metadata.parameters if p.is_required], + }, + ) + + +def update_settings_from_function_choice_configuration( + function_choice_configuration: FunctionCallChoiceConfiguration, + settings: VertexAIChatPromptExecutionSettings, + type: FunctionChoiceType, +) -> None: + """Update the settings from a FunctionChoiceConfiguration.""" + if function_choice_configuration.available_functions: + settings.tool_config = ToolConfig( + function_calling_config=ToolConfig.FunctionCallingConfig( + mode=FUNCTION_CHOICE_TYPE_TO_GOOGLE_FUNCTION_CALLING_MODE[type], + ), + ) + settings.tools = [ + Tool( + function_declarations=[ + kernel_function_metadata_to_vertex_ai_function_call_format(f) + for f in function_choice_configuration.available_functions + ] + ) + ] + + +def _create_image_part(image_content: ImageContent) -> Part: + if image_content.data_uri: + return Part(inline_data=Blob(mime_type=image_content.mime_type, data=image_content.data)) + + # The Google AI API doesn't support images from arbitrary URIs: + # https://github.com/google-gemini/generative-ai-python/issues/357 + raise ServiceInvalidRequestError( + "ImageContent without data_uri in User message while formatting chat history for Google AI" + ) diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_base.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_base.py new file mode 100644 index 000000000000..e17b1994424d --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_base.py @@ -0,0 +1,12 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from abc import ABC + +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings +from semantic_kernel.kernel_pydantic import KernelBaseModel + + +class VertexAIBase(KernelBaseModel, ABC): + """Vertex AI Service.""" + + service_settings: VertexAISettings diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_chat_completion.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_chat_completion.py new file mode 100644 index 000000000000..245f9a434e45 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_chat_completion.py @@ -0,0 +1,426 @@ +# Copyright (c) Microsoft. All rights reserved. + +import sys +from collections.abc import AsyncGenerator, AsyncIterable +from functools import reduce +from typing import Any + +import vertexai +from google.cloud.aiplatform_v1beta1.types.content import Content +from pydantic import ValidationError +from vertexai.generative_models import Candidate, GenerationResponse, GenerativeModel + +from semantic_kernel.connectors.ai.google.shared_utils import ( + configure_function_choice_behavior, + filter_system_message, + format_gemini_function_name_to_kernel_function_fully_qualified_name, + invoke_function_calls, +) +from semantic_kernel.connectors.ai.google.vertex_ai.services.utils import ( + finish_reason_from_vertex_ai_to_semantic_kernel, + format_assistant_message, + format_tool_message, + format_user_message, + update_settings_from_function_choice_configuration, +) +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_base import VertexAIBase +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAIChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.chat_message_content import ITEM_TYPES, ChatMessageContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.streaming_chat_message_content import ITEM_TYPES as STREAMING_ITEM_TYPES +from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent +from semantic_kernel.contents.streaming_text_content import StreamingTextContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents.utils.finish_reason import FinishReason +from semantic_kernel.exceptions.service_exceptions import ( + ServiceInitializationError, + ServiceInvalidExecutionSettingsError, +) +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.kernel import Kernel + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + +from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase + + +class VertexAIChatCompletion(VertexAIBase, ChatCompletionClientBase): + """Google Vertex AI Chat Completion Service.""" + + def __init__( + self, + project_id: str | None = None, + region: str | None = None, + gemini_model_id: str | None = None, + service_id: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, 
+ ) -> None: + """Initialize the Google Vertex AI Chat Completion Service. + + If no arguments are provided, the service will attempt to load the settings from the environment. + The following environment variables are used: + - VERTEX_AI_GEMINI_MODEL_ID + - VERTEX_AI_PROJECT_ID + - VERTEX_AI_REGION + + Args: + project_id (str): The Google Cloud project ID. + region (str): The Google Cloud region. + gemini_model_id (str): The Gemini model ID. + service_id (str): The Vertex AI service ID. + env_file_path (str): The path to the environment file. + env_file_encoding (str): The encoding of the environment file. + """ + try: + vertex_ai_settings = VertexAISettings.create( + project_id=project_id, + region=region, + gemini_model_id=gemini_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as e: + raise ServiceInitializationError(f"Failed to validate Vertex AI settings: {e}") from e + if not vertex_ai_settings.gemini_model_id: + raise ServiceInitializationError("The Vertex AI Gemini model ID is required.") + + super().__init__( + ai_model_id=vertex_ai_settings.gemini_model_id, + service_id=service_id or vertex_ai_settings.gemini_model_id, + service_settings=vertex_ai_settings, + ) + + # region Non-streaming + @override + async def get_chat_message_contents( + self, + chat_history: ChatHistory, + settings: "PromptExecutionSettings", + **kwargs: Any, + ) -> list[ChatMessageContent]: + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, VertexAIChatPromptExecutionSettings) # nosec + + kernel = kwargs.get("kernel") + if settings.function_choice_behavior is not None and (not kernel or not isinstance(kernel, Kernel)): + raise ServiceInvalidExecutionSettingsError("Kernel is required for auto invoking functions.") + + if kernel and settings.function_choice_behavior: + configure_function_choice_behavior(settings, kernel, update_settings_from_function_choice_configuration) + + if ( + settings.function_choice_behavior is None + or not settings.function_choice_behavior.auto_invoke_kernel_functions + ): + return await self._send_chat_request(chat_history, settings) + + for request_index in range(settings.function_choice_behavior.maximum_auto_invoke_attempts): + completions = await self._send_chat_request(chat_history, settings) + chat_history.add_message(message=completions[0]) + function_calls = [item for item in chat_history.messages[-1].items if isinstance(item, FunctionCallContent)] + if (fc_count := len(function_calls)) == 0: + return completions + + results = await invoke_function_calls( + function_calls=function_calls, + chat_history=chat_history, + kernel=kernel, # type: ignore + arguments=kwargs.get("arguments", None), + function_call_count=fc_count, + request_index=request_index, + function_behavior=settings.function_choice_behavior, + ) + + if any(result.terminate for result in results if result is not None): + return completions + else: + # do a final call without auto function calling + return await self._send_chat_request(chat_history, settings) + + async def _send_chat_request( + self, chat_history: ChatHistory, settings: VertexAIChatPromptExecutionSettings + ) -> list[ChatMessageContent]: + """Send a chat request to the Vertex AI service.""" + vertexai.init(project=self.service_settings.project_id, location=self.service_settings.region) + model = GenerativeModel( + self.service_settings.gemini_model_id, + system_instruction=filter_system_message(chat_history), + ) + + response: 
GenerationResponse = await model.generate_content_async( + contents=self._prepare_chat_history_for_request(chat_history), + generation_config=settings.prepare_settings_dict(), + tools=settings.tools, + tool_config=settings.tool_config, + ) + + return [self._create_chat_message_content(response, candidate) for candidate in response.candidates] + + def _create_chat_message_content(self, response: GenerationResponse, candidate: Candidate) -> ChatMessageContent: + """Create a chat message content object. + + Args: + response: The response from the service. + candidate: The candidate from the response. + + Returns: + A chat message content object. + """ + # Best effort conversion of finish reason. The raw value will be available in metadata. + finish_reason: FinishReason | None = finish_reason_from_vertex_ai_to_semantic_kernel(candidate.finish_reason) + response_metadata = self._get_metadata_from_response(response) + response_metadata.update(self._get_metadata_from_candidate(candidate)) + + items: list[ITEM_TYPES] = [] + for idx, part in enumerate(candidate.content.parts): + part_dict = part.to_dict() + if "text" in part_dict: + items.append(TextContent(text=part.text, inner_content=response, metadata=response_metadata)) + elif "function_call" in part_dict: + items.append( + FunctionCallContent( + id=f"{part.function_call.name}_{idx!s}", + name=format_gemini_function_name_to_kernel_function_fully_qualified_name( + part.function_call.name + ), + arguments={k: v for k, v in part.function_call.args.items()}, + ) + ) + + return ChatMessageContent( + ai_model_id=self.ai_model_id, + role=AuthorRole.ASSISTANT, + items=items, + inner_content=response, + finish_reason=finish_reason, + metadata=response_metadata, + ) + + # endregion + + # region Streaming + @override + async def get_streaming_chat_message_contents( + self, + chat_history: ChatHistory, + settings: "PromptExecutionSettings", + **kwargs: Any, + ) -> AsyncGenerator[list[StreamingChatMessageContent], Any]: + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, VertexAIChatPromptExecutionSettings) # nosec + + kernel = kwargs.get("kernel") + if settings.function_choice_behavior is not None and (not kernel or not isinstance(kernel, Kernel)): + raise ServiceInvalidExecutionSettingsError("Kernel is required for auto invoking functions.") + + if kernel and settings.function_choice_behavior: + configure_function_choice_behavior(settings, kernel, update_settings_from_function_choice_configuration) + + if ( + settings.function_choice_behavior is None + or not settings.function_choice_behavior.auto_invoke_kernel_functions + ): + # No auto invoke is required. + async_generator = self._send_chat_streaming_request(chat_history, settings) + else: + # Auto invoke is required. + async_generator = self._get_streaming_chat_message_contents_auto_invoke( + kernel, # type: ignore + kwargs.get("arguments"), + chat_history, + settings, + ) + + async for messages in async_generator: + yield messages + + async def _get_streaming_chat_message_contents_auto_invoke( + self, + kernel: Kernel, + arguments: KernelArguments | None, + chat_history: ChatHistory, + settings: VertexAIChatPromptExecutionSettings, + ) -> AsyncGenerator[list[StreamingChatMessageContent], Any]: + """Get streaming chat message contents from the Vertex AI service with auto invoking functions.""" + if not settings.function_choice_behavior: + raise ServiceInvalidExecutionSettingsError( + "Function choice behavior is required for auto invoking functions."
+ ) + + for request_index in range(settings.function_choice_behavior.maximum_auto_invoke_attempts): + all_messages: list[StreamingChatMessageContent] = [] + function_call_returned = False + async for messages in self._send_chat_streaming_request(chat_history, settings): + for message in messages: + if message: + all_messages.append(message) + if any(isinstance(item, FunctionCallContent) for item in message.items): + function_call_returned = True + yield messages + + if not function_call_returned: + # Response doesn't contain any function calls. No need to proceed to the next request. + return + + full_completion: StreamingChatMessageContent = reduce(lambda x, y: x + y, all_messages) + function_calls = [item for item in full_completion.items if isinstance(item, FunctionCallContent)] + chat_history.add_message(message=full_completion) + + results = await invoke_function_calls( + function_calls=function_calls, + chat_history=chat_history, + kernel=kernel, + arguments=arguments, + function_call_count=len(function_calls), + request_index=request_index, + function_behavior=settings.function_choice_behavior, + ) + + if any(result.terminate for result in results if result is not None): + return + + async def _send_chat_streaming_request( + self, + chat_history: ChatHistory, + settings: VertexAIChatPromptExecutionSettings, + ) -> AsyncGenerator[list[StreamingChatMessageContent], Any]: + """Send a streaming chat request to the Vertex AI service.""" + vertexai.init(project=self.service_settings.project_id, location=self.service_settings.region) + model = GenerativeModel( + self.service_settings.gemini_model_id, + system_instruction=filter_system_message(chat_history), + ) + + response: AsyncIterable[GenerationResponse] = await model.generate_content_async( + contents=self._prepare_chat_history_for_request(chat_history), + generation_config=settings.prepare_settings_dict(), + tools=settings.tools, + tool_config=settings.tool_config, + stream=True, + ) + + async for chunk in response: + yield [self._create_streaming_chat_message_content(chunk, candidate) for candidate in chunk.candidates] + + def _create_streaming_chat_message_content( + self, + chunk: GenerationResponse, + candidate: Candidate, + ) -> StreamingChatMessageContent: + """Create a streaming chat message content object. + + Args: + chunk: The response from the service. + candidate: The candidate from the response. + + Returns: + A streaming chat message content object. + """ + # Best effort conversion of finish reason. The raw value will be available in metadata. 
+ finish_reason: FinishReason | None = finish_reason_from_vertex_ai_to_semantic_kernel(candidate.finish_reason) + response_metadata = self._get_metadata_from_response(chunk) + response_metadata.update(self._get_metadata_from_candidate(candidate)) + + items: list[STREAMING_ITEM_TYPES] = [] + for idx, part in enumerate(candidate.content.parts): + part_dict = part.to_dict() + if "text" in part_dict: + items.append( + StreamingTextContent( + choice_index=candidate.index, + text=part.text, + inner_content=chunk, + metadata=response_metadata, + ) + ) + elif "function_call" in part_dict: + items.append( + FunctionCallContent( + id=f"{part.function_call.name}_{idx!s}", + name=format_gemini_function_name_to_kernel_function_fully_qualified_name( + part.function_call.name + ), + arguments={k: v for k, v in part.function_call.args.items()}, + ) + ) + + return StreamingChatMessageContent( + ai_model_id=self.ai_model_id, + role=AuthorRole.ASSISTANT, + choice_index=candidate.index, + items=items, + inner_content=chunk, + finish_reason=finish_reason, + metadata=response_metadata, + ) + + # endregion + + @override + def _prepare_chat_history_for_request( + self, + chat_history: ChatHistory, + role_key: str = "role", + content_key: str = "content", + ) -> list[Content]: + chat_request_messages: list[Content] = [] + + for message in chat_history.messages: + if message.role == AuthorRole.SYSTEM: + # Skip system messages since they are not part of the chat request. + # System message will be provided as system_instruction in the model. + continue + if message.role == AuthorRole.USER: + chat_request_messages.append(Content(role="user", parts=format_user_message(message))) + elif message.role == AuthorRole.ASSISTANT: + chat_request_messages.append(Content(role="model", parts=format_assistant_message(message))) + elif message.role == AuthorRole.TOOL: + chat_request_messages.append(Content(role="function", parts=format_tool_message(message))) + + return chat_request_messages + + def _get_metadata_from_response(self, response: GenerationResponse) -> dict[str, Any]: + """Get metadata from the response. + + Args: + response: The response from the service. + + Returns: + A dictionary containing metadata. + """ + return { + "prompt_feedback": response.prompt_feedback, + "usage": response.usage_metadata, + } + + def _get_metadata_from_candidate(self, candidate: Candidate) -> dict[str, Any]: + """Get metadata from the candidate. + + Args: + candidate: The candidate from the response. + + Returns: + A dictionary containing metadata. + """ + return { + "index": candidate.index, + "finish_reason": candidate.finish_reason, + "safety_ratings": candidate.safety_ratings, + } + + @override + def get_prompt_execution_settings_class( + self, + ) -> type["PromptExecutionSettings"]: + """Get the request settings class.""" + return VertexAIChatPromptExecutionSettings diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_completion.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_completion.py new file mode 100644 index 000000000000..6919b6ba521e --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_completion.py @@ -0,0 +1,209 @@ +# Copyright (c) Microsoft. All rights reserved. 
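A minimal usage sketch for the chat service defined above; the project and model ids are placeholders and can instead come from the VERTEX_AI_PROJECT_ID and VERTEX_AI_GEMINI_MODEL_ID environment variables:

    import asyncio

    from semantic_kernel.connectors.ai.google.vertex_ai import (
        VertexAIChatCompletion,
        VertexAIChatPromptExecutionSettings,
    )
    from semantic_kernel.contents.chat_history import ChatHistory

    async def main() -> None:
        service = VertexAIChatCompletion(
            project_id="my-gcp-project",       # placeholder
            gemini_model_id="gemini-1.5-pro",  # placeholder
        )
        history = ChatHistory()
        history.add_user_message("Why is the sky blue?")
        settings = VertexAIChatPromptExecutionSettings(temperature=0.2)
        responses = await service.get_chat_message_contents(history, settings)
        print(responses[0].content)

    asyncio.run(main())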
+ + +import sys +from collections.abc import AsyncGenerator, AsyncIterable +from typing import Any + +import vertexai +from pydantic import ValidationError +from vertexai.generative_models import Candidate, GenerationResponse, GenerativeModel + +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_base import VertexAIBase +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAITextPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase +from semantic_kernel.contents.streaming_text_content import StreamingTextContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + + +class VertexAITextCompletion(VertexAIBase, TextCompletionClientBase): + """Vertex AI Text Completion Client.""" + + def __init__( + self, + project_id: str | None = None, + region: str | None = None, + gemini_model_id: str | None = None, + service_id: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> None: + """Initialize the Google Vertex AI Text Completion Service. + + If no arguments are provided, the service will attempt to load the settings from the environment. + The following environment variables are used: + - VERTEX_AI_GEMINI_MODEL_ID + - VERTEX_AI_PROJECT_ID + + Args: + project_id (str): The Google Cloud project ID. + region (str): The Google Cloud region. + gemini_model_id (str): The Gemini model ID. + service_id (str): The Vertex AI service ID. + env_file_path (str): The path to the environment file. + env_file_encoding (str): The encoding of the environment file. 
+ """ + try: + vertex_ai_settings = VertexAISettings.create( + project_id=project_id, + region=region, + gemini_model_id=gemini_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as e: + raise ServiceInitializationError(f"Failed to validate Vertex AI settings: {e}") from e + if not vertex_ai_settings.gemini_model_id: + raise ServiceInitializationError("The Vertex AI Gemini model ID is required.") + + super().__init__( + ai_model_id=vertex_ai_settings.gemini_model_id, + service_id=service_id or vertex_ai_settings.gemini_model_id, + service_settings=vertex_ai_settings, + ) + + # region Non-streaming + @override + async def get_text_contents( + self, + prompt: str, + settings: "PromptExecutionSettings", + ) -> list[TextContent]: + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, VertexAITextPromptExecutionSettings) # nosec + + return await self._send_request(prompt, settings) + + async def _send_request(self, prompt: str, settings: VertexAITextPromptExecutionSettings) -> list[TextContent]: + """Send a text generation request to the Vertex AI service.""" + vertexai.init(project=self.service_settings.project_id, location=self.service_settings.region) + model = GenerativeModel(self.service_settings.gemini_model_id) + + response: GenerationResponse = await model.generate_content_async( + contents=prompt, + generation_config=settings.prepare_settings_dict(), + ) + + return [self._create_text_content(response, candidate) for candidate in response.candidates] + + def _create_text_content(self, response: GenerationResponse, candidate: Candidate) -> TextContent: + """Create a text content object. + + Args: + response: The response from the service. + candidate: The candidate from the response. + + Returns: + A text content object. + """ + response_metadata = self._get_metadata_from_response(response) + response_metadata.update(self._get_metadata_from_candidate(candidate)) + + return TextContent( + ai_model_id=self.ai_model_id, + text=candidate.content.parts[0].text, + inner_content=response, + metadata=response_metadata, + ) + + # endregion + + # region Streaming + @override + async def get_streaming_text_contents( + self, + prompt: str, + settings: "PromptExecutionSettings", + ) -> AsyncGenerator[list["StreamingTextContent"], Any]: + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, VertexAITextPromptExecutionSettings) # nosec + + async_generator = self._send_streaming_request(prompt, settings) + + async for text_contents in async_generator: + yield text_contents + + async def _send_streaming_request( + self, prompt: str, settings: VertexAITextPromptExecutionSettings + ) -> AsyncGenerator[list[StreamingTextContent], Any]: + """Send a text generation request to the Vertex AI service.""" + vertexai.init(project=self.service_settings.project_id, location=self.service_settings.region) + model = GenerativeModel(self.service_settings.gemini_model_id) + + response: AsyncIterable[GenerationResponse] = await model.generate_content_async( + contents=prompt, + generation_config=settings.prepare_settings_dict(), + stream=True, + ) + + async for chunk in response: + yield [self._create_streaming_text_content(chunk, candidate) for candidate in chunk.candidates] + + def _create_streaming_text_content(self, chunk: GenerationResponse, candidate: Candidate) -> StreamingTextContent: + """Create a streaming text content object. 
+ + Args: + chunk: The response from the service. + candidate: The candidate from the response. + + Returns: + A streaming text content object. + """ + response_metadata = self._get_metadata_from_response(chunk) + response_metadata.update(self._get_metadata_from_candidate(candidate)) + + return StreamingTextContent( + ai_model_id=self.ai_model_id, + choice_index=candidate.index, + text=candidate.content.parts[0].text, + inner_content=chunk, + metadata=response_metadata, + ) + + # endregion + + def _get_metadata_from_response(self, response: GenerationResponse) -> dict[str, Any]: + """Get metadata from the response. + + Args: + response: The response from the service. + + Returns: + A dictionary containing metadata. + """ + return { + "prompt_feedback": response.prompt_feedback, + "usage": response.usage_metadata, + } + + def _get_metadata_from_candidate(self, candidate: Candidate) -> dict[str, Any]: + """Get metadata from the candidate. + + Args: + candidate: The candidate from the response. + + Returns: + A dictionary containing metadata. + """ + return { + "index": candidate.index, + "finish_reason": candidate.finish_reason, + "safety_ratings": candidate.safety_ratings, + } + + @override + def get_prompt_execution_settings_class( + self, + ) -> type["PromptExecutionSettings"]: + """Get the request settings class.""" + return VertexAITextPromptExecutionSettings diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_embedding.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_embedding.py new file mode 100644 index 000000000000..46e59ea9bfc4 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/services/vertex_ai_text_embedding.py @@ -0,0 +1,109 @@ +# Copyright (c) Microsoft. All rights reserved. + +import sys +from typing import Any + +import vertexai +from numpy import array, ndarray +from pydantic import ValidationError +from vertexai.language_models import TextEmbedding, TextEmbeddingModel + +from semantic_kernel.connectors.ai.embeddings.embedding_generator_base import EmbeddingGeneratorBase +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_base import VertexAIBase +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAIEmbeddingPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + + +class VertexAITextEmbedding(VertexAIBase, EmbeddingGeneratorBase): + """Vertex AI Text Embedding Service.""" + + def __init__( + self, + project_id: str | None = None, + region: str | None = None, + embedding_model_id: str | None = None, + service_id: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> None: + """Initialize the Google Vertex AI Text Embedding Service. + + If no arguments are provided, the service will attempt to load the settings from the environment. + The following environment variables are used: + - VERTEX_AI_EMBEDDING_MODEL_ID + - VERTEX_AI_PROJECT_ID + + Args: + project_id (str): The Google Cloud project ID. + region (str): The Google Cloud region.
+ embedding_model_id (str): The embedding model ID. + service_id (str): The Vertex AI service ID. + env_file_path (str): The path to the environment file. + env_file_encoding (str): The encoding of the environment file. + """ + try: + vertex_ai_settings = VertexAISettings.create( + project_id=project_id, + region=region, + embedding_model_id=embedding_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as e: + raise ServiceInitializationError(f"Failed to validate Vertex AI settings: {e}") from e + if not vertex_ai_settings.embedding_model_id: + raise ServiceInitializationError("The Vertex AI embedding model ID is required.") + + super().__init__( + ai_model_id=vertex_ai_settings.embedding_model_id, + service_id=service_id or vertex_ai_settings.embedding_model_id, + service_settings=vertex_ai_settings, + ) + + @override + async def generate_embeddings( + self, + texts: list[str], + settings: "PromptExecutionSettings | None" = None, + **kwargs: Any, + ) -> ndarray: + raw_embeddings = await self.generate_raw_embeddings(texts, settings, **kwargs) + return array(raw_embeddings) + + @override + async def generate_raw_embeddings( + self, + texts: list[str], + settings: "PromptExecutionSettings | None" = None, + **kwargs: Any, + ) -> list[list[float]]: + if not settings: + settings = VertexAIEmbeddingPromptExecutionSettings() + else: + settings = self.get_prompt_execution_settings_from_settings(settings) + assert isinstance(settings, VertexAIEmbeddingPromptExecutionSettings) # nosec + + vertexai.init(project=self.service_settings.project_id, location=self.service_settings.region) + model = TextEmbeddingModel.from_pretrained(self.service_settings.embedding_model_id) + response: list[TextEmbedding] = await model.get_embeddings_async( + texts, + **settings.prepare_settings_dict(), + ) + + return [text_embedding.values for text_embedding in response] + + @override + def get_prompt_execution_settings_class( + self, + ) -> type["PromptExecutionSettings"]: + """Get the request settings class.""" + return VertexAIEmbeddingPromptExecutionSettings diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py new file mode 100644 index 000000000000..28c8eb6f28be --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_prompt_execution_settings.py @@ -0,0 +1,67 @@ +# Copyright (c) Microsoft. All rights reserved.
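And a matching sketch for the embedding service above; "text-embedding-004" is the example model id from the settings docstring further below, and the project can come from VERTEX_AI_PROJECT_ID:

    import asyncio

    from semantic_kernel.connectors.ai.google.vertex_ai import VertexAITextEmbedding

    async def main() -> None:
        service = VertexAITextEmbedding(embedding_model_id="text-embedding-004")
        # generate_embeddings returns a numpy ndarray, one row per input text.
        vectors = await service.generate_embeddings(["hello", "world"])
        print(vectors.shape)

    asyncio.run(main())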
+ +import sys +from typing import Any, Literal + +from pydantic import Field +from vertexai.generative_models import Tool, ToolConfig + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings + + +class VertexAIPromptExecutionSettings(PromptExecutionSettings): + """Vertex AI Prompt Execution Settings.""" + + stop_sequences: list[str] | None = Field(None, max_length=5) + response_mime_type: Literal["text/plain", "application/json"] | None = None + response_schema: Any | None = None + candidate_count: int | None = Field(None, ge=1) + max_output_tokens: int | None = Field(None, ge=1) + temperature: float | None = Field(None, ge=0.0, le=2.0) + top_p: float | None = None + top_k: int | None = None + + +class VertexAITextPromptExecutionSettings(VertexAIPromptExecutionSettings): + """Vertex AI Text Prompt Execution Settings.""" + + pass + + +class VertexAIChatPromptExecutionSettings(VertexAIPromptExecutionSettings): + """Vertex AI Chat Prompt Execution Settings.""" + + tools: list[Tool] | None = Field( + None, + max_length=64, + description="Do not set this manually. It is set by the service based on the function choice configuration.", + ) + tool_config: ToolConfig | None = Field( + None, + description="Do not set this manually. It is set by the service based on the function choice configuration.", + ) + + @override + def prepare_settings_dict(self, **kwargs) -> dict[str, Any]: + """Prepare the settings as a dictionary for sending to the AI service. + + This method removes the tools and tool_config keys from the settings dictionary, as + the Vertex AI service mandates these two settings to be sent as separate parameters. + """ + settings_dict = super().prepare_settings_dict(**kwargs) + settings_dict.pop("tools", None) + settings_dict.pop("tool_config", None) + + return settings_dict + + +class VertexAIEmbeddingPromptExecutionSettings(PromptExecutionSettings): + """Vertex AI Embedding Prompt Execution Settings.""" + + auto_truncate: bool | None = None + output_dimensionality: int | None = None diff --git a/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_settings.py b/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_settings.py new file mode 100644 index 000000000000..66bc35035fd6 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/google/vertex_ai/vertex_ai_settings.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft. All rights reserved. + +from typing import ClassVar + +from semantic_kernel.kernel_pydantic import KernelBaseSettings + + +class VertexAISettings(KernelBaseSettings): + """Vertex AI settings. + + The settings are first loaded from environment variables with + the prefix 'VERTEX_AI_'. + If the environment variables are not found, the settings can + be loaded from a .env file with the encoding 'utf-8'. + If the settings are not found in the .env file, the settings + are ignored; however, validation will fail, alerting that the + settings are missing. + + Required settings for prefix 'VERTEX_AI_' are: + - gemini_model_id: str - The Gemini model ID for the Vertex AI service, e.g. gemini-1.5-pro + This value can be found in the Vertex AI service deployment. + (Env var VERTEX_AI_GEMINI_MODEL_ID) + - embedding_model_id: str - The embedding model ID for the Vertex AI service, e.g. text-embedding-004 + This value can be found in the Vertex AI service deployment.
+ (Env var VERTEX_AI_EMBEDDING_MODEL_ID) + - project_id: str - The Google Cloud project ID. + (Env var VERTEX_AI_PROJECT_ID) + - region: str - The Google Cloud region. + (Env var VERTEX_AI_REGION) + """ + + env_prefix: ClassVar[str] = "VERTEX_AI_" + + gemini_model_id: str | None = None + embedding_model_id: str | None = None + project_id: str + region: str | None = None diff --git a/python/semantic_kernel/connectors/ai/hugging_face/hf_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/hugging_face/hf_prompt_execution_settings.py index 0cb549155406..89fc5525cb29 100644 --- a/python/semantic_kernel/connectors/ai/hugging_face/hf_prompt_execution_settings.py +++ b/python/semantic_kernel/connectors/ai/hugging_face/hf_prompt_execution_settings.py @@ -8,6 +8,8 @@ class HuggingFacePromptExecutionSettings(PromptExecutionSettings): + """Hugging Face prompt execution settings.""" + do_sample: bool = True max_new_tokens: int = 256 num_return_sequences: int = 1 diff --git a/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_completion.py b/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_completion.py index 61dd1554ec9d..083905e8b0fd 100644 --- a/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_completion.py +++ b/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_completion.py @@ -25,6 +25,8 @@ class HuggingFaceTextCompletion(TextCompletionClientBase): + """Hugging Face text completion service.""" + task: Literal["summarization", "text-generation", "text2text-generation"] device: str generator: Any diff --git a/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_embedding.py b/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_embedding.py index 553e48fabf2e..ca4cc1bc60f6 100644 --- a/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_embedding.py +++ b/python/semantic_kernel/connectors/ai/hugging_face/services/hf_text_embedding.py @@ -27,6 +27,8 @@ @experimental_class class HuggingFaceTextEmbedding(EmbeddingGeneratorBase): + """Hugging Face text embedding service.""" + device: str generator: Any diff --git a/python/semantic_kernel/connectors/ai/mistral_ai/__init__.py b/python/semantic_kernel/connectors/ai/mistral_ai/__init__.py index 9b2d7d379066..8dc0c473a53f 100644 --- a/python/semantic_kernel/connectors/ai/mistral_ai/__init__.py +++ b/python/semantic_kernel/connectors/ai/mistral_ai/__init__.py @@ -4,8 +4,10 @@ MistralAIChatPromptExecutionSettings, ) from semantic_kernel.connectors.ai.mistral_ai.services.mistral_ai_chat_completion import MistralAIChatCompletion +from semantic_kernel.connectors.ai.mistral_ai.services.mistral_ai_text_embedding import MistralAITextEmbedding __all__ = [ "MistralAIChatCompletion", "MistralAIChatPromptExecutionSettings", + "MistralAITextEmbedding", ] diff --git a/python/semantic_kernel/connectors/ai/mistral_ai/services/mistral_ai_text_embedding.py b/python/semantic_kernel/connectors/ai/mistral_ai/services/mistral_ai_text_embedding.py new file mode 100644 index 000000000000..24b2905b1587 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/mistral_ai/services/mistral_ai_text_embedding.py @@ -0,0 +1,112 @@ +# Copyright (c) Microsoft. All rights reserved. 
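A usage sketch for the MistralAITextEmbedding service defined below, assuming MISTRALAI_API_KEY is set in the environment; "mistral-embed" stands in for whatever MISTRALAI_EMBEDDING_MODEL_ID you use:

    import asyncio

    from semantic_kernel.connectors.ai.mistral_ai import MistralAITextEmbedding

    async def main() -> None:
        service = MistralAITextEmbedding(ai_model_id="mistral-embed")
        vectors = await service.generate_embeddings(["hello world"])
        print(vectors.shape)  # numpy ndarray of embeddings

    asyncio.run(main())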
+ +import sys + +if sys.version_info >= (3, 12): + from typing import Any, override # pragma: no cover +else: + from typing_extensions import Any, override # pragma: no cover +import logging + +from mistralai.async_client import MistralAsyncClient +from mistralai.models.embeddings import EmbeddingResponse +from numpy import array, ndarray +from pydantic import ValidationError + +from semantic_kernel.connectors.ai.embeddings.embedding_generator_base import EmbeddingGeneratorBase +from semantic_kernel.connectors.ai.mistral_ai.settings.mistral_ai_settings import MistralAISettings +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError, ServiceResponseException +from semantic_kernel.utils.experimental_decorator import experimental_class + +logger: logging.Logger = logging.getLogger(__name__) + + +@experimental_class +class MistralAITextEmbedding(EmbeddingGeneratorBase): + """Mistral AI Inference Text Embedding Service.""" + + client: MistralAsyncClient + + def __init__( + self, + ai_model_id: str | None = None, + api_key: str | None = None, + service_id: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + client: MistralAsyncClient | None = None, + ) -> None: + """Initialize the Mistral AI Text Embedding service. + + If no arguments are provided, the service will attempt to load the settings from the environment. + The following environment variables are used: + - MISTRALAI_API_KEY + - MISTRALAI_EMBEDDING_MODEL_ID + + Args: + ai_model_id (str | None): A string that is used to identify the model, such as the model name. + api_key (str | None): The API key for the Mistral AI service deployment. + service_id (str | None): Service ID for the embedding service. + env_file_path (str | None): The path to the environment file. + env_file_encoding (str | None): The encoding of the environment file. + client (MistralAsyncClient | None): The Mistral AI client to use. + + Raises: + ServiceInitializationError: If an error occurs during initialization.
+ """ + try: + mistralai_settings = MistralAISettings.create( + api_key=api_key, + embedding_model_id=ai_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as e: + raise ServiceInitializationError(f"Failed to validate Mistral AI settings: {e}") from e + + if not mistralai_settings.embedding_model_id: + raise ServiceInitializationError("The MistralAI embedding model ID is required.") + + if not client: + client = MistralAsyncClient( + api_key=mistralai_settings.api_key.get_secret_value() + ) + + super().__init__( + service_id=service_id or mistralai_settings.embedding_model_id, + ai_model_id=ai_model_id or mistralai_settings.embedding_model_id, + client=client, + ) + + @override + async def generate_embeddings( + self, + texts: list[str], + settings: "PromptExecutionSettings | None" = None, + **kwargs: Any, + ) -> ndarray: + embedding_response = await self.generate_raw_embeddings(texts, settings, **kwargs) + return array(embedding_response) + + @override + async def generate_raw_embeddings( + self, + texts: list[str], + settings: "PromptExecutionSettings | None" = None, + **kwargs: Any, + ) -> Any: + """Generate embeddings from the Mistral AI service.""" + try: + + embedding_response: EmbeddingResponse = await self.client.embeddings( + model=self.ai_model_id, + input=texts + ) + except Exception as ex: + raise ServiceResponseException( + f"{type(self)} service failed to complete the embedding request.", + ex, + ) from ex + + return [item.embedding for item in embedding_response.data] diff --git a/python/semantic_kernel/connectors/ai/mistral_ai/settings/mistral_ai_settings.py b/python/semantic_kernel/connectors/ai/mistral_ai/settings/mistral_ai_settings.py index 8139be0ba568..8acd90148d69 100644 --- a/python/semantic_kernel/connectors/ai/mistral_ai/settings/mistral_ai_settings.py +++ b/python/semantic_kernel/connectors/ai/mistral_ai/settings/mistral_ai_settings.py @@ -20,6 +20,8 @@ class MistralAISettings(KernelBaseSettings): (Env var MISTRALAI_API_KEY) - chat_model_id: str | None - The The Mistral AI chat model ID to use see https://docs.mistral.ai/getting-started/models/. (Env var MISTRALAI_CHAT_MODEL_ID) + - embedding_model_id: str | None - The The Mistral AI embedding model ID to use see https://docs.mistral.ai/getting-started/models/. 
+ (Env var MISTRALAI_EMBEDDING_MODEL_ID) - env_file_path: str | None - if provided, the .env settings are read from this file path location """ @@ -27,3 +29,4 @@ class MistralAISettings(KernelBaseSettings): api_key: SecretStr chat_model_id: str | None = None + embedding_model_id: str | None = None diff --git a/python/semantic_kernel/connectors/ai/open_ai/__init__.py b/python/semantic_kernel/connectors/ai/open_ai/__init__.py index e7208ec86ef5..ca13fe02f4bd 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/__init__.py +++ b/python/semantic_kernel/connectors/ai/open_ai/__init__.py @@ -15,15 +15,18 @@ ) from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import ( OpenAIChatPromptExecutionSettings, + OpenAIEmbeddingPromptExecutionSettings, OpenAIPromptExecutionSettings, OpenAITextPromptExecutionSettings, ) from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion from semantic_kernel.connectors.ai.open_ai.services.azure_text_completion import AzureTextCompletion from semantic_kernel.connectors.ai.open_ai.services.azure_text_embedding import AzureTextEmbedding +from semantic_kernel.connectors.ai.open_ai.services.azure_text_to_image import AzureTextToImage from semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion import OpenAIChatCompletion from semantic_kernel.connectors.ai.open_ai.services.open_ai_text_completion import OpenAITextCompletion from semantic_kernel.connectors.ai.open_ai.services.open_ai_text_embedding import OpenAITextEmbedding +from semantic_kernel.connectors.ai.open_ai.services.open_ai_text_to_image import OpenAITextToImage __all__ = [ "ApiKeyAuthentication", @@ -37,14 +40,17 @@ "AzureEmbeddingDependency", "AzureTextCompletion", "AzureTextEmbedding", + "AzureTextToImage", "ConnectionStringAuthentication", "DataSourceFieldsMapping", "DataSourceFieldsMapping", "ExtraBody", "OpenAIChatCompletion", "OpenAIChatPromptExecutionSettings", + "OpenAIEmbeddingPromptExecutionSettings", "OpenAIPromptExecutionSettings", "OpenAITextCompletion", "OpenAITextEmbedding", "OpenAITextPromptExecutionSettings", + "OpenAITextToImage", ] diff --git a/python/semantic_kernel/connectors/ai/open_ai/exceptions/content_filter_ai_exception.py b/python/semantic_kernel/connectors/ai/open_ai/exceptions/content_filter_ai_exception.py index 8f887b60b620..0cbdf0763e86 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/exceptions/content_filter_ai_exception.py +++ b/python/semantic_kernel/connectors/ai/open_ai/exceptions/content_filter_ai_exception.py @@ -10,6 +10,8 @@ class ContentFilterResultSeverity(Enum): + """The severity of the content filter result.""" + HIGH = "high" MEDIUM = "medium" SAFE = "safe" @@ -17,6 +19,8 @@ class ContentFilterResultSeverity(Enum): @dataclass class ContentFilterResult: + """The result of a content filter check.""" + filtered: bool = False detected: bool = False severity: ContentFilterResultSeverity = ContentFilterResultSeverity.SAFE @@ -42,6 +46,8 @@ def from_inner_error_result(cls, inner_error_results: dict[str, Any]) -> "Conten class ContentFilterCodes(Enum): + """Content filter codes.""" + RESPONSIBLE_AI_POLICY_VIOLATION = "ResponsibleAIPolicyViolation" diff --git a/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/azure_chat_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/azure_chat_prompt_execution_settings.py index db536253825c..32cfd6f9ae31 100644 --- 
a/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/azure_chat_prompt_execution_settings.py +++ b/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/azure_chat_prompt_execution_settings.py @@ -19,6 +19,8 @@ class AzureChatRequestBase(KernelBaseModel): + """Base class for Azure Chat requests.""" + model_config = ConfigDict( alias_generator=AliasGenerator(validation_alias=to_camel, serialization_alias=to_snake), use_enum_values=True, @@ -27,21 +29,29 @@ class AzureChatRequestBase(KernelBaseModel): class ConnectionStringAuthentication(AzureChatRequestBase): + """Connection string authentication.""" + type: Annotated[Literal["ConnectionString", "connection_string"], AfterValidator(to_snake)] = "connection_string" connection_string: str | None = None class ApiKeyAuthentication(AzureChatRequestBase): + """API key authentication.""" + type: Annotated[Literal["APIKey", "api_key"], AfterValidator(to_snake)] = "api_key" key: str | None = None class AzureEmbeddingDependency(AzureChatRequestBase): + """Azure embedding dependency.""" + type: Annotated[Literal["DeploymentName", "deployment_name"], AfterValidator(to_snake)] = "deployment_name" deployment_name: str | None = None class DataSourceFieldsMapping(AzureChatRequestBase): + """Data source fields mapping.""" + title_field: str | None = None url_field: str | None = None filepath_field: str | None = None @@ -51,6 +61,8 @@ class DataSourceFieldsMapping(AzureChatRequestBase): class AzureDataSourceParameters(AzureChatRequestBase): + """Azure data source parameters.""" + index_name: str index_language: str | None = None fields_mapping: DataSourceFieldsMapping | None = None @@ -64,6 +76,8 @@ class AzureDataSourceParameters(AzureChatRequestBase): class AzureCosmosDBDataSourceParameters(AzureDataSourceParameters): + """Azure Cosmos DB data source parameters.""" + authentication: ConnectionStringAuthentication | None = None database_name: str | None = None container_name: str | None = None @@ -71,11 +85,15 @@ class AzureCosmosDBDataSourceParameters(AzureDataSourceParameters): class AzureCosmosDBDataSource(AzureChatRequestBase): + """Azure Cosmos DB data source.""" + type: Literal["azure_cosmos_db"] = "azure_cosmos_db" parameters: AzureCosmosDBDataSourceParameters class AzureAISearchDataSourceParameters(AzureDataSourceParameters): + """Azure AI Search data source parameters.""" + endpoint: str | None = None query_type: Annotated[ Literal["simple", "semantic", "vector", "vectorSimpleHybrid", "vectorSemanticHybrid"], AfterValidator(to_snake) @@ -84,6 +102,8 @@ class AzureAISearchDataSourceParameters(AzureDataSourceParameters): class AzureAISearchDataSource(AzureChatRequestBase): + """Azure AI Search data source.""" + type: Literal["azure_search"] = "azure_search" parameters: Annotated[dict, AzureAISearchDataSourceParameters] @@ -104,6 +124,8 @@ def from_azure_ai_search_settings(cls, azure_ai_search_settings: "AzureAISearchS class ExtraBody(KernelBaseModel): + """Extra body for the Azure Chat Completion endpoint.""" + data_sources: list[DataSource] | None = None input_language: str | None = Field(None, serialization_alias="inputLanguage") output_language: str | None = Field(None, serialization_alias="outputLanguage") diff --git a/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/open_ai_prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/open_ai_prompt_execution_settings.py index 8cde4a8cdaa9..f4aa7868f5fc 100644 --- 
a/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/open_ai_prompt_execution_settings.py +++ b/python/semantic_kernel/connectors/ai/open_ai/prompt_execution_settings/open_ai_prompt_execution_settings.py @@ -62,12 +62,19 @@ class OpenAIChatPromptExecutionSettings(OpenAIPromptExecutionSettings): """Specific settings for the Chat Completion endpoint.""" response_format: dict[Literal["type"], Literal["text", "json_object"]] | None = None - tools: list[dict[str, Any]] | None = Field(None, max_length=64) - tool_choice: str | None = None function_call: str | None = None functions: list[dict[str, Any]] | None = None messages: list[dict[str, Any]] | None = None function_call_behavior: FunctionCallBehavior | None = Field(None, exclude=True) + tools: list[dict[str, Any]] | None = Field( + None, + max_length=64, + description="Do not set this manually. It is set by the service based on the function choice configuration.", + ) + tool_choice: str | None = Field( + None, + description="Do not set this manually. It is set by the service based on the function choice configuration.", + ) @field_validator("functions", "function_call", mode="after") @classmethod @@ -107,6 +114,8 @@ def check_for_function_call_behavior(cls, v) -> Self: class OpenAIEmbeddingPromptExecutionSettings(PromptExecutionSettings): + """Specific settings for the text embedding endpoint.""" + input: str | list[str] | list[int] | list[list[int]] | None = None ai_model_id: str | None = Field(None, serialization_alias="model") encoding_format: Literal["float", "base64"] | None = None diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/azure_chat_completion.py b/python/semantic_kernel/connectors/ai/open_ai/services/azure_chat_completion.py index 35f4c2843d89..d91fb24ab02b 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/azure_chat_completion.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/azure_chat_completion.py @@ -180,7 +180,8 @@ def _add_tool_message_to_chat_message_content( def _get_tool_message_from_chat_choice(self, choice: Choice | ChunkChoice) -> dict[str, Any] | None: """Get the tool message from a choice.""" content = choice.message if isinstance(choice, Choice) else choice.delta - if content.model_extra is not None: + # When you enable asynchronous content filtering in Azure OpenAI, you may receive empty deltas + if content and content.model_extra is not None: return content.model_extra.get("context", None) # openai allows extra content, so model_extra will be a dict, but we need to check anyway, but no way to test. return None # pragma: no cover diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/azure_text_to_image.py b/python/semantic_kernel/connectors/ai/open_ai/services/azure_text_to_image.py new file mode 100644 index 000000000000..46cb46c0f378 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/open_ai/services/azure_text_to_image.py @@ -0,0 +1,111 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from collections.abc import Mapping +from typing import Any, TypeVar + +from openai import AsyncAzureOpenAI +from openai.lib.azure import AsyncAzureADTokenProvider +from pydantic import ValidationError + +from semantic_kernel.connectors.ai.open_ai.services.azure_config_base import AzureOpenAIConfigBase +from semantic_kernel.connectors.ai.open_ai.services.open_ai_model_types import OpenAIModelTypes +from semantic_kernel.connectors.ai.open_ai.services.open_ai_text_to_image_base import OpenAITextToImageBase +from semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings import AzureOpenAISettings +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError + +T_ = TypeVar("T_", bound="AzureTextToImage") + + +class AzureTextToImage(AzureOpenAIConfigBase, OpenAITextToImageBase): + """Azure Text to Image service.""" + + def __init__( + self, + service_id: str | None = None, + api_key: str | None = None, + deployment_name: str | None = None, + endpoint: str | None = None, + base_url: str | None = None, + api_version: str | None = None, + ad_token: str | None = None, + ad_token_provider: AsyncAzureADTokenProvider | None = None, + default_headers: Mapping[str, str] | None = None, + async_client: AsyncAzureOpenAI | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> None: + """Initialize an AzureTextToImage service. + + Args: + service_id: The service ID. (Optional) + api_key: The optional api key. If provided, will override the value in the + env vars or .env file. + deployment_name: The optional deployment. If provided, will override the value + (text_to_image_deployment_name) in the env vars or .env file. + endpoint: The optional deployment endpoint. If provided, will override the value + in the env vars or .env file. + base_url: The optional deployment base_url. If provided, will override the value + in the env vars or .env file. + api_version: The optional deployment api version. If provided, will override the value + in the env vars or .env file. + ad_token: The Azure AD token for authentication. (Optional) + ad_token_provider: Azure AD Token provider. (Optional) + default_headers: The default headers mapping of string keys to + string values for HTTP requests. (Optional) + async_client: An existing client to use. (Optional) + env_file_path: Use the environment settings file as a fallback to + environment variables. (Optional) + env_file_encoding: The encoding of the environment settings file.
(Optional) """ + try: + azure_openai_settings = AzureOpenAISettings.create( + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + api_key=api_key, + text_to_image_deployment_name=deployment_name, + endpoint=endpoint, + base_url=base_url, + api_version=api_version, + ) + except ValidationError as exc: + raise ServiceInitializationError(f"Invalid settings: {exc}") from exc + if not azure_openai_settings.text_to_image_deployment_name: + raise ServiceInitializationError("The Azure OpenAI text to image deployment name is required.") + + super().__init__( + deployment_name=azure_openai_settings.text_to_image_deployment_name, + endpoint=azure_openai_settings.endpoint, + base_url=azure_openai_settings.base_url, + api_version=azure_openai_settings.api_version, + service_id=service_id, + api_key=azure_openai_settings.api_key.get_secret_value() if azure_openai_settings.api_key else None, + ad_token=ad_token, + ad_token_provider=ad_token_provider, + default_headers=default_headers, + ai_model_type=OpenAIModelTypes.IMAGE, + client=async_client, + ) + + @classmethod + def from_dict(cls: type[T_], settings: dict[str, Any]) -> T_: + """Initialize an Azure OpenAI service from a dictionary of settings. + + Args: + settings: A dictionary of settings for the service. + should contain keys: deployment_name, endpoint, api_key, + and optionally: service_id, base_url, api_version, ad_token, + ad_token_provider, default_headers, env_file_path + """ + return cls( + service_id=settings.get("service_id"), + api_key=settings.get("api_key"), + deployment_name=settings.get("deployment_name"), + endpoint=settings.get("endpoint"), + base_url=settings.get("base_url"), + api_version=settings.get("api_version"), + ad_token=settings.get("ad_token"), + ad_token_provider=settings.get("ad_token_provider"), + default_headers=settings.get("default_headers"), + env_file_path=settings.get("env_file_path"), + ) diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py index e71fd85ef265..f1d6099e8e7d 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py @@ -5,7 +5,9 @@ import sys from collections.abc import AsyncGenerator from functools import reduce -from typing import TYPE_CHECKING, Any, ClassVar +from typing import TYPE_CHECKING, Any, ClassVar, cast + +from semantic_kernel.contents.function_result_content import FunctionResultContent if sys.version_info >= (3, 12): from typing import override # pragma: no cover @@ -14,8 +16,10 @@ from openai import AsyncStream from openai.types.chat.chat_completion import ChatCompletion, Choice -from openai.types.chat.chat_completion_chunk import ChatCompletionChunk +from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDeltaFunctionCall, ChoiceDeltaToolCall from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice +from openai.types.chat.chat_completion_message import FunctionCall +from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall from typing_extensions import deprecated from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase @@ -135,7 +139,7 @@ async def get_chat_message_contents( ) if any(result.terminate for result in results if result is not None): - return
self._create_filter_early_terminate_chat_message_content(chat_history.messages[-len(results) :]) self._update_settings(settings, chat_history, kernel=kernel) else: @@ -235,7 +239,8 @@ async def get_streaming_chat_message_contents( ], ) if any(result.terminate for result in results if result is not None): - return + yield self._create_filter_early_terminate_chat_message_content(chat_history.messages[-len(results) :]) # type: ignore + break self._update_settings(settings, chat_history, kernel=kernel) @@ -301,25 +306,44 @@ def _create_streaming_chat_message_content( items: list[Any] = self._get_tool_calls_from_chat_choice(choice) items.extend(self._get_function_call_from_chat_choice(choice)) - if choice.delta.content is not None: + if choice.delta and choice.delta.content is not None: items.append(StreamingTextContent(choice_index=choice.index, text=choice.delta.content)) return StreamingChatMessageContent( choice_index=choice.index, inner_content=chunk, ai_model_id=self.ai_model_id, metadata=metadata, - role=(AuthorRole(choice.delta.role) if choice.delta.role else AuthorRole.ASSISTANT), + role=(AuthorRole(choice.delta.role) if choice.delta and choice.delta.role else AuthorRole.ASSISTANT), finish_reason=(FinishReason(choice.finish_reason) if choice.finish_reason else None), items=items, ) + def _create_filter_early_terminate_chat_message_content( + self, + messages: list[ChatMessageContent], + ) -> list[ChatMessageContent]: + """Add an early termination message to the chat messages. + + This method combines the FunctionResultContent items from separate ChatMessageContent messages, + and is used in the event that the `context.terminate = True` condition is met. + """ + items: list[Any] = [] + for message in messages: + items.extend([item for item in message.items if isinstance(item, FunctionResultContent)]) + return [ + ChatMessageContent( + role=AuthorRole.TOOL, + items=items, + ) + ] + def _get_metadata_from_chat_response(self, response: ChatCompletion) -> dict[str, Any]: """Get metadata from a chat response.""" return { "id": response.id, "created": response.created, "system_fingerprint": response.system_fingerprint, - "usage": getattr(response, "usage", None), + "usage": response.usage if hasattr(response, "usage") else None, } def _get_metadata_from_streaming_chat_response(self, response: ChatCompletionChunk) -> dict[str, Any]: @@ -339,31 +363,32 @@ def _get_metadata_from_chat_choice(self, choice: Choice | ChunkChoice) -> dict[s def _get_tool_calls_from_chat_choice(self, choice: Choice | ChunkChoice) -> list[FunctionCallContent]: """Get tool calls from a chat choice.""" content = choice.message if isinstance(choice, Choice) else choice.delta - assert hasattr(content, "tool_calls") # nosec - if content.tool_calls is None: - return [] - return [ - FunctionCallContent( - id=tool.id, - index=getattr(tool, "index", None), - name=tool.function.name, - arguments=tool.function.arguments, - ) - for tool in content.tool_calls - if tool.function is not None - ] + if content and (tool_calls := getattr(content, "tool_calls", None)) is not None: + return [ + FunctionCallContent( + id=tool.id, + index=getattr(tool, "index", None), + name=tool.function.name, + arguments=tool.function.arguments, + ) + for tool in cast(list[ChatCompletionMessageToolCall] | list[ChoiceDeltaToolCall], tool_calls) + if tool.function is not None + ] + # When you enable asynchronous content filtering in Azure OpenAI, you may receive empty deltas + return [] def _get_function_call_from_chat_choice(self, choice: Choice | 
ChunkChoice) -> list[FunctionCallContent]: """Get a function call from a chat choice.""" content = choice.message if isinstance(choice, Choice) else choice.delta - assert hasattr(content, "function_call") # nosec - if content.function_call is None: - return [] - return [ - FunctionCallContent( - id="legacy_function_call", name=content.function_call.name, arguments=content.function_call.arguments - ) - ] + if content and (function_call := getattr(content, "function_call", None)) is not None: + function_call = cast(FunctionCall | ChoiceDeltaFunctionCall, function_call) + return [ + FunctionCallContent( + id="legacy_function_call", name=function_call.name, arguments=function_call.arguments + ) + ] + # When you enable asynchronous content filtering in Azure OpenAI, you may receive empty deltas + return [] # endregion # region request preparation diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_handler.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_handler.py index 61df57d7fa4f..c65e0bc01989 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_handler.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_handler.py @@ -4,7 +4,6 @@ from abc import ABC from typing import Any -from numpy import array from openai import AsyncOpenAI, AsyncStream, BadRequestError from openai.types import Completion, CreateEmbeddingResponse from openai.types.chat import ChatCompletion, ChatCompletionChunk @@ -62,9 +61,7 @@ async def _send_embedding_request(self, settings: OpenAIEmbeddingPromptExecution try: response = await self.client.embeddings.create(**settings.prepare_settings_dict()) self.store_usage(response) - # make numpy arrays from the response - # TODO (eavanvalkenburg): the openai response is cast to a list[float], could be used instead of ndarray - return [array(x.embedding) for x in response.data] + return [x.embedding for x in response.data] except Exception as ex: raise ServiceResponseException( f"{type(self)} service failed to generate embeddings", diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_model_types.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_model_types.py index 95e9b4521596..a627a763814f 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_model_types.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_model_types.py @@ -9,3 +9,4 @@ class OpenAIModelTypes(Enum): TEXT = "text" CHAT = "chat" EMBEDDING = "embedding" + IMAGE = "image" diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_completion_base.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_completion_base.py index 8adc7d5d1fb6..fbcb90767e46 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_completion_base.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_completion_base.py @@ -35,6 +35,8 @@ class OpenAITextCompletionBase(OpenAIHandler, TextCompletionClientBase): + """Base class for OpenAI text completion services.""" + MODEL_PROVIDER_NAME: ClassVar[str] = "openai" @override diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding_base.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding_base.py index 73a74884ecb8..3ba02c445c29 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding_base.py +++ 
b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding_base.py @@ -23,6 +23,8 @@ @experimental_class class OpenAITextEmbeddingBase(OpenAIHandler, EmbeddingGeneratorBase): + """Base class for OpenAI text embedding services.""" + @override async def generate_embeddings( self, @@ -69,6 +71,6 @@ async def generate_raw_embeddings( raw_embeddings.extend(raw_embedding) return raw_embeddings - @override def get_prompt_execution_settings_class(self) -> type["PromptExecutionSettings"]: + """Get the request settings class.""" return OpenAIEmbeddingPromptExecutionSettings diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_to_image.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_to_image.py new file mode 100644 index 000000000000..442ca52f1818 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_to_image.py @@ -0,0 +1,85 @@ +# Copyright (c) Microsoft. All rights reserved. + +from collections.abc import Mapping +from typing import Any, TypeVar + +from openai import AsyncOpenAI +from pydantic import ValidationError + +from semantic_kernel.connectors.ai.open_ai.services.open_ai_config_base import OpenAIConfigBase +from semantic_kernel.connectors.ai.open_ai.services.open_ai_model_types import OpenAIModelTypes +from semantic_kernel.connectors.ai.open_ai.services.open_ai_text_to_image_base import OpenAITextToImageBase +from semantic_kernel.connectors.ai.open_ai.settings.open_ai_settings import OpenAISettings +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError + +T_ = TypeVar("T_", bound="OpenAITextToImage") + + +class OpenAITextToImage(OpenAIConfigBase, OpenAITextToImageBase): + """OpenAI Text to Image service.""" + + def __init__( + self, + ai_model_id: str | None = None, + api_key: str | None = None, + org_id: str | None = None, + service_id: str | None = None, + default_headers: Mapping[str, str] | None = None, + async_client: AsyncOpenAI | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> None: + """Initializes a new instance of the OpenAITextToImage class. + + Args: + ai_model_id: OpenAI model name, see + https://platform.openai.com/docs/models + service_id: Service ID tied to the execution settings. + api_key: The optional API key to use. If provided, will override + the env vars or .env file value. + org_id: The optional org ID to use. If provided, will override + the env vars or .env file value. + default_headers: The default headers mapping of string keys to + string values for HTTP requests. (Optional) + async_client: An existing client to use. (Optional) + env_file_path: Use the environment settings file as + a fallback to environment variables. (Optional) + env_file_encoding: The encoding of the environment settings file.
(Optional) + """ + try: + openai_settings = OpenAISettings.create( + api_key=api_key, + org_id=org_id, + text_to_image_model_id=ai_model_id, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + ) + except ValidationError as ex: + raise ServiceInitializationError("Failed to create OpenAI settings.", ex) from ex + if not openai_settings.text_to_image_model_id: + raise ServiceInitializationError("The OpenAI text to image model ID is required.") + super().__init__( + ai_model_id=openai_settings.text_to_image_model_id, + api_key=openai_settings.api_key.get_secret_value() if openai_settings.api_key else None, + ai_model_type=OpenAIModelTypes.IMAGE, + org_id=openai_settings.org_id, + service_id=service_id, + default_headers=default_headers, + client=async_client, + ) + + @classmethod + def from_dict(cls: type[T_], settings: dict[str, Any]) -> T_: + """Initialize an Open AI service from a dictionary of settings. + + Args: + settings: A dictionary of settings for the service. + """ + return cls( + ai_model_id=settings.get("ai_model_id"), + api_key=settings.get("api_key"), + org_id=settings.get("org_id"), + service_id=settings.get("service_id"), + default_headers=settings.get("default_headers", {}), + env_file_path=settings.get("env_file_path"), + ) diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_to_image_base.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_to_image_base.py new file mode 100644 index 000000000000..08e38106d8c0 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_to_image_base.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft. All rights reserved. + +from typing import Any + +from semantic_kernel.connectors.ai.open_ai.services.open_ai_handler import OpenAIHandler +from semantic_kernel.connectors.ai.text_to_image_client_base import TextToImageClientBase +from semantic_kernel.exceptions.service_exceptions import ServiceResponseException + + +class OpenAITextToImageBase(OpenAIHandler, TextToImageClientBase): + """OpenAI text to image client.""" + + async def generate_image(self, description: str, width: int, height: int, **kwargs: Any) -> bytes | str: + """Generate image from text. + + Args: + description: Description of the image. + width: Width of the image, check the openai documentation for the supported sizes. + height: Height of the image, check the openai documentation for the supported sizes. + kwargs: Additional arguments, check the openai images.generate documentation for the supported arguments. + + Returns: + bytes | str: Image bytes or image URL. 
+ """ + try: + result = await self.client.images.generate( + prompt=description, + model=self.ai_model_id, + size=f"{width}x{height}", # type: ignore + response_format="url", + **kwargs, + ) + except Exception as ex: + raise ServiceResponseException(f"Failed to generate image: {ex}") from ex + if not result.data or not result.data[0].url: + raise ServiceResponseException("Failed to generate image.") + return result.data[0].url diff --git a/python/semantic_kernel/connectors/ai/open_ai/settings/azure_open_ai_settings.py b/python/semantic_kernel/connectors/ai/open_ai/settings/azure_open_ai_settings.py index 891f275bf1f0..a5f26db8fb8d 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/settings/azure_open_ai_settings.py +++ b/python/semantic_kernel/connectors/ai/open_ai/settings/azure_open_ai_settings.py @@ -36,6 +36,12 @@ class AzureOpenAISettings(KernelBaseSettings): Resource Management > Deployments in the Azure portal or, alternatively, under Management > Deployments in Azure OpenAI Studio. (Env var AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME) + - text_to_image_deployment_name: str - The name of the Azure Text to Image deployment. This + value will correspond to the custom name you chose for your deployment + when you deployed a model. This value can be found under + Resource Management > Deployments in the Azure portal or, alternatively, + under Management > Deployments in Azure OpenAI Studio. + (Env var AZURE_OPENAI_TEXT_TO_IMAGE_DEPLOYMENT_NAME) - api_key: SecretStr - The API key for the Azure deployment. This value can be found in the Keys & Endpoint section when examining your resource in the Azure portal. You can use either KEY1 or KEY2. @@ -61,6 +67,7 @@ class AzureOpenAISettings(KernelBaseSettings): chat_deployment_name: str | None = None text_deployment_name: str | None = None embedding_deployment_name: str | None = None + text_to_image_deployment_name: str | None = None endpoint: HttpsUrl | None = None base_url: HttpsUrl | None = None api_key: SecretStr | None = None diff --git a/python/semantic_kernel/connectors/ai/open_ai/settings/open_ai_settings.py b/python/semantic_kernel/connectors/ai/open_ai/settings/open_ai_settings.py index f6266cab0f73..c7e82474295c 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/settings/open_ai_settings.py +++ b/python/semantic_kernel/connectors/ai/open_ai/settings/open_ai_settings.py @@ -26,6 +26,8 @@ class OpenAISettings(KernelBaseSettings): (Env var OPENAI_TEXT_MODEL_ID) - embedding_model_id: str | None - The OpenAI embedding model ID to use, for example, text-embedding-ada-002. (Env var OPENAI_EMBEDDING_MODEL_ID) + - text_to_image_model_id: str | None - The OpenAI text to image model ID to use, for example, dall-e-3. + (Env var OPENAI_TEXT_TO_IMAGE_MODEL_ID) - env_file_path: str | None - if provided, the .env settings are read from this file path location """ @@ -36,3 +38,4 @@ class OpenAISettings(KernelBaseSettings): chat_model_id: str | None = None text_model_id: str | None = None embedding_model_id: str | None = None + text_to_image_model_id: str | None = None diff --git a/python/semantic_kernel/connectors/ai/prompt_execution_settings.py b/python/semantic_kernel/connectors/ai/prompt_execution_settings.py index c530c09342a6..b683a58e17fa 100644 --- a/python/semantic_kernel/connectors/ai/prompt_execution_settings.py +++ b/python/semantic_kernel/connectors/ai/prompt_execution_settings.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. 
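Taken together, the pieces above give the OpenAI connector a text-to-image path: the service resolves OPENAI_TEXT_TO_IMAGE_MODEL_ID (or an explicit ai_model_id), and generate_image returns a URL because the base class requests response_format="url". A minimal usage sketch, assuming OPENAI_API_KEY is set in the environment or a .env file (the model ID and prompt below are placeholders):

```python
import asyncio

from semantic_kernel.connectors.ai.open_ai import OpenAITextToImage


async def main() -> None:
    # Falls back to OPENAI_TEXT_TO_IMAGE_MODEL_ID when ai_model_id is omitted.
    service = OpenAITextToImage(ai_model_id="dall-e-3")
    # Width/height must form a size the model supports; 1024x1024 works for dall-e-3.
    url = await service.generate_image(
        description="A watercolor painting of a lighthouse at dawn",
        width=1024,
        height=1024,
    )
    print(url)  # the service returns the image URL, not raw bytes


asyncio.run(main())
```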
import logging -from typing import Any +from typing import Any, TypeVar from pydantic import Field, model_validator @@ -10,6 +10,8 @@ logger = logging.getLogger(__name__) +_T = TypeVar("_T", bound="PromptExecutionSettings") + class PromptExecutionSettings(KernelBaseModel): """Base class for prompt execution settings. @@ -36,7 +38,7 @@ class PromptExecutionSettings(KernelBaseModel): @model_validator(mode="before") @classmethod - def parse_function_choice_behavior(cls, data: dict[str, Any]) -> dict[str, Any]: + def parse_function_choice_behavior(cls: type[_T], data: dict[str, Any]) -> dict[str, Any]: """Parse the function choice behavior data.""" function_choice_behavior_data = data.get("function_choice_behavior") if function_choice_behavior_data: @@ -82,7 +84,7 @@ def prepare_settings_dict(self, **kwargs) -> dict[str, Any]: by_alias=True, ) - def update_from_prompt_execution_settings(self, config: "PromptExecutionSettings") -> None: + def update_from_prompt_execution_settings(self, config: _T) -> None: """Update the prompt execution settings from a completion config.""" if config.service_id is not None: self.service_id = config.service_id @@ -91,7 +93,7 @@ def update_from_prompt_execution_settings(self, config: "PromptExecutionSettings self.unpack_extension_data() @classmethod - def from_prompt_execution_settings(cls, config: "PromptExecutionSettings") -> "PromptExecutionSettings": + def from_prompt_execution_settings(cls: type[_T], config: _T) -> _T: """Create a prompt execution settings from a completion config.""" config.pack_extension_data() return cls( diff --git a/python/semantic_kernel/connectors/ai/text_to_image_client_base.py b/python/semantic_kernel/connectors/ai/text_to_image_client_base.py new file mode 100644 index 000000000000..50ef1d9ebbd5 --- /dev/null +++ b/python/semantic_kernel/connectors/ai/text_to_image_client_base.py @@ -0,0 +1,25 @@ +# Copyright (c) Microsoft. All rights reserved. + +from abc import ABC, abstractmethod +from typing import Any + +from semantic_kernel.services.ai_service_client_base import AIServiceClientBase + + +class TextToImageClientBase(AIServiceClientBase, ABC): + """Base class for text to image client.""" + + @abstractmethod + async def generate_image(self, description: str, width: int, height: int, **kwargs: Any) -> bytes | str: + """Generate image from text. + + Args: + description: Description of the image. + width: Width of the image. + height: Height of the image. + kwargs: Additional arguments. + + Returns: + bytes | str: Image bytes or image URL. 
+ """ + raise NotImplementedError diff --git a/python/semantic_kernel/connectors/memory/astradb/astra_client.py b/python/semantic_kernel/connectors/memory/astradb/astra_client.py index 8129b2b4f552..83dcd3b3ce9d 100644 --- a/python/semantic_kernel/connectors/memory/astradb/astra_client.py +++ b/python/semantic_kernel/connectors/memory/astradb/astra_client.py @@ -16,6 +16,8 @@ @experimental_class class AstraClient: + """AstraClient.""" + def __init__( self, astra_id: str, diff --git a/python/semantic_kernel/connectors/memory/astradb/utils.py b/python/semantic_kernel/connectors/memory/astradb/utils.py index 597fa3ed95ac..b327db0688ba 100644 --- a/python/semantic_kernel/connectors/memory/astradb/utils.py +++ b/python/semantic_kernel/connectors/memory/astradb/utils.py @@ -8,6 +8,8 @@ class AsyncSession: + """A wrapper around aiohttp.ClientSession that can be used as an async context manager.""" + def __init__(self, session: aiohttp.ClientSession = None): """Initializes a new instance of the AsyncSession class.""" self._session = session if session else aiohttp.ClientSession() diff --git a/python/semantic_kernel/connectors/memory/azure_ai_search/__init__.py b/python/semantic_kernel/connectors/memory/azure_ai_search/__init__.py new file mode 100644 index 000000000000..18c97087a2b8 --- /dev/null +++ b/python/semantic_kernel/connectors/memory/azure_ai_search/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft. All rights reserved. + +from semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_collection import AzureAISearchCollection +from semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_store import AzureAISearchStore +from semantic_kernel.connectors.memory.azure_cognitive_search.azure_ai_search_settings import AzureAISearchSettings + +__all__ = ["AzureAISearchCollection", "AzureAISearchSettings", "AzureAISearchStore"] diff --git a/python/semantic_kernel/connectors/memory/azure_ai_search/azure_ai_search_collection.py b/python/semantic_kernel/connectors/memory/azure_ai_search/azure_ai_search_collection.py new file mode 100644 index 000000000000..7d61a030f30a --- /dev/null +++ b/python/semantic_kernel/connectors/memory/azure_ai_search/azure_ai_search_collection.py @@ -0,0 +1,214 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +import logging +import sys +from collections.abc import Sequence +from typing import Any, ClassVar, Generic, TypeVar + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + +from azure.search.documents.aio import SearchClient +from azure.search.documents.indexes.aio import SearchIndexClient +from azure.search.documents.indexes.models import SearchIndex +from pydantic import ValidationError + +from semantic_kernel.connectors.memory.azure_ai_search.utils import ( + data_model_definition_to_azure_ai_search_index, + get_search_client, + get_search_index_client, +) +from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition +from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection +from semantic_kernel.data.vector_store_record_fields import VectorStoreRecordVectorField +from semantic_kernel.exceptions import MemoryConnectorException, MemoryConnectorInitializationError +from semantic_kernel.utils.experimental_decorator import experimental_class + +logger: logging.Logger = logging.getLogger(__name__) + +TModel = TypeVar("TModel") + + +@experimental_class +class AzureAISearchCollection(VectorStoreRecordCollection[str, TModel], Generic[TModel]): + """Azure AI Search collection implementation.""" + + search_client: SearchClient + search_index_client: SearchIndexClient + supported_key_types: ClassVar[list[str] | None] = ["str"] + supported_vector_types: ClassVar[list[str] | None] = ["float", "int"] + + def __init__( + self, + data_model_type: type[TModel], + data_model_definition: VectorStoreRecordDefinition | None = None, + collection_name: str | None = None, + search_index_client: SearchIndexClient | None = None, + search_client: SearchClient | None = None, + **kwargs: Any, + ) -> None: + """Initializes a new instance of the AzureAISearchCollection class. + + Args: + data_model_type (type[TModel]): The type of the data model. + data_model_definition (VectorStoreRecordDefinition): The model definition, optional. + collection_name (str): The name of the collection, optional. + search_index_client (SearchIndexClient): The search index client for interacting with Azure AI Search, + used for creating and deleting indexes. + search_client (SearchClient): The search client for interacting with Azure AI Search, + used for record operations. + **kwargs: Additional keyword arguments, including: + The same keyword arguments used for AzureAISearchVectorStore: + search_endpoint: str | None = None, + api_key: str | None = None, + azure_credentials: AzureKeyCredential | None = None, + token_credentials: AsyncTokenCredential | TokenCredential | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None + + """ + if search_client and search_index_client: + if not collection_name: + collection_name = search_client._index_name + elif search_client._index_name != collection_name: + raise MemoryConnectorInitializationError( + "Search client and search index client have different index names." 
+ ) + super().__init__( + data_model_type=data_model_type, + data_model_definition=data_model_definition, + collection_name=collection_name, + search_client=search_client, + search_index_client=search_index_client, + ) + return + + if search_index_client: + if not collection_name: + raise MemoryConnectorInitializationError("Collection name is required.") + super().__init__( + data_model_type=data_model_type, + data_model_definition=data_model_definition, + collection_name=collection_name, + search_client=get_search_client( + search_index_client=search_index_client, collection_name=collection_name + ), + search_index_client=search_index_client, + ) + return + + from semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_settings import ( + AzureAISearchSettings, + ) + + try: + azure_ai_search_settings = AzureAISearchSettings.create( + env_file_path=kwargs.get("env_file_path", None), + endpoint=kwargs.get("search_endpoint", None), + api_key=kwargs.get("api_key", None), + env_file_encoding=kwargs.get("env_file_encoding", None), + index_name=collection_name, + ) + except ValidationError as exc: + raise MemoryConnectorInitializationError("Failed to create Azure Cognitive Search settings.") from exc + search_index_client = get_search_index_client( + azure_ai_search_settings=azure_ai_search_settings, + azure_credential=kwargs.get("azure_credentials", None), + token_credential=kwargs.get("token_credentials", None), + ) + if not azure_ai_search_settings.index_name: + raise MemoryConnectorInitializationError("Collection name is required.") + + super().__init__( + data_model_type=data_model_type, + data_model_definition=data_model_definition, + collection_name=azure_ai_search_settings.index_name, + search_client=get_search_client( + search_index_client=search_index_client, collection_name=azure_ai_search_settings.index_name + ), + search_index_client=search_index_client, + ) + + @override + async def _inner_upsert( + self, + records: Sequence[Any], + **kwargs: Any, + ) -> Sequence[str]: + if not isinstance(records, list): + records = list(records) + results = await self.search_client.merge_or_upload_documents(documents=records, **kwargs) + return [result.key for result in results] # type: ignore + + @override + async def _inner_get(self, keys: Sequence[str], **kwargs: Any) -> Sequence[dict[str, Any]]: + client = self.search_client + if "selected_fields" in kwargs: + selected_fields = kwargs["selected_fields"] + elif "include_vector" in kwargs and not kwargs["include_vector"]: + selected_fields = [ + name + for name, field in self.data_model_definition.fields.items() + if not isinstance(field, VectorStoreRecordVectorField) + ] + else: + selected_fields = ["*"] + + result = await asyncio.gather( + *[client.get_document(key=key, selected_fields=selected_fields) for key in keys], + return_exceptions=True, + ) + return [res for res in result if not isinstance(res, BaseException)] + + @override + async def _inner_delete(self, keys: Sequence[str], **kwargs: Any) -> None: + await self.search_client.delete_documents(documents=[{self._key_field_name: key} for key in keys]) + + @override + def _serialize_dicts_to_store_models(self, records: Sequence[dict[str, Any]], **kwargs: Any) -> Sequence[Any]: + return records + + @override + def _deserialize_store_models_to_dicts(self, records: Sequence[Any], **kwargs: Any) -> Sequence[dict[str, Any]]: + return records + + @override + async def create_collection(self, **kwargs) -> None: + """Create a new collection in Azure AI Search. 
+ + Args: + **kwargs: Additional keyword arguments. + index (SearchIndex): The search index to create, if this is supplied + it is used instead of an index created based on the definition. + encryption_key (SearchResourceEncryptionKey): The encryption key to use, + not used when index is supplied. + other kwargs are passed to the create_index method. + """ + if index := kwargs.pop("index", None): + if isinstance(index, SearchIndex): + await self.search_index_client.create_index(index=index, **kwargs) + return + raise MemoryConnectorException("Invalid index type supplied.") + await self.search_index_client.create_index( + index=data_model_definition_to_azure_ai_search_index( + collection_name=self.collection_name, + definition=self.data_model_definition, + encryption_key=kwargs.pop("encryption_key", None), + ), + **kwargs, + ) + + @override + async def does_collection_exist(self, **kwargs) -> bool: + if "params" not in kwargs: + kwargs["params"] = {"select": ["name"]} + return self.collection_name in [ + index_name async for index_name in self.search_index_client.list_index_names(**kwargs) + ] + + @override + async def delete_collection(self, **kwargs) -> None: + await self.search_index_client.delete_index(self.collection_name, **kwargs) diff --git a/python/semantic_kernel/connectors/memory/azure_ai_search/azure_ai_search_settings.py b/python/semantic_kernel/connectors/memory/azure_ai_search/azure_ai_search_settings.py new file mode 100644 index 000000000000..99fc5620d289 --- /dev/null +++ b/python/semantic_kernel/connectors/memory/azure_ai_search/azure_ai_search_settings.py @@ -0,0 +1,25 @@ +# Copyright (c) Microsoft. All rights reserved. + +from typing import ClassVar + +from pydantic import SecretStr + +from semantic_kernel.kernel_pydantic import HttpsUrl, KernelBaseSettings +from semantic_kernel.utils.experimental_decorator import experimental_class + + +@experimental_class +class AzureAISearchSettings(KernelBaseSettings): + """Azure AI Search model settings currently used by the AzureCognitiveSearchMemoryStore connector. + + Args: + - api_key: SecretStr - Azure AI Search API key (Env var AZURE_AI_SEARCH_API_KEY) + - endpoint: HttpsUrl - Azure AI Search endpoint (Env var AZURE_AI_SEARCH_ENDPOINT) + - index_name: str - Azure AI Search index name (Env var AZURE_AI_SEARCH_INDEX_NAME) + """ + + env_prefix: ClassVar[str] = "AZURE_AI_SEARCH_" + + api_key: SecretStr | None = None + endpoint: HttpsUrl + index_name: str | None = None diff --git a/python/semantic_kernel/connectors/memory/azure_ai_search/azure_ai_search_store.py b/python/semantic_kernel/connectors/memory/azure_ai_search/azure_ai_search_store.py new file mode 100644 index 000000000000..fdfb00e06f4a --- /dev/null +++ b/python/semantic_kernel/connectors/memory/azure_ai_search/azure_ai_search_store.py @@ -0,0 +1,123 @@ +# Copyright (c) Microsoft. All rights reserved.
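To make the collection class above concrete, here is a hedged usage sketch. HotelModel and its fields are invented for illustration; the vectorstoremodel decorator and the public upsert/get methods are assumed from the VectorStoreRecordCollection base class elsewhere in this PR, and AZURE_AI_SEARCH_ENDPOINT / AZURE_AI_SEARCH_API_KEY are expected in the environment:

```python
from dataclasses import dataclass, field
from typing import Annotated

from semantic_kernel.connectors.memory.azure_ai_search import AzureAISearchCollection
from semantic_kernel.data.vector_store_model_decorator import vectorstoremodel
from semantic_kernel.data.vector_store_record_fields import (
    VectorStoreRecordDataField,
    VectorStoreRecordKeyField,
    VectorStoreRecordVectorField,
)


@vectorstoremodel
@dataclass
class HotelModel:
    # Hypothetical model for illustration; dimensions=4 keeps the example small.
    id: Annotated[str, VectorStoreRecordKeyField()]
    description: Annotated[str, VectorStoreRecordDataField(is_full_text_searchable=True)]
    description_vector: Annotated[list[float], VectorStoreRecordVectorField(dimensions=4)] = field(
        default_factory=list
    )


async def demo() -> None:
    # Endpoint and credentials are resolved through AzureAISearchSettings.
    collection = AzureAISearchCollection(data_model_type=HotelModel, collection_name="hotels")
    await collection.create_collection()  # builds the index from the model definition
    await collection.upsert(
        HotelModel(id="1", description="Quiet seaside hotel", description_vector=[0.1, 0.2, 0.3, 0.4])
    )
    hotel = await collection.get("1")
```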
+ +import logging +import sys +from typing import TYPE_CHECKING, Any, TypeVar + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + +from azure.search.documents.aio import SearchClient +from azure.search.documents.indexes.aio import SearchIndexClient +from pydantic import ValidationError + +from semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_collection import ( + AzureAISearchCollection, +) +from semantic_kernel.connectors.memory.azure_ai_search.utils import get_search_client, get_search_index_client +from semantic_kernel.data.vector_store import VectorStore +from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition +from semantic_kernel.exceptions import MemoryConnectorInitializationError +from semantic_kernel.utils.experimental_decorator import experimental_class + +if TYPE_CHECKING: + from azure.core.credentials import AzureKeyCredential, TokenCredential + from azure.core.credentials_async import AsyncTokenCredential + + from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection + + +logger: logging.Logger = logging.getLogger(__name__) + +TModel = TypeVar("TModel") + + +@experimental_class +class AzureAISearchStore(VectorStore): + """Azure AI Search store implementation.""" + + search_index_client: SearchIndexClient + + def __init__( + self, + search_endpoint: str | None = None, + api_key: str | None = None, + azure_credentials: "AzureKeyCredential | None" = None, + token_credentials: "AsyncTokenCredential | TokenCredential | None" = None, + search_index_client: SearchIndexClient | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + ) -> None: + """Initializes a new instance of the AzureAISearchStore client. + + Args: + search_endpoint (str): The endpoint of the Azure AI Search service, optional. + Can be read from environment variables. + api_key (str): Azure AI Search API key, optional. Can be read from environment variables. + azure_credentials (AzureKeyCredential ): Azure AI Search credentials, optional. + token_credentials (AsyncTokenCredential | TokenCredential): Azure AI Search token credentials, optional. + search_index_client (SearchIndexClient): The search index client, optional. + env_file_path (str): Use the environment settings file as a fallback + to environment variables. + env_file_encoding (str): The encoding of the environment settings file. 
+ + """ + from semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_settings import ( + AzureAISearchSettings, + ) + + if not search_index_client: + try: + azure_ai_search_settings = AzureAISearchSettings.create( + env_file_path=env_file_path, + endpoint=search_endpoint, + api_key=api_key, + env_file_encoding=env_file_encoding, + ) + except ValidationError as exc: + raise MemoryConnectorInitializationError("Failed to create Azure AI Search settings.") from exc + search_index_client = get_search_index_client( + azure_ai_search_settings=azure_ai_search_settings, + azure_credential=azure_credentials, + token_credential=token_credentials, + ) + + super().__init__(search_index_client=search_index_client) + + @override + def get_collection( + self, + collection_name: str, + data_model_type: type[TModel], + data_model_definition: VectorStoreRecordDefinition | None = None, + search_client: SearchClient | None = None, + **kwargs: Any, + ) -> "VectorStoreRecordCollection": + """Get a AzureAISearchCollection tied to a collection. + + Args: + collection_name (str): The name of the collection. + data_model_type (type[TModel]): The type of the data model. + data_model_definition (VectorStoreRecordDefinition | None): The model fields, optional. + search_client (SearchClient | None): The search client for interacting with Azure AI Search, + will be created if not supplied. + **kwargs: Additional keyword arguments, passed to the collection constructor. + """ + if collection_name not in self.vector_record_collections: + self.vector_record_collections[collection_name] = AzureAISearchCollection( + data_model_type=data_model_type, + data_model_definition=data_model_definition, + search_index_client=self.search_index_client, + search_client=search_client or get_search_client(self.search_index_client, collection_name), + collection_name=collection_name, + **kwargs, + ) + return self.vector_record_collections[collection_name] + + @override + async def list_collection_names(self, **kwargs: Any) -> list[str]: + if "params" not in kwargs: + kwargs["params"] = {"select": ["name"]} + return [index async for index in self.search_index_client.list_index_names(**kwargs)] diff --git a/python/semantic_kernel/connectors/memory/azure_ai_search/const.py b/python/semantic_kernel/connectors/memory/azure_ai_search/const.py new file mode 100644 index 000000000000..0cb33e3d7497 --- /dev/null +++ b/python/semantic_kernel/connectors/memory/azure_ai_search/const.py @@ -0,0 +1,51 @@ +# Copyright (c) Microsoft. All rights reserved. 
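The store wraps the same settings resolution and hands out collections on demand, caching each one under its name on repeated calls. A short sketch, reusing the hypothetical HotelModel from the collection example above:

```python
from semantic_kernel.connectors.memory.azure_ai_search import AzureAISearchStore

# Reads AZURE_AI_SEARCH_ENDPOINT / AZURE_AI_SEARCH_API_KEY from the environment or a .env file.
store = AzureAISearchStore()


async def demo() -> None:
    # Subsequent calls with the same name return the cached collection instance.
    collection = store.get_collection("hotels", data_model_type=HotelModel)
    print(await store.list_collection_names())
```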
+ +from azure.search.documents.indexes.models import ( + ExhaustiveKnnAlgorithmConfiguration, + ExhaustiveKnnParameters, + HnswAlgorithmConfiguration, + HnswParameters, + SearchFieldDataType, + VectorSearchAlgorithmMetric, +) + +from semantic_kernel.data.const import DistanceFunction, IndexKind + +INDEX_ALGORITHM_MAP = { + IndexKind.HNSW: (HnswAlgorithmConfiguration, HnswParameters), + IndexKind.FLAT: (ExhaustiveKnnAlgorithmConfiguration, ExhaustiveKnnParameters), + "default": (HnswAlgorithmConfiguration, HnswParameters), +} + +DISTANCE_FUNCTION_MAP = { + DistanceFunction.COSINE: VectorSearchAlgorithmMetric.COSINE, + DistanceFunction.DOT_PROD: VectorSearchAlgorithmMetric.DOT_PRODUCT, + DistanceFunction.EUCLIDEAN: VectorSearchAlgorithmMetric.EUCLIDEAN, + "default": VectorSearchAlgorithmMetric.COSINE, +} + +TYPE_MAPPER_DATA = { + "str": SearchFieldDataType.String, + "int": SearchFieldDataType.Int64, + "float": SearchFieldDataType.Double, + "bool": SearchFieldDataType.Boolean, + "list[str]": SearchFieldDataType.Collection(SearchFieldDataType.String), + "list[int]": SearchFieldDataType.Collection(SearchFieldDataType.Int64), + "list[float]": SearchFieldDataType.Collection(SearchFieldDataType.Double), + "list[bool]": SearchFieldDataType.Collection(SearchFieldDataType.Boolean), + "default": SearchFieldDataType.String, +} + +TYPE_MAPPER_VECTOR = { + "float": SearchFieldDataType.Collection(SearchFieldDataType.Single), + "int": "Collection(Edm.Int16)", + "binary": "Collection(Edm.Byte)", + "default": SearchFieldDataType.Collection(SearchFieldDataType.Single), +} + +__all__ = [ + "DISTANCE_FUNCTION_MAP", + "INDEX_ALGORITHM_MAP", + "TYPE_MAPPER_DATA", + "TYPE_MAPPER_VECTOR", +] diff --git a/python/semantic_kernel/connectors/memory/azure_ai_search/utils.py b/python/semantic_kernel/connectors/memory/azure_ai_search/utils.py new file mode 100644 index 000000000000..a3f3a549d532 --- /dev/null +++ b/python/semantic_kernel/connectors/memory/azure_ai_search/utils.py @@ -0,0 +1,178 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +import contextlib +import logging +from typing import TYPE_CHECKING, Any + +from azure.core.credentials import AzureKeyCredential, TokenCredential +from azure.search.documents.aio import SearchClient +from azure.search.documents.indexes.aio import SearchIndexClient +from azure.search.documents.indexes.models import ( + SearchField, + SearchIndex, + SearchResourceEncryptionKey, + SimpleField, + VectorSearch, + VectorSearchProfile, +) + +from semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_settings import AzureAISearchSettings +from semantic_kernel.connectors.memory.azure_ai_search.const import ( + DISTANCE_FUNCTION_MAP, + INDEX_ALGORITHM_MAP, + TYPE_MAPPER_DATA, + TYPE_MAPPER_VECTOR, +) +from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition +from semantic_kernel.data.vector_store_record_fields import ( + VectorStoreRecordDataField, + VectorStoreRecordKeyField, + VectorStoreRecordVectorField, +) +from semantic_kernel.exceptions import ServiceInitializationError +from semantic_kernel.utils.experimental_decorator import experimental_function +from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + +logger: logging.Logger = logging.getLogger(__name__) + + +def get_search_client(search_index_client: SearchIndexClient, collection_name: str, **kwargs: Any) -> SearchClient: + """Create a search client for a collection.""" + return SearchClientWrapper( + search_index_client._endpoint, collection_name, search_index_client._credential, **kwargs + ) + + +def get_search_index_client( + azure_ai_search_settings: AzureAISearchSettings, + azure_credential: AzureKeyCredential | None = None, + token_credential: "AsyncTokenCredential | TokenCredential | None" = None, +) -> SearchIndexClient: + """Return a client for Azure Cognitive Search. + + Args: + azure_ai_search_settings (AzureAISearchSettings): Azure Cognitive Search settings. + azure_credential (AzureKeyCredential): Optional Azure credentials (default: {None}). + token_credential (TokenCredential): Optional Token credential (default: {None}). 
+ """ + # Credentials + credential: "AzureKeyCredential | AsyncTokenCredential | TokenCredential | None" = None + if azure_ai_search_settings.api_key: + credential = AzureKeyCredential(azure_ai_search_settings.api_key.get_secret_value()) + elif azure_credential: + credential = azure_credential + elif token_credential: + credential = token_credential + else: + raise ServiceInitializationError("Error: missing Azure AI Search client credentials.") + + return SearchIndexClientWrapper( + endpoint=str(azure_ai_search_settings.endpoint), + credential=credential, # type: ignore + headers=prepend_semantic_kernel_to_user_agent({}) if APP_INFO else None, + ) + + +@experimental_function +def data_model_definition_to_azure_ai_search_index( + collection_name: str, + definition: VectorStoreRecordDefinition, + encryption_key: SearchResourceEncryptionKey | None = None, +) -> SearchIndex: + """Convert a VectorStoreRecordDefinition to an Azure AI Search index.""" + fields = [] + search_profiles = [] + search_algos = [] + + for field in definition.fields.values(): + if isinstance(field, VectorStoreRecordDataField): + if not field.property_type: + logger.debug(f"Field {field.name} has not specified type, defaulting to Edm.String.") + type_ = TYPE_MAPPER_DATA[field.property_type or "default"] + fields.append( + SearchField( + name=field.name, + type=type_, + filterable=field.is_filterable, + # searchable is set first on the value of is_full_text_searchable, + # if it is None it checks the field type, if text then it is searchable + searchable=type_ in ("Edm.String", "Collection(Edm.String)") + if field.is_full_text_searchable is None + else field.is_full_text_searchable, + sortable=True, + hidden=False, + ) + ) + elif isinstance(field, VectorStoreRecordKeyField): + assert field.name # nosec + fields.append( + SimpleField( + name=field.name, + type="Edm.String", # hardcoded, only allowed type for key + key=True, + filterable=True, + searchable=True, + ) + ) + elif isinstance(field, VectorStoreRecordVectorField): + if not field.property_type: + logger.debug(f"Field {field.name} has not specified type, defaulting to Collection(Edm.Single).") + if not field.index_kind: + logger.debug(f"Field {field.name} has not specified index kind, defaulting to hnsw.") + if not field.distance_function: + logger.debug(f"Field {field.name} has not specified distance function, defaulting to cosine.") + profile_name = f"{field.name}_profile" + algo_name = f"{field.name}_algorithm" + fields.append( + SearchField( + name=field.name, + type=TYPE_MAPPER_VECTOR[field.property_type or "default"], + searchable=True, + vector_search_dimensions=field.dimensions, + vector_search_profile_name=profile_name, + hidden=False, + ) + ) + search_profiles.append( + VectorSearchProfile( + name=profile_name, + algorithm_configuration_name=algo_name, + ) + ) + algo_class, algo_params = INDEX_ALGORITHM_MAP[field.index_kind or "default"] + search_algos.append( + algo_class( + name=algo_name, + parameters=algo_params( + metric=DISTANCE_FUNCTION_MAP[field.distance_function or "default"], + ), + ) + ) + return SearchIndex( + name=collection_name, + fields=fields, + vector_search=VectorSearch(profiles=search_profiles, algorithms=search_algos), + encryption_key=encryption_key, + ) + + +class SearchIndexClientWrapper(SearchIndexClient): + """Wrapper to make sure the connection is closed when the object is deleted.""" + + def __del__(self) -> None: + """Async close connection, done when the object is deleted, used when SK creates a client.""" + with 
contextlib.suppress(Exception): + asyncio.get_running_loop().create_task(self.close()) + + +class SearchClientWrapper(SearchClient): + """Wrapper to make sure the connection is closed when the object is deleted.""" + + def __del__(self) -> None: + """Async close connection, done when the object is deleted, used when SK creates a client.""" + with contextlib.suppress(Exception): + asyncio.get_running_loop().create_task(self.close()) diff --git a/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_cognitive_search_memory_store.py b/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_cognitive_search_memory_store.py index 8c4c63991f42..cc3aa0978580 100644 --- a/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_cognitive_search_memory_store.py +++ b/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_cognitive_search_memory_store.py @@ -40,6 +40,8 @@ @experimental_class class AzureCognitiveSearchMemoryStore(MemoryStoreBase): + """Azure Cognitive Search Memory Store.""" + _search_index_client: SearchIndexClient = None _vector_size: int = None diff --git a/python/semantic_kernel/connectors/memory/azure_cosmosdb/azure_cosmos_db_store_api.py b/python/semantic_kernel/connectors/memory/azure_cosmosdb/azure_cosmos_db_store_api.py index 68fdd93e272b..47b8d065086c 100644 --- a/python/semantic_kernel/connectors/memory/azure_cosmosdb/azure_cosmos_db_store_api.py +++ b/python/semantic_kernel/connectors/memory/azure_cosmosdb/azure_cosmos_db_store_api.py @@ -11,6 +11,8 @@ # Abstract class similar to the original data store that allows API level abstraction @experimental_class class AzureCosmosDBStoreApi(ABC): + """AzureCosmosDBStoreApi.""" + @abstractmethod async def create_collection(self, collection_name: str) -> None: """Creates a new collection in the data store. 
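For reference, a sketch of what the data_model_definition_to_azure_ai_search_index helper added earlier in this diff produces for a small record definition. The field names and the dimensions value are illustrative, and the field constructors are assumed to accept these keyword arguments:

```python
from semantic_kernel.connectors.memory.azure_ai_search.utils import (
    data_model_definition_to_azure_ai_search_index,
)
from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
from semantic_kernel.data.vector_store_record_fields import (
    VectorStoreRecordDataField,
    VectorStoreRecordKeyField,
    VectorStoreRecordVectorField,
)

definition = VectorStoreRecordDefinition(
    fields={
        "id": VectorStoreRecordKeyField(name="id"),
        "text": VectorStoreRecordDataField(name="text", property_type="str", is_full_text_searchable=True),
        "vector": VectorStoreRecordVectorField(name="vector", property_type="float", dimensions=4),
    }
)

index = data_model_definition_to_azure_ai_search_index("demo-collection", definition)
# Per the mappings in const.py: the key becomes a filterable Edm.String, "text" a
# searchable Edm.String, and "vector" a Collection(Edm.Single) with an HNSW profile
# using the cosine metric (the "default" entries, since neither index_kind nor
# distance_function is set above).
```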
diff --git a/python/semantic_kernel/connectors/memory/azure_cosmosdb/mongo_vcore_store_api.py b/python/semantic_kernel/connectors/memory/azure_cosmosdb/mongo_vcore_store_api.py index d9d5034300fc..338bd9e7a234 100644 --- a/python/semantic_kernel/connectors/memory/azure_cosmosdb/mongo_vcore_store_api.py +++ b/python/semantic_kernel/connectors/memory/azure_cosmosdb/mongo_vcore_store_api.py @@ -19,6 +19,8 @@ @experimental_class class MongoStoreApi(AzureCosmosDBStoreApi): + """MongoStoreApi class for the Azure Cosmos DB Mongo store.""" + database = None collection_name: str index_name = None @@ -338,10 +340,8 @@ async def get_nearest_match( @staticmethod def __serialize_metadata(record: MemoryRecord) -> str: - return json.dumps( - { - "text": record.text, - "description": record.description, - "additional_metadata": record.additional_metadata, - } - ) + return json.dumps({ + "text": record.text, + "description": record.description, + "additional_metadata": record.additional_metadata, + }) diff --git a/python/semantic_kernel/connectors/memory/chroma/chroma_memory_store.py b/python/semantic_kernel/connectors/memory/chroma/chroma_memory_store.py index 52bd91e31387..b0d8734449df 100644 --- a/python/semantic_kernel/connectors/memory/chroma/chroma_memory_store.py +++ b/python/semantic_kernel/connectors/memory/chroma/chroma_memory_store.py @@ -27,6 +27,8 @@ @experimental_class class ChromaMemoryStore(MemoryStoreBase): + """ChromaMemoryStore provides an interface to store and retrieve data using ChromaDB.""" + _client: "chromadb.Client" def __init__( diff --git a/python/semantic_kernel/connectors/memory/milvus/milvus_memory_store.py b/python/semantic_kernel/connectors/memory/milvus/milvus_memory_store.py index 6801c35f40f2..b3a5fe3275e7 100644 --- a/python/semantic_kernel/connectors/memory/milvus/milvus_memory_store.py +++ b/python/semantic_kernel/connectors/memory/milvus/milvus_memory_store.py @@ -146,6 +146,8 @@ def create_fields(dimensions: int) -> list[FieldSchema]: @experimental_class class MilvusMemoryStore(MemoryStoreBase): + """Memory store based on Milvus.""" + def __init__( self, uri: str = "http://localhost:19530", diff --git a/python/semantic_kernel/connectors/memory/pinecone/pinecone_memory_store.py b/python/semantic_kernel/connectors/memory/pinecone/pinecone_memory_store.py index a7f06e102fcc..7f244d94d83b 100644 --- a/python/semantic_kernel/connectors/memory/pinecone/pinecone_memory_store.py +++ b/python/semantic_kernel/connectors/memory/pinecone/pinecone_memory_store.py @@ -4,7 +4,7 @@ from typing import NamedTuple from numpy import ndarray -from pinecone import FetchResponse, IndexDescription, IndexList, Pinecone, ServerlessSpec +from pinecone import FetchResponse, IndexList, IndexModel, Pinecone, ServerlessSpec from pydantic import ValidationError from semantic_kernel.connectors.memory.pinecone.pinecone_settings import PineconeSettings @@ -111,7 +111,7 @@ async def create_collection( ) self.collection_names_cache.add(collection_name) - async def describe_collection(self, collection_name: str) -> IndexDescription | None: + async def describe_collection(self, collection_name: str) -> IndexModel | None: """Gets the description of the index. 
Args: diff --git a/python/semantic_kernel/connectors/memory/qdrant/__init__.py b/python/semantic_kernel/connectors/memory/qdrant/__init__.py index e8672b9c8b4a..951a6ed733ca 100644 --- a/python/semantic_kernel/connectors/memory/qdrant/__init__.py +++ b/python/semantic_kernel/connectors/memory/qdrant/__init__.py @@ -1,7 +1,10 @@ # Copyright (c) Microsoft. All rights reserved. +from semantic_kernel.connectors.memory.qdrant.qdrant_collection import QdrantCollection from semantic_kernel.connectors.memory.qdrant.qdrant_memory_store import ( QdrantMemoryStore, ) +from semantic_kernel.connectors.memory.qdrant.qdrant_settings import QdrantSettings +from semantic_kernel.connectors.memory.qdrant.qdrant_store import QdrantStore -__all__ = ["QdrantMemoryStore"] +__all__ = ["QdrantCollection", "QdrantMemoryStore", "QdrantSettings", "QdrantStore"] diff --git a/python/semantic_kernel/connectors/memory/qdrant/const.py b/python/semantic_kernel/connectors/memory/qdrant/const.py new file mode 100644 index 000000000000..749635b35f39 --- /dev/null +++ b/python/semantic_kernel/connectors/memory/qdrant/const.py @@ -0,0 +1,25 @@ +# Copyright (c) Microsoft. All rights reserved. + +from qdrant_client.models import Datatype, Distance + +from semantic_kernel.data.const import DistanceFunction + +DISTANCE_FUNCTION_MAP = { + DistanceFunction.COSINE: Distance.COSINE, + DistanceFunction.DOT_PROD: Distance.DOT, + DistanceFunction.EUCLIDEAN: Distance.EUCLID, + DistanceFunction.MANHATTAN: Distance.MANHATTAN, + "default": Distance.COSINE, +} + +TYPE_MAPPER_VECTOR = { + "float": Datatype.FLOAT32, + "int": Datatype.UINT8, + "binary": Datatype.UINT8, + "default": Datatype.FLOAT32, +} + +__all__ = [ + "DISTANCE_FUNCTION_MAP", + "TYPE_MAPPER_VECTOR", +] diff --git a/python/semantic_kernel/connectors/memory/qdrant/qdrant_collection.py b/python/semantic_kernel/connectors/memory/qdrant/qdrant_collection.py new file mode 100644 index 000000000000..1333813d1e8f --- /dev/null +++ b/python/semantic_kernel/connectors/memory/qdrant/qdrant_collection.py @@ -0,0 +1,257 @@ +# Copyright (c) Microsoft. All rights reserved. 
+
+import logging
+import sys
+from collections.abc import Mapping, Sequence
+from typing import Any, ClassVar, TypeVar
+
+if sys.version_info >= (3, 12):
+    from typing import override  # pragma: no cover
+else:
+    from typing_extensions import override  # pragma: no cover
+
+from pydantic import ValidationError
+from qdrant_client.async_qdrant_client import AsyncQdrantClient
+from qdrant_client.models import PointStruct, VectorParams
+
+from semantic_kernel.connectors.memory.qdrant.const import DISTANCE_FUNCTION_MAP, TYPE_MAPPER_VECTOR
+from semantic_kernel.connectors.memory.qdrant.utils import AsyncQdrantClientWrapper
+from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
+from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection
+from semantic_kernel.data.vector_store_record_fields import VectorStoreRecordVectorField
+from semantic_kernel.exceptions import (
+    MemoryConnectorInitializationError,
+    VectorStoreModelValidationError,
+)
+from semantic_kernel.exceptions.memory_connector_exceptions import MemoryConnectorException
+from semantic_kernel.kernel_types import OneOrMany
+from semantic_kernel.utils.experimental_decorator import experimental_class
+from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent
+
+logger: logging.Logger = logging.getLogger(__name__)
+
+TModel = TypeVar("TModel")
+TKey = TypeVar("TKey", str, int)
+
+
+@experimental_class
+class QdrantCollection(VectorStoreRecordCollection[str | int, TModel]):
+    """A QdrantCollection is a memory collection that uses Qdrant as the backend."""
+
+    qdrant_client: AsyncQdrantClient
+    named_vectors: bool
+    supported_key_types: ClassVar[list[str] | None] = ["str", "int"]
+    supported_vector_types: ClassVar[list[str] | None] = ["float", "int"]
+
+    def __init__(
+        self,
+        data_model_type: type[TModel],
+        data_model_definition: VectorStoreRecordDefinition | None = None,
+        collection_name: str | None = None,
+        named_vectors: bool = True,
+        url: str | None = None,
+        api_key: str | None = None,
+        host: str | None = None,
+        port: int | None = None,
+        grpc_port: int | None = None,
+        path: str | None = None,
+        location: str | None = None,
+        prefer_grpc: bool | None = None,
+        client: AsyncQdrantClient | None = None,
+        env_file_path: str | None = None,
+        env_file_encoding: str | None = None,
+        **kwargs: Any,
+    ) -> None:
+        """Initializes a new instance of the QdrantCollection.
+
+        When using a hosted Qdrant service, make sure to supply url and api_key.
+        When using a qdrant server, make sure to supply url or host and optionally port.
+        When using qdrant local, either supply path to use a persisted qdrant instance
+        or set location to ":memory:" to use an in-memory qdrant instance.
+        When nothing is supplied, it defaults to an in-memory qdrant instance.
+        You can also supply an async qdrant client directly.
+
+        Args:
+            data_model_type (type[TModel]): The type of the data model.
+            data_model_definition (VectorStoreRecordDefinition): The model fields, optional.
+            collection_name (str): The name of the collection, optional.
+            named_vectors (bool): If true, vectors are stored with a name (default: True).
+            url (str): The URL of the Qdrant server (default: {None}).
+            api_key (str): The API key for the Qdrant server (default: {None}).
+            host (str): The host of the Qdrant server (default: {None}).
+            port (int): The port of the Qdrant server (default: {None}).
+            grpc_port (int): The gRPC port of the Qdrant server (default: {None}).
+            path (str): The path of the Qdrant server (default: {None}).
+            location (str): The location of the Qdrant server (default: {None}).
+            prefer_grpc (bool): If true, gRPC will be preferred (default: {None}).
+            client (AsyncQdrantClient): The Qdrant client to use (default: {None}).
+            env_file_path (str): Use the environment settings file as a fallback to environment variables.
+            env_file_encoding (str): The encoding of the environment settings file.
+            **kwargs: Additional keyword arguments passed to the client constructor.
+
+        """
+        if client:
+            super().__init__(
+                data_model_type=data_model_type,
+                data_model_definition=data_model_definition,
+                collection_name=collection_name,
+                qdrant_client=client,  # type: ignore
+                named_vectors=named_vectors,  # type: ignore
+            )
+            return
+
+        from semantic_kernel.connectors.memory.qdrant.qdrant_settings import QdrantSettings
+
+        try:
+            settings = QdrantSettings.create(
+                url=url,
+                api_key=api_key,
+                host=host,
+                port=port,
+                grpc_port=grpc_port,
+                path=path,
+                location=location,
+                prefer_grpc=prefer_grpc,
+                env_file_path=env_file_path,
+                env_file_encoding=env_file_encoding,
+            )
+        except ValidationError as ex:
+            raise MemoryConnectorInitializationError("Failed to create Qdrant settings.", ex) from ex
+        if APP_INFO:
+            kwargs.setdefault("metadata", {})
+            kwargs["metadata"] = prepend_semantic_kernel_to_user_agent(kwargs["metadata"])
+        try:
+            client = AsyncQdrantClientWrapper(**settings.model_dump(exclude_none=True), **kwargs)
+        except ValueError as ex:
+            raise MemoryConnectorInitializationError("Failed to create Qdrant client.", ex) from ex
+        super().__init__(
+            data_model_type=data_model_type,
+            data_model_definition=data_model_definition,
+            collection_name=collection_name,
+            qdrant_client=client,
+            named_vectors=named_vectors,
+        )
+
+    @override
+    async def _inner_upsert(
+        self,
+        records: Sequence[PointStruct],
+        **kwargs: Any,
+    ) -> Sequence[TKey]:
+        await self.qdrant_client.upsert(
+            collection_name=self.collection_name,
+            points=records,
+            **kwargs,
+        )
+        return [record.id for record in records]
+
+    @override
+    async def _inner_get(self, keys: Sequence[TKey], **kwargs: Any) -> OneOrMany[Any] | None:
+        if "with_vectors" not in kwargs:
+            kwargs["with_vectors"] = kwargs.pop("include_vectors", True)
+        return await self.qdrant_client.retrieve(
+            collection_name=self.collection_name,
+            ids=keys,
+            **kwargs,
+        )
+
+    @override
+    async def _inner_delete(self, keys: Sequence[TKey], **kwargs: Any) -> None:
+        await self.qdrant_client.delete(
+            collection_name=self.collection_name,
+            points_selector=keys,
+            **kwargs,
+        )
+
+    @override
+    def _serialize_dicts_to_store_models(
+        self,
+        records: Sequence[dict[str, Any]],
+        **kwargs: Any,
+    ) -> Sequence[PointStruct]:
+        return [
+            PointStruct(
+                id=record.pop(self._key_field_name),
+                vector=record.pop(self.data_model_definition.vector_field_names[0])
+                if not self.named_vectors
+                else {field: record.pop(field) for field in self.data_model_definition.vector_field_names},
+                payload=record,
+            )
+            for record in records
+        ]
+
+    @override
+    def _deserialize_store_models_to_dicts(
+        self,
+        records: Sequence[PointStruct],
+        **kwargs: Any,
+    ) -> Sequence[dict[str, Any]]:
+        return [
+            {
+                self._key_field_name: record.id,
+                **(record.payload if record.payload else {}),
+                **(
+                    record.vector
+                    if isinstance(record.vector, dict)
+                    else {self.data_model_definition.vector_field_names[0]: record.vector}
+                ),
+            }
+            for record in records
+        ]
+
+    @override
+    async def create_collection(self, **kwargs) -> None:
+        """Create a new
collection in Qdrant.
+
+        Args:
+            **kwargs: Additional keyword arguments.
+                You can supply all keyword arguments supported by the QdrantClient.create_collection method.
+                This method creates the vectors_config automatically when not supplied; other params are not set.
+                The collection name is taken from the collection_name property and cannot be overridden.
+        """
+        if "vectors_config" not in kwargs:
+            vectors_config: VectorParams | Mapping[str, VectorParams] = {}
+            if self.named_vectors:
+                for field in self.data_model_definition.vector_field_names:
+                    vector = self.data_model_definition.fields[field]
+                    assert isinstance(vector, VectorStoreRecordVectorField)  # nosec
+                    if not vector.dimensions:
+                        raise MemoryConnectorException("Vector field must have dimensions.")
+                    vectors_config[field] = VectorParams(
+                        size=vector.dimensions,
+                        distance=DISTANCE_FUNCTION_MAP[vector.distance_function or "default"],
+                        datatype=TYPE_MAPPER_VECTOR[vector.property_type or "default"],
+                    )
+            else:
+                vector = self.data_model_definition.fields[self.data_model_definition.vector_field_names[0]]
+                assert isinstance(vector, VectorStoreRecordVectorField)  # nosec
+                if not vector.dimensions:
+                    raise MemoryConnectorException("Vector field must have dimensions.")
+                vectors_config = VectorParams(
+                    size=vector.dimensions,
+                    distance=DISTANCE_FUNCTION_MAP[vector.distance_function or "default"],
+                    datatype=TYPE_MAPPER_VECTOR[vector.property_type or "default"],
+                )
+            kwargs["vectors_config"] = vectors_config
+        if "collection_name" not in kwargs:
+            kwargs["collection_name"] = self.collection_name
+        await self.qdrant_client.create_collection(**kwargs)
+
+    @override
+    async def does_collection_exist(self, **kwargs) -> bool:
+        return await self.qdrant_client.collection_exists(self.collection_name, **kwargs)
+
+    @override
+    async def delete_collection(self, **kwargs) -> None:
+        await self.qdrant_client.delete_collection(self.collection_name, **kwargs)
+
+    def _validate_data_model(self):
+        """Internal function that should be overloaded by child classes to validate datatypes, etc.
+
+        This should take the VectorStoreRecordDefinition from the item_type and validate it against the store.
+
+        Checks should include: allowed naming of parameters, allowed data types, allowed vector dimensions.
+        """
+        super()._validate_data_model()
+        if len(self.data_model_definition.vector_field_names) > 1 and not self.named_vectors:
+            raise VectorStoreModelValidationError("Only one vector field is allowed when not using named vectors.")
diff --git a/python/semantic_kernel/connectors/memory/qdrant/qdrant_memory_store.py b/python/semantic_kernel/connectors/memory/qdrant/qdrant_memory_store.py
index d92c71952662..8e5d5d0f2166 100644
--- a/python/semantic_kernel/connectors/memory/qdrant/qdrant_memory_store.py
+++ b/python/semantic_kernel/connectors/memory/qdrant/qdrant_memory_store.py
@@ -24,6 +24,8 @@
 @experimental_class
 class QdrantMemoryStore(MemoryStoreBase):
+    """QdrantMemoryStore."""
+
     _qdrantclient: QdrantClient
 
     def __init__(
diff --git a/python/semantic_kernel/connectors/memory/qdrant/qdrant_settings.py b/python/semantic_kernel/connectors/memory/qdrant/qdrant_settings.py
new file mode 100644
index 000000000000..e80fb4fe3fc0
--- /dev/null
+++ b/python/semantic_kernel/connectors/memory/qdrant/qdrant_settings.py
@@ -0,0 +1,42 @@
+# Copyright (c) Microsoft. All rights reserved.
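A hedged usage sketch of the new collection API: the dict-based model, the field names, and the 4-dimensional vector are made up for illustration, and the field constructors are assumed to accept the arguments shown (only dimensions is clearly required by create_collection above).

```python
import asyncio

from semantic_kernel.connectors.memory.qdrant import QdrantCollection
from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
from semantic_kernel.data.vector_store_record_fields import (
    VectorStoreRecordDataField,
    VectorStoreRecordKeyField,
    VectorStoreRecordVectorField,
)

# Hypothetical record shape: a key, one payload field, one 4-dimensional vector.
definition = VectorStoreRecordDefinition(
    fields={
        "id": VectorStoreRecordKeyField(),
        "text": VectorStoreRecordDataField(),
        "embedding": VectorStoreRecordVectorField(dimensions=4),
    }
)


async def demo() -> None:
    collection = QdrantCollection(
        data_model_type=dict,
        data_model_definition=definition,
        collection_name="demo",
        named_vectors=False,  # a single unnamed vector; see _validate_data_model above
        location=":memory:",  # in-process qdrant, no server required
    )
    await collection.create_collection()  # vectors_config is derived from the definition
    print(await collection.does_collection_exist())
    await collection.delete_collection()


asyncio.run(demo())
```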
+
+from typing import ClassVar
+
+from pydantic import HttpUrl, SecretStr, model_validator
+
+from semantic_kernel.kernel_pydantic import KernelBaseSettings
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+IN_MEMORY_STRING = ":memory:"
+
+
+@experimental_class
+class QdrantSettings(KernelBaseSettings):
+    """Qdrant settings currently used by the Qdrant Vector Record Store."""
+
+    env_prefix: ClassVar[str] = "QDRANT_"
+
+    url: HttpUrl | None = None
+    api_key: SecretStr | None = None
+    host: str | None = None
+    port: int | None = None
+    grpc_port: int | None = None
+    path: str | None = None
+    location: str | None = None
+    prefer_grpc: bool = False
+
+    @model_validator(mode="before")
+    def validate_settings(cls, values):
+        """Default to an in-memory instance when no connection info is supplied."""
+        if "url" not in values and "host" not in values and "path" not in values and "location" not in values:
+            values["location"] = IN_MEMORY_STRING
+        return values
+
+    def model_dump(self, **kwargs):
+        """Dump the model, unwrapping the api_key secret and casting the url to str."""
+        dump = super().model_dump(**kwargs)
+        if "api_key" in dump:
+            dump["api_key"] = dump["api_key"].get_secret_value()
+        if "url" in dump:
+            dump["url"] = str(dump["url"])
+        return dump
diff --git a/python/semantic_kernel/connectors/memory/qdrant/qdrant_store.py b/python/semantic_kernel/connectors/memory/qdrant/qdrant_store.py
new file mode 100644
index 000000000000..85f0a1d7eaf5
--- /dev/null
+++ b/python/semantic_kernel/connectors/memory/qdrant/qdrant_store.py
@@ -0,0 +1,135 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import logging
+import sys
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, Any, TypeVar
+
+if sys.version_info >= (3, 12):
+    from typing import override  # pragma: no cover
+else:
+    from typing_extensions import override  # pragma: no cover
+
+from pydantic import ValidationError
+from qdrant_client.async_qdrant_client import AsyncQdrantClient
+
+from semantic_kernel.connectors.memory.qdrant.qdrant_collection import QdrantCollection
+from semantic_kernel.data.vector_store import VectorStore
+from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
+from semantic_kernel.exceptions import MemoryConnectorInitializationError
+from semantic_kernel.utils.experimental_decorator import experimental_class
+from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent
+
+if TYPE_CHECKING:
+    from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection
+
+logger: logging.Logger = logging.getLogger(__name__)
+
+TModel = TypeVar("TModel")
+TKey = TypeVar("TKey", str, int)
+
+
+@experimental_class
+class QdrantStore(VectorStore):
+    """A QdrantStore is a memory store that uses Qdrant as the backend."""
+
+    qdrant_client: AsyncQdrantClient
+
+    def __init__(
+        self,
+        url: str | None = None,
+        api_key: str | None = None,
+        host: str | None = None,
+        port: int | None = None,
+        grpc_port: int | None = None,
+        path: str | None = None,
+        location: str | None = None,
+        prefer_grpc: bool | None = None,
+        client: AsyncQdrantClient | None = None,
+        env_file_path: str | None = None,
+        env_file_encoding: str | None = None,
+        **kwargs: Any,
+    ) -> None:
+        """Initializes a new instance of the QdrantStore.
+
+        When using a hosted Qdrant service, make sure to supply url and api_key.
+        When using a qdrant server, make sure to supply url or host and optionally port.
+        When using qdrant local, either supply path to use a persisted qdrant instance
+        or set location to ":memory:" to use an in-memory qdrant instance.
+        When nothing is supplied, it defaults to an in-memory qdrant instance.
+        You can also supply an async qdrant client directly.
+
+        Args:
+            url (str): The URL of the Qdrant server (default: {None}).
+            api_key (str): The API key for the Qdrant server (default: {None}).
+            host (str): The host of the Qdrant server (default: {None}).
+            port (int): The port of the Qdrant server (default: {None}).
+            grpc_port (int): The gRPC port of the Qdrant server (default: {None}).
+            path (str): The path of the Qdrant server (default: {None}).
+            location (str): The location of the Qdrant server (default: {None}).
+            prefer_grpc (bool): If true, gRPC will be preferred (default: {None}).
+            client (AsyncQdrantClient): The Qdrant client to use (default: {None}).
+            env_file_path (str): Use the environment settings file as a fallback to environment variables.
+            env_file_encoding (str): The encoding of the environment settings file.
+            **kwargs: Additional keyword arguments passed to the client constructor.
+
+        """
+        if client:
+            super().__init__(qdrant_client=client, **kwargs)
+            return
+
+        from semantic_kernel.connectors.memory.qdrant.qdrant_settings import QdrantSettings
+
+        try:
+            settings = QdrantSettings.create(
+                url=url,
+                api_key=api_key,
+                host=host,
+                port=port,
+                grpc_port=grpc_port,
+                path=path,
+                location=location,
+                prefer_grpc=prefer_grpc,
+                env_file_path=env_file_path,
+                env_file_encoding=env_file_encoding,
+            )
+        except ValidationError as ex:
+            raise MemoryConnectorInitializationError("Failed to create Qdrant settings.", ex) from ex
+        if APP_INFO:
+            kwargs.setdefault("metadata", {})
+            kwargs["metadata"] = prepend_semantic_kernel_to_user_agent(kwargs["metadata"])
+        try:
+            client = AsyncQdrantClient(**settings.model_dump(exclude_none=True), **kwargs)
+        except ValueError as ex:
+            raise MemoryConnectorInitializationError("Failed to create Qdrant client.", ex) from ex
+        super().__init__(qdrant_client=client)
+
+    def get_collection(
+        self,
+        collection_name: str,
+        data_model_type: type[TModel],
+        data_model_definition: VectorStoreRecordDefinition | None = None,
+        **kwargs: Any,
+    ) -> "VectorStoreRecordCollection":
+        """Get a QdrantCollection tied to a collection.
+
+        Args:
+            collection_name (str): The name of the collection.
+            data_model_type (type[TModel]): The type of the data model.
+            data_model_definition (VectorStoreRecordDefinition | None): The model fields, optional.
+            **kwargs: Additional keyword arguments, passed to the collection constructor.
+        """
+        if collection_name not in self.vector_record_collections:
+            self.vector_record_collections[collection_name] = QdrantCollection[data_model_type](
+                data_model_type=data_model_type,
+                data_model_definition=data_model_definition,
+                collection_name=collection_name,
+                client=self.qdrant_client,
+                **kwargs,
+            )
+        return self.vector_record_collections[collection_name]
+
+    @override
+    async def list_collection_names(self, **kwargs: Any) -> Sequence[str]:
+        collections = await self.qdrant_client.get_collections()
+        return [collection.name for collection in collections.collections]
diff --git a/python/semantic_kernel/connectors/memory/qdrant/utils.py b/python/semantic_kernel/connectors/memory/qdrant/utils.py
new file mode 100644
index 000000000000..34fbe4b3f6a2
--- /dev/null
+++ b/python/semantic_kernel/connectors/memory/qdrant/utils.py
@@ -0,0 +1,15 @@
+# Copyright (c) Microsoft. All rights reserved.
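The store level mirrors this: get_collection caches one QdrantCollection per name and hands it the store's own client. A sketch under the same assumptions, reusing the hypothetical definition from the previous example:

```python
import asyncio

from semantic_kernel.connectors.memory.qdrant import QdrantStore


async def demo() -> None:
    # QdrantSettings falls back to ":memory:" when no connection info is given.
    store = QdrantStore(location=":memory:")
    collection = store.get_collection(
        "demo",
        data_model_type=dict,
        data_model_definition=definition,  # the hypothetical definition from the sketch above
    )
    await collection.create_collection()
    print(await store.list_collection_names())  # ["demo"]


asyncio.run(demo())
```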
+
+import asyncio
+import contextlib
+
+from qdrant_client.async_qdrant_client import AsyncQdrantClient
+
+
+class AsyncQdrantClientWrapper(AsyncQdrantClient):
+    """Wrapper to make sure the connection is closed when the object is deleted."""
+
+    def __del__(self) -> None:
+        """Asynchronously close the connection when the object is deleted; used when SK creates the client."""
+        with contextlib.suppress(Exception):
+            asyncio.get_running_loop().create_task(self.close())
diff --git a/python/semantic_kernel/connectors/memory/redis/__init__.py b/python/semantic_kernel/connectors/memory/redis/__init__.py
index 16e086af74cd..9469edb6b20b 100644
--- a/python/semantic_kernel/connectors/memory/redis/__init__.py
+++ b/python/semantic_kernel/connectors/memory/redis/__init__.py
@@ -1,8 +1,18 @@
 # Copyright (c) Microsoft. All rights reserved.
 
+from semantic_kernel.connectors.memory.redis.const import RedisCollectionTypes
+from semantic_kernel.connectors.memory.redis.redis_collection import RedisHashsetCollection, RedisJsonCollection
 from semantic_kernel.connectors.memory.redis.redis_memory_store import (
     RedisMemoryStore,
 )
 from semantic_kernel.connectors.memory.redis.redis_settings import RedisSettings
+from semantic_kernel.connectors.memory.redis.redis_store import RedisStore
 
-__all__ = ["RedisMemoryStore", "RedisSettings"]
+__all__ = [
+    "RedisCollectionTypes",
+    "RedisHashsetCollection",
+    "RedisJsonCollection",
+    "RedisMemoryStore",
+    "RedisSettings",
+    "RedisStore",
+]
diff --git a/python/semantic_kernel/connectors/memory/redis/const.py b/python/semantic_kernel/connectors/memory/redis/const.py
new file mode 100644
index 000000000000..490b0915a190
--- /dev/null
+++ b/python/semantic_kernel/connectors/memory/redis/const.py
@@ -0,0 +1,39 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+
+from enum import Enum
+
+from redis.commands.search.indexDefinition import IndexType
+
+from semantic_kernel.data.const import DistanceFunction
+
+
+class RedisCollectionTypes(str, Enum):
+    JSON = "json"
+    HASHSET = "hashset"
+
+
+INDEX_TYPE_MAP = {
+    RedisCollectionTypes.JSON: IndexType.JSON,
+    RedisCollectionTypes.HASHSET: IndexType.HASH,
+}
+
+DISTANCE_FUNCTION_MAP = {
+    DistanceFunction.COSINE: "COSINE",
+    DistanceFunction.DOT_PROD: "IP",
+    DistanceFunction.EUCLIDEAN: "L2",
+    "default": "COSINE",
+}
+
+TYPE_MAPPER_VECTOR = {
+    "float": "FLOAT32",
+    "int": "FLOAT16",
+    "binary": "FLOAT16",
+    "ndarray": "FLOAT32",
+    "default": "FLOAT32",
+}
+
+__all__ = [
+    "DISTANCE_FUNCTION_MAP",
+    "TYPE_MAPPER_VECTOR",
+]
diff --git a/python/semantic_kernel/connectors/memory/redis/redis_collection.py b/python/semantic_kernel/connectors/memory/redis/redis_collection.py
new file mode 100644
index 000000000000..e5a5d328a917
--- /dev/null
+++ b/python/semantic_kernel/connectors/memory/redis/redis_collection.py
@@ -0,0 +1,333 @@
+# Copyright (c) Microsoft. All rights reserved.
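Like the Qdrant maps, these Redis constants are read with an `x or "default"` lookup, so a field that leaves distance_function or property_type unset resolves to COSINE/FLOAT32. A small sketch of the lookup convention:

```python
from semantic_kernel.connectors.memory.redis.const import DISTANCE_FUNCTION_MAP, TYPE_MAPPER_VECTOR
from semantic_kernel.data.const import DistanceFunction

distance_function = None  # e.g. field.distance_function when the model sets none
property_type = "float"   # e.g. field.property_type

print(DISTANCE_FUNCTION_MAP[distance_function or "default"])  # COSINE
print(TYPE_MAPPER_VECTOR[property_type or "default"])         # FLOAT32
print(DISTANCE_FUNCTION_MAP[DistanceFunction.EUCLIDEAN])      # L2
```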
+
+import asyncio
+import json
+import logging
+import sys
+from collections.abc import Sequence
+from copy import copy
+from typing import Any, ClassVar, TypeVar
+
+if sys.version_info >= (3, 12):
+    from typing import override  # pragma: no cover
+else:
+    from typing_extensions import override  # pragma: no cover
+
+import numpy as np
+from pydantic import ValidationError
+from redis.asyncio.client import Redis
+from redis.commands.search.indexDefinition import IndexDefinition
+
+from semantic_kernel.connectors.memory.redis.const import INDEX_TYPE_MAP, RedisCollectionTypes
+from semantic_kernel.connectors.memory.redis.utils import RedisWrapper, data_model_definition_to_redis_fields
+from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
+from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection
+from semantic_kernel.data.vector_store_record_fields import (
+    VectorStoreRecordKeyField,
+    VectorStoreRecordVectorField,
+)
+from semantic_kernel.exceptions.memory_connector_exceptions import (
+    MemoryConnectorException,
+    MemoryConnectorInitializationError,
+)
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+logger: logging.Logger = logging.getLogger(__name__)
+
+TModel = TypeVar("TModel")
+
+
+@experimental_class
+class RedisCollection(VectorStoreRecordCollection[str, TModel]):
+    """A vector store record collection implementation using Redis."""
+
+    redis_database: Redis
+    prefix_collection_name_to_key_names: bool
+    collection_type: RedisCollectionTypes
+    supported_key_types: ClassVar[list[str] | None] = ["str"]
+    supported_vector_types: ClassVar[list[str] | None] = ["float"]
+
+    def __init__(
+        self,
+        data_model_type: type[TModel],
+        data_model_definition: VectorStoreRecordDefinition | None = None,
+        collection_name: str | None = None,
+        redis_database: Redis | None = None,
+        prefix_collection_name_to_key_names: bool = True,
+        collection_type: RedisCollectionTypes = RedisCollectionTypes.HASHSET,
+        connection_string: str | None = None,
+        env_file_path: str | None = None,
+        env_file_encoding: str | None = None,
+    ) -> None:
+        """Initializes a RedisCollection, a record collection backed by a Redis connection.
+
+        See documentation about connections: https://redis-py.readthedocs.io/en/stable/connections.html
+        See documentation about vector attributes: https://redis.io/docs/stack/search/reference/vectors.
+
+        """
+        if redis_database:
+            super().__init__(
+                data_model_type=data_model_type,
+                data_model_definition=data_model_definition,
+                collection_name=collection_name,
+                redis_database=redis_database,
+                prefix_collection_name_to_key_names=prefix_collection_name_to_key_names,
+                collection_type=collection_type,
+            )
+            return
+        try:
+            from semantic_kernel.connectors.memory.redis.redis_settings import RedisSettings
+
+            redis_settings = RedisSettings.create(
+                connection_string=connection_string,
+                env_file_path=env_file_path,
+                env_file_encoding=env_file_encoding,
+            )
+        except ValidationError as ex:
+            raise MemoryConnectorInitializationError("Failed to create Redis settings.", ex) from ex
+        super().__init__(
+            data_model_type=data_model_type,
+            data_model_definition=data_model_definition,
+            collection_name=collection_name,
+            redis_database=RedisWrapper.from_url(redis_settings.connection_string.get_secret_value()),
+            prefix_collection_name_to_key_names=prefix_collection_name_to_key_names,
+            collection_type=collection_type,
+        )
+
+    def _get_redis_key(self, key: str) -> str:
+        if self.prefix_collection_name_to_key_names:
+            return f"{self.collection_name}:{key}"
+        return key
+
+    def _unget_redis_key(self, key: str) -> str:
+        if self.prefix_collection_name_to_key_names and ":" in key:
+            return key[len(self.collection_name) + 1 :]
+        return key
+
+    @override
+    async def create_collection(self, **kwargs) -> None:
+        """Create a new index in Redis.
+
+        Args:
+            **kwargs: Additional keyword arguments.
+                fields (list[Fields]): The fields to create the index with; when not supplied,
+                    these are created from the data_model_definition.
+                index_definition (IndexDefinition): The index definition to use; when supplied,
+                    it is used instead of an index built from the data model definition.
+                Other kwargs are passed to the create_index method.
+ """ + if (index_definition := kwargs.pop("index_definition", None)) and (fields := kwargs.pop("fields", None)): + if isinstance(index_definition, IndexDefinition): + await self.redis_database.ft(self.collection_name).create_index( + fields, definition=index_definition, **kwargs + ) + return + raise MemoryConnectorException("Invalid index type supplied.") + fields = data_model_definition_to_redis_fields(self.data_model_definition, self.collection_type) + index_definition = IndexDefinition( + prefix=f"{self.collection_name}:", index_type=INDEX_TYPE_MAP[self.collection_type] + ) + await self.redis_database.ft(self.collection_name).create_index(fields, definition=index_definition, **kwargs) + + @override + async def does_collection_exist(self, **kwargs) -> bool: + try: + await self.redis_database.ft(self.collection_name).info() + return True + except Exception: + return False + + @override + async def delete_collection(self, **kwargs) -> None: + exists = await self.does_collection_exist() + if exists: + await self.redis_database.ft(self.collection_name).dropindex(**kwargs) + else: + logger.debug("Collection does not exist, skipping deletion.") + + +@experimental_class +class RedisHashsetCollection(RedisCollection): + """A vector store record collection implementation using Redis Hashsets.""" + + def __init__( + self, + data_model_type: type[TModel], + data_model_definition: VectorStoreRecordDefinition | None = None, + collection_name: str | None = None, + redis_database: Redis | None = None, + prefix_collection_name_to_key_names: bool = False, + connection_string: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + **kwargs: Any, + ) -> None: + """RedisMemoryStore is an abstracted interface to interact with a Redis node connection. + + See documentation about connections: https://redis-py.readthedocs.io/en/stable/connections.html + See documentation about vector attributes: https://redis.io/docs/stack/search/reference/vectors. 
+ + """ + super().__init__( + data_model_type=data_model_type, + data_model_definition=data_model_definition, + collection_name=collection_name, + redis_database=redis_database, + prefix_collection_name_to_key_names=prefix_collection_name_to_key_names, + collection_type=RedisCollectionTypes.HASHSET, + connection_string=connection_string, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + **kwargs, + ) + + @override + async def _inner_upsert(self, records: Sequence[Any], **kwargs: Any) -> Sequence[str]: + return await asyncio.gather(*[self._single_upsert(record) for record in records]) + + async def _single_upsert(self, upsert_record: Any) -> str: + await self.redis_database.hset(**upsert_record) + return self._unget_redis_key(upsert_record["name"]) + + @override + async def _inner_get(self, keys: Sequence[str], **kwargs) -> Sequence[dict[bytes, bytes]] | None: + results = await asyncio.gather(*[self.redis_database.hgetall(self._get_redis_key(key)) for key in keys]) + return [result for result in results if result] + + @override + async def _inner_delete(self, keys: Sequence[str], **kwargs: Any) -> None: + await self.redis_database.delete(*[self._get_redis_key(key) for key in keys]) + + @override + def _serialize_dicts_to_store_models( + self, + records: Sequence[dict[str, Any]], + **kwargs: Any, + ) -> Sequence[dict[str, Any]]: + """Serialize the dict to a Redis store model.""" + results = [] + for record in records: + result = {"mapping": {}} + metadata = {} + for name, field in self.data_model_definition.fields.items(): + if isinstance(field, VectorStoreRecordVectorField): + if not isinstance(record[name], np.ndarray): + record[name] = np.array(record[name]) + result["mapping"][name] = record[name].tobytes() + continue + if isinstance(field, VectorStoreRecordKeyField): + result["name"] = self._get_redis_key(record[name]) + continue + metadata[name] = record[field.name] + result["mapping"]["metadata"] = json.dumps(metadata) + results.append(result) + return results + + @override + def _deserialize_store_models_to_dicts( + self, + records: Sequence[dict[bytes, bytes]], + keys: Sequence[str], + **kwargs: Any, + ) -> Sequence[dict[str, Any]]: + results = [] + for key, record in zip(keys, records): + if record: + flattened = json.loads(record[b"metadata"]) + for name, field in self.data_model_definition.fields.items(): + if isinstance(field, VectorStoreRecordKeyField): + flattened[name] = self._unget_redis_key(key) + if isinstance(field, VectorStoreRecordVectorField): + flattened[name] = np.frombuffer(record[name.encode()]).tolist() + results.append(flattened) + return results + + +@experimental_class +class RedisJsonCollection(RedisCollection): + """A vector store record collection implementation using Redis Json.""" + + def __init__( + self, + data_model_type: type[TModel], + data_model_definition: VectorStoreRecordDefinition | None = None, + collection_name: str | None = None, + redis_database: Redis | None = None, + prefix_collection_name_to_key_names: bool = False, + connection_string: str | None = None, + env_file_path: str | None = None, + env_file_encoding: str | None = None, + **kwargs: Any, + ) -> None: + """RedisMemoryStore is an abstracted interface to interact with a Redis node connection. + + See documentation about connections: https://redis-py.readthedocs.io/en/stable/connections.html + See documentation about vector attributes: https://redis.io/docs/stack/search/reference/vectors. 
+ + """ + super().__init__( + data_model_type=data_model_type, + data_model_definition=data_model_definition, + collection_name=collection_name, + redis_database=redis_database, + prefix_collection_name_to_key_names=prefix_collection_name_to_key_names, + collection_type=RedisCollectionTypes.JSON, + connection_string=connection_string, + env_file_path=env_file_path, + env_file_encoding=env_file_encoding, + **kwargs, + ) + + @override + async def _inner_upsert(self, records: Sequence[Any], **kwargs: Any) -> Sequence[str]: + return await asyncio.gather(*[self._single_upsert(record) for record in records]) + + async def _single_upsert(self, upsert_record: Any) -> str: + await self.redis_database.json().set(upsert_record["name"], "$", upsert_record["value"]) + return self._unget_redis_key(upsert_record["name"]) + + @override + async def _inner_get(self, keys: Sequence[str], **kwargs) -> Sequence[dict[bytes, bytes]] | None: + kwargs_copy = copy(kwargs) + kwargs_copy.pop("include_vectors", None) + results = await self.redis_database.json().mget([self._get_redis_key(key) for key in keys], "$", **kwargs_copy) + return [result[0] for result in results if result] + + @override + async def _inner_delete(self, keys: Sequence[str], **kwargs: Any) -> None: + await asyncio.gather(*[self.redis_database.json().delete(key, **kwargs) for key in keys]) + + @override + def _serialize_dicts_to_store_models( + self, + records: Sequence[dict[str, Any]], + **kwargs: Any, + ) -> Sequence[dict[str, Any]]: + """Serialize the dict to a Redis store model.""" + results = [] + for record in records: + result = {"value": {}} + for name, field in self.data_model_definition.fields.items(): + if isinstance(field, VectorStoreRecordKeyField): + result["name"] = self._get_redis_key(record[name]) + continue + if isinstance(field, VectorStoreRecordVectorField): + if isinstance(record[name], np.ndarray): + record[name] = record[name].tolist() + result["value"][name] = record[name] + result["value"][name] = record[name] + results.append(result) + return results + + @override + def _deserialize_store_models_to_dicts( + self, + records: Sequence[dict[str, Any]], + keys: Sequence[str], + **kwargs: Any, + ) -> Sequence[dict[str, Any]]: + results = [] + for key, record in zip(keys, records): + record[self.data_model_definition.key_field_name] = self._unget_redis_key(key) + results.append(record) + return results diff --git a/python/semantic_kernel/connectors/memory/redis/redis_store.py b/python/semantic_kernel/connectors/memory/redis/redis_store.py new file mode 100644 index 000000000000..e0b5d8991ee3 --- /dev/null +++ b/python/semantic_kernel/connectors/memory/redis/redis_store.py @@ -0,0 +1,105 @@ +# Copyright (c) Microsoft. All rights reserved. 
+
+import logging
+import sys
+from collections.abc import Sequence
+from typing import Any, TypeVar
+
+if sys.version_info >= (3, 12):
+    from typing import override  # pragma: no cover
+else:
+    from typing_extensions import override  # pragma: no cover
+
+from pydantic import ValidationError
+from redis.asyncio.client import Redis
+
+from semantic_kernel.connectors.memory.redis.const import RedisCollectionTypes
+from semantic_kernel.connectors.memory.redis.redis_collection import RedisHashsetCollection, RedisJsonCollection
+from semantic_kernel.connectors.memory.redis.utils import RedisWrapper
+from semantic_kernel.data.vector_store import VectorStore
+from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
+from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection
+from semantic_kernel.exceptions.memory_connector_exceptions import MemoryConnectorInitializationError
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+logger: logging.Logger = logging.getLogger(__name__)
+
+TModel = TypeVar("TModel")
+
+
+@experimental_class
+class RedisStore(VectorStore):
+    """Create a Redis Vector Store."""
+
+    redis_database: Redis
+
+    def __init__(
+        self,
+        connection_string: str | None = None,
+        env_file_path: str | None = None,
+        env_file_encoding: str | None = None,
+        redis_database: Redis | None = None,
+        **kwargs: Any,
+    ) -> None:
+        """Initializes a RedisStore, a vector store backed by a Redis connection.
+
+        See documentation about connections: https://redis-py.readthedocs.io/en/stable/connections.html
+        See documentation about vector attributes: https://redis.io/docs/stack/search/reference/vectors.
+
+        """
+        if redis_database:
+            super().__init__(redis_database=redis_database)
+            return
+        try:
+            from semantic_kernel.connectors.memory.redis.redis_settings import RedisSettings
+
+            redis_settings = RedisSettings.create(
+                connection_string=connection_string,
+                env_file_path=env_file_path,
+                env_file_encoding=env_file_encoding,
+            )
+        except ValidationError as ex:
+            raise MemoryConnectorInitializationError("Failed to create Redis settings.", ex) from ex
+        super().__init__(redis_database=RedisWrapper.from_url(redis_settings.connection_string.get_secret_value()))
+
+    @override
+    async def list_collection_names(self, **kwargs) -> Sequence[str]:
+        return [name.decode() for name in await self.redis_database.execute_command("FT._LIST")]
+
+    @override
+    def get_collection(
+        self,
+        collection_name: str,
+        data_model_type: type[TModel],
+        data_model_definition: VectorStoreRecordDefinition | None = None,
+        collection_type: RedisCollectionTypes = RedisCollectionTypes.HASHSET,
+        **kwargs: Any,
+    ) -> "VectorStoreRecordCollection":
+        """Get a RedisCollection.
+
+        Args:
+            collection_name (str): The name of the collection.
+            data_model_type (type[TModel]): The type of the data model.
+            data_model_definition (VectorStoreRecordDefinition | None): The model fields, optional.
+            collection_type (RedisCollectionTypes): The type of the collection, can be JSON or HASHSET.
+            **kwargs: Additional keyword arguments, passed to the collection constructor.
+ """ + if collection_name not in self.vector_record_collections: + if collection_type == RedisCollectionTypes.HASHSET: + self.vector_record_collections[collection_name] = RedisHashsetCollection( + data_model_type=data_model_type, + data_model_definition=data_model_definition, + collection_name=collection_name, + redis_database=self.redis_database, + **kwargs, + ) + else: + self.vector_record_collections[collection_name] = RedisJsonCollection( + data_model_type=data_model_type, + data_model_definition=data_model_definition, + collection_name=collection_name, + redis_database=self.redis_database, + **kwargs, + ) + return self.vector_record_collections[collection_name] diff --git a/python/semantic_kernel/connectors/memory/redis/utils.py b/python/semantic_kernel/connectors/memory/redis/utils.py index babadccad08b..0c0ccbee56ea 100644 --- a/python/semantic_kernel/connectors/memory/redis/utils.py +++ b/python/semantic_kernel/connectors/memory/redis/utils.py @@ -1,17 +1,29 @@ # Copyright (c) Microsoft. All rights reserved. +import asyncio +import contextlib import json from datetime import datetime from typing import Any import numpy as np -from redis import Redis +from redis.asyncio.client import Redis from redis.commands.search.document import Document - +from redis.commands.search.field import Field as RedisField +from redis.commands.search.field import NumericField, TagField, TextField, VectorField + +from semantic_kernel.connectors.memory.azure_ai_search.const import DISTANCE_FUNCTION_MAP +from semantic_kernel.connectors.memory.redis.const import TYPE_MAPPER_VECTOR, RedisCollectionTypes +from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition +from semantic_kernel.data.vector_store_record_fields import ( + VectorStoreRecordDataField, + VectorStoreRecordKeyField, + VectorStoreRecordVectorField, +) from semantic_kernel.memory.memory_record import MemoryRecord -def get_redis_key(collection_name: str, record_id: str) -> str: +def get_redis_key(collection_name: str, record_id: str) -> str: # pragma: no cover """Returns the Redis key for an element called record_id within collection_name. Args: @@ -24,7 +36,7 @@ def get_redis_key(collection_name: str, record_id: str) -> str: return f"{collection_name}:{record_id}" -def split_redis_key(redis_key: str) -> tuple[str, str]: +def split_redis_key(redis_key: str) -> tuple[str, str]: # pragma: no cover """Split a Redis key into its collection name and record ID. 
Args: @@ -37,7 +49,7 @@ def split_redis_key(redis_key: str) -> tuple[str, str]: return collection, record_id -def serialize_record_to_redis(record: MemoryRecord, vector_type: np.dtype) -> dict[str, Any]: +def serialize_record_to_redis(record: MemoryRecord, vector_type: np.dtype) -> dict[str, Any]: # pragma: no cover """Serialize a MemoryRecord to Redis fields.""" all_metadata = { "is_reference": record._is_reference, @@ -56,7 +68,9 @@ def serialize_record_to_redis(record: MemoryRecord, vector_type: np.dtype) -> di } -def deserialize_redis_to_record(fields: dict[str, Any], vector_type: np.dtype, with_embedding: bool) -> MemoryRecord: +def deserialize_redis_to_record( + fields: dict[str, Any], vector_type: np.dtype, with_embedding: bool +) -> MemoryRecord: # pragma: no cover """Deserialize Redis fields to a MemoryRecord.""" metadata = json.loads(fields[b"metadata"]) record = MemoryRecord( @@ -81,7 +95,7 @@ def deserialize_redis_to_record(fields: dict[str, Any], vector_type: np.dtype, w def deserialize_document_to_record( database: Redis, doc: Document, vector_type: np.dtype, with_embedding: bool -) -> MemoryRecord: +) -> MemoryRecord: # pragma: no cover """Deserialize document to a MemoryRecord.""" # Document's ID refers to the Redis key redis_key = doc["id"] @@ -107,3 +121,68 @@ def deserialize_document_to_record( record._embedding = np.frombuffer(eb, dtype=vector_type).astype(float) return record + + +class RedisWrapper(Redis): + """Wrapper to make sure the connection is closed when the object is deleted.""" + + def __del__(self) -> None: + """Close connection, done when the object is deleted, used when SK creates a client.""" + with contextlib.suppress(Exception): + asyncio.get_running_loop().create_task(self.close()) + + +def data_model_definition_to_redis_fields( + data_model_definition: VectorStoreRecordDefinition, collection_type: RedisCollectionTypes +) -> list[RedisField]: + """Create a list of fields for Redis from a data_model_definition.""" + fields: list[RedisField] = [] + for name, field in data_model_definition.fields.items(): + if isinstance(field, VectorStoreRecordKeyField): + continue + if collection_type == RedisCollectionTypes.HASHSET: + fields.append(_field_to_redis_field_hashset(name, field)) + elif collection_type == RedisCollectionTypes.JSON: + fields.append(_field_to_redis_field_json(name, field)) + return fields + + +def _field_to_redis_field_hashset( + name: str, field: VectorStoreRecordVectorField | VectorStoreRecordDataField +) -> RedisField: + if isinstance(field, VectorStoreRecordVectorField): + return VectorField( + name=name, + algorithm=field.index_kind.value.upper() if field.index_kind else "HNSW", + attributes={ + "type": TYPE_MAPPER_VECTOR[field.property_type or "default"], + "dim": field.dimensions, + "distance_metric": DISTANCE_FUNCTION_MAP[field.distance_function or "default"], + }, + ) + if field.property_type in ["int", "float"]: + return NumericField(name=name) + if field.is_full_text_searchable: + return TextField(name=name) + return TagField(name=name) + + +def _field_to_redis_field_json( + name: str, field: VectorStoreRecordVectorField | VectorStoreRecordDataField +) -> RedisField: + if isinstance(field, VectorStoreRecordVectorField): + return VectorField( + name=f"$.{name}", + algorithm=field.index_kind.value.upper() if field.index_kind else "HNSW", + attributes={ + "type": TYPE_MAPPER_VECTOR[field.property_type or "default"], + "dim": field.dimensions, + "distance_metric": DISTANCE_FUNCTION_MAP[field.distance_function or "default"], + }, + 
as_name=name, + ) + if field.property_type in ["int", "float"]: + return NumericField(name=f"$.{name}", as_name=name) + if field.is_full_text_searchable: + return TextField(name=f"$.{name}", as_name=name) + return TagField(name=f"$.{name}", as_name=name) diff --git a/python/semantic_kernel/connectors/memory/usearch/usearch_memory_store.py b/python/semantic_kernel/connectors/memory/usearch/usearch_memory_store.py index b4e308240050..1d00e19d5fb6 100644 --- a/python/semantic_kernel/connectors/memory/usearch/usearch_memory_store.py +++ b/python/semantic_kernel/connectors/memory/usearch/usearch_memory_store.py @@ -61,18 +61,16 @@ def create_default(embeddings_index: Index) -> "_USearchCollection": # PyArrow Schema definition for the embeddings data from `MemoryRecord`. -_embeddings_data_schema = pa.schema( - [ - pa.field("key", pa.string()), - pa.field("timestamp", pa.timestamp("us")), - pa.field("is_reference", pa.bool_()), - pa.field("external_source_name", pa.string()), - pa.field("id", pa.string()), - pa.field("description", pa.string()), - pa.field("text", pa.string()), - pa.field("additional_metadata", pa.string()), - ] -) +_embeddings_data_schema = pa.schema([ + pa.field("key", pa.string()), + pa.field("timestamp", pa.timestamp("us")), + pa.field("is_reference", pa.bool_()), + pa.field("external_source_name", pa.string()), + pa.field("id", pa.string()), + pa.field("description", pa.string()), + pa.field("text", pa.string()), + pa.field("additional_metadata", pa.string()), +]) class _CollectionFileType(Enum): @@ -116,6 +114,8 @@ def pyarrow_table_to_memoryrecords(table: pa.Table, vectors: ndarray | None = No @experimental_class class USearchMemoryStore(MemoryStoreBase): + """Memory store for searching embeddings with USearch.""" + def __init__( self, persist_directory: os.PathLike | None = None, @@ -349,9 +349,10 @@ async def upsert_batch( ) # Update embeddings_table - ucollection.embeddings_data_table = pa.concat_tables( - [ucollection.embeddings_data_table, memoryrecords_to_pyarrow_table(records)] - ) + ucollection.embeddings_data_table = pa.concat_tables([ + ucollection.embeddings_data_table, + memoryrecords_to_pyarrow_table(records), + ]) # Update embeddings_id_to_label for index, record_id in enumerate(all_records_id): diff --git a/python/semantic_kernel/connectors/memory/volatile/__init__.py b/python/semantic_kernel/connectors/memory/volatile/__init__.py new file mode 100644 index 000000000000..a0e7fb496dae --- /dev/null +++ b/python/semantic_kernel/connectors/memory/volatile/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft. All rights reserved. + +from semantic_kernel.connectors.memory.volatile.volatile_collection import VolatileCollection +from semantic_kernel.connectors.memory.volatile.volatile_store import VolatileStore + +__all__ = ["VolatileCollection", "VolatileStore"] diff --git a/python/semantic_kernel/connectors/memory/volatile/volatile_collection.py b/python/semantic_kernel/connectors/memory/volatile/volatile_collection.py new file mode 100644 index 000000000000..2b436e7e6ea5 --- /dev/null +++ b/python/semantic_kernel/connectors/memory/volatile/volatile_collection.py @@ -0,0 +1,76 @@ +# Copyright (c) Microsoft. All rights reserved. 
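Tying the mapping helpers above together: data_model_definition_to_redis_fields turns a record definition into redis-py field objects for either collection type; key fields are skipped, and JSON fields are addressed by $.-prefixed paths aliased back to their plain names. A sketch, again reusing the hypothetical definition from the earlier examples (its vector field must have dimensions set):

```python
from semantic_kernel.connectors.memory.redis.const import RedisCollectionTypes
from semantic_kernel.connectors.memory.redis.utils import data_model_definition_to_redis_fields

for field in data_model_definition_to_redis_fields(definition, RedisCollectionTypes.JSON):
    # Prints the concrete redis-py field type and its path-style name,
    # e.g. "VectorField $.embedding" (aliased back to "embedding").
    print(type(field).__name__, field.name)
```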
+ +import sys +from collections.abc import Mapping, Sequence +from typing import Any, ClassVar, TypeVar + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + +from pydantic import Field + +from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition +from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection +from semantic_kernel.kernel_types import OneOrMany + +KEY_TYPES = str | int | float + +TModel = TypeVar("TModel") + + +class VolatileCollection(VectorStoreRecordCollection[KEY_TYPES, TModel]): + """Volatile Collection.""" + + inner_storage: dict[KEY_TYPES, dict] = Field(default_factory=dict) + supported_key_types: ClassVar[list[str] | None] = ["str", "int", "float"] + + def __init__( + self, + collection_name: str, + data_model_type: type[TModel], + data_model_definition: VectorStoreRecordDefinition | None = None, + ): + """Create a Volatile Collection.""" + super().__init__( + data_model_type=data_model_type, + data_model_definition=data_model_definition, + collection_name=collection_name, + ) + + @override + async def _inner_delete(self, keys: Sequence[KEY_TYPES], **kwargs: Any) -> None: + for key in keys: + self.inner_storage.pop(key, None) + + @override + async def _inner_get(self, keys: Sequence[KEY_TYPES], **kwargs: Any) -> Any | OneOrMany[TModel] | None: + return [self.inner_storage[key] for key in keys if key in self.inner_storage] + + @override + async def _inner_upsert(self, records: Sequence[Any], **kwargs: Any) -> Sequence[KEY_TYPES]: + updated_keys = [] + for record in records: + key = record[self._key_field_name] if isinstance(record, Mapping) else getattr(record, self._key_field_name) + self.inner_storage[key] = record + updated_keys.append(key) + return updated_keys + + def _deserialize_store_models_to_dicts(self, records: Sequence[Any], **kwargs: Any) -> Sequence[dict[str, Any]]: + return records + + def _serialize_dicts_to_store_models(self, records: Sequence[dict[str, Any]], **kwargs: Any) -> Sequence[Any]: + return records + + @override + async def create_collection(self, **kwargs: Any) -> None: + pass + + @override + async def delete_collection(self, **kwargs: Any) -> None: + self.inner_storage = {} + + @override + async def does_collection_exist(self, **kwargs: Any) -> bool: + return True diff --git a/python/semantic_kernel/connectors/memory/volatile/volatile_store.py b/python/semantic_kernel/connectors/memory/volatile/volatile_store.py new file mode 100644 index 000000000000..1df7b2948373 --- /dev/null +++ b/python/semantic_kernel/connectors/memory/volatile/volatile_store.py @@ -0,0 +1,47 @@ +# Copyright (c) Microsoft. All rights reserved. 
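Because the volatile collection is nothing more than a dict held in inner_storage, it is mainly useful for tests; a short sketch of its surface (collection management is intentionally trivial):

```python
import asyncio

from semantic_kernel.connectors.memory.volatile import VolatileCollection


async def demo() -> None:
    collection = VolatileCollection(
        collection_name="scratch",
        data_model_type=dict,
        data_model_definition=definition,  # hypothetical definition from the earlier sketches
    )
    print(await collection.does_collection_exist())  # always True
    await collection.delete_collection()  # simply resets inner_storage to {}


asyncio.run(demo())
```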
+
+import logging
+import sys
+from collections.abc import Sequence
+from typing import Any, TypeVar
+
+if sys.version_info >= (3, 12):
+    from typing import override  # pragma: no cover
+else:
+    from typing_extensions import override  # pragma: no cover
+
+
+from semantic_kernel.connectors.memory.volatile.volatile_collection import VolatileCollection
+from semantic_kernel.data.vector_store import VectorStore
+from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
+from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+logger: logging.Logger = logging.getLogger(__name__)
+
+TModel = TypeVar("TModel")
+
+
+@experimental_class
+class VolatileStore(VectorStore):
+    """Create a Volatile Vector Store."""
+
+    @override
+    async def list_collection_names(self, **kwargs) -> Sequence[str]:
+        return list(self.vector_record_collections.keys())
+
+    @override
+    def get_collection(
+        self,
+        collection_name: str,
+        data_model_type: type[TModel],
+        data_model_definition: VectorStoreRecordDefinition | None = None,
+        **kwargs: Any,
+    ) -> "VectorStoreRecordCollection":
+        if collection_name not in self.vector_record_collections:
+            self.vector_record_collections[collection_name] = VolatileCollection(
+                data_model_type=data_model_type,
+                data_model_definition=data_model_definition,
+                collection_name=collection_name,
+            )
+        return self.vector_record_collections[collection_name]
diff --git a/python/semantic_kernel/connectors/memory/weaviate/weaviate_memory_store.py b/python/semantic_kernel/connectors/memory/weaviate/weaviate_memory_store.py
index 4e9a87bf2eec..e0be96a17021 100644
--- a/python/semantic_kernel/connectors/memory/weaviate/weaviate_memory_store.py
+++ b/python/semantic_kernel/connectors/memory/weaviate/weaviate_memory_store.py
@@ -104,6 +104,8 @@ def remove_underscore_prefix(cls, sk_dict):
 
 @experimental_class
 class WeaviateMemoryStore(MemoryStoreBase):
+    """A memory store that uses Weaviate as the backend."""
+
     def __init__(
         self,
         url: str | None = None,
@@ -219,13 +221,11 @@ async def get_batch(self, collection_name: str, keys: list[str], with_embedding:
     def _build_multi_get_query(self, collection_name: str, keys: list[str], with_embedding: bool):
         queries = []
         for i, key in enumerate(keys):
-            query = self.client.query.get(collection_name, ALL_PROPERTIES).with_where(
-                {
-                    "path": ["key"],
-                    "operator": "Equal",
-                    "valueString": key,
-                }
-            )
+            query = self.client.query.get(collection_name, ALL_PROPERTIES).with_where({
+                "path": ["key"],
+                "operator": "Equal",
+                "valueString": key,
+            })
             if with_embedding:
                 query = query.with_additional("vector")
 
diff --git a/python/semantic_kernel/connectors/openai_plugin/openai_authentication_config.py b/python/semantic_kernel/connectors/openai_plugin/openai_authentication_config.py
index 25ee4581bba1..fddb9a722a1a 100644
--- a/python/semantic_kernel/connectors/openai_plugin/openai_authentication_config.py
+++ b/python/semantic_kernel/connectors/openai_plugin/openai_authentication_config.py
@@ -9,11 +9,15 @@
 
 
 class OpenAIAuthenticationType(str, Enum):
+    """OpenAI authentication types."""
+
     OAuth = "oauth"
     NoneType = "none"
 
 
 class OpenAIAuthorizationType(str, Enum):
+    """OpenAI authorization types."""
+
     Bearer = "Bearer"
     Basic = "Basic"
 
diff --git a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation.py b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation.py
index d3c95d1ae0a0..7ab80e300405 100644
--- a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation.py
+++ b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation.py
@@ -25,6 +25,8 @@
 
 @experimental_class
 class RestApiOperation:
+    """RestApiOperation."""
+
     MEDIA_TYPE_TEXT_PLAIN = "text/plain"
     PAYLOAD_ARGUMENT_NAME = "payload"
     CONTENT_TYPE_ARGUMENT_NAME = "content-type"
diff --git a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_expected_response.py b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_expected_response.py
index 3b77af349594..f5669ecb081d 100644
--- a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_expected_response.py
+++ b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_expected_response.py
@@ -6,6 +6,8 @@
 
 @experimental_class
 class RestApiOperationExpectedResponse:
+    """RestApiOperationExpectedResponse."""
+
     def __init__(self, description: str, media_type: str, schema: dict[str, str] | None = None):
         """Initialize the RestApiOperationExpectedResponse."""
         self.description = description
diff --git a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_parameter.py b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_parameter.py
index c74a10acac34..761e390c9d4c 100644
--- a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_parameter.py
+++ b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_parameter.py
@@ -16,6 +16,8 @@
 
 @experimental_class
 class RestApiOperationParameter:
+    """RestApiOperationParameter."""
+
     def __init__(
         self,
         name: str,
diff --git a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_parameter_style.py b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_parameter_style.py
index b7ea8b108b1b..c76f9e3a8847 100644
--- a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_parameter_style.py
+++ b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_parameter_style.py
@@ -7,4 +7,6 @@
 
 @experimental_class
 class RestApiOperationParameterStyle(Enum):
+    """RestApiOperationParameterStyle."""
+
     SIMPLE = "simple"
diff --git a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_payload.py b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_payload.py
index ad102911f665..6734114f28a2 100644
--- a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_payload.py
+++ b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_payload.py
@@ -8,6 +8,8 @@
 
 @experimental_class
 class RestApiOperationPayload:
+    """RestApiOperationPayload."""
+
     def __init__(
         self,
         media_type: str,
diff --git a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_payload_property.py b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_payload_property.py
index cf6fed327184..ab0ee15f3e9d 100644
--- a/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_payload_property.py
+++ b/python/semantic_kernel/connectors/openapi_plugin/models/rest_api_operation_payload_property.py
@@ -7,6 +7,8 @@
 
 @experimental_class
 class RestApiOperationPayloadProperty:
+    """RestApiOperationPayloadProperty."""
+
     def __init__(
         self,
         name: str,
diff --git a/python/semantic_kernel/connectors/utils/document_loader.py b/python/semantic_kernel/connectors/utils/document_loader.py
index 5984c3b9bfce..2cc35075ed89 100644
--- a/python/semantic_kernel/connectors/utils/document_loader.py
+++ b/python/semantic_kernel/connectors/utils/document_loader.py
@@ -13,6 +13,8 @@
 
 
 class DocumentLoader:
+    """Utility class to load a document from a URL."""
+
     @staticmethod
     async def from_uri(
         url: str,
diff --git a/python/semantic_kernel/contents/__init__.py b/python/semantic_kernel/contents/__init__.py
index 21d717945299..7563bdbcfe93 100644
--- a/python/semantic_kernel/contents/__init__.py
+++ b/python/semantic_kernel/contents/__init__.py
@@ -1,5 +1,6 @@
 # Copyright (c) Microsoft. All rights reserved.
 
+from semantic_kernel.contents.annotation_content import AnnotationContent
 from semantic_kernel.contents.chat_history import ChatHistory
 from semantic_kernel.contents.chat_message_content import ChatMessageContent
 from semantic_kernel.contents.function_call_content import FunctionCallContent
@@ -12,6 +13,7 @@
 from semantic_kernel.contents.utils.finish_reason import FinishReason
 
 __all__ = [
+    "AnnotationContent",
    "AuthorRole",
     "ChatHistory",
     "ChatMessageContent",
diff --git a/python/semantic_kernel/contents/annotation_content.py b/python/semantic_kernel/contents/annotation_content.py
new file mode 100644
index 000000000000..a33f8014d694
--- /dev/null
+++ b/python/semantic_kernel/contents/annotation_content.py
@@ -0,0 +1,61 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import logging
+from typing import Any, ClassVar, Literal, TypeVar
+from xml.etree.ElementTree import Element  # nosec
+
+from pydantic import Field
+
+from semantic_kernel.contents.const import ANNOTATION_CONTENT_TAG, ContentTypes
+from semantic_kernel.contents.kernel_content import KernelContent
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+logger = logging.getLogger(__name__)
+
+_T = TypeVar("_T", bound="AnnotationContent")
+
+
+@experimental_class
+class AnnotationContent(KernelContent):
+    """Annotation content."""
+
+    content_type: Literal[ContentTypes.ANNOTATION_CONTENT] = Field(ANNOTATION_CONTENT_TAG, init=False)  # type: ignore
+    tag: ClassVar[str] = ANNOTATION_CONTENT_TAG
+    file_id: str | None = None
+    quote: str | None = None
+    start_index: int | None = None
+    end_index: int | None = None
+
+    def __str__(self) -> str:
+        """Return the string representation of the annotation content."""
+        return f"AnnotationContent(file_id={self.file_id}, quote={self.quote}, start_index={self.start_index}, end_index={self.end_index})"  # noqa: E501
+
+    def to_element(self) -> Element:
+        """Convert the annotation content to an Element."""
+        element = Element(self.tag)
+        if self.file_id:
+            element.set("file_id", self.file_id)
+        if self.quote:
+            element.set("quote", self.quote)
+        if self.start_index is not None:
+            element.set("start_index", str(self.start_index))
+        if self.end_index is not None:
+            element.set("end_index", str(self.end_index))
+        return element
+
+    @classmethod
+    def from_element(cls: type[_T], element: Element) -> _T:
+        """Create an instance from an Element."""
+        return cls(
+            file_id=element.get("file_id"),
+            quote=element.get("quote"),
+            start_index=int(element.get("start_index")) if element.get("start_index") else None,  # type: ignore
+            end_index=int(element.get("end_index")) if element.get("end_index") else None,  # type: ignore
+        )
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert the instance to a dictionary."""
+        return {
+            "type": "text",
+            "text": f"{self.file_id} {self.quote} (Start Index={self.start_index}->End Index={self.end_index})",
+        }
diff --git a/python/semantic_kernel/contents/chat_message_content.py b/python/semantic_kernel/contents/chat_message_content.py
index 930e97202c98..ced273de75a2 100644
--- a/python/semantic_kernel/contents/chat_message_content.py
+++ b/python/semantic_kernel/contents/chat_message_content.py
@@ -9,15 +9,19 @@
 from defusedxml import ElementTree
 from pydantic import Field
 
+from semantic_kernel.contents.annotation_content import AnnotationContent
 from semantic_kernel.contents.const import (
+    ANNOTATION_CONTENT_TAG,
     CHAT_MESSAGE_CONTENT_TAG,
     DISCRIMINATOR_FIELD,
+    FILE_REFERENCE_CONTENT_TAG,
     FUNCTION_CALL_CONTENT_TAG,
     FUNCTION_RESULT_CONTENT_TAG,
     IMAGE_CONTENT_TAG,
     TEXT_CONTENT_TAG,
     ContentTypes,
 )
+from semantic_kernel.contents.file_reference_content import FileReferenceContent
 from semantic_kernel.contents.function_call_content import FunctionCallContent
 from semantic_kernel.contents.function_result_content import FunctionResultContent
 from semantic_kernel.contents.image_content import ImageContent
@@ -29,18 +33,22 @@
 from semantic_kernel.exceptions.content_exceptions import ContentInitializationError
 
 TAG_CONTENT_MAP = {
+    ANNOTATION_CONTENT_TAG: AnnotationContent,
     TEXT_CONTENT_TAG: TextContent,
+    FILE_REFERENCE_CONTENT_TAG: FileReferenceContent,
     FUNCTION_CALL_CONTENT_TAG: FunctionCallContent,
     FUNCTION_RESULT_CONTENT_TAG: FunctionResultContent,
     IMAGE_CONTENT_TAG: ImageContent,
 }
 
 ITEM_TYPES = Union[
+    AnnotationContent,
     ImageContent,
     TextContent,
     StreamingTextContent,
     FunctionResultContent,
     FunctionCallContent,
+    FileReferenceContent,
 ]
 
 logger = logging.getLogger(__name__)
@@ -298,3 +306,7 @@ def _parse_items(self) -> str | list[dict[str, Any]]:
         if len(self.items) == 1 and isinstance(self.items[0], FunctionResultContent):
             return str(self.items[0].result)
         return [item.to_dict() for item in self.items]
+
+    def __hash__(self) -> int:
+        """Return the hash of the chat message content."""
+        return hash((self.tag, self.role, self.content, self.encoding, self.finish_reason, *self.items))
diff --git a/python/semantic_kernel/contents/const.py b/python/semantic_kernel/contents/const.py
index 07153e4c0541..0e2a34e876b3 100644
--- a/python/semantic_kernel/contents/const.py
+++ b/python/semantic_kernel/contents/const.py
@@ -6,16 +6,22 @@
 CHAT_HISTORY_TAG: Final[str] = "chat_history"
 TEXT_CONTENT_TAG: Final[str] = "text"
 IMAGE_CONTENT_TAG: Final[str] = "image"
+ANNOTATION_CONTENT_TAG: Final[str] = "annotation"
 BINARY_CONTENT_TAG: Final[str] = "binary"
+FILE_REFERENCE_CONTENT_TAG: Final[str] = "file_reference"
 FUNCTION_CALL_CONTENT_TAG: Final[str] = "function_call"
 FUNCTION_RESULT_CONTENT_TAG: Final[str] = "function_result"
 DISCRIMINATOR_FIELD: Final[str] = "content_type"
 
 
 class ContentTypes(str, Enum):
+    """Content types enumeration."""
+
+    ANNOTATION_CONTENT = ANNOTATION_CONTENT_TAG
     BINARY_CONTENT = BINARY_CONTENT_TAG
     CHAT_MESSAGE_CONTENT = CHAT_MESSAGE_CONTENT_TAG
     IMAGE_CONTENT = IMAGE_CONTENT_TAG
+    FILE_REFERENCE_CONTENT = FILE_REFERENCE_CONTENT_TAG
     FUNCTION_CALL_CONTENT = FUNCTION_CALL_CONTENT_TAG
     FUNCTION_RESULT_CONTENT = FUNCTION_RESULT_CONTENT_TAG
     TEXT_CONTENT = TEXT_CONTENT_TAG
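# --- Editor's example (illustrative sketch, not part of the patch) ---
# A minimal round-trip through the new AnnotationContent added above, using
# the to_element/from_element methods it defines. Assumes a build of
# semantic-kernel that includes this change; ids and quote text are made up.
from semantic_kernel.contents import AnnotationContent

annotation = AnnotationContent(file_id="file-123", quote="some quoted text", start_index=0, end_index=16)
element = annotation.to_element()                   # serializes to an <annotation .../> XML element
restored = AnnotationContent.from_element(element)  # and parses back to an equivalent instance
assert restored.quote == annotation.quote
# --- End editor's example ---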
diff --git a/python/semantic_kernel/contents/file_reference_content.py b/python/semantic_kernel/contents/file_reference_content.py
new file mode 100644
index 000000000000..99cd15f341ef
--- /dev/null
+++ b/python/semantic_kernel/contents/file_reference_content.py
@@ -0,0 +1,48 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import logging
+from typing import Any, ClassVar, Literal, TypeVar
+from xml.etree.ElementTree import Element  # nosec
+
+from pydantic import Field
+
+from semantic_kernel.contents.const import FILE_REFERENCE_CONTENT_TAG, ContentTypes
+from semantic_kernel.contents.kernel_content import KernelContent
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+logger = logging.getLogger(__name__)
+
+_T = TypeVar("_T", bound="FileReferenceContent")
+
+
+@experimental_class
+class FileReferenceContent(KernelContent):
+    """File reference content."""
+
+    content_type: Literal[ContentTypes.FILE_REFERENCE_CONTENT] = Field(FILE_REFERENCE_CONTENT_TAG, init=False)  # type: ignore
+    tag: ClassVar[str] = FILE_REFERENCE_CONTENT_TAG
+    file_id: str | None = None
+
+    def __str__(self) -> str:
+        """Return the string representation of the file reference content."""
+        return f"FileReferenceContent(file_id={self.file_id})"
+
+    def to_element(self) -> Element:
+        """Convert the file reference content to an Element."""
+        element = Element(self.tag)
+        if self.file_id:
+            element.set("file_id", self.file_id)
+        return element
+
+    @classmethod
+    def from_element(cls: type[_T], element: Element) -> _T:
+        """Create an instance from an Element."""
+        return cls(
+            file_id=element.get("file_id"),
+        )
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert the instance to a dictionary."""
+        return {
+            "file_id": self.file_id,
+        }
diff --git a/python/semantic_kernel/contents/function_call_content.py b/python/semantic_kernel/contents/function_call_content.py
index 89b34306262c..88bf1189fdec 100644
--- a/python/semantic_kernel/contents/function_call_content.py
+++ b/python/semantic_kernel/contents/function_call_content.py
@@ -195,3 +195,7 @@ def to_dict(self) -> dict[str, str | Any]:
         """Convert the instance to a dictionary."""
         args = json.dumps(self.arguments) if isinstance(self.arguments, dict) else self.arguments
         return {"id": self.id, "type": "function", "function": {"name": self.name, "arguments": args}}
+
+    def __hash__(self) -> int:
+        """Return the hash of the function call content."""
+        return hash((self.tag, self.id, self.index, self.name, self.function_name, self.plugin_name, self.arguments))
diff --git a/python/semantic_kernel/contents/function_result_content.py b/python/semantic_kernel/contents/function_result_content.py
index 301bc53b645d..0cadf5f2b60d 100644
--- a/python/semantic_kernel/contents/function_result_content.py
+++ b/python/semantic_kernel/contents/function_result_content.py
@@ -150,12 +150,10 @@ def from_function_call_content_and_result(
             metadata=metadata,
         )
 
-    def to_chat_message_content(self, unwrap: bool = False) -> "ChatMessageContent":
+    def to_chat_message_content(self) -> "ChatMessageContent":
         """Convert the instance to a ChatMessageContent."""
         from semantic_kernel.contents.chat_message_content import ChatMessageContent
 
-        if unwrap and isinstance(self.result, str):
-            return ChatMessageContent(role=AuthorRole.TOOL, content=self.result)
         return ChatMessageContent(role=AuthorRole.TOOL, items=[self])
 
     def to_dict(self) -> dict[str, str]:
@@ -174,3 +172,7 @@ def split_name(self) -> list[str]:
     def serialize_result(self, value: Any) -> str:
         """Serialize the result."""
         return str(value)
+
+    def __hash__(self) -> int:
+        """Return the hash of the function result content."""
+        return hash((self.tag, self.id, self.result, self.name, self.function_name, self.plugin_name, self.encoding))
diff --git a/python/semantic_kernel/contents/text_content.py b/python/semantic_kernel/contents/text_content.py
index e9aabe809ef3..c3decbfa1a6d 100644
--- a/python/semantic_kernel/contents/text_content.py
+++ b/python/semantic_kernel/contents/text_content.py
@@ -57,3 +57,7 @@ def from_element(cls: type[_T], element: Element) -> _T:
     def to_dict(self) -> dict[str, str]:
         """Convert the instance to a dictionary."""
         return {"type": "text", "text": self.text}
+
+    def __hash__(self) -> int:
+        """Return the hash of the text content."""
+        return hash((self.tag, self.text, self.encoding))
diff --git a/python/semantic_kernel/contents/utils/data_uri.py b/python/semantic_kernel/contents/utils/data_uri.py
index 96d603013bbe..3cf080af8577 100644
--- a/python/semantic_kernel/contents/utils/data_uri.py
+++ b/python/semantic_kernel/contents/utils/data_uri.py
@@ -24,6 +24,8 @@
 
 
 class DataUri(KernelBaseModel, validate_assignment=True):
+    """A class to represent a data uri."""
+
     data_bytes: bytes | None = None
     data_str: str | None = None
     mime_type: str | None = None
diff --git a/python/semantic_kernel/core_plugins/text_memory_plugin.py b/python/semantic_kernel/core_plugins/text_memory_plugin.py
index 04b8e60f2f06..3e69f4e31f43 100644
--- a/python/semantic_kernel/core_plugins/text_memory_plugin.py
+++ b/python/semantic_kernel/core_plugins/text_memory_plugin.py
@@ -19,6 +19,8 @@
 
 
 class TextMemoryPlugin(KernelBaseModel):
+    """A plugin to interact with a Semantic Text Memory."""
+
     memory: SemanticTextMemoryBase
     embeddings_kwargs: dict[str, Any] = Field(default_factory=dict)
 
diff --git a/python/semantic_kernel/data/__init__.py b/python/semantic_kernel/data/__init__.py
new file mode 100644
index 000000000000..07ab87c4dae0
--- /dev/null
+++ b/python/semantic_kernel/data/__init__.py
@@ -0,0 +1,28 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from semantic_kernel.data.const import DistanceFunction, IndexKind
+from semantic_kernel.data.vector_store import VectorStore
+from semantic_kernel.data.vector_store_model_decorator import vectorstoremodel
+from semantic_kernel.data.vector_store_model_definition import (
+    VectorStoreRecordDefinition,
+)
+from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection
+from semantic_kernel.data.vector_store_record_fields import (
+    VectorStoreRecordDataField,
+    VectorStoreRecordKeyField,
+    VectorStoreRecordVectorField,
+)
+from semantic_kernel.data.vector_store_record_utils import VectorStoreRecordUtils
+
+__all__ = [
+    "DistanceFunction",
+    "IndexKind",
+    "VectorStore",
+    "VectorStoreRecordCollection",
+    "VectorStoreRecordDataField",
+    "VectorStoreRecordDefinition",
+    "VectorStoreRecordKeyField",
+    "VectorStoreRecordUtils",
+    "VectorStoreRecordVectorField",
+    "vectorstoremodel",
+]
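# --- Editor's example (illustrative sketch, not part of the patch) ---
# The __hash__ implementations added above make content items and chat
# messages hashable, so identical messages can, for instance, be de-duplicated
# with a set. Assumes this change is applied and that both messages keep the
# default metadata, so pydantic equality also holds.
from semantic_kernel.contents import AuthorRole, ChatMessageContent

first = ChatMessageContent(role=AuthorRole.USER, content="hello")
second = ChatMessageContent(role=AuthorRole.USER, content="hello")
assert hash(first) == hash(second)  # tag, role, content, encoding, finish_reason and items all match
assert len({first, second}) == 1    # so a set collapses the duplicates
# --- End editor's example ---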
diff --git a/python/semantic_kernel/data/const.py b/python/semantic_kernel/data/const.py
new file mode 100644
index 000000000000..6972d9c7945e
--- /dev/null
+++ b/python/semantic_kernel/data/const.py
@@ -0,0 +1,20 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+
+from enum import Enum
+
+
+class IndexKind(str, Enum):
+    """Index kinds for similarity search."""
+
+    HNSW = "hnsw"
+    FLAT = "flat"
+
+
+class DistanceFunction(str, Enum):
+    """Distance functions for similarity search."""
+
+    COSINE = "cosine"
+    DOT_PROD = "dot_prod"
+    EUCLIDEAN = "euclidean"
+    MANHATTAN = "manhattan"
diff --git a/python/semantic_kernel/data/vector_store.py b/python/semantic_kernel/data/vector_store.py
new file mode 100644
index 000000000000..e51f145c7168
--- /dev/null
+++ b/python/semantic_kernel/data/vector_store.py
@@ -0,0 +1,36 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+
+from abc import abstractmethod
+from collections.abc import Sequence
+from typing import Any
+
+from pydantic import Field
+
+from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
+from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection
+from semantic_kernel.kernel_pydantic import KernelBaseModel
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+
+@experimental_class
+class VectorStore(KernelBaseModel):
+    """Base class for vector stores."""
+
+    vector_record_collections: dict[str, VectorStoreRecordCollection] = Field(default_factory=dict)
+
+    @abstractmethod
+    def get_collection(
+        self,
+        collection_name: str,
+        data_model_type: type[object],
+        data_model_definition: VectorStoreRecordDefinition | None = None,
+        **kwargs: Any,
+    ) -> VectorStoreRecordCollection:
+        """Get a vector record store."""
+        ...  # pragma: no cover
+
+    @abstractmethod
+    async def list_collection_names(self, **kwargs) -> Sequence[str]:
+        """Get the names of all collections."""
+        ...  # pragma: no cover
diff --git a/python/semantic_kernel/data/vector_store_model_decorator.py b/python/semantic_kernel/data/vector_store_model_decorator.py
new file mode 100644
index 000000000000..e89fd6d7f766
--- /dev/null
+++ b/python/semantic_kernel/data/vector_store_model_decorator.py
@@ -0,0 +1,117 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import logging
+from inspect import _empty, signature
+from types import NoneType
+from typing import Any
+
+from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
+from semantic_kernel.data.vector_store_record_fields import VectorStoreRecordField, VectorStoreRecordVectorField
+from semantic_kernel.exceptions.memory_connector_exceptions import VectorStoreModelException
+
+logger = logging.getLogger(__name__)
+
+
+def vectorstoremodel(
+    cls: Any | None = None,
+):
+    """Returns the class as a vector store model.
+
+    This decorator makes a class a vector store model.
+    There are three things being checked:
+    - The class must have at least one field with an annotation
+      of type VectorStoreRecordKeyField, VectorStoreRecordDataField or VectorStoreRecordVectorField.
+    - The class must have exactly one field with the VectorStoreRecordKeyField annotation.
+    - Optionally, when there are VectorStoreRecordDataFields that specify an embedding property name,
+      there must be a corresponding VectorStoreRecordVectorField with the same name.
+
+    Args:
+        cls: The class to be decorated.
+
+    Raises:
+        VectorStoreModelException: If there are no fields with a VectorStoreRecordField annotation.
+        VectorStoreModelException: If there are fields with no name.
+        VectorStoreModelException: If there is no key field.
+        VectorStoreModelException: If there is a field with an embedding property name but no corresponding vector field.
+    """
+
+    def wrap(cls: Any):
+        # get fields and annotations
+        cls_sig = signature(cls)
+        setattr(cls, "__kernel_vectorstoremodel__", True)
+        setattr(cls, "__kernel_vectorstoremodel_definition__", _parse_signature_to_definition(cls_sig.parameters))
+
+        return cls
+
+    # See if we're being called as @vectorstoremodel or @vectorstoremodel().
+    if cls is None:
+        # We're called with parens.
+        return wrap
+
+    # We're called as @vectorstoremodel without parens.
+    return wrap(cls)
+
+
+def _parse_signature_to_definition(parameters) -> VectorStoreRecordDefinition:
+    if len(parameters) == 0:
+        raise VectorStoreModelException(
+            "There must be at least one field in the datamodel. If you are using this with a @dataclass, "
+            "you might have inverted the order of the decorators, the vectorstoremodel decorator should be the top one."
+        )
+    fields: dict[str, VectorStoreRecordField] = {}
+    for field in parameters.values():
+        annotation = field.annotation
+        # check first if there are any annotations
+        if not hasattr(annotation, "__metadata__"):
+            if field._default is _empty:
+                raise VectorStoreModelException(
+                    "Fields that do not have a VectorStoreRecord* annotation must have a default value."
+                )
+            logger.info(
+                f'Field "{field.name}" does not have a VectorStoreRecord* '
+                "annotation, will not be part of the record."
+            )
+            continue
+        property_type = annotation.__origin__
+        if (args := getattr(property_type, "__args__", None)) and NoneType in args and len(args) == 2:
+            property_type = args[0]
+        metadata = annotation.__metadata__
+        field_type = None
+        for item in metadata:
+            if isinstance(item, VectorStoreRecordField):
+                field_type = item
+                if not field_type.name or field_type.name != field.name:
+                    field_type.name = field.name
+                if not field_type.property_type:
+                    if hasattr(property_type, "__args__"):
+                        if isinstance(item, VectorStoreRecordVectorField):
+                            field_type.property_type = property_type.__args__[0].__name__
+                        elif property_type.__name__ == "list":
+                            field_type.property_type = f"{property_type.__name__}[{property_type.__args__[0].__name__}]"
+                        else:
+                            field_type.property_type = property_type.__name__
+
+                    else:
+                        field_type.property_type = property_type.__name__
+            elif isinstance(item, type(VectorStoreRecordField)):
+                if hasattr(property_type, "__args__") and property_type.__name__ == "list":
+                    property_type_name = f"{property_type.__name__}[{property_type.__args__[0].__name__}]"
+                else:
+                    property_type_name = property_type.__name__
+                field_type = item(name=field.name, property_type=property_type_name)
+        if not field_type:
+            if field._default is _empty:
+                raise VectorStoreModelException(
+                    "Fields that do not have a VectorStoreRecord* annotation must have a default value."
+                )
+            logger.debug(
+                f'Field "{field.name}" does not have a VectorStoreRecordField '
+                "annotation, will not be part of the record."
+            )
+            continue
+        # field name is set either when not None or by instantiating a new field
+        assert field_type.name is not None  # nosec
+        fields[field_type.name] = field_type
+    return VectorStoreRecordDefinition(fields=fields)
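# --- Editor's example (illustrative sketch, not part of the patch) ---
# A minimal data model using the @vectorstoremodel decorator defined above.
# The class name, field names and dimensions are made up; the decorator,
# field annotations and the __kernel_vectorstoremodel_definition__ attribute
# come from this change.
from dataclasses import dataclass, field
from typing import Annotated

from semantic_kernel.data import (
    VectorStoreRecordDataField,
    VectorStoreRecordKeyField,
    VectorStoreRecordVectorField,
    vectorstoremodel,
)


@vectorstoremodel  # must be the top decorator, above @dataclass
@dataclass
class HotelRecord:
    hotel_id: Annotated[str, VectorStoreRecordKeyField()]
    description: Annotated[
        str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="description_vector")
    ]
    description_vector: Annotated[list[float], VectorStoreRecordVectorField(dimensions=4)] = field(
        default_factory=list
    )


# The decorator parses the class signature and stores the resulting definition on the class:
definition = HotelRecord.__kernel_vectorstoremodel_definition__
assert definition.key_field_name == "hotel_id"
# --- End editor's example ---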
diff --git a/python/semantic_kernel/data/vector_store_model_definition.py b/python/semantic_kernel/data/vector_store_model_definition.py
new file mode 100644
index 000000000000..b4e11bc78359
--- /dev/null
+++ b/python/semantic_kernel/data/vector_store_model_definition.py
@@ -0,0 +1,93 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from dataclasses import dataclass, field
+
+from semantic_kernel.data.vector_store_model_protocols import (
+    DeserializeProtocol,
+    FromDictProtocol,
+    SerializeProtocol,
+    ToDictProtocol,
+)
+from semantic_kernel.data.vector_store_record_fields import (
+    VectorStoreRecordDataField,
+    VectorStoreRecordField,
+    VectorStoreRecordKeyField,
+    VectorStoreRecordVectorField,
+)
+from semantic_kernel.exceptions.memory_connector_exceptions import VectorStoreModelException
+
+
+@dataclass
+class VectorStoreRecordDefinition:
+    """Memory record definition.
+
+    Args:
+        fields: The fields of the record.
+        container_mode: Whether the record is in container mode.
+        to_dict: The to_dict function, should take a record and return a list of dicts.
+        from_dict: The from_dict function, should take a list of dicts and return a record.
+        serialize: The serialize function, should take a record and return the type specific to a datastore.
+        deserialize: The deserialize function, should take a type specific to a datastore and return a record.
+
+    """
+
+    key_field_name: str = field(init=False)
+    fields: dict[str, VectorStoreRecordField]
+    container_mode: bool = False
+    to_dict: ToDictProtocol | None = None
+    from_dict: FromDictProtocol | None = None
+    serialize: SerializeProtocol | None = None
+    deserialize: DeserializeProtocol | None = None
+
+    @property
+    def field_names(self) -> list[str]:
+        """Get the names of the fields."""
+        return list(self.fields.keys())
+
+    @property
+    def key_field(self) -> "VectorStoreRecordKeyField":
+        """Get the key field."""
+        return self.fields[self.key_field_name]  # type: ignore
+
+    @property
+    def vector_field_names(self) -> list[str]:
+        """Get the names of the vector fields."""
+        return [name for name, value in self.fields.items() if isinstance(value, VectorStoreRecordVectorField)]
+
+    @property
+    def vector_fields(self) -> list["VectorStoreRecordVectorField"]:
+        """Get the vector fields."""
+        return [field for field in self.fields.values() if isinstance(field, VectorStoreRecordVectorField)]
+
+    def __post_init__(self):
+        """Validate the fields.
+
+        Raises:
+            VectorStoreModelException: If a field does not have a name.
+            VectorStoreModelException: If there is a field with an embedding property name but no corresponding vector field.
+            VectorStoreModelException: If there is no key field.
+        """
+        if len(self.fields) == 0:
+            raise VectorStoreModelException(
+                "There must be at least one field with a VectorStoreRecordField annotation."
+            )
+        self.key_field_name = ""
+        for name, value in self.fields.items():
+            if not name:
+                raise VectorStoreModelException("Fields must have a name.")
+            if value.name is None:
+                value.name = name
+            if (
+                isinstance(value, VectorStoreRecordDataField)
+                and value.has_embedding
+                and value.embedding_property_name not in self.field_names
+            ):
+                raise VectorStoreModelException(
+                    "Data field with embedding property name must refer to an existing vector field."
+                )
+            if isinstance(value, VectorStoreRecordKeyField):
+                if self.key_field_name != "":
+                    raise VectorStoreModelException("Memory record definition must have exactly one key field.")
+                self.key_field_name = name
+        if not self.key_field_name:
+            raise VectorStoreModelException("Memory record definition must have exactly one key field.")
diff --git a/python/semantic_kernel/data/vector_store_model_protocols.py b/python/semantic_kernel/data/vector_store_model_protocols.py
new file mode 100644
index 000000000000..18d5e5b9709c
--- /dev/null
+++ b/python/semantic_kernel/data/vector_store_model_protocols.py
@@ -0,0 +1,114 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from collections.abc import Sequence
+from typing import Any, Protocol, TypeVar, runtime_checkable
+
+TModel = TypeVar("TModel", bound=object)
+
+
+@runtime_checkable
+class VectorStoreModelFunctionSerdeProtocol(Protocol):
+    """Data model serialization and deserialization protocol.
+
+    This can optionally be implemented to allow single step serialization and deserialization
+    for using your data model with a specific datastore.
+    """
+
+    def serialize(self, **kwargs: Any) -> Any:
+        """Serialize the object to the format required by the data store."""
+        ...  # pragma: no cover
+
+    @classmethod
+    def deserialize(cls: type[TModel], obj: Any, **kwargs: Any) -> TModel:
+        """Deserialize the output of the data store to an object."""
+        ...  # pragma: no cover
+
+
+@runtime_checkable
+class VectorStoreModelPydanticProtocol(Protocol):
+    """Class used internally to make sure a datamodel has model_dump and model_validate."""
+
+    def model_dump(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
+        """Serialize the object to the format required by the data store."""
+        ...  # pragma: no cover
+
+    @classmethod
+    def model_validate(cls: type[TModel], *args: Any, **kwargs: Any) -> TModel:
+        """Deserialize the output of the data store to an object."""
+        ...  # pragma: no cover
+
+
+@runtime_checkable
+class VectorStoreModelToDictFromDictProtocol(Protocol):
+    """Class used internally to check if a model has to_dict and from_dict methods."""
+
+    def to_dict(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
+        """Serialize the object to the format required by the data store."""
+        ...  # pragma: no cover
+
+    @classmethod
+    def from_dict(cls: type[TModel], *args: Any, **kwargs: Any) -> TModel:
+        """Deserialize the output of the data store to an object."""
+        ...  # pragma: no cover
+
+
+@runtime_checkable
+class ToDictProtocol(Protocol):
+    """Protocol for to_dict method.
+
+    Args:
+        record: The record to be serialized.
+        **kwargs: Additional keyword arguments.
+
+    Returns:
+        A list of dictionaries.
+    """
+
+    def __call__(self, record: Any, **kwargs: Any) -> Sequence[dict[str, Any]]: ...  # pragma: no cover  # noqa: D102
+
+
+@runtime_checkable
+class FromDictProtocol(Protocol):
+    """Protocol for from_dict method.
+
+    Args:
+        records: A list of dictionaries.
+        **kwargs: Additional keyword arguments.
+
+    Returns:
+        A record or list thereof.
+    """
+
+    def __call__(self, records: Sequence[dict[str, Any]], **kwargs: Any) -> Any: ...  # noqa: D102
+
+
+@runtime_checkable
+class SerializeProtocol(Protocol):
+    """Protocol for serialize method.
+
+    Args:
+        record: The record to be serialized.
+        **kwargs: Additional keyword arguments.
+
+    Returns:
+        The serialized record, ready to be consumed by the specific store.
+
+    """
+
+    def __call__(self, record: Any, **kwargs: Any) -> Any: ...  # noqa: D102
+
+
+@runtime_checkable
+class DeserializeProtocol(Protocol):
+    """Protocol for deserialize method.
+
+    Args:
+        records: The serialized record directly from the store.
+        **kwargs: Additional keyword arguments.
+
+    Returns:
+        The deserialized record in the format expected by the application.
+
+    """
+
+    def __call__(self, records: Any, **kwargs: Any) -> Any: ...  # noqa: D102
diff --git a/python/semantic_kernel/data/vector_store_record_collection.py b/python/semantic_kernel/data/vector_store_record_collection.py
new file mode 100644
index 000000000000..de82a0764b7a
--- /dev/null
+++ b/python/semantic_kernel/data/vector_store_record_collection.py
@@ -0,0 +1,570 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+import asyncio
+import contextlib
+import logging
+from abc import abstractmethod
+from collections.abc import Awaitable, Callable, Mapping, Sequence
+from typing import Any, ClassVar, Generic, TypeVar
+
+from pydantic import model_validator
+
+from semantic_kernel.data.vector_store_model_definition import (
+    VectorStoreRecordDefinition,
+)
+from semantic_kernel.data.vector_store_model_protocols import (
+    VectorStoreModelFunctionSerdeProtocol,
+    VectorStoreModelPydanticProtocol,
+    VectorStoreModelToDictFromDictProtocol,
+)
+from semantic_kernel.exceptions.memory_connector_exceptions import (
+    MemoryConnectorException,
+    VectorStoreModelDeserializationException,
+    VectorStoreModelSerializationException,
+    VectorStoreModelValidationError,
+)
+from semantic_kernel.kernel_pydantic import KernelBaseModel
+from semantic_kernel.kernel_types import OneOrMany
+from semantic_kernel.utils.experimental_decorator import experimental_class
+
+TModel = TypeVar("TModel", bound=object)
+TKey = TypeVar("TKey")
+_T = TypeVar("_T", bound="VectorStoreRecordCollection")
+
+logger = logging.getLogger(__name__)
+
+
+@experimental_class
+class VectorStoreRecordCollection(KernelBaseModel, Generic[TKey, TModel]):
+    """Base class for a vector store record collection."""
+
+    collection_name: str
+    data_model_type: type[TModel]
+    data_model_definition: VectorStoreRecordDefinition
+    supported_key_types: ClassVar[list[str] | None] = None
+    supported_vector_types: ClassVar[list[str] | None] = None
+
+    @property
+    def _container_mode(self) -> bool:
+        return self.data_model_definition.container_mode
+
+    @property
+    def _key_field_name(self) -> str:
+        return self.data_model_definition.key_field_name
+
+    @model_validator(mode="before")
+    @classmethod
+    def _ensure_data_model_definition(cls: type[_T], data: dict[str, Any]) -> dict[str, Any]:
+        """Ensure there is a data model definition; if it isn't passed, try to get it from the data model type."""
+        if not data.get("data_model_definition"):
+            data["data_model_definition"] = getattr(
+                data["data_model_type"], "__kernel_vectorstoremodel_definition__", None
+            )
+        return data
+
+    def model_post_init(self, __context: object | None = None):
+        """Post init function that sets the key field and container mode values, and validates the datamodel."""
+        self._validate_data_model()
+
+    # region Overload Methods
+    async def close(self):
+        """Close the connection."""
+        return  # pragma: no cover
+
+    @abstractmethod
+    async def _inner_upsert(
+        self,
+        records: Sequence[Any],
+        **kwargs: Any,
+    ) -> Sequence[TKey]:
+        """Upsert the records; this should be overridden by the child class.
+
+        Args:
+            records: The records, the format is specific to the store.
+            **kwargs (Any): Additional arguments, to be passed to the store.
+
+        Returns:
+            The keys of the upserted records.
+ """ + ... # pragma: no cover + + @abstractmethod + async def _inner_get(self, keys: Sequence[TKey], **kwargs: Any) -> OneOrMany[Any] | None: + """Get the records, this should be overridden by the child class. + + Args: + keys: The keys to get. + **kwargs: Additional arguments. + + Returns: + The records from the store, not deserialized. + """ + ... # pragma: no cover + + @abstractmethod + async def _inner_delete(self, keys: Sequence[TKey], **kwargs: Any) -> None: + """Delete the records, this should be overridden by the child class. + + Args: + keys: The keys. + **kwargs: Additional arguments. + """ + ... # pragma: no cover + + def _validate_data_model(self): + """Internal function that can be overloaded by child classes to validate datatypes, etc. + + This should take the VectorStoreRecordDefinition from the item_type and validate it against the store. + + Checks can include, allowed naming of parameters, allowed data types, allowed vector dimensions. + + Default checks are that the key field is in the allowed key types and the vector fields + are in the allowed vector types. + + Raises: + VectorStoreModelValidationError: If the key field is not in the allowed key types. + VectorStoreModelValidationError: If the vector fields are not in the allowed vector types. + + """ + if ( + self.supported_key_types + and self.data_model_definition.key_field.property_type + and self.data_model_definition.key_field.property_type not in self.supported_key_types + ): + raise VectorStoreModelValidationError( + f"Key field must be one of {self.supported_key_types}, " + f"got {self.data_model_definition.key_field.property_type}" + ) + if not self.supported_vector_types: + return + for field in self.data_model_definition.vector_fields: + if field.property_type and field.property_type not in self.supported_vector_types: + raise VectorStoreModelValidationError( + f"Vector field {field.name} must be one of {self.supported_vector_types}, got {field.property_type}" + ) + + @abstractmethod + def _serialize_dicts_to_store_models(self, records: Sequence[dict[str, Any]], **kwargs: Any) -> Sequence[Any]: + """Serialize a list of dicts of the data to the store model. + + This method should be overridden by the child class to convert the dict to the store model. + """ + ... # pragma: no cover + + @abstractmethod + def _deserialize_store_models_to_dicts(self, records: Sequence[Any], **kwargs: Any) -> Sequence[dict[str, Any]]: + """Deserialize the store models to a list of dicts. + + This method should be overridden by the child class to convert the store model to a list of dicts. + """ + ... # pragma: no cover + + async def create_collection_if_not_exists(self, **kwargs: Any) -> bool: + """Create the collection in the service if it does not exists. + + First uses does_collection_exist to check if it exists, if it does returns False. + Otherwise, creates the collection and returns True. + + """ + if await self.does_collection_exist(**kwargs): + return False + await self.create_collection(**kwargs) + return True + + @abstractmethod + async def create_collection(self, **kwargs: Any) -> None: + """Create the collection in the service.""" + ... # pragma: no cover + + @abstractmethod + async def does_collection_exist(self, **kwargs: Any) -> bool: + """Check if the collection exists.""" + ... # pragma: no cover + + @abstractmethod + async def delete_collection(self, **kwargs: Any) -> None: + """Delete the collection.""" + ... 
# pragma: no cover + + # region Public Methods + + async def upsert( + self, + record: TModel, + embedding_generation_function: Callable[ + [TModel, type[TModel] | None, VectorStoreRecordDefinition | None], Awaitable[TModel] + ] + | None = None, + **kwargs: Any, + ) -> OneOrMany[TKey] | None: + """Upsert a record. + + Args: + record: The record. + embedding_generation_function: Supply this function to generate embeddings. + This will be called with the data model definition and the records, + should return the records with vectors. + This can be supplied by using the add_vector_to_records method from the VectorStoreRecordUtils. + **kwargs: Additional arguments. + + Returns: + The key of the upserted record or a list of keys, when a container type is used. + """ + if embedding_generation_function: + record = await embedding_generation_function(record, self.data_model_type, self.data_model_definition) + + try: + data = self.serialize(record) + except Exception as exc: + raise MemoryConnectorException(f"Error serializing record: {exc}") from exc + + try: + results = await self._inner_upsert(data if isinstance(data, Sequence) else [data], **kwargs) + except Exception as exc: + raise MemoryConnectorException(f"Error upserting record: {exc}") from exc + + if self._container_mode: + return results + return results[0] + + async def upsert_batch( + self, + records: OneOrMany[TModel], + embedding_generation_function: Callable[ + [OneOrMany[TModel], type[TModel] | None, VectorStoreRecordDefinition | None], Awaitable[OneOrMany[TModel]] + ] + | None = None, + **kwargs: Any, + ) -> Sequence[TKey]: + """Upsert a batch of records. + + Args: + records: The records to upsert, can be a list of records, or a single container. + embedding_generation_function: Supply this function to generate embeddings. + This will be called with the data model definition and the records, + should return the records with vectors. + This can be supplied by using the add_vector_to_records method from the VectorStoreRecordUtils. + **kwargs: Additional arguments. + + Returns: + Sequence[TKey]: The keys of the upserted records, this is always a list, + corresponds to the input or the items in the container. + """ + if embedding_generation_function: + records = await embedding_generation_function(records, self.data_model_type, self.data_model_definition) + + try: + data = self.serialize(records) + except Exception as exc: + raise MemoryConnectorException(f"Error serializing records: {exc}") from exc + + try: + return await self._inner_upsert(data, **kwargs) # type: ignore + except Exception as exc: + raise MemoryConnectorException(f"Error upserting records: {exc}") from exc + + async def get(self, key: TKey, include_vectors: bool = True, **kwargs: Any) -> TModel | None: + """Get a record. + + Args: + key: The key. + include_vectors: Include the vectors in the response, default is True. + Some vector stores do not support retrieving without vectors, even when set to false. + Some vector stores have specific parameters to control that behavior, when + that parameter is set, include_vectors is ignored. + **kwargs: Additional arguments. + + Returns: + TModel: The record. 
+ """ + try: + records = await self._inner_get([key], include_vectors=include_vectors, **kwargs) + except Exception as exc: + raise MemoryConnectorException(f"Error getting record: {exc}") from exc + + if not records: + return None + + try: + model_records = self.deserialize(records[0], keys=[key], **kwargs) + except Exception as exc: + raise MemoryConnectorException(f"Error deserializing record: {exc}") from exc + + # there are many code paths within the deserialize method, some supplied by the developer, + # and so depending on what is used, + # it might return a sequence, so we just return the first element, + # there should never be multiple elements (this is not a batch get), + # hence a raise if there are. + if not isinstance(model_records, Sequence): + return model_records + if len(model_records) == 1: + return model_records[0] + raise MemoryConnectorException(f"Error deserializing record, multiple records returned: {model_records}") + + async def get_batch( + self, keys: Sequence[TKey], include_vectors: bool = True, **kwargs: Any + ) -> OneOrMany[TModel] | None: + """Get a batch of records. + + Args: + keys: The keys. + include_vectors: Include the vectors in the response. Default is True. + Some vector stores do not support retrieving without vectors, even when set to false. + Some vector stores have specific parameters to control that behavior, when + that parameter is set, include_vectors is ignored. + **kwargs: Additional arguments. + + Returns: + The records, either a list of TModel or the container type. + """ + try: + records = await self._inner_get(keys, include_vectors=include_vectors, **kwargs) + except Exception as exc: + raise MemoryConnectorException(f"Error getting records: {exc}") from exc + + if not records: + return None + + try: + return self.deserialize(records, keys=keys, **kwargs) + except Exception as exc: + raise MemoryConnectorException(f"Error deserializing record: {exc}") from exc + + async def delete(self, key: TKey, **kwargs: Any) -> None: + """Delete a record. + + Args: + key: The key. + **kwargs: Additional arguments. + + """ + try: + await self._inner_delete([key], **kwargs) + except Exception as exc: + raise MemoryConnectorException(f"Error deleting record: {exc}") from exc + + async def delete_batch(self, keys: Sequence[TKey], **kwargs: Any) -> None: + """Delete a batch of records. + + Args: + keys: The keys. + **kwargs: Additional arguments. + + """ + try: + await self._inner_delete(keys, **kwargs) + except Exception as exc: + raise MemoryConnectorException(f"Error deleting records: {exc}") from exc + + # region Internal Serialization methods + + def serialize(self, records: OneOrMany[TModel], **kwargs: Any) -> OneOrMany[Any]: + """Serialize the data model to the store model. + + This method follows the following steps: + 1. Check if the data model has a serialize method. + Use that method to serialize and return the result. + 2. Serialize the records into a dict, using the data model specific method. + 3. Convert the dict to the store model, using the store specific method. + + If overriding this method, make sure to first try to serialize the data model to the store model, + before doing the store specific version, + the user supplied version should have precedence. 
+ """ + if serialized := self._serialize_data_model_to_store_model(records): + return serialized + + if isinstance(records, Sequence): + dict_records = [self._serialize_data_model_to_dict(rec) for rec in records] + return self._serialize_dicts_to_store_models(dict_records, **kwargs) # type: ignore + + dict_records = self._serialize_data_model_to_dict(records) # type: ignore + if isinstance(dict_records, Sequence): + # most likely this is a container, so we return all records as a list + # can also be a single record, but the to_dict returns a list + # hence we will treat it as a container. + return self._serialize_dicts_to_store_models(dict_records, **kwargs) # type: ignore + # this case is single record in, single record out + return self._serialize_dicts_to_store_models([dict_records], **kwargs)[0] + + def deserialize(self, records: OneOrMany[Any | dict[str, Any]], **kwargs: Any) -> OneOrMany[TModel] | None: + """Deserialize the store model to the data model. + + This method follows the following steps: + 1. Check if the data model has a deserialize method. + Use that method to deserialize and return the result. + 2. Deserialize the store model to a dict, using the store specific method. + 3. Convert the dict to the data model, using the data model specific method. + """ + if deserialized := self._deserialize_store_model_to_data_model(records, **kwargs): + return deserialized + + if isinstance(records, Sequence): + dict_records = self._deserialize_store_models_to_dicts(records, **kwargs) + if self._container_mode: + return self._deserialize_dict_to_data_model(dict_records, **kwargs) + return [self._deserialize_dict_to_data_model(rec, **kwargs) for rec in dict_records] + + dict_record = self._deserialize_store_models_to_dicts([records], **kwargs)[0] + if not dict_record: + return None + return self._deserialize_dict_to_data_model(dict_record, **kwargs) + + def _serialize_data_model_to_store_model(self, record: OneOrMany[TModel], **kwargs: Any) -> OneOrMany[Any] | None: + """Serialize the data model to the store model. + + This works when the data model has supplied a serialize method, specific to a data source. + This is a method called 'serialize()' on the data model or part of the vector store record definition. + + The developer is responsible for correctly serializing for the specific data source. + """ + if isinstance(record, Sequence): + result = [self._serialize_data_model_to_store_model(rec, **kwargs) for rec in record] + if not all(result): + return None + return result + if self.data_model_definition.serialize: + return self.data_model_definition.serialize(record, **kwargs) # type: ignore + if isinstance(record, VectorStoreModelFunctionSerdeProtocol): + try: + return record.serialize(**kwargs) + except Exception as exc: + raise VectorStoreModelSerializationException(f"Error serializing record: {exc}") from exc + return None + + def _deserialize_store_model_to_data_model(self, record: OneOrMany[Any], **kwargs: Any) -> OneOrMany[TModel] | None: + """Deserialize the store model to the data model. + + This works when the data model has supplied a deserialize method, specific to a data source. + This uses a method called 'deserialize()' on the data model or part of the vector store record definition. + + The developer is responsible for correctly deserializing for the specific data source. 
+ """ + if self.data_model_definition.deserialize: + if isinstance(record, Sequence): + return self.data_model_definition.deserialize(record, **kwargs) + return self.data_model_definition.deserialize([record], **kwargs) + if isinstance(self.data_model_type, VectorStoreModelFunctionSerdeProtocol): + try: + if isinstance(record, Sequence): + return [self.data_model_type.deserialize(rec, **kwargs) for rec in record] + return self.data_model_type.deserialize(record, **kwargs) + except Exception as exc: + raise VectorStoreModelSerializationException(f"Error deserializing record: {exc}") from exc + return None + + def _serialize_data_model_to_dict(self, record: TModel, **kwargs: Any) -> OneOrMany[dict[str, Any]]: + """This function is used if no serialize method is found on the data model. + + This will generally serialize the data model to a dict, should not be overridden by child classes. + + The output of this should be passed to the serialize_dict_to_store_model method. + """ + if self.data_model_definition.to_dict: + return self.data_model_definition.to_dict(record, **kwargs) + if isinstance(record, VectorStoreModelPydanticProtocol): + try: + ret = record.model_dump() + if not any(field.serialize_function is not None for field in self.data_model_definition.vector_fields): + return ret + for field in self.data_model_definition.vector_fields: + if field.serialize_function: + assert field.name is not None # nosec + ret[field.name] = field.serialize_function(ret[field.name]) + return ret + except Exception as exc: + raise VectorStoreModelSerializationException(f"Error serializing record: {exc}") from exc + if isinstance(record, VectorStoreModelToDictFromDictProtocol): + try: + ret = record.to_dict() + if not any(field.serialize_function is not None for field in self.data_model_definition.vector_fields): + return ret + for field in self.data_model_definition.vector_fields: + if field.serialize_function: + assert field.name is not None # nosec + ret[field.name] = field.serialize_function(ret[field.name]) + return ret + except Exception as exc: + raise VectorStoreModelSerializationException(f"Error serializing record: {exc}") from exc + + store_model = {} + for field_name in self.data_model_definition.field_names: + try: + value = record[field_name] if isinstance(record, Mapping) else getattr(record, field_name) + if func := getattr(self.data_model_definition.fields[field_name], "serialize_function", None): + value = func(value) + store_model[field_name] = value + except (AttributeError, KeyError) as exc: + raise VectorStoreModelSerializationException( + f"Error serializing record, not able to get: {field_name}" + ) from exc + return store_model + + def _deserialize_dict_to_data_model(self, record: OneOrMany[dict[str, Any]], **kwargs: Any) -> TModel: + """This function is used if no deserialize method is found on the data model. + + This method is the second step and will deserialize a dict to the data model, + should not be overridden by child classes. + + The input of this should come from the _deserialized_store_model_to_dict function. + """ + if self.data_model_definition.from_dict: + if isinstance(record, Sequence): + return self.data_model_definition.from_dict(record, **kwargs) + ret = self.data_model_definition.from_dict([record], **kwargs) + return ret if self._container_mode else ret[0] + if isinstance(record, Sequence): + if len(record) > 1: + raise VectorStoreModelDeserializationException( + "Cannot deserialize multiple records to a single record unless you are using a container." 
+                )
+            record = record[0]
+        if isinstance(self.data_model_type, VectorStoreModelPydanticProtocol):
+            try:
+                if not any(field.serialize_function is not None for field in self.data_model_definition.vector_fields):
+                    return self.data_model_type.model_validate(record)
+                for field in self.data_model_definition.vector_fields:
+                    if field.serialize_function:
+                        record[field.name] = field.serialize_function(record[field.name])
+                return self.data_model_type.model_validate(record)
+            except Exception as exc:
+                raise VectorStoreModelDeserializationException(f"Error deserializing record: {exc}") from exc
+        if isinstance(self.data_model_type, VectorStoreModelToDictFromDictProtocol):
+            try:
+                if not any(field.serialize_function is not None for field in self.data_model_definition.vector_fields):
+                    return self.data_model_type.from_dict(record)
+                for field in self.data_model_definition.vector_fields:
+                    if field.serialize_function:
+                        record[field.name] = field.serialize_function(record[field.name])
+                return self.data_model_type.from_dict(record)
+            except Exception as exc:
+                raise VectorStoreModelDeserializationException(f"Error deserializing record: {exc}") from exc
+        data_model_dict: dict[str, Any] = {}
+        for field_name in self.data_model_definition.fields:  # type: ignore
+            try:
+                value = record[field_name]
+                if func := getattr(self.data_model_definition.fields[field_name], "deserialize_function", None):
+                    value = func(value)
+                data_model_dict[field_name] = value
+            except KeyError as exc:
+                raise VectorStoreModelDeserializationException(
+                    f"Error deserializing record, not able to get: {field_name}"
+                ) from exc
+        if self.data_model_type is dict:
+            return data_model_dict  # type: ignore
+        return self.data_model_type(**data_model_dict)
+
+    # region Internal Functions
+
+    async def __aenter__(self):
+        """Enter the context manager."""
+        return self
+
+    async def __aexit__(self, *args):
+        """Exit the context manager."""
+        await self.close()
+
+    def __del__(self):
+        """Delete the instance."""
+        with contextlib.suppress(Exception):
+            asyncio.get_running_loop().create_task(self.close())
diff --git a/python/semantic_kernel/data/vector_store_record_fields.py b/python/semantic_kernel/data/vector_store_record_fields.py
new file mode 100644
index 000000000000..5f01be7022f4
--- /dev/null
+++ b/python/semantic_kernel/data/vector_store_record_fields.py
@@ -0,0 +1,81 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from abc import ABC
+from collections.abc import Callable
+from typing import Any
+
+from pydantic import Field
+from pydantic.dataclasses import dataclass
+
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+from semantic_kernel.data.const import DistanceFunction, IndexKind
+
+
+@dataclass
+class VectorStoreRecordField(ABC):
+    """Base class for all Vector Store Record Fields."""
+
+    name: str | None = None
+    property_type: str | None = None
+
+
+@dataclass
+class VectorStoreRecordKeyField(VectorStoreRecordField):
+    """Memory record key field."""
+
+
+@dataclass
+class VectorStoreRecordDataField(VectorStoreRecordField):
+    """Memory record data field."""
+
+    has_embedding: bool = False
+    embedding_property_name: str | None = None
+    is_filterable: bool | None = None
+    is_full_text_searchable: bool | None = None
+
+
+@dataclass
+class VectorStoreRecordVectorField(VectorStoreRecordField):
+    """Memory record vector field.
+
+    Most vector stores use a `list[float]` as the data type for vectors.
+    This is the default and all vector stores in SK use this internally.
+    But in your class you may want to use a numpy array or some other optimized type;
+    in order to support that,
+    you can set the deserialize_function to a function that takes a list of floats and returns the optimized type,
+    and then also supply a serialize_function that takes the optimized type and returns a list of floats.
+
+    For instance for numpy, that would be `serialize_function=np.ndarray.tolist` and `deserialize_function=np.array`
+    (with `import numpy as np` at the top of your file).
+    If you want to set it up with more specific options, use a lambda, a custom function or a partial.
+
+    Args:
+        property_type (str, optional): Property type.
+            For vectors this should be the inner type of the vector.
+            By default the vector will be a list of numbers.
+            If you want to use a numpy array or some other optimized format,
+            set the deserialize_function to a function
+            that takes a list of floats and returns a numpy array.
+        local_embedding (bool, optional): Whether to embed the vector locally. Defaults to True.
+        embedding_settings (dict[str, PromptExecutionSettings], optional): Embedding settings.
+            The key is the name of the embedding service to use, can be multiple ones.
+        serialize_function (Callable[[Any], list[float | int]], optional): Serialize function,
+            should take the vector and return a list of numbers.
+        deserialize_function (Callable[[list[float | int]], Any], optional): Deserialize function,
+            should take a list of numbers and return the vector.
+    """
+
+    local_embedding: bool = True
+    dimensions: int | None = None
+    index_kind: IndexKind | None = None
+    distance_function: DistanceFunction | None = None
+    embedding_settings: dict[str, PromptExecutionSettings] = Field(default_factory=dict)
+    serialize_function: Callable[[Any], list[float | int]] | None = None
+    deserialize_function: Callable[[list[float | int]], Any] | None = None
+
+
+__all__ = [
+    "VectorStoreRecordDataField",
+    "VectorStoreRecordKeyField",
+    "VectorStoreRecordVectorField",
+]
diff --git a/python/semantic_kernel/data/vector_store_record_utils.py b/python/semantic_kernel/data/vector_store_record_utils.py
new file mode 100644
index 000000000000..665605ddc630
--- /dev/null
+++ b/python/semantic_kernel/data/vector_store_record_utils.py
@@ -0,0 +1,83 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+
+from collections.abc import Callable
+from typing import TYPE_CHECKING, TypeVar
+
+from semantic_kernel.data.vector_store_record_fields import VectorStoreRecordDataField, VectorStoreRecordVectorField
+from semantic_kernel.exceptions.memory_connector_exceptions import VectorStoreModelException
+from semantic_kernel.kernel_types import OneOrMany
+
+if TYPE_CHECKING:
+    from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+    from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
+    from semantic_kernel.kernel import Kernel
+
+TModel = TypeVar("TModel", bound=object)
+
+
+class VectorStoreRecordUtils:
+    """Helper class to easily add embeddings to a (set of) vector store record."""
+
+    def __init__(self, kernel: "Kernel"):
+        """Initializes the VectorStoreRecordUtils with a kernel."""
+        self.kernel = kernel
+
+    async def add_vector_to_records(
+        self,
+        records: OneOrMany[TModel],
+        data_model_type: type[TModel] | None = None,
+        data_model_definition: "VectorStoreRecordDefinition | None" = None,
+        **kwargs,
+    ) -> OneOrMany[TModel]:
+        """Vectorize the vector record.
+
+        This function can be passed to upsert or upsert_batch of a VectorStoreRecordCollection.
+
+        Loops through the fields of the data model definition;
+        looks at data fields, and if they have a vector field,
+        looks up that vector field and checks if it is a local embedding.
+
+        If so, adds that to a list of embeddings to make.
+
+        Finally calls Kernel add_embedding_to_object with the list of embeddings to make.
+
+        Optional arguments are passed onto the Kernel add_embedding_to_object call.
+        """
+        # dict of embedding_field.name and tuple of record, settings, field_name
+        embeddings_to_make: list[tuple[str, str, dict[str, "PromptExecutionSettings"], Callable | None]] = []
+        if not data_model_definition:
+            data_model_definition = getattr(data_model_type, "__kernel_vectorstoremodel_definition__", None)
+        if not data_model_definition:
+            raise VectorStoreModelException(
+                "Data model definition is required, either directly or from the data model type."
+            )
+        for name, field in data_model_definition.fields.items():  # type: ignore
+            if (
+                not isinstance(field, VectorStoreRecordDataField)
+                or not field.has_embedding
+                or not field.embedding_property_name
+            ):
+                continue
+            embedding_field = data_model_definition.fields.get(field.embedding_property_name)
+            if not isinstance(embedding_field, VectorStoreRecordVectorField):
+                raise VectorStoreModelException("Embedding field must be a VectorStoreRecordVectorField")
+            if embedding_field.local_embedding:
+                embeddings_to_make.append((
+                    name,
+                    field.embedding_property_name,
+                    embedding_field.embedding_settings,
+                    embedding_field.deserialize_function,
+                ))
+
+        for field_to_embed, field_to_store, settings, cast_callable in embeddings_to_make:
+            await self.kernel.add_embedding_to_object(
+                inputs=records,
+                field_to_embed=field_to_embed,
+                field_to_store=field_to_store,
+                execution_settings=settings,
+                container_mode=data_model_definition.container_mode,
+                cast_function=cast_callable,
+                **kwargs,
+            )
+        return records
diff --git a/python/semantic_kernel/exceptions/__init__.py b/python/semantic_kernel/exceptions/__init__.py
index 3c3a43e419d0..3a5f22b87bf2 100644
--- a/python/semantic_kernel/exceptions/__init__.py
+++ b/python/semantic_kernel/exceptions/__init__.py
@@ -1,5 +1,6 @@
 # Copyright (c) Microsoft. All rights reserved.
 
+from semantic_kernel.exceptions.agent_exceptions import *  # noqa: F403
 from semantic_kernel.exceptions.content_exceptions import *  # noqa: F403
 from semantic_kernel.exceptions.function_exceptions import *  # noqa: F403
 from semantic_kernel.exceptions.kernel_exceptions import *  # noqa: F403
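# --- Editor's example (illustrative sketch, not part of the patch) ---
# This ties together the VectorStoreRecordUtils helper and the
# VectorStoreRecordCollection added earlier in this change. It assumes a
# Kernel with an embedding service registered and a collection for a
# decorated model such as the HotelRecord sketched above.
from semantic_kernel import Kernel
from semantic_kernel.data import VectorStoreRecordCollection, VectorStoreRecordUtils


async def embed_and_upsert(kernel: Kernel, collection: VectorStoreRecordCollection, records: list) -> None:
    # Fill every vector field whose data field sets has_embedding=True...
    records = await VectorStoreRecordUtils(kernel).add_vector_to_records(records, data_model_type=type(records[0]))
    # ...then write the records through the new record collection API.
    await collection.upsert_batch(records)
# --- End editor's example ---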
+ + +from semantic_kernel.exceptions.kernel_exceptions import KernelException + + +class AgentException(KernelException): + """Base class for all agent exceptions.""" + + pass + + +class AgentFileNotFoundException(AgentException): + """The requested file was not found.""" + + pass + + +class AgentInitializationException(AgentException): + """An error occurred while initializing the agent.""" + + pass + + +class AgentExecutionException(AgentException): + """An error occurred while executing the agent.""" + + pass + + +class AgentInvokeException(AgentException): + """An error occurred while invoking the agent.""" + + pass + + +class AgentChatException(AgentException): + """An error occurred while invoking the agent chat.""" + + pass diff --git a/python/semantic_kernel/exceptions/content_exceptions.py b/python/semantic_kernel/exceptions/content_exceptions.py index e37ef1bb9d88..d9c3f5aa10c5 100644 --- a/python/semantic_kernel/exceptions/content_exceptions.py +++ b/python/semantic_kernel/exceptions/content_exceptions.py @@ -4,26 +4,38 @@ class ContentException(KernelException): + """Base class for all content exceptions.""" + pass class ContentInitializationError(ContentException): + """An error occurred while initializing the content.""" + pass class ContentSerializationError(ContentException): + """An error occurred while serializing the content.""" + pass class ContentAdditionException(ContentException): + """An error occurred while adding content.""" + pass class FunctionCallInvalidNameException(ContentException): + """An error occurred while validating the function name.""" + pass class FunctionCallInvalidArgumentsException(ContentException): + """An error occurred while validating the function arguments.""" + pass diff --git a/python/semantic_kernel/exceptions/function_exceptions.py b/python/semantic_kernel/exceptions/function_exceptions.py index 954b54e58116..e9a3929a55a1 100644 --- a/python/semantic_kernel/exceptions/function_exceptions.py +++ b/python/semantic_kernel/exceptions/function_exceptions.py @@ -3,52 +3,76 @@ class FunctionException(KernelException): + """Base class for all function exceptions.""" + pass class FunctionSyntaxError(FunctionException): + """Base class for all function syntax exceptions.""" + pass class FunctionInitializationError(FunctionException): + """An error occurred while initializing the function.""" + def __init__(self, message: str): - """Raised when a KernelFunction fails to initialize.""" + """Adds the context of the error to the generic message.""" super().__init__("KernelFunction failed to initialize: " + message) class PluginInitializationError(FunctionException): + """An error occurred while initializing the plugin.""" + pass class PluginInvalidNameError(FunctionSyntaxError): + """An error occurred while validating the plugin name.""" + pass class FunctionInvalidNameError(FunctionSyntaxError): + """An error occurred while validating the function name.""" + pass class FunctionInvalidParamNameError(FunctionSyntaxError): + """An error occurred while validating the function parameter name.""" + pass class FunctionNameNotUniqueError(FunctionSyntaxError): + """An error occurred while validating the function name.""" + pass class FunctionExecutionException(FunctionException): + """Base class for all function execution exceptions.""" + pass class FunctionResultError(FunctionException): + """An error occurred while validating the function result.""" + pass class FunctionInvalidParameterConfiguration(FunctionException): + """An error occurred while 
validating the function parameter configuration.""" + pass class PromptRenderingException(FunctionException): + """An error occurred while rendering a prompt.""" + pass diff --git a/python/semantic_kernel/exceptions/kernel_exceptions.py b/python/semantic_kernel/exceptions/kernel_exceptions.py index e808a995c91b..7a4aed128183 100644 --- a/python/semantic_kernel/exceptions/kernel_exceptions.py +++ b/python/semantic_kernel/exceptions/kernel_exceptions.py @@ -11,34 +11,50 @@ class KernelException(Exception): + """The base class for all Semantic Kernel exceptions.""" + pass class KernelServiceNotFoundError(KernelException): + """Raised when a service is not found in the kernel.""" + pass class KernelPluginNotFoundError(KernelException): + """Raised when a plugin is not found in the kernel.""" + pass class KernelPluginInvalidConfigurationError(KernelException): + """Raised when a plugin configuration is invalid.""" + pass class KernelFunctionNotFoundError(KernelException): + """Raised when a function is not found in the kernel.""" + pass class KernelFunctionAlreadyExistsError(KernelException): + """Raised when a function is already registered in the kernel.""" + pass class KernelInvokeException(KernelException): + """Raised when an error occurs while invoking a function in the kernel.""" + pass class OperationCancelledException(KernelException): + """Raised when an operation is cancelled.""" + pass diff --git a/python/semantic_kernel/exceptions/memory_connector_exceptions.py b/python/semantic_kernel/exceptions/memory_connector_exceptions.py index b72a266762d2..0a94503aa414 100644 --- a/python/semantic_kernel/exceptions/memory_connector_exceptions.py +++ b/python/semantic_kernel/exceptions/memory_connector_exceptions.py @@ -5,14 +5,44 @@ class MemoryConnectorException(KernelException): + """Base class for all memory connector exceptions.""" + + pass + + +class VectorStoreModelException(MemoryConnectorException): + """Base class for all vector store model exceptions.""" + + pass + + +class VectorStoreModelSerializationException(VectorStoreModelException): + """An error occurred while serializing the vector store model.""" + + pass + + +class VectorStoreModelDeserializationException(VectorStoreModelException): + """An error occurred while deserializing the vector store model.""" + pass class MemoryConnectorInitializationError(MemoryConnectorException): + """An error occurred while initializing the memory connector.""" + pass class MemoryConnectorResourceNotFound(MemoryConnectorException): + """The requested resource was not found in the memory connector.""" + + pass + + +class VectorStoreModelValidationError(VectorStoreModelException): + """An error occurred while validating the vector store model.""" + pass @@ -20,4 +50,8 @@ class MemoryConnectorResourceNotFound(MemoryConnectorException): "MemoryConnectorException", "MemoryConnectorInitializationError", "MemoryConnectorResourceNotFound", + "VectorStoreModelDeserializationException", + "VectorStoreModelException", + "VectorStoreModelSerializationException", + "VectorStoreModelValidationError", ] diff --git a/python/semantic_kernel/exceptions/planner_exceptions.py b/python/semantic_kernel/exceptions/planner_exceptions.py index 790515a7e25f..423002aebb0b 100644 --- a/python/semantic_kernel/exceptions/planner_exceptions.py +++ b/python/semantic_kernel/exceptions/planner_exceptions.py @@ -5,26 +5,38 @@ class PlannerException(KernelException): + """Base class for all planner exceptions.""" + pass class PlannerExecutionException(PlannerException): + 
"""Base class for all planner execution exceptions.""" + pass class PlannerInvalidGoalError(PlannerException): + """An error occurred while validating the goal.""" + pass class PlannerInvalidPlanError(PlannerException): + """An error occurred while validating the plan.""" + pass class PlannerInvalidConfigurationError(PlannerException): + """An error occurred while validating the configuration.""" + pass class PlannerCreatePlanError(PlannerException): + """An error occurred while creating the plan.""" + pass diff --git a/python/semantic_kernel/exceptions/service_exceptions.py b/python/semantic_kernel/exceptions/service_exceptions.py index eeb75e03f8df..a8865c5b5926 100644 --- a/python/semantic_kernel/exceptions/service_exceptions.py +++ b/python/semantic_kernel/exceptions/service_exceptions.py @@ -5,42 +5,62 @@ class ServiceException(KernelException): + """Base class for all service exceptions.""" + pass class ServiceInitializationError(ServiceException): + """An error occurred while initializing the service.""" + pass class ServiceResponseException(ServiceException): + """Base class for all service response exceptions.""" + pass class ServiceInvalidAuthError(ServiceException): + """An error occurred while authenticating the service.""" + pass class ServiceInvalidTypeError(ServiceResponseException): + """An error occurred while validating the type of the service request.""" + pass class ServiceInvalidRequestError(ServiceResponseException): + """An error occurred while validating the request to the service.""" + pass class ServiceInvalidResponseError(ServiceResponseException): + """An error occurred while validating the response from the service.""" + pass class ServiceInvalidExecutionSettingsError(ServiceResponseException): + """An error occurred while validating the execution settings of the service.""" + pass class ServiceContentFilterException(ServiceResponseException): + """An error was raised by the content filter of the service.""" + pass class ServiceResourceNotFoundError(ServiceException): + """The request service could not be found.""" + pass diff --git a/python/semantic_kernel/exceptions/template_engine_exceptions.py b/python/semantic_kernel/exceptions/template_engine_exceptions.py index a35dde04bb5e..7fa85e512dba 100644 --- a/python/semantic_kernel/exceptions/template_engine_exceptions.py +++ b/python/semantic_kernel/exceptions/template_engine_exceptions.py @@ -5,20 +5,28 @@ class BlockException(KernelException): + """Base class for all block exceptions.""" + pass class BlockSyntaxError(BlockException): + """A invalid block syntax was found.""" + pass class BlockRenderException(BlockException): + """An error occurred while rendering a block.""" + pass class VarBlockSyntaxError(BlockSyntaxError): + """A invalid VarBlock syntax was found.""" + def __init__(self, content: str) -> None: - """Raised when the content of a VarBlock is invalid.""" + """Adds the context of the error to the generic message.""" super().__init__( f"A VarBlock starts with a '$' followed by at least one letter, \ number or underscore, anything else is invalid. 
\ @@ -27,12 +35,16 @@ def __init__(self, content: str) -> None: class VarBlockRenderError(BlockRenderException): + """An error occurred while rendering a VarBlock.""" + pass class ValBlockSyntaxError(BlockSyntaxError): + """An invalid ValBlock syntax was found.""" + def __init__(self, content: str) -> None: - """Raised when the content of a ValBlock is invalid.""" + """Adds the context of the error to the generic message.""" super().__init__( f"A ValBlock starts with a single or double quote followed by at least one letter, \ finishing with the same type of quote as the first one. \ @@ -41,8 +53,10 @@ def __init__(self, content: str) -> None: class NamedArgBlockSyntaxError(BlockSyntaxError): + """An invalid NamedArgBlock syntax was found.""" + def __init__(self, content: str) -> None: - """Raised when the content of a NamedArgBlock is invalid.""" + """Adds the context of the error to the generic message.""" super().__init__( f"A NamedArgBlock starts with a name (letters, numbers or underscore) \ followed by a single equal sign, then the value of the argument, \ @@ -53,8 +67,10 @@ def __init__(self, content: str) -> None: class FunctionIdBlockSyntaxError(BlockSyntaxError): + """An invalid FunctionIdBlock syntax was found.""" + def __init__(self, content: str) -> None: - """Raised when the content of a FunctionIdBlock is invalid.""" + """Adds the context of the error to the generic message.""" super().__init__( f"A FunctionIdBlock is composed of either a plugin name and \ function name separated by a single dot, or just a function name. \ @@ -64,38 +80,56 @@ def __init__(self, content: str) -> None: class CodeBlockSyntaxError(BlockSyntaxError): + """An invalid CodeBlock syntax was found.""" + pass class CodeBlockTokenError(BlockException): + """An error occurred while tokenizing a CodeBlock.""" + pass class CodeBlockRenderException(BlockRenderException): + """An error occurred while rendering a CodeBlock.""" + pass class TemplateSyntaxError(BlockSyntaxError): + """An invalid Template syntax was found.""" + pass class TemplateRenderException(BlockRenderException): + """An error occurred while rendering a Template.""" + pass class HandlebarsTemplateSyntaxError(BlockSyntaxError): + """An invalid HandlebarsTemplate syntax was found.""" + pass class HandlebarsTemplateRenderException(BlockRenderException): + """An error occurred while rendering a HandlebarsTemplate.""" + pass class Jinja2TemplateSyntaxError(BlockSyntaxError): + """An invalid Jinja2Template syntax was found.""" + pass class Jinja2TemplateRenderException(BlockRenderException): + """An error occurred while rendering a Jinja2Template.""" + pass diff --git a/python/semantic_kernel/functions/kernel_arguments.py b/python/semantic_kernel/functions/kernel_arguments.py index a997f339a854..f6fa8060fe71 100644 --- a/python/semantic_kernel/functions/kernel_arguments.py +++ b/python/semantic_kernel/functions/kernel_arguments.py @@ -9,6 +9,8 @@ class KernelArguments(dict): + """The arguments sent to the KernelFunction.""" + def __init__( self, settings: ( diff --git a/python/semantic_kernel/functions/kernel_function_extension.py b/python/semantic_kernel/functions/kernel_function_extension.py index 06acb0d846c0..bfd9be0e32ba 100644 --- a/python/semantic_kernel/functions/kernel_function_extension.py +++ b/python/semantic_kernel/functions/kernel_function_extension.py @@ -31,6 +31,8 @@ class KernelFunctionExtension(KernelBaseModel, ABC): + """Kernel function extension.""" + plugins: dict[str, KernelPlugin] = Field(default_factory=dict)
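
The block grammar spelled out in the VarBlock/ValBlock/NamedArgBlock/FunctionIdBlock messages above is easier to read by example; the snippets below are illustrative only (plugin, function, and variable names are made up):

    # Illustrative template snippets for the block grammar in the messages above.
    valid = [
        "{{$city}}",                        # VarBlock: '$' plus letters, numbers or underscores
        "{{'hello'}}",                      # ValBlock: opens and closes with the same quote type
        "{{weather.forecast}}",             # FunctionIdBlock: plugin and function, a single dot
        "{{weather.forecast city=$city}}",  # NamedArgBlock: name, '=', then a value or variable
    ]
    invalid = [
        "{{$city-name}}",   # VarBlockSyntaxError: '-' is not a letter, number or underscore
        "{{'hello\"}}",     # ValBlockSyntaxError: closing quote differs from the opening one
        "{{a.b.c}}",        # FunctionIdBlockSyntaxError: more than a single dot
    ]
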
@field_validator("plugins", mode="before") diff --git a/python/semantic_kernel/functions/kernel_function_metadata.py b/python/semantic_kernel/functions/kernel_function_metadata.py index 67427506bc21..e579ce994126 100644 --- a/python/semantic_kernel/functions/kernel_function_metadata.py +++ b/python/semantic_kernel/functions/kernel_function_metadata.py @@ -10,6 +10,8 @@ class KernelFunctionMetadata(KernelBaseModel): + """The kernel function metadata.""" + name: str = Field(..., pattern=FUNCTION_NAME_REGEX) plugin_name: str | None = Field(None, pattern=PLUGIN_NAME_REGEX) description: str | None = Field(default=None) diff --git a/python/semantic_kernel/functions/kernel_parameter_metadata.py b/python/semantic_kernel/functions/kernel_parameter_metadata.py index eeb08dac5f14..20d6151bbb0b 100644 --- a/python/semantic_kernel/functions/kernel_parameter_metadata.py +++ b/python/semantic_kernel/functions/kernel_parameter_metadata.py @@ -10,6 +10,8 @@ class KernelParameterMetadata(KernelBaseModel): + """The kernel parameter metadata.""" + name: str | None = Field(..., pattern=FUNCTION_PARAM_NAME_REGEX) description: str | None = Field(None) default_value: Any | None = None diff --git a/python/semantic_kernel/kernel.py b/python/semantic_kernel/kernel.py index da54baadb429..34540b0443e8 100644 --- a/python/semantic_kernel/kernel.py +++ b/python/semantic_kernel/kernel.py @@ -1,10 +1,11 @@ # Copyright (c) Microsoft. All rights reserved. import logging -from collections.abc import AsyncGenerator, AsyncIterable +from collections.abc import AsyncGenerator, AsyncIterable, Callable from copy import copy from typing import TYPE_CHECKING, Any, Literal, TypeVar +from semantic_kernel.connectors.ai.embeddings.embedding_generator_base import EmbeddingGeneratorBase from semantic_kernel.const import METADATA_EXCEPTION_KEY from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.function_call_content import FunctionCallContent @@ -18,6 +19,7 @@ OperationCancelledException, TemplateSyntaxError, ) +from semantic_kernel.exceptions.kernel_exceptions import KernelServiceNotFoundError from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import ( AutoFunctionInvocationContext, ) @@ -31,7 +33,7 @@ from semantic_kernel.functions.kernel_function_extension import KernelFunctionExtension from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt from semantic_kernel.functions.kernel_plugin import KernelPlugin -from semantic_kernel.kernel_types import AI_SERVICE_CLIENT_TYPE +from semantic_kernel.kernel_types import AI_SERVICE_CLIENT_TYPE, OneOrMany from semantic_kernel.prompt_template.const import KERNEL_TEMPLATE_FORMAT_NAME from semantic_kernel.reliability.kernel_reliability_extension import KernelReliabilityExtension from semantic_kernel.services.ai_service_selector import AIServiceSelector @@ -42,25 +44,31 @@ from semantic_kernel.connectors.ai.function_choice_behavior import ( FunctionChoiceBehavior, ) + from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.functions.kernel_function import KernelFunction T = TypeVar("T") +TDataModel = TypeVar("TDataModel") logger: logging.Logger = logging.getLogger(__name__) class Kernel(KernelFilterExtension, KernelFunctionExtension, KernelServicesExtension, KernelReliabilityExtension): - """The main Kernel class of Semantic Kernel. + """The Kernel of Semantic Kernel. - This is the main entry point for the Semantic Kernel. 
It provides the ability to run - semantic/native functions, and manage plugins, memory, and AI services. + This is the main entry point for Semantic Kernel. It provides the ability to run + functions and manage filters, plugins, and AI services. Attributes: - plugins (dict[str, KernelPlugin] | None): The plugins to be used by the kernel - services (dict[str, AIServiceClientBase]): The services to be used by the kernel - ai_service_selector (AIServiceSelector): The AI service selector to be used by the kernel - retry_mechanism (RetryMechanismBase): The retry mechanism to be used by the kernel + function_invocation_filters: Filters applied during function invocation, from KernelFilterExtension. + prompt_rendering_filters: Filters applied during prompt rendering, from KernelFilterExtension. + auto_function_invocation_filters: Filters applied during auto function invocation, from KernelFilterExtension. + plugins: A dict with the plugins registered with the Kernel, from KernelFunctionExtension. + services: A dict with the services registered with the Kernel, from KernelServicesExtension. + ai_service_selector: The AI service selector to be used by the kernel, from KernelServicesExtension. + retry_mechanism: The retry mechanism to be used by the kernel, from KernelReliabilityExtension. + """ def __init__( @@ -75,14 +83,11 @@ def __init__( """Initialize a new instance of the Kernel class. Args: - plugins (KernelPlugin | dict[str, KernelPlugin] | list[KernelPlugin] | None): - The plugins to be used by the kernel, will be rewritten to a dict with plugin name as key - services (AIServiceClientBase | list[AIServiceClientBase] | dict[str, AIServiceClientBase] | None): - The services to be used by the kernel, will be rewritten to a dict with service_id as key - ai_service_selector (AIServiceSelector | None): - The AI service selector to be used by the kernel, + plugins: The plugins to be used by the kernel, will be rewritten to a dict with plugin name as key + services: The services to be used by the kernel, will be rewritten to a dict with service_id as key + ai_service_selector: The AI service selector to be used by the kernel, default is based on order of execution settings. 
- **kwargs (Any): Additional fields to be passed to the Kernel model, + **kwargs: Additional fields to be passed to the Kernel model, these are limited to retry_mechanism and function_invoking_handlers and function_invoked_handlers, the best way to add function_invoking_handlers and function_invoked_handlers is to use the add_function_invoking_handler @@ -398,14 +403,12 @@ async def invoke_function_call( ) await stack(invocation_context) - if invocation_context.terminate: - return invocation_context - frc = FunctionResultContent.from_function_call_content_and_result( function_call_content=function_call, result=invocation_context.function_result ) chat_history.add_message(message=frc.to_chat_message_content()) - return None + + return invocation_context if invocation_context.terminate else None async def _inner_auto_function_invoke_handler(self, context: AutoFunctionInvocationContext): """Inner auto function invocation handler.""" @@ -421,3 +424,59 @@ async def _inner_auto_function_invoke_handler(self, context: AutoFunctionInvocat else: context.function_result = FunctionResult(function=context.function.metadata, value=value) return + + async def add_embedding_to_object( + self, + inputs: OneOrMany[TDataModel], + field_to_embed: str, + field_to_store: str, + execution_settings: dict[str, "PromptExecutionSettings"], + container_mode: bool = False, + cast_function: Callable[[list[float]], Any] | None = None, + **kwargs: Any, + ): + """Gather all fields to embed, batch the embedding generation and store.""" + contents: list[Any] = [] + dict_like = (getter := getattr(inputs, "get", False)) and callable(getter) + list_of_dicts: bool = False + if container_mode: + contents = inputs[field_to_embed].tolist() # type: ignore + elif isinstance(inputs, list): + list_of_dicts = (getter := getattr(inputs[0], "get", False)) and callable(getter) + for record in inputs: + if list_of_dicts: + contents.append(record.get(field_to_embed)) # type: ignore + else: + contents.append(getattr(record, field_to_embed)) + else: + if dict_like: + contents.append(inputs.get(field_to_embed)) # type: ignore + else: + contents.append(getattr(inputs, field_to_embed)) + vectors = None + service: EmbeddingGeneratorBase | None = None + for service_id, settings in execution_settings.items(): + service = self.get_service(service_id, type=EmbeddingGeneratorBase) # type: ignore + if service: + vectors = await service.generate_raw_embeddings(texts=contents, settings=settings, **kwargs) # type: ignore + break + if not service: + raise KernelServiceNotFoundError("No service found to generate embeddings.") + if vectors is None: + raise KernelInvokeException("No vectors were generated.") + if cast_function: + vectors = [cast_function(vector) for vector in vectors] + if container_mode: + inputs[field_to_store] = vectors # type: ignore + return + if isinstance(inputs, list): + for record, vector in zip(inputs, vectors): + if list_of_dicts: + record[field_to_store] = vector # type: ignore + else: + setattr(record, field_to_store, vector) + return + if dict_like: + inputs[field_to_store] = vectors[0] # type: ignore + return + setattr(inputs, field_to_store, vectors[0]) diff --git a/python/semantic_kernel/kernel_pydantic.py b/python/semantic_kernel/kernel_pydantic.py index e2bedb1c8f3f..0547f5d73b3a 100644 --- a/python/semantic_kernel/kernel_pydantic.py +++ b/python/semantic_kernel/kernel_pydantic.py @@ -3,7 +3,7 @@ from typing import Annotated, Any, ClassVar, TypeVar -from pydantic import BaseModel, ConfigDict, UrlConstraints +from pydantic 
import BaseModel, ConfigDict, Field, UrlConstraints from pydantic.networks import Url from pydantic_settings import BaseSettings, SettingsConfigDict @@ -35,8 +35,8 @@ class KernelBaseSettings(BaseSettings): """ env_prefix: ClassVar[str] = "" - env_file_path: str | None = None - env_file_encoding: str = "utf-8" + env_file_path: str | None = Field(None, exclude=True) + env_file_encoding: str = Field("utf-8", exclude=True) model_config = SettingsConfigDict( extra="ignore", diff --git a/python/semantic_kernel/kernel_types.py b/python/semantic_kernel/kernel_types.py index b94e97765d39..5bbfdb5fe3d6 100644 --- a/python/semantic_kernel/kernel_types.py +++ b/python/semantic_kernel/kernel_types.py @@ -1,7 +1,15 @@ # Copyright (c) Microsoft. All rights reserved. -from typing import TypeVar +from collections.abc import Sequence +from typing import TypeVar, Union from semantic_kernel.services.ai_service_client_base import AIServiceClientBase AI_SERVICE_CLIENT_TYPE = TypeVar("AI_SERVICE_CLIENT_TYPE", bound=AIServiceClientBase) + +T = TypeVar("T") + +OneOrMany = Union[T, Sequence[T]] +OptionalOneOrMany = Union[None, T, Sequence[T]] + +__all__ = ["AI_SERVICE_CLIENT_TYPE", "OneOrMany", "OptionalOneOrMany"] diff --git a/python/semantic_kernel/memory/memory_query_result.py b/python/semantic_kernel/memory/memory_query_result.py index 1147ee8c91aa..23467885a257 100644 --- a/python/semantic_kernel/memory/memory_query_result.py +++ b/python/semantic_kernel/memory/memory_query_result.py @@ -8,6 +8,8 @@ @experimental_class class MemoryQueryResult: + """The memory query result.""" + is_reference: bool external_source_name: str | None id: str diff --git a/python/semantic_kernel/memory/memory_record.py b/python/semantic_kernel/memory/memory_record.py index a6234605ad0b..877953a336cd 100644 --- a/python/semantic_kernel/memory/memory_record.py +++ b/python/semantic_kernel/memory/memory_record.py @@ -9,6 +9,8 @@ @experimental_class class MemoryRecord: + """The in-built memory record.""" + _key: str _timestamp: datetime | None _is_reference: bool diff --git a/python/semantic_kernel/memory/memory_store_base.py b/python/semantic_kernel/memory/memory_store_base.py index b1b695e81665..8a79472e1b00 100644 --- a/python/semantic_kernel/memory/memory_store_base.py +++ b/python/semantic_kernel/memory/memory_store_base.py @@ -10,6 +10,8 @@ @experimental_class class MemoryStoreBase(ABC): + """Base class for memory store.""" + async def __aenter__(self): """Enter the context manager.""" return self diff --git a/python/semantic_kernel/memory/null_memory.py b/python/semantic_kernel/memory/null_memory.py index c72e50835939..78fb88d74c42 100644 --- a/python/semantic_kernel/memory/null_memory.py +++ b/python/semantic_kernel/memory/null_memory.py @@ -7,6 +7,8 @@ @experimental_class class NullMemory(SemanticTextMemoryBase): + """Class for null memory.""" + async def save_information( self, collection: str, diff --git a/python/semantic_kernel/memory/semantic_text_memory.py b/python/semantic_kernel/memory/semantic_text_memory.py index 8ec629550be5..454727a8c987 100644 --- a/python/semantic_kernel/memory/semantic_text_memory.py +++ b/python/semantic_kernel/memory/semantic_text_memory.py @@ -14,6 +14,8 @@ @experimental_class class SemanticTextMemory(SemanticTextMemoryBase): + """Class for semantic text memory.""" + _storage: MemoryStoreBase = PrivateAttr() _embeddings_generator: EmbeddingGeneratorBase = PrivateAttr() diff --git a/python/semantic_kernel/memory/semantic_text_memory_base.py 
b/python/semantic_kernel/memory/semantic_text_memory_base.py index 95ebd9672580..74c4c48a67c9 100644 --- a/python/semantic_kernel/memory/semantic_text_memory_base.py +++ b/python/semantic_kernel/memory/semantic_text_memory_base.py @@ -14,6 +14,8 @@ @experimental_class class SemanticTextMemoryBase(KernelBaseModel): + """Base class for semantic text memory.""" + @abstractmethod async def save_information( self, diff --git a/python/semantic_kernel/memory/volatile_memory_store.py b/python/semantic_kernel/memory/volatile_memory_store.py index 13a207f3ce04..9b3ab4ccb65d 100644 --- a/python/semantic_kernel/memory/volatile_memory_store.py +++ b/python/semantic_kernel/memory/volatile_memory_store.py @@ -15,6 +15,8 @@ @experimental_class class VolatileMemoryStore(MemoryStoreBase): + """A volatile memory store that stores data in memory.""" + _store: dict[str, dict[str, MemoryRecord]] def __init__(self) -> None: diff --git a/python/semantic_kernel/planners/function_calling_stepwise_planner/function_calling_stepwise_planner.py b/python/semantic_kernel/planners/function_calling_stepwise_planner/function_calling_stepwise_planner.py index 45af8756adca..38ab0de2090d 100644 --- a/python/semantic_kernel/planners/function_calling_stepwise_planner/function_calling_stepwise_planner.py +++ b/python/semantic_kernel/planners/function_calling_stepwise_planner/function_calling_stepwise_planner.py @@ -53,6 +53,8 @@ class FunctionCallingStepwisePlanner(KernelBaseModel): + """A Function Calling Stepwise Planner.""" + service_id: str options: FunctionCallingStepwisePlannerOptions generate_plan_yaml: str @@ -277,9 +279,9 @@ async def _generate_plan( generate_plan_function = self._create_config_from_yaml(kernel) functions_manual = [ kernel_function_metadata_to_function_call_format(f) - for f in kernel.get_list_of_function_metadata( - {"excluded_functions": [f"{self.service_id}", "sequential_planner-create_plan"]} - ) + for f in kernel.get_list_of_function_metadata({ + "excluded_functions": [f"{self.service_id}", "sequential_planner-create_plan"] + }) ] generated_plan_args = KernelArguments( name_delimiter="-", diff --git a/python/semantic_kernel/planners/plan.py b/python/semantic_kernel/planners/plan.py index f0b55bf08865..663bb5ddef0d 100644 --- a/python/semantic_kernel/planners/plan.py +++ b/python/semantic_kernel/planners/plan.py @@ -22,6 +22,8 @@ class Plan: + """A plan for the kernel.""" + _state: KernelArguments = PrivateAttr() _steps: list["Plan"] = PrivateAttr() _function: KernelFunction = PrivateAttr() diff --git a/python/semantic_kernel/planners/planner_extensions.py b/python/semantic_kernel/planners/planner_extensions.py index b99cd58dd13e..06ac0c858250 100644 --- a/python/semantic_kernel/planners/planner_extensions.py +++ b/python/semantic_kernel/planners/planner_extensions.py @@ -15,6 +15,8 @@ class PlannerFunctionExtension: + """Function extension for the planner.""" + @staticmethod def to_manual_string(function: KernelFunctionMetadata): """Convert the function to a string that can be used in the manual.""" @@ -34,6 +36,8 @@ def to_embedding_string(function: KernelFunctionMetadata): class PlannerKernelExtension: + """Kernel extension for the planner.""" + PLANNER_MEMORY_COLLECTION_NAME = " Planning.KernelFunctionManual" PLAN_KERNEL_FUNCTIONS_ARE_REMEMBERED = "Planning.KernelFunctionsAreRemembered" diff --git a/python/semantic_kernel/planners/sequential_planner/sequential_planner.py b/python/semantic_kernel/planners/sequential_planner/sequential_planner.py index 963e7d12934d..0939c2517cf5 100644 --- 
a/python/semantic_kernel/planners/sequential_planner/sequential_planner.py +++ b/python/semantic_kernel/planners/sequential_planner/sequential_planner.py @@ -32,6 +32,8 @@ def read_file(file_path: str) -> str: class SequentialPlanner: + """Sequential planner class.""" + RESTRICTED_PLUGIN_NAME = "SequentialPlanner_Excluded" config: SequentialPlannerConfig diff --git a/python/semantic_kernel/planners/sequential_planner/sequential_planner_config.py b/python/semantic_kernel/planners/sequential_planner/sequential_planner_config.py index f3a1e63de3cc..b09b2722f8f9 100644 --- a/python/semantic_kernel/planners/sequential_planner/sequential_planner_config.py +++ b/python/semantic_kernel/planners/sequential_planner/sequential_planner_config.py @@ -4,6 +4,8 @@ class SequentialPlannerConfig: + """Configuration for the SequentialPlanner.""" + def __init__( self, relevancy_threshold: float | None = None, diff --git a/python/semantic_kernel/planners/sequential_planner/sequential_planner_extensions.py b/python/semantic_kernel/planners/sequential_planner/sequential_planner_extensions.py index b71b48943764..cec5446430f3 100644 --- a/python/semantic_kernel/planners/sequential_planner/sequential_planner_extensions.py +++ b/python/semantic_kernel/planners/sequential_planner/sequential_planner_extensions.py @@ -12,6 +12,8 @@ class SequentialPlannerFunctionExtension: + """Function extension for the sequential planner.""" + @staticmethod def to_manual_string(function: KernelFunctionMetadata): """Convert the function to a manual string.""" @@ -31,6 +33,8 @@ def to_embedding_string(function: KernelFunctionMetadata): class SequentialPlannerKernelExtension: + """Kernel extension for the sequential planner.""" + PLANNER_MEMORY_COLLECTION_NAME = " Planning.KernelFunctionManual" PLAN_KERNEL_FUNCTIONS_ARE_REMEMBERED = "Planning.KernelFunctionsAreRemembered" diff --git a/python/semantic_kernel/planners/sequential_planner/sequential_planner_parser.py b/python/semantic_kernel/planners/sequential_planner/sequential_planner_parser.py index 301b9483c163..e6808057211b 100644 --- a/python/semantic_kernel/planners/sequential_planner/sequential_planner_parser.py +++ b/python/semantic_kernel/planners/sequential_planner/sequential_planner_parser.py @@ -21,6 +21,8 @@ class SequentialPlanParser: + """Parser for Sequential planners.""" + @staticmethod def to_plan_from_xml( xml_string: str, diff --git a/python/semantic_kernel/prompt_template/kernel_prompt_template.py b/python/semantic_kernel/prompt_template/kernel_prompt_template.py index 499cada09b66..d8097d09ad64 100644 --- a/python/semantic_kernel/prompt_template/kernel_prompt_template.py +++ b/python/semantic_kernel/prompt_template/kernel_prompt_template.py @@ -2,7 +2,7 @@ import logging from html import escape -from typing import TYPE_CHECKING, Any, Optional +from typing import TYPE_CHECKING, Any from pydantic import PrivateAttr, field_validator @@ -25,19 +25,7 @@ class KernelPromptTemplate(PromptTemplateBase): - """Create a Kernel prompt template. - - Args: - prompt_template_config (PromptTemplateConfig): The prompt template configuration - This includes the actual template to use. - allow_dangerously_set_content (bool = False): Allow content without encoding throughout, this overrides - the same settings in the prompt template config and input variables. - This reverts the behavior to unencoded input. 
- - Raises: - ValueError: If the template format is not 'semantic-kernel' - TemplateSyntaxError: If the template has a syntax error - """ + """Create a Kernel prompt template.""" _blocks: list[Block] = PrivateAttr(default_factory=list) @@ -49,7 +37,7 @@ def validate_template_format(cls, v: "PromptTemplateConfig") -> "PromptTemplateC raise ValueError(f"Invalid prompt template format: {v.template_format}. Expected: semantic-kernel") return v - def model_post_init(self, __context: Any) -> None: + def model_post_init(self, _: Any) -> None: """Post init model.""" self._blocks = self.extract_blocks() # Add all of the existing input variables to our known set. We'll avoid adding any @@ -75,25 +63,20 @@ def model_post_init(self, __context: Any) -> None: # is a named arg block. self._add_if_missing(sub_block.variable.name, seen) - def _add_if_missing(self, variable_name: str, seen: set): - # Convert variable_name to lower case to handle case-insensitivity - if variable_name and variable_name.lower() not in seen: - seen.add(variable_name.lower()) - self.prompt_template_config.input_variables.append(InputVariable(name=variable_name)) - def extract_blocks(self) -> list[Block]: - """Given the prompt template, extract all the blocks (text, variables, function calls). - - Returns: - A list of all the blocks, ie the template tokenized in - text, variables and function calls - """ + """Given the prompt template, extract all the blocks (text, variables, function calls).""" logger.debug(f"Extracting blocks from template: {self.prompt_template_config.template}") if not self.prompt_template_config.template: return [] return TemplateTokenizer.tokenize(self.prompt_template_config.template) - async def render(self, kernel: "Kernel", arguments: Optional["KernelArguments"] = None) -> str: + def _add_if_missing(self, variable_name: str, seen: set): + # Convert variable_name to lower case to handle case-insensitivity + if variable_name and variable_name.lower() not in seen: + seen.add(variable_name.lower()) + self.prompt_template_config.input_variables.append(InputVariable(name=variable_name)) + + async def render(self, kernel: "Kernel", arguments: "KernelArguments | None" = None) -> str: """Render the prompt template. Using the prompt template, replace the variables with their values @@ -101,30 +84,35 @@ async def render(self, kernel: "Kernel", arguments: Optional["KernelArguments"] function result. Args: - kernel: The kernel instance - arguments: The kernel arguments + kernel ("Kernel"): The kernel to use for functions. + arguments ("KernelArguments | None"): The arguments to use for rendering. (Default value = None) Returns: - The prompt template ready to be used for an AI request + str: The prompt template ready to be used for an AI request + """ - if arguments is None: - arguments = KernelArguments() return await self.render_blocks(self._blocks, kernel, arguments) - async def render_blocks(self, blocks: list[Block], kernel: "Kernel", arguments: "KernelArguments") -> str: + async def render_blocks( + self, blocks: list[Block], kernel: "Kernel", arguments: "KernelArguments | None" = None + ) -> str: """Given a list of blocks render each block and compose the final result. 
- :param blocks: Template blocks generated by ExtractBlocks - :param context: Access into the current kernel execution context - :return: The prompt template ready to be used for an AI request + Args: + blocks (list[Block]): Template blocks generated by ExtractBlocks + kernel ("Kernel"): The kernel to use for functions + arguments ("KernelArguments | None"): The arguments to use for rendering (Default value = None) + + Returns: + str: The prompt template ready to be used for an AI request + """ from semantic_kernel.template_engine.protocols.code_renderer import CodeRenderer from semantic_kernel.template_engine.protocols.text_renderer import TextRenderer logger.debug(f"Rendering list of {len(blocks)} blocks") rendered_blocks: list[str] = [] - - arguments = self._get_trusted_arguments(arguments) + arguments = self._get_trusted_arguments(arguments or KernelArguments()) allow_unsafe_function_output = self._get_allow_dangerously_set_function_output() for block in blocks: if isinstance(block, TextRenderer): diff --git a/python/semantic_kernel/prompt_template/prompt_template_base.py b/python/semantic_kernel/prompt_template/prompt_template_base.py index fb67a3f37828..6cac84ba4693 100644 --- a/python/semantic_kernel/prompt_template/prompt_template_base.py +++ b/python/semantic_kernel/prompt_template/prompt_template_base.py @@ -14,6 +14,8 @@ class PromptTemplateBase(KernelBaseModel, ABC): + """Base class for prompt templates.""" + prompt_template_config: PromptTemplateConfig allow_dangerously_set_content: bool = False diff --git a/python/semantic_kernel/reliability/kernel_reliability_extension.py b/python/semantic_kernel/reliability/kernel_reliability_extension.py index 47d647c5026f..82a020cfdeff 100644 --- a/python/semantic_kernel/reliability/kernel_reliability_extension.py +++ b/python/semantic_kernel/reliability/kernel_reliability_extension.py @@ -13,4 +13,6 @@ class KernelReliabilityExtension(KernelBaseModel, ABC): + """Kernel reliability extension.""" + retry_mechanism: RetryMechanismBase = Field(default_factory=PassThroughWithoutRetry) diff --git a/python/semantic_kernel/reliability/retry_mechanism_base.py b/python/semantic_kernel/reliability/retry_mechanism_base.py index bc026e0c5235..49fe20d91c81 100644 --- a/python/semantic_kernel/reliability/retry_mechanism_base.py +++ b/python/semantic_kernel/reliability/retry_mechanism_base.py @@ -11,6 +11,8 @@ class RetryMechanismBase(ABC): + """Base class for retry mechanisms.""" + @abstractmethod async def execute_with_retry(self, action: Callable[[], Awaitable[T]]) -> Awaitable[T]: """Executes the given action with retry logic. diff --git a/python/semantic_kernel/schema/kernel_json_schema_builder.py b/python/semantic_kernel/schema/kernel_json_schema_builder.py index c6ae261a6eab..438833fda6dc 100644 --- a/python/semantic_kernel/schema/kernel_json_schema_builder.py +++ b/python/semantic_kernel/schema/kernel_json_schema_builder.py @@ -31,6 +31,8 @@ class KernelJsonSchemaBuilder: + """Kernel JSON schema builder.""" + @classmethod def build(cls, parameter_type: type | str, description: str | None = None) -> dict[str, Any]: """Builds the JSON schema for a given parameter type and description. 
@@ -202,7 +204,7 @@ def build_enum_schema(cls, enum_type: type, description: str | None = None) -> d """ if not issubclass(enum_type, Enum): raise FunctionInvalidParameterConfiguration(f"{enum_type} is not a valid Enum type") - + try: enum_values = [item.value for item in enum_type] except TypeError as ex: diff --git a/python/semantic_kernel/services/ai_service_client_base.py b/python/semantic_kernel/services/ai_service_client_base.py index 7eadc8d5f52b..2f3b1ff22fdb 100644 --- a/python/semantic_kernel/services/ai_service_client_base.py +++ b/python/semantic_kernel/services/ai_service_client_base.py @@ -1,13 +1,15 @@ # Copyright (c) Microsoft. All rights reserved. from abc import ABC -from typing import Annotated +from typing import TYPE_CHECKING, Annotated from pydantic import Field, StringConstraints -from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.kernel_pydantic import KernelBaseModel +if TYPE_CHECKING: + from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings + class AIServiceClientBase(KernelBaseModel, ABC): """Base class for all AI Services. @@ -30,18 +32,22 @@ def model_post_init(self, __context: object | None = None): # Override this in subclass to return the proper prompt execution type the # service is expecting. - def get_prompt_execution_settings_class(self) -> type[PromptExecutionSettings]: + def get_prompt_execution_settings_class(self) -> type["PromptExecutionSettings"]: """Get the request settings class.""" + from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings + return PromptExecutionSettings - def instantiate_prompt_execution_settings(self, **kwargs) -> PromptExecutionSettings: + def instantiate_prompt_execution_settings(self, **kwargs) -> "PromptExecutionSettings": """Create a request settings object. All arguments are passed to the constructor of the request settings object. """ return self.get_prompt_execution_settings_class()(**kwargs) - def get_prompt_execution_settings_from_settings(self, settings: PromptExecutionSettings) -> PromptExecutionSettings: + def get_prompt_execution_settings_from_settings( + self, settings: "PromptExecutionSettings" + ) -> "PromptExecutionSettings": """Get the request settings from a settings object.""" prompt_execution_settings_type = self.get_prompt_execution_settings_class() if isinstance(settings, prompt_execution_settings_type): diff --git a/python/semantic_kernel/services/kernel_services_extension.py b/python/semantic_kernel/services/kernel_services_extension.py index 094d1b5ceab6..13b6aea5e3ae 100644 --- a/python/semantic_kernel/services/kernel_services_extension.py +++ b/python/semantic_kernel/services/kernel_services_extension.py @@ -23,6 +23,11 @@ class KernelServicesExtension(KernelBaseModel, ABC): + """Kernel services extension. + + Adds all service-related entities to the Kernel.
+ """ + services: dict[str, AIServiceClientBase] = Field(default_factory=dict) ai_service_selector: AIServiceSelector = Field(default_factory=AIServiceSelector) diff --git a/python/semantic_kernel/template_engine/blocks/block.py b/python/semantic_kernel/template_engine/blocks/block.py index 25539ea538f1..b0953e130a4b 100644 --- a/python/semantic_kernel/template_engine/blocks/block.py +++ b/python/semantic_kernel/template_engine/blocks/block.py @@ -12,6 +12,8 @@ class Block(KernelBaseModel): + """A block.""" + type: ClassVar[BlockTypes] = BlockTypes.UNDEFINED content: str diff --git a/python/semantic_kernel/template_engine/blocks/block_types.py b/python/semantic_kernel/template_engine/blocks/block_types.py index a903420047a7..525082b8abb6 100644 --- a/python/semantic_kernel/template_engine/blocks/block_types.py +++ b/python/semantic_kernel/template_engine/blocks/block_types.py @@ -4,6 +4,8 @@ class BlockTypes(Enum): + """Block types.""" + UNDEFINED = auto() TEXT = auto() CODE = auto() diff --git a/python/semantic_kernel/template_engine/blocks/symbols.py b/python/semantic_kernel/template_engine/blocks/symbols.py index 7b5a77ee3ef1..595a92167f8c 100644 --- a/python/semantic_kernel/template_engine/blocks/symbols.py +++ b/python/semantic_kernel/template_engine/blocks/symbols.py @@ -3,6 +3,8 @@ class Symbols(str, Enum): + """Symbols used in the template engine.""" + BLOCK_STARTER = "{" BLOCK_ENDER = "}" diff --git a/python/semantic_kernel/template_engine/blocks/text_block.py b/python/semantic_kernel/template_engine/blocks/text_block.py index a21af9318486..dad3c28433d1 100644 --- a/python/semantic_kernel/template_engine/blocks/text_block.py +++ b/python/semantic_kernel/template_engine/blocks/text_block.py @@ -16,6 +16,8 @@ class TextBlock(Block): + """A block with text content.""" + type: ClassVar[BlockTypes] = BlockTypes.TEXT @field_validator("content", mode="before") diff --git a/python/semantic_kernel/template_engine/code_tokenizer.py b/python/semantic_kernel/template_engine/code_tokenizer.py index fc494feffd78..7b4ebb9c4a2a 100644 --- a/python/semantic_kernel/template_engine/code_tokenizer.py +++ b/python/semantic_kernel/template_engine/code_tokenizer.py @@ -23,6 +23,8 @@ # [function-call] ::= [function-id] | [function-id] [parameter] # [parameter] ::= [variable] | [value] class CodeTokenizer: + """Tokenize the code text into blocks.""" + @staticmethod def tokenize(text: str) -> list[Block]: """Tokenize the code text into blocks.""" diff --git a/python/semantic_kernel/template_engine/template_tokenizer.py b/python/semantic_kernel/template_engine/template_tokenizer.py index a2770a12dc9a..20cdee944aad 100644 --- a/python/semantic_kernel/template_engine/template_tokenizer.py +++ b/python/semantic_kernel/template_engine/template_tokenizer.py @@ -22,6 +22,8 @@ # [text-block] ::= [any-char] | [any-char] [text-block] # [any-char] ::= any char class TemplateTokenizer: + """Tokenize the template text into blocks.""" + @staticmethod def tokenize(text: str) -> list[Block]: """Tokenize the template text into blocks.""" diff --git a/python/tests/conftest.py b/python/tests/conftest.py index bff9b5eae24c..692c9c759ab1 100644 --- a/python/tests/conftest.py +++ b/python/tests/conftest.py @@ -1,13 +1,10 @@ # Copyright (c) Microsoft. All rights reserved. 
-import warnings from collections.abc import Callable from typing import TYPE_CHECKING from unittest.mock import MagicMock -import pytest - -from semantic_kernel.contents.function_call_content import FunctionCallContent +from pytest import fixture if TYPE_CHECKING: from semantic_kernel.contents.chat_history import ChatHistory @@ -17,40 +14,40 @@ from semantic_kernel.services.ai_service_client_base import AIServiceClientBase -@pytest.fixture(scope="function") +@fixture(scope="function") def kernel() -> "Kernel": from semantic_kernel.kernel import Kernel return Kernel() -@pytest.fixture(scope="session") +@fixture(scope="session") def service() -> "AIServiceClientBase": from semantic_kernel.services.ai_service_client_base import AIServiceClientBase return AIServiceClientBase(service_id="service", ai_model_id="ai_model_id") -@pytest.fixture(scope="session") +@fixture(scope="session") def default_service() -> "AIServiceClientBase": from semantic_kernel.services.ai_service_client_base import AIServiceClientBase return AIServiceClientBase(service_id="default", ai_model_id="ai_model_id") -@pytest.fixture(scope="function") +@fixture(scope="function") def kernel_with_service(kernel: "Kernel", service: "AIServiceClientBase") -> "Kernel": kernel.add_service(service) return kernel -@pytest.fixture(scope="function") +@fixture(scope="function") def kernel_with_default_service(kernel: "Kernel", default_service: "AIServiceClientBase") -> "Kernel": kernel.add_service(default_service) return kernel -@pytest.fixture(scope="session") +@fixture(scope="session") def not_decorated_native_function() -> Callable: def not_decorated_native_function(arg1: str) -> str: return "test" @@ -58,7 +55,7 @@ def not_decorated_native_function(arg1: str) -> str: return not_decorated_native_function -@pytest.fixture(scope="session") +@fixture(scope="session") def decorated_native_function() -> Callable: from semantic_kernel.functions.kernel_function_decorator import kernel_function @@ -69,7 +66,7 @@ def decorated_native_function(arg1: str) -> str: return decorated_native_function -@pytest.fixture(scope="session") +@fixture(scope="session") def custom_plugin_class(): from semantic_kernel.functions.kernel_function_decorator import kernel_function @@ -81,7 +78,7 @@ def decorated_native_function(self) -> str: return CustomPlugin -@pytest.fixture(scope="session") +@fixture(scope="session") def experimental_plugin_class(): from semantic_kernel.functions.kernel_function_decorator import kernel_function from semantic_kernel.utils.experimental_decorator import experimental_class @@ -95,7 +92,7 @@ def decorated_native_function(self) -> str: return ExperimentalPlugin -@pytest.fixture(scope="session") +@fixture(scope="session") def create_mock_function() -> Callable: from semantic_kernel.contents.streaming_text_content import StreamingTextContent from semantic_kernel.functions.function_result import FunctionResult @@ -137,8 +134,10 @@ async def _invoke_internal(self, context: "FunctionInvocationContext"): return create_mock_function -@pytest.fixture(scope="function") +@fixture(scope="function") def get_tool_call_mock(): + from semantic_kernel.contents.function_call_content import FunctionCallContent + tool_call_mock = MagicMock(spec=FunctionCallContent) tool_call_mock.split_name_dict.return_value = {"arg_name": "arg_value"} tool_call_mock.to_kernel_arguments.return_value = {"arg_name": "arg_value"} @@ -155,64 +154,64 @@ def get_tool_call_mock(): return tool_call_mock -@pytest.fixture(scope="function") +@fixture(scope="function") def 
chat_history() -> "ChatHistory": from semantic_kernel.contents.chat_history import ChatHistory return ChatHistory() -@pytest.fixture(autouse=True) -def enable_debug_mode(): - """Set `autouse=True` to enable easy debugging for tests. - - How to debug: - 1. Ensure [snoop](https://github.com/alexmojaki/snoop) is installed - (`pip install snoop`). - 2. If you're doing print based debugging, use `pr` instead of `print`. - That is, convert `print(some_var)` to `pr(some_var)`. - 3. If you want a trace of a particular functions calls, just add `ss()` as the first - line of the function. - - Note: - ---- - It's completely fine to leave `autouse=True` in the fixture. It doesn't affect - the tests unless you use `pr` or `ss` in any test. - - Note: - ---- - When you use `ss` or `pr` in a test, pylance or mypy will complain. This is - because they don't know that we're adding these functions to the builtins. The - tests will run fine though. - """ - import builtins - - try: - import snoop - except ImportError: - warnings.warn( - "Install snoop to enable trace debugging. `pip install snoop`", - ImportWarning, - ) - return - - builtins.ss = snoop.snoop(depth=4).__enter__ - builtins.pr = snoop.pp - - -@pytest.fixture +# @fixture(autouse=True) +# def enable_debug_mode(): +# """Set `autouse=True` to enable easy debugging for tests. + +# How to debug: +# 1. Ensure [snoop](https://github.com/alexmojaki/snoop) is installed +# (`pip install snoop`). +# 2. If you're doing print based debugging, use `pr` instead of `print`. +# That is, convert `print(some_var)` to `pr(some_var)`. +# 3. If you want a trace of a particular functions calls, just add `ss()` as the first +# line of the function. + +# Note: +# ---- +# It's completely fine to leave `autouse=True` in the fixture. It doesn't affect +# the tests unless you use `pr` or `ss` in any test. + +# Note: +# ---- +# When you use `ss` or `pr` in a test, pylance or mypy will complain. This is +# because they don't know that we're adding these functions to the builtins. The +# tests will run fine though. +# """ +# import builtins + +# try: +# import snoop +# except ImportError: +# warnings.warn( +# "Install snoop to enable trace debugging. 
`pip install snoop`", +# ImportWarning, +# ) +# return + +# builtins.ss = snoop.snoop(depth=4).__enter__ +# builtins.pr = snoop.pp + + +@fixture def exclude_list(request): """Fixture that returns a list of environment variables to exclude.""" return request.param if hasattr(request, "param") else [] -@pytest.fixture +@fixture def override_env_param_dict(request): """Fixture that returns a dict of environment variables to override.""" return request.param if hasattr(request, "param") else {} -@pytest.fixture() +@fixture() def azure_openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): """Fixture to set environment variables for AzureOpenAISettings.""" if exclude_list is None: @@ -225,6 +224,7 @@ def azure_openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dic "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME": "test_chat_deployment", "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME": "test_text_deployment", "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME": "test_embedding_deployment", + "AZURE_OPENAI_TEXT_TO_IMAGE_DEPLOYMENT_NAME": "test_text_to_image_deployment", "AZURE_OPENAI_API_KEY": "test_api_key", "AZURE_OPENAI_ENDPOINT": "https://test-endpoint.com", "AZURE_OPENAI_API_VERSION": "2023-03-15-preview", @@ -242,7 +242,7 @@ def azure_openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dic return env_vars -@pytest.fixture() +@fixture() def openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): """Fixture to set environment variables for OpenAISettings.""" if exclude_list is None: @@ -257,6 +257,7 @@ def openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): "OPENAI_CHAT_MODEL_ID": "test_chat_model_id", "OPENAI_TEXT_MODEL_ID": "test_text_model_id", "OPENAI_EMBEDDING_MODEL_ID": "test_embedding_model_id", + "OPENAI_TEXT_TO_IMAGE_MODEL_ID": "test_text_to_image_model_id", } env_vars.update(override_env_param_dict) @@ -270,7 +271,7 @@ def openai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): return env_vars -@pytest.fixture() +@fixture() def mistralai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): """Fixture to set environment variables for MistralAISettings.""" if exclude_list is None: @@ -279,7 +280,36 @@ def mistralai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): if override_env_param_dict is None: override_env_param_dict = {} - env_vars = {"MISTRALAI_CHAT_MODEL_ID": "test_chat_model_id", "MISTRALAI_API_KEY": "test_api_key"} + env_vars = { + "MISTRALAI_CHAT_MODEL_ID": "test_chat_model_id", + "MISTRALAI_API_KEY": "test_api_key", + "MISTRALAI_EMBEDDING_MODEL_ID": "test_embedding_model_id", + } + + env_vars.update(override_env_param_dict) + + for key, value in env_vars.items(): + if key not in exclude_list: + monkeypatch.setenv(key, value) + else: + monkeypatch.delenv(key, raising=False) + + return env_vars + + +@fixture() +def anthropic_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): + """Fixture to set environment variables for AnthropicSettings.""" + if exclude_list is None: + exclude_list = [] + + if override_env_param_dict is None: + override_env_param_dict = {} + + env_vars = { + "ANTHROPIC_CHAT_MODEL_ID": "test_chat_model_id", + "ANTHROPIC_API_KEY": "test_api_key" + } env_vars.update(override_env_param_dict) @@ -292,7 +322,7 @@ def mistralai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): return env_vars -@pytest.fixture() +@fixture() def aca_python_sessions_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): """Fixture to set 
environment variables for ACA Python Unit Tests.""" if exclude_list is None: @@ -316,7 +346,7 @@ def aca_python_sessions_unit_test_env(monkeypatch, exclude_list, override_env_pa return env_vars -@pytest.fixture() +@fixture() def azure_ai_search_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): """Fixture to set environment variables for Azure AI Search Unit Tests.""" if exclude_list is None: @@ -342,7 +372,7 @@ def azure_ai_search_unit_test_env(monkeypatch, exclude_list, override_env_param_ return env_vars -@pytest.fixture() +@fixture() def bing_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): """Fixture to set environment variables for BingConnector.""" if exclude_list is None: @@ -367,7 +397,7 @@ def bing_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): return env_vars -@pytest.fixture() +@fixture() def google_search_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): """Fixture to set environment variables for the Google Search Connector.""" if exclude_list is None: @@ -390,3 +420,47 @@ def google_search_unit_test_env(monkeypatch, exclude_list, override_env_param_di monkeypatch.delenv(key, raising=False) return env_vars + + +@fixture +def qdrant_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): + """Fixture to set environment variables for QdrantConnector.""" + if exclude_list is None: + exclude_list = [] + + if override_env_param_dict is None: + override_env_param_dict = {} + + env_vars = {"QDRANT_LOCATION": "http://localhost:6333"} + + env_vars.update(override_env_param_dict) + + for key, value in env_vars.items(): + if key not in exclude_list: + monkeypatch.setenv(key, value) + else: + monkeypatch.delenv(key, raising=False) + + return env_vars + + +@fixture +def redis_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): + """Fixture to set environment variables for Redis.""" + if exclude_list is None: + exclude_list = [] + + if override_env_param_dict is None: + override_env_param_dict = {} + + env_vars = {"REDIS_CONNECTION_STRING": "redis://localhost:6379"} + + env_vars.update(override_env_param_dict) + + for key, value in env_vars.items(): + if key not in exclude_list: + monkeypatch.setenv(key, value) + else: + monkeypatch.delenv(key, raising=False) + + return env_vars diff --git a/python/tests/integration/completions/chat_completion_test_base.py b/python/tests/integration/completions/chat_completion_test_base.py new file mode 100644 index 000000000000..614928720301 --- /dev/null +++ b/python/tests/integration/completions/chat_completion_test_base.py @@ -0,0 +1,152 @@ +# Copyright (c) Microsoft. All rights reserved.
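
The env-var fixtures above all share one pattern: defaults, an override dict, and an exclusion list, driven through pytest's indirect parametrization. An illustrative test (not part of the patch; the override value is made up):

    # Illustrative use of the env fixtures via indirect parametrization.
    import pytest

    @pytest.mark.parametrize(
        "override_env_param_dict", [{"REDIS_CONNECTION_STRING": "redis://other-host:6379"}], indirect=True
    )
    def test_redis_override(redis_unit_test_env):
        # The fixture applied the override on top of its default value.
        assert redis_unit_test_env["REDIS_CONNECTION_STRING"] == "redis://other-host:6379"
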
+ + +import os +import sys +from functools import reduce +from typing import Any + +import pytest +from azure.ai.inference.aio import ChatCompletionsClient +from azure.core.credentials import AzureKeyCredential +from openai import AsyncAzureOpenAI + +from semantic_kernel.connectors.ai.azure_ai_inference.azure_ai_inference_prompt_execution_settings import ( + AzureAIInferenceChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.azure_ai_inference.services.azure_ai_inference_chat_completion import ( + AzureAIInferenceChatCompletion, +) +from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase +from semantic_kernel.connectors.ai.google.google_ai.google_ai_prompt_execution_settings import ( + GoogleAIChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.google.google_ai.services.google_ai_chat_completion import GoogleAIChatCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_chat_completion import VertexAIChatCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAIChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.mistral_ai.prompt_execution_settings.mistral_ai_prompt_execution_settings import ( + MistralAIChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.mistral_ai.services.mistral_ai_chat_completion import MistralAIChatCompletion +from semantic_kernel.connectors.ai.ollama.ollama_prompt_execution_settings import OllamaChatPromptExecutionSettings +from semantic_kernel.connectors.ai.ollama.services.ollama_chat_completion import OllamaChatCompletion +from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import ( + AzureChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import ( + OpenAIChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion +from semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion import OpenAIChatCompletion +from semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings import AzureOpenAISettings +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.core_plugins.math_plugin import MathPlugin +from semantic_kernel.kernel import Kernel +from tests.integration.completions.completion_test_base import CompletionTestBase, ServiceType + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + +mistral_ai_setup: bool = False +try: + if os.environ["MISTRALAI_API_KEY"] and os.environ["MISTRALAI_CHAT_MODEL_ID"]: + mistral_ai_setup = True +except KeyError: + mistral_ai_setup = False + +ollama_setup: bool = False +try: + if os.environ["OLLAMA_MODEL"]: + ollama_setup = True +except KeyError: + ollama_setup = False + + +class ChatCompletionTestBase(CompletionTestBase): + """Base class for testing completion services.""" + + @override + @pytest.fixture(scope="class") + def services(self) -> dict[str, tuple[ServiceType, type[PromptExecutionSettings]]]: + azure_openai_settings = AzureOpenAISettings.create() + endpoint = azure_openai_settings.endpoint + deployment_name = azure_openai_settings.chat_deployment_name + api_key = 
azure_openai_settings.api_key.get_secret_value()
+        api_version = azure_openai_settings.api_version
+        azure_custom_client = AzureChatCompletion(
+            async_client=AsyncAzureOpenAI(
+                azure_endpoint=endpoint,
+                azure_deployment=deployment_name,
+                api_key=api_key,
+                api_version=api_version,
+                default_headers={"Test-User-X-ID": "test"},
+            ),
+        )
+        azure_ai_inference_client = AzureAIInferenceChatCompletion(
+            ai_model_id=deployment_name,
+            client=ChatCompletionsClient(
+                endpoint=f'{str(endpoint).strip("/")}/openai/deployments/{deployment_name}',
+                credential=AzureKeyCredential(""),
+                headers={"api-key": api_key},
+            ),
+        )
+
+        return {
+            "openai": (OpenAIChatCompletion(), OpenAIChatPromptExecutionSettings),
+            "azure": (AzureChatCompletion(), AzureChatPromptExecutionSettings),
+            "azure_custom_client": (azure_custom_client, AzureChatPromptExecutionSettings),
+            "azure_ai_inference": (azure_ai_inference_client, AzureAIInferenceChatPromptExecutionSettings),
+            "mistral_ai": (
+                MistralAIChatCompletion() if mistral_ai_setup else None,
+                MistralAIChatPromptExecutionSettings,
+            ),
+            "ollama": (OllamaChatCompletion() if ollama_setup else None, OllamaChatPromptExecutionSettings),
+            "google_ai": (GoogleAIChatCompletion(), GoogleAIChatPromptExecutionSettings),
+            "vertex_ai": (VertexAIChatCompletion(), VertexAIChatPromptExecutionSettings),
+        }
+
+    def setup(self, kernel: Kernel):
+        """Set up the kernel with the plugin used by the tests."""
+        kernel.add_plugin(MathPlugin(), plugin_name="math")
+
+    async def get_chat_completion_response(
+        self,
+        kernel: Kernel,
+        service: ChatCompletionClientBase,
+        execution_settings: PromptExecutionSettings,
+        chat_history: ChatHistory,
+        stream: bool,
+    ) -> Any:
+        """Get a response from the service.
+
+        Args:
+            kernel (Kernel): Kernel instance.
+            service (ChatCompletionClientBase): Chat completion service.
+            execution_settings (PromptExecutionSettings): Execution settings.
+            chat_history (ChatHistory): Chat history.
+            stream (bool): Stream flag.
+        """
+        if stream:
+            response = service.get_streaming_chat_message_content(
+                chat_history,
+                execution_settings,
+                kernel=kernel,
+            )
+            parts = [part async for part in response]
+            if parts:
+                response = reduce(lambda p, r: p + r, parts)
+            else:
+                raise AssertionError("No response")
+        else:
+            response = await service.get_chat_message_content(
+                chat_history,
+                execution_settings,
+                kernel=kernel,
+            )
+
+        return response
diff --git a/python/tests/integration/completions/completion_test_base.py b/python/tests/integration/completions/completion_test_base.py
new file mode 100644
index 000000000000..96a84ef180bf
--- /dev/null
+++ b/python/tests/integration/completions/completion_test_base.py
@@ -0,0 +1,70 @@
+# Copyright (c) Microsoft. All rights reserved.
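# A small illustration (an assumption about the content types, not code from
# this PR): the streaming branch above folds chunks with functools.reduce
# because streaming message content overloads `+` to concatenate. Plain
# strings behave the same way, which makes the folding logic easy to see in
# isolation.
from functools import reduce

chunks = ["The answer ", "is ", "348."]  # stand-ins for streamed message chunks
combined = reduce(lambda first, second: first + second, chunks)
assert combined == "The answer is 348."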
+
+from typing import Any, Union
+
+from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase
+from semantic_kernel.contents.chat_message_content import ChatMessageContent
+from semantic_kernel.kernel import Kernel
+
+ServiceType = Union[ChatCompletionClientBase, TextCompletionClientBase]
+
+
+class CompletionTestBase:
+    """Base class for testing completion services."""
+
+    def services(self) -> dict[str, tuple[ServiceType, type[PromptExecutionSettings]]]:
+        """Return completion services."""
+        raise NotImplementedError
+
+    async def test_completion(
+        self,
+        kernel: Kernel,
+        service_id: str,
+        services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]],
+        execution_settings_kwargs: dict[str, Any],
+        inputs: list[str | ChatMessageContent | list[ChatMessageContent]],
+        kwargs: dict[str, Any],
+    ) -> None:
+        """Test completion service (non-streaming).
+
+        Args:
+            kernel (Kernel): Kernel instance.
+            service_id (str): Service name.
+            services (dict[str, tuple[ServiceType, type[PromptExecutionSettings]]]): Completion services.
+            execution_settings_kwargs (dict[str, Any]): Execution settings keyword arguments.
+            inputs (list[str | ChatMessageContent | list[ChatMessageContent]]): List of inputs.
+            kwargs (dict[str, Any]): Keyword arguments.
+        """
+        raise NotImplementedError
+
+    async def test_streaming_completion(
+        self,
+        kernel: Kernel,
+        service_id: str,
+        services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]],
+        execution_settings_kwargs: dict[str, Any],
+        inputs: list[str | ChatMessageContent | list[ChatMessageContent]],
+        kwargs: dict[str, Any],
+    ):
+        """Test completion service (streaming).
+
+        Args:
+            kernel (Kernel): Kernel instance.
+            service_id (str): Service name.
+            services (dict[str, tuple[ServiceType, type[PromptExecutionSettings]]]): Completion services.
+            execution_settings_kwargs (dict[str, Any]): Execution settings keyword arguments.
+            inputs (list[str | ChatMessageContent | list[ChatMessageContent]]): List of inputs.
+            kwargs (dict[str, Any]): Keyword arguments.
+        """
+        raise NotImplementedError
+
+    def evaluate(self, test_target: Any, **kwargs):
+        """Evaluate the response.
+
+        Args:
+            test_target (Any): Test target.
+            kwargs (dict[str, Any]): Keyword arguments.
+        """
+        raise NotImplementedError
diff --git a/python/tests/integration/completions/test_chat_completion_with_function_calling.py b/python/tests/integration/completions/test_chat_completion_with_function_calling.py
new file mode 100644
index 000000000000..47cdfaa7294f
--- /dev/null
+++ b/python/tests/integration/completions/test_chat_completion_with_function_calling.py
@@ -0,0 +1,554 @@
+# Copyright (c) Microsoft. All rights reserved.
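# A minimal sketch of the `retry` helper the tests below rely on, assuming a
# simple linear backoff; the actual implementation in
# tests/integration/completions/test_utils.py may differ in its details.
import asyncio
from collections.abc import Awaitable, Callable
from typing import TypeVar

T = TypeVar("T")


async def retry(func: Callable[[], Awaitable[T]], retries: int = 3) -> T:
    """Call `func` until it succeeds or the retry budget is exhausted."""
    for attempt in range(1, retries + 1):
        try:
            return await func()
        except Exception:
            if attempt == retries:
                raise
            await asyncio.sleep(attempt)  # linear backoff between attempts (an assumption)
    raise AssertionError("unreachable")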
+ +import sys +from enum import Enum +from functools import partial +from typing import Any + +import pytest + +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.kernel import Kernel +from tests.integration.completions.chat_completion_test_base import ChatCompletionTestBase +from tests.integration.completions.completion_test_base import ServiceType +from tests.integration.completions.test_utils import retry + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + + +class FunctionChoiceTestTypes(str, Enum): + """Test function choice types.""" + + AUTO = "auto" + """ + Expect a FunctionCallContent, a FunctionResultContent, and a + TextContent in the response, apart from the input. + """ + NON_AUTO = "non_auto" + """ + Expect a FunctionCallContent in the response, apart from the input. + """ + FLOW = "flow" + """ + Expect a TextContent in the response, apart from the input. + """ + + +pytestmark = pytest.mark.parametrize( + "service_id, execution_settings_kwargs, inputs, kwargs", + [ + pytest.param( + "openai", + { + "function_choice_behavior": FunctionChoiceBehavior.Auto( + auto_invoke=True, filters={"excluded_plugins": ["task_plugin"]} + ) + }, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. Don't attempt to do it yourself.")], + ), + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="What is 345 + 3?")], + ), + ] + ], + {"test_type": FunctionChoiceTestTypes.AUTO}, + id="openai_tool_call_auto", + ), + pytest.param( + "openai", + { + "function_choice_behavior": FunctionChoiceBehavior.Auto( + auto_invoke=False, filters={"excluded_plugins": ["task_plugin"]} + ) + }, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. Don't attempt to do it yourself.")], + ), + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="What is 345 + 3?")], + ), + ] + ], + {"test_type": FunctionChoiceTestTypes.NON_AUTO}, + id="openai_tool_call_non_auto", + ), + pytest.param( + "openai", + {}, + [ + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="What was our 2024 revenue?")], + ), + ChatMessageContent( + role=AuthorRole.ASSISTANT, + items=[ + FunctionCallContent( + id="fin", name="finance-search", arguments='{"company": "contoso", "year": 2024}' + ) + ], + ), + ChatMessageContent( + role=AuthorRole.TOOL, + items=[FunctionResultContent(id="fin", name="finance-search", result="1.2B")], + ), + ], + ], + {"test_type": FunctionChoiceTestTypes.FLOW}, + id="openai_tool_call_flow", + ), + pytest.param( + "azure", + {"function_choice_behavior": FunctionChoiceBehavior.Auto(filters={"excluded_plugins": ["task_plugin"]})}, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. 
Don't attempt to do it yourself.")], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 345 + 3?")]), + ] + ], + {"test_type": FunctionChoiceTestTypes.AUTO}, + id="azure_tool_call_auto", + ), + pytest.param( + "azure", + {"function_choice_behavior": "auto"}, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. Don't attempt to do it yourself.")], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 345 + 3?")]), + ] + ], + {"test_type": FunctionChoiceTestTypes.AUTO}, + id="azure_tool_call_auto_as_string", + ), + pytest.param( + "azure", + { + "function_choice_behavior": FunctionChoiceBehavior.Auto( + auto_invoke=False, filters={"excluded_plugins": ["task_plugin"]} + ) + }, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. Don't attempt to do it yourself.")], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 345 + 3?")]), + ] + ], + {"test_type": FunctionChoiceTestTypes.NON_AUTO}, + id="azure_tool_call_non_auto", + ), + pytest.param( + "azure", + {}, + [ + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="What was our 2024 revenue?")], + ), + ChatMessageContent( + role=AuthorRole.ASSISTANT, + items=[ + FunctionCallContent( + id="fin", name="finance-search", arguments='{"company": "contoso", "year": 2024}' + ) + ], + ), + ChatMessageContent( + role=AuthorRole.TOOL, + items=[FunctionResultContent(id="fin", name="finance-search", result="1.2B")], + ), + ], + ], + {"test_type": FunctionChoiceTestTypes.FLOW}, + id="azure_tool_call_flow", + ), + pytest.param( + "azure_ai_inference", + { + "function_choice_behavior": FunctionChoiceBehavior.Auto( + auto_invoke=True, filters={"excluded_plugins": ["task_plugin"]} + ), + "max_tokens": 256, + }, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. Don't attempt to do it yourself.")], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 345 + 3?")]), + ] + ], + {"test_type": FunctionChoiceTestTypes.AUTO}, + marks=pytest.mark.skip( + reason="Possible regression on the Azure AI Inference side when" + " returning tool calls in streaming responses. Investigating..." + ), + id="azure_ai_inference_tool_call_auto", + ), + pytest.param( + "azure_ai_inference", + { + "function_choice_behavior": FunctionChoiceBehavior.Auto( + auto_invoke=False, filters={"excluded_plugins": ["task_plugin"]} + ) + }, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. 
Don't attempt to do it yourself.")], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 345 + 3?")]), + ] + ], + {"test_type": FunctionChoiceTestTypes.NON_AUTO}, + id="azure_ai_inference_tool_call_non_auto", + ), + pytest.param( + "azure_ai_inference", + {}, + [ + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="What was our 2024 revenue?")], + ), + ChatMessageContent( + role=AuthorRole.ASSISTANT, + items=[ + FunctionCallContent( + id="fin", name="finance-search", arguments='{"company": "contoso", "year": 2024}' + ) + ], + ), + ChatMessageContent( + role=AuthorRole.TOOL, + items=[FunctionResultContent(id="fin", name="finance-search", result="1.2B")], + ), + ], + ], + {"test_type": FunctionChoiceTestTypes.FLOW}, + id="azure_ai_inference_tool_call_flow", + ), + pytest.param( + "google_ai", + { + "function_choice_behavior": FunctionChoiceBehavior.Auto( + auto_invoke=True, filters={"excluded_plugins": ["task_plugin"]} + ), + "max_tokens": 256, + }, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. Don't attempt to do it yourself.")], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 345 + 3?")]), + ] + ], + {"test_type": FunctionChoiceTestTypes.AUTO}, + id="google_ai_tool_call_auto", + ), + pytest.param( + "google_ai", + { + "function_choice_behavior": FunctionChoiceBehavior.Auto( + auto_invoke=False, filters={"excluded_plugins": ["task_plugin"]} + ) + }, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. Don't attempt to do it yourself.")], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 345 + 3?")]), + ] + ], + {"test_type": FunctionChoiceTestTypes.NON_AUTO}, + id="google_ai_tool_call_non_auto", + ), + pytest.param( + "google_ai", + {}, + [ + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="What was our 2024 revenue?")], + ), + ChatMessageContent( + role=AuthorRole.ASSISTANT, + items=[ + FunctionCallContent( + id="fin", name="finance-search", arguments='{"company": "contoso", "year": 2024}' + ) + ], + ), + ChatMessageContent( + role=AuthorRole.TOOL, + items=[FunctionResultContent(id="fin", name="finance-search", result="1.2B")], + ), + ], + ], + {"test_type": FunctionChoiceTestTypes.FLOW}, + marks=pytest.mark.skip(reason="Skipping due to 429s from Google AI."), + id="google_ai_tool_call_flow", + ), + pytest.param( + "vertex_ai", + { + "function_choice_behavior": FunctionChoiceBehavior.Auto( + auto_invoke=True, filters={"excluded_plugins": ["task_plugin"]} + ), + "max_tokens": 256, + }, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. Don't attempt to do it yourself.")], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 345 + 3?")]), + ] + ], + {"test_type": FunctionChoiceTestTypes.AUTO}, + id="vertex_ai_tool_call_auto", + ), + pytest.param( + "vertex_ai", + { + "function_choice_behavior": FunctionChoiceBehavior.Auto( + auto_invoke=False, filters={"excluded_plugins": ["task_plugin"]} + ) + }, + [ + [ + ChatMessageContent( + role=AuthorRole.SYSTEM, + items=[TextContent(text="You're very bad at math. 
Don't attempt to do it yourself.")], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 345 + 3?")]), + ] + ], + {"test_type": FunctionChoiceTestTypes.NON_AUTO}, + id="vertex_ai_tool_call_non_auto", + ), + pytest.param( + "vertex_ai", + {}, + [ + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="What was our 2024 revenue?")], + ), + ChatMessageContent( + role=AuthorRole.ASSISTANT, + items=[ + FunctionCallContent( + id="fin", name="finance-search", arguments='{"company": "contoso", "year": 2024}' + ) + ], + ), + ChatMessageContent( + role=AuthorRole.TOOL, + items=[FunctionResultContent(id="fin", name="finance-search", result="1.2B")], + ), + ], + ], + {"test_type": FunctionChoiceTestTypes.FLOW}, + id="vertex_ai_tool_call_flow", + ), + ], +) + + +@pytest.mark.asyncio(scope="module") +class TestChatCompletionWithFunctionCalling(ChatCompletionTestBase): + """Test Chat Completion with function calling""" + + @override + async def test_completion( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[str | ChatMessageContent | list[ChatMessageContent]], + kwargs: dict[str, Any], + ): + await self._test_helper( + kernel, + service_id, + services, + execution_settings_kwargs, + inputs, + kwargs, + False, + ) + + @override + async def test_streaming_completion( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[str | ChatMessageContent | list[ChatMessageContent]], + kwargs: dict[str, Any], + ): + await self._test_helper( + kernel, + service_id, + services, + execution_settings_kwargs, + inputs, + kwargs, + True, + ) + + @override + def evaluate(self, test_target: Any, **kwargs): + inputs = kwargs.get("inputs") + test_type = kwargs.get("test_type") + + if test_type == FunctionChoiceTestTypes.AUTO: + self._evaluate_auto_function_choice(test_target, inputs) + return + if test_type == FunctionChoiceTestTypes.NON_AUTO: + self._evaluate_non_auto_function_choice(test_target, inputs) + return + if test_type == FunctionChoiceTestTypes.FLOW: + self._evaluate_flow_test_type(test_target, inputs) + return + + raise ValueError(f"Invalid test type: {test_type}") + + def _evaluate_auto_function_choice( + self, + chat_history: ChatHistory, + inputs: list[ChatMessageContent | list[ChatMessageContent]], + ): + # Skip the input messages + skip_counts = len(inputs[0]) if isinstance(inputs[0], list) else 1 + + # Expect a FunctionCallContent, a FunctionResultContent, and a + # TextContent in the response, apart from the input. 
+ assert len(chat_history.messages) == skip_counts + 3 + + # Expect the first message to be a FunctionCallContent + assert any(isinstance(item, FunctionCallContent) for item in chat_history.messages[skip_counts].items) + # Expect the second message to be a FunctionResultContent + assert isinstance(chat_history.messages[skip_counts + 1].items[0], FunctionResultContent) + # Expect the third message to be a TextContent + assert any(isinstance(item, TextContent) for item in chat_history.messages[skip_counts + 2].items) + + def _evaluate_non_auto_function_choice( + self, + chat_history: ChatHistory, + inputs: list[ChatMessageContent | list[ChatMessageContent]], + ): + # Skip the input messages + skip_counts = len(inputs[0]) if isinstance(inputs[0], list) else 1 + + # Expect a FunctionCallContent apart from the input. + assert len(chat_history.messages) == skip_counts + 1 + + # Expect the first message to be a FunctionCallContent + assert any(isinstance(item, FunctionCallContent) for item in chat_history.messages[skip_counts].items) + + def _evaluate_flow_test_type( + self, + chat_history: ChatHistory, + inputs: list[ChatMessageContent | list[ChatMessageContent]], + ): + # Skip the input messages + skip_counts = len(inputs[0]) if isinstance(inputs[0], list) else 1 + + # Expect a TextContent in the response, apart from the input. + assert len(chat_history.messages) == skip_counts + 1 + + # Expect a single item in each message + for message in chat_history.messages[skip_counts:]: + assert len(message.items) == 1 + + # Expect the first message to be a TextContent + assert any(isinstance(item, TextContent) for item in chat_history.messages[skip_counts].items) + + async def _test_helper( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[ChatMessageContent | list[ChatMessageContent]], + kwargs: dict[str, Any], + stream: bool, + ): + assert "test_type" in kwargs, "Invalid parameterization: Test type not provided" + test_type = kwargs["test_type"] + + assert len(inputs) == 1, "Invalid parameterization: Only one input message or a single list are allowed" + history = ChatHistory() + if isinstance(inputs[0], list): + [history.add_message(message) for message in inputs[0]] + else: + [history.add_message(message) for message in inputs] + + self.setup(kernel) + service, settings_type = services[service_id] + + cmc = await retry( + partial( + self.get_chat_completion_response, + kernel=kernel, + service=service, + execution_settings=settings_type(**execution_settings_kwargs), + chat_history=history, + stream=stream, + ), + retries=5, + ) + + if test_type != FunctionChoiceTestTypes.AUTO or stream: + # Need to add the last response (the response from the model after it sees the tool call result) + # to the chat history. + # When not streaming: responses from within the auto invoke loop will be added to the history. 
+ # When streaming, responses will not add the message to the history if the response doesn't + # contain a FunctionCallContent + history.add_message(cmc) + + self.evaluate(history, inputs=inputs, test_type=test_type) diff --git a/python/tests/integration/completions/test_chat_completion_with_image_input_text_output.py b/python/tests/integration/completions/test_chat_completion_with_image_input_text_output.py new file mode 100644 index 000000000000..3ca6c41e6e36 --- /dev/null +++ b/python/tests/integration/completions/test_chat_completion_with_image_input_text_output.py @@ -0,0 +1,268 @@ +# Copyright (c) Microsoft. All rights reserved. + +import os +import sys +from functools import partial +from typing import Any + +import pytest + +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.contents import ChatHistory, ChatMessageContent, TextContent +from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from tests.integration.completions.chat_completion_test_base import ChatCompletionTestBase +from tests.integration.completions.completion_test_base import ServiceType +from tests.integration.completions.test_utils import retry + +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + + +pytestmark = pytest.mark.parametrize( + "service_id, execution_settings_kwargs, inputs, kwargs", + [ + pytest.param( + "openai", + {}, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent( + uri="https://upload.wikimedia.org/wikipedia/commons/d/d5/Half-timbered_mansion%2C_Zirkel%2C_East_view.jpg" + ), + ], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), + ], + {}, + id="openai_image_input_uri", + ), + pytest.param( + "openai", + {}, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent.from_image_path( + image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") + ), + ], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), + ], + {}, + id="openai_image_input_file", + ), + pytest.param( + "azure", + {}, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent( + uri="https://upload.wikimedia.org/wikipedia/commons/d/d5/Half-timbered_mansion%2C_Zirkel%2C_East_view.jpg" + ), + ], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), + ], + {}, + id="azure_image_input_uri", + ), + pytest.param( + "azure", + {}, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent.from_image_path( + image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") + ), + ], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), + ], + {}, + id="azure_image_input_file", + ), + pytest.param( + "azure_ai_inference", + { + "max_tokens": 256, + }, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent( + uri="https://upload.wikimedia.org/wikipedia/commons/d/d5/Half-timbered_mansion%2C_Zirkel%2C_East_view.jpg" + ), + ], + 
), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), + ], + {}, + id="azure_ai_inference_image_input_uri", + ), + pytest.param( + "azure_ai_inference", + { + "max_tokens": 256, + }, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent.from_image_path( + image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") + ), + ], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), + ], + {}, + id="azure_ai_inference_image_input_file", + ), + pytest.param( + "google_ai", + {}, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent.from_image_path( + image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") + ), + ], + ), + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="Where was it made? Make a guess if you are not sure.")], + ), + ], + {}, + id="google_ai_image_input_file", + ), + pytest.param( + "vertex_ai", + {}, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent.from_image_path( + image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") + ), + ], + ), + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="Where was it made? Make a guess if you are not sure.")], + ), + ], + {}, + id="vertex_ai_image_input_file", + ), + ], +) + + +@pytest.mark.asyncio(scope="module") +class TestChatCompletionWithImageInputTextOutput(ChatCompletionTestBase): + """Test chat completion with image input and text output.""" + + @override + async def test_completion( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[str | ChatMessageContent | list[ChatMessageContent]], + kwargs: dict[str, Any], + ): + await self._test_helper( + kernel, + service_id, + services, + execution_settings_kwargs, + inputs, + False, + ) + + @override + async def test_streaming_completion( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[str | ChatMessageContent | list[ChatMessageContent]], + kwargs: dict[str, Any], + ): + await self._test_helper( + kernel, + service_id, + services, + execution_settings_kwargs, + inputs, + True, + ) + + @override + def evaluate(self, test_target: Any, **kwargs): + inputs = kwargs.get("inputs") + assert len(test_target) == len(inputs) * 2 + for i in range(len(inputs)): + message = test_target[i * 2 + 1] + assert message.items, "No items in message" + assert len(message.items) == 1, "Unexpected number of items in message" + assert isinstance(message.items[0], TextContent), "Unexpected message item type" + assert message.items[0].text, "Empty message text" + + async def _test_helper( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[ChatMessageContent], + stream: bool, + ): + self.setup(kernel) + service, settings_type = services[service_id] + + history = ChatHistory() + for message in inputs: + history.add_message(message) + + cmc = await retry( + partial( + self.get_chat_completion_response, + kernel=kernel, + 
service=service, + execution_settings=settings_type(**execution_settings_kwargs), + chat_history=history, + stream=stream, + ), + retries=5, + ) + history.add_message(cmc) + + self.evaluate(history.messages, inputs=inputs) diff --git a/python/tests/integration/completions/test_chat_completions.py b/python/tests/integration/completions/test_chat_completions.py index 9f16b22bd1a3..417a35858548 100644 --- a/python/tests/integration/completions/test_chat_completions.py +++ b/python/tests/integration/completions/test_chat_completions.py @@ -1,51 +1,26 @@ # Copyright (c) Microsoft. All rights reserved. import os -from functools import partial, reduce +import sys +from functools import partial from typing import Any import pytest -from azure.ai.inference.aio import ChatCompletionsClient -from azure.core.credentials import AzureKeyCredential -from openai import AsyncAzureOpenAI from semantic_kernel import Kernel -from semantic_kernel.connectors.ai.azure_ai_inference.azure_ai_inference_prompt_execution_settings import ( - AzureAIInferenceChatPromptExecutionSettings, -) -from semantic_kernel.connectors.ai.azure_ai_inference.services.azure_ai_inference_chat_completion import ( - AzureAIInferenceChatCompletion, -) -from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase -from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior -from semantic_kernel.connectors.ai.google.google_ai.google_ai_prompt_execution_settings import ( - GoogleAIChatPromptExecutionSettings, -) -from semantic_kernel.connectors.ai.google.google_ai.services.google_ai_chat_completion import GoogleAIChatCompletion -from semantic_kernel.connectors.ai.mistral_ai.prompt_execution_settings.mistral_ai_prompt_execution_settings import ( - MistralAIChatPromptExecutionSettings, -) -from semantic_kernel.connectors.ai.mistral_ai.services.mistral_ai_chat_completion import MistralAIChatCompletion -from semantic_kernel.connectors.ai.ollama.ollama_prompt_execution_settings import OllamaChatPromptExecutionSettings -from semantic_kernel.connectors.ai.ollama.services.ollama_chat_completion import OllamaChatCompletion -from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import ( - AzureChatPromptExecutionSettings, -) -from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import ( - OpenAIChatPromptExecutionSettings, -) -from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion -from semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion import OpenAIChatCompletion -from semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings import AzureOpenAISettings from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings -from semantic_kernel.contents import ChatHistory, ChatMessageContent, TextContent -from semantic_kernel.contents.function_call_content import FunctionCallContent -from semantic_kernel.contents.function_result_content import FunctionResultContent -from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents import ChatMessageContent, TextContent +from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.utils.author_role import AuthorRole -from semantic_kernel.core_plugins.math_plugin import MathPlugin +from tests.integration.completions.chat_completion_test_base import ChatCompletionTestBase +from 
tests.integration.completions.completion_test_base import ServiceType from tests.integration.completions.test_utils import retry +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover + mistral_ai_setup: bool = False try: if os.environ["MISTRALAI_API_KEY"] and os.environ["MISTRALAI_CHAT_MODEL_ID"]: @@ -60,67 +35,16 @@ except KeyError: ollama_setup = False - -def setup( - kernel: Kernel, - service: str, - execution_settings_kwargs: dict[str, Any], - services: dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]], -): - kernel.add_service(services[service][0]) - kernel.add_plugin(MathPlugin(), plugin_name="math") - kernel.add_function( - function_name="chat", - plugin_name="chat", - prompt="If someone asks how you are, always include the word 'well', " - "if you get a direct question, answer the question. {{$chat_history}}", - prompt_execution_settings=services[service][1](**execution_settings_kwargs), - ) - - -@pytest.fixture(scope="function") -def history() -> ChatHistory: - return ChatHistory() - - -@pytest.fixture(scope="module") -def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]]: - azure_openai_settings = AzureOpenAISettings.create() - endpoint = azure_openai_settings.endpoint - deployment_name = azure_openai_settings.chat_deployment_name - api_key = azure_openai_settings.api_key.get_secret_value() - api_version = azure_openai_settings.api_version - azure_custom_client = AzureChatCompletion( - async_client=AsyncAzureOpenAI( - azure_endpoint=endpoint, - azure_deployment=deployment_name, - api_key=api_key, - api_version=api_version, - default_headers={"Test-User-X-ID": "test"}, - ), - ) - azure_ai_inference_client = AzureAIInferenceChatCompletion( - ai_model_id=deployment_name, - client=ChatCompletionsClient( - endpoint=f'{str(endpoint).strip("/")}/openai/deployments/{deployment_name}', - credential=AzureKeyCredential(""), - headers={"api-key": api_key}, - ), - ) - - return { - "openai": (OpenAIChatCompletion(), OpenAIChatPromptExecutionSettings), - "azure": (AzureChatCompletion(), AzureChatPromptExecutionSettings), - "azure_custom_client": (azure_custom_client, AzureChatPromptExecutionSettings), - "azure_ai_inference": (azure_ai_inference_client, AzureAIInferenceChatPromptExecutionSettings), - "mistral_ai": (MistralAIChatCompletion() if mistral_ai_setup else None, MistralAIChatPromptExecutionSettings), - "ollama": (OllamaChatCompletion(), OllamaChatPromptExecutionSettings), - "google_ai": (GoogleAIChatCompletion(), GoogleAIChatPromptExecutionSettings), - } +anthropic_setup: bool = False +try: + if os.environ["ANTHROPIC_API_KEY"] and os.environ["ANTHROPIC_CHAT_MODEL_ID"]: + anthropic_setup = True +except KeyError: + anthropic_setup = False pytestmark = pytest.mark.parametrize( - "service, execution_settings_kwargs, inputs, outputs", + "service_id, execution_settings_kwargs, inputs, kwargs", [ pytest.param( "openai", @@ -129,96 +53,8 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Hello")]), ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), ], - ["Hello", "well"], - id="openai_text_input", - ), - pytest.param( - "openai", - {}, - [ - ChatMessageContent( - role=AuthorRole.USER, - items=[ - TextContent(text="What is in this image?"), - ImageContent( - 
uri="https://upload.wikimedia.org/wikipedia/commons/d/d5/Half-timbered_mansion%2C_Zirkel%2C_East_view.jpg" - ), - ], - ), - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), - ], - ["house", "germany"], - id="openai_image_input_uri", - ), - pytest.param( - "openai", - {}, - [ - ChatMessageContent( - role=AuthorRole.USER, - items=[ - TextContent(text="What is in this image?"), - ImageContent.from_image_path( - image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") - ), - ], - ), - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), - ], - ["house", "germany"], - id="openai_image_input_file", - ), - pytest.param( - "openai", - { - "function_choice_behavior": FunctionChoiceBehavior.Auto( - auto_invoke=True, filters={"excluded_plugins": ["chat"]} - ) - }, - [ - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), - ], - ["348"], - id="openai_tool_call_auto", - ), - pytest.param( - "openai", - { - "function_choice_behavior": FunctionChoiceBehavior.Auto( - auto_invoke=False, filters={"excluded_plugins": ["chat"]} - ) - }, - [ - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), - ], - ["348"], - id="openai_tool_call_non_auto", - ), - pytest.param( - "openai", {}, - [ - [ - ChatMessageContent( - role=AuthorRole.USER, - items=[TextContent(text="What was our 2024 revenue?")], - ), - ChatMessageContent( - role=AuthorRole.ASSISTANT, - items=[ - FunctionCallContent( - id="fin", name="finance-search", arguments='{"company": "contoso", "year": 2024}' - ) - ], - ), - ChatMessageContent( - role=AuthorRole.TOOL, - items=[FunctionResultContent(id="fin", name="finance-search", result="1.2B")], - ), - ], - ], - ["1.2"], - id="openai_tool_call_flow", + id="openai_text_input", ), pytest.param( "azure", @@ -227,101 +63,8 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Hello")]), ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), ], - ["Hello", "well"], - id="azure_text_input", - ), - pytest.param( - "azure", {}, - [ - ChatMessageContent( - role=AuthorRole.USER, - items=[ - TextContent(text="What is in this image?"), - ImageContent( - uri="https://upload.wikimedia.org/wikipedia/commons/d/d5/Half-timbered_mansion%2C_Zirkel%2C_East_view.jpg" - ), - ], - ), - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), - ], - ["house", "germany"], - id="azure_image_input_uri", - ), - pytest.param( - "azure", - {}, - [ - ChatMessageContent( - role=AuthorRole.USER, - items=[ - TextContent(text="What is in this image?"), - ImageContent.from_image_path( - image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") - ), - ], - ), - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), - ], - ["house", "germany"], - id="azure_image_input_file", - ), - pytest.param( - "azure", - {"function_choice_behavior": FunctionChoiceBehavior.Auto(filters={"excluded_plugins": ["chat"]})}, - [ - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), - ], - ["348"], - id="azure_tool_call_auto", - ), - pytest.param( - "azure", - {"function_choice_behavior": "auto"}, - [ - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), - ], - ["348"], - 
id="azure_tool_call_auto_as_string", - ), - pytest.param( - "azure", - { - "function_choice_behavior": FunctionChoiceBehavior.Auto( - auto_invoke=False, filters={"excluded_plugins": ["chat"]} - ) - }, - [ - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), - ], - ["348"], - id="azure_tool_call_non_auto", - ), - pytest.param( - "azure", - {}, - [ - [ - ChatMessageContent( - role=AuthorRole.USER, - items=[TextContent(text="What was our 2024 revenue?")], - ), - ChatMessageContent( - role=AuthorRole.ASSISTANT, - items=[ - FunctionCallContent( - id="fin", name="finance-search", arguments='{"company": "contoso", "year": 2024}' - ) - ], - ), - ChatMessageContent( - role=AuthorRole.TOOL, - items=[FunctionResultContent(id="fin", name="finance-search", result="1.2B")], - ), - ], - ], - ["1.2"], - id="azure_tool_call_flow", + id="azure_text_input", ), pytest.param( "azure_custom_client", @@ -330,7 +73,7 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Hello")]), ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), ], - ["Hello", "well"], + {}, id="azure_custom_client", ), pytest.param( @@ -340,105 +83,8 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Hello")]), ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), ], - ["Hello", "well"], - id="azure_ai_inference_text_input", - ), - pytest.param( - "azure_ai_inference", - { - "max_tokens": 256, - }, - [ - ChatMessageContent( - role=AuthorRole.USER, - items=[ - TextContent(text="What is in this image?"), - ImageContent( - uri="https://upload.wikimedia.org/wikipedia/commons/d/d5/Half-timbered_mansion%2C_Zirkel%2C_East_view.jpg" - ), - ], - ), - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), - ], - ["house", "germany"], - id="azure_ai_inference_image_input_uri", - ), - pytest.param( - "azure_ai_inference", - { - "max_tokens": 256, - }, - [ - ChatMessageContent( - role=AuthorRole.USER, - items=[ - TextContent(text="What is in this image?"), - ImageContent.from_image_path( - image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") - ), - ], - ), - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), - ], - ["house", "germany"], - id="azure_ai_inference_image_input_file", - ), - pytest.param( - "azure_ai_inference", - { - "function_choice_behavior": FunctionChoiceBehavior.Auto( - auto_invoke=True, filters={"excluded_plugins": ["chat"]} - ), - "max_tokens": 256, - }, - [ - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), - ], - ["348"], - marks=pytest.mark.skip( - reason="Possible regression on the Azure AI Inference side when" - " returning tool calls in streaming responses. Investigating..." 
- ), - id="azure_ai_inference_tool_call_auto", - ), - pytest.param( - "azure_ai_inference", - { - "function_choice_behavior": FunctionChoiceBehavior.Auto( - auto_invoke=False, filters={"excluded_plugins": ["chat"]} - ) - }, - [ - ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), - ], - ["348"], - id="azure_ai_inference_tool_call_non_auto", - ), - pytest.param( - "azure_ai_inference", {}, - [ - [ - ChatMessageContent( - role=AuthorRole.USER, - items=[TextContent(text="What was our 2024 revenue?")], - ), - ChatMessageContent( - role=AuthorRole.ASSISTANT, - items=[ - FunctionCallContent( - id="fin", name="finance-search", arguments='{"company": "contoso", "year": 2024}' - ) - ], - ), - ChatMessageContent( - role=AuthorRole.TOOL, - items=[FunctionResultContent(id="fin", name="finance-search", result="1.2B")], - ), - ], - ], - ["1.2"], - id="azure_ai_inference_tool_call_flow", + id="azure_ai_inference_text_input", ), pytest.param( "mistral_ai", @@ -461,6 +107,17 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution ["Hello", "well"], marks=pytest.mark.skipif(not ollama_setup, reason="Need local Ollama setup"), id="ollama_text_input", + ), + pytest.param( + "anthropic", + {}, + [ + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Hello")]), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), + ], + ["Hello", "well"], + marks=pytest.mark.skipif(not anthropic_setup, reason="Anthropic Environment Variables not set"), + id="anthropic_text_input", ), pytest.param( "google_ai", @@ -470,102 +127,106 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), ], ["Hello", "well"], + marks=pytest.mark.skip(reason="Skipping due to 429s from Google AI."), id="google_ai_text_input", ), pytest.param( - "google_ai", - { - "max_tokens": 256, - }, + "vertex_ai", + {}, [ - ChatMessageContent( - role=AuthorRole.USER, - items=[ - TextContent(text="What is in this image?"), - ImageContent.from_image_path( - image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") - ), - ], - ), - ChatMessageContent( - role=AuthorRole.USER, - items=[TextContent(text="Where was it made? Make a guess if you are not sure.")], - ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Hello")]), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), ], - ["house", "germany"], - id="google_ai_image_input_file", + ["Hello", "well"], + id="vertex_ai_text_input", ), ], ) @pytest.mark.asyncio(scope="module") -async def test_chat_completion( - kernel: Kernel, - service: str, - execution_settings_kwargs: dict[str, Any], - inputs: list[ChatMessageContent | list[ChatMessageContent]], - outputs: list[str], - services: dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]], - history: ChatHistory, -): - setup(kernel, service, execution_settings_kwargs, services) - for message, output in zip(inputs, outputs): - if isinstance(message, list): - for msg in message: - history.add_message(msg) - else: - history.add_message(message) - - cmc = await retry( - partial(execute_invoke, kernel=kernel, history=history, output=output, stream=False), retries=5 +class TestChatCompletion(ChatCompletionTestBase): + """Test Chat Completions. + + This only tests if the services can return text completions given text inputs. 
+ """ + + @override + async def test_completion( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[str | ChatMessageContent | list[ChatMessageContent]], + kwargs: dict[str, Any], + ): + await self._test_helper( + kernel, + service_id, + services, + execution_settings_kwargs, + inputs, + False, ) - history.add_message(cmc) + @override + async def test_streaming_completion( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[str | ChatMessageContent | list[ChatMessageContent]], + kwargs: dict[str, Any], + ): + await self._test_helper( + kernel, + service_id, + services, + execution_settings_kwargs, + inputs, + True, + ) -@pytest.mark.asyncio(scope="module") -async def test_streaming_chat_completion( - kernel: Kernel, - service: str, - execution_settings_kwargs: dict[str, Any], - inputs: list[ChatMessageContent | list[ChatMessageContent]], - outputs: list[str], - services: dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]], - history: ChatHistory, -): - setup(kernel, service, execution_settings_kwargs, services) - for message, output in zip(inputs, outputs): - if isinstance(message, list): - for msg in message: - history.add_message(msg) - else: + @override + def evaluate(self, test_target: Any, **kwargs): + inputs = kwargs.get("inputs") + assert len(test_target) == len(inputs) * 2 + for i in range(len(inputs)): + message = test_target[i * 2 + 1] + assert message.items, "No items in message" + assert len(message.items) == 1, "Unexpected number of items in message" + assert isinstance(message.items[0], TextContent), "Unexpected message item type" + assert message.items[0].text, "Empty message text" + + async def _test_helper( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[ChatMessageContent], + stream: bool, + ): + self.setup(kernel) + service, settings_type = services[service_id] + + history = ChatHistory() + for message in inputs: history.add_message(message) - cmc = await retry( - partial(execute_invoke, kernel=kernel, history=history, output=output, stream=True), retries=5 - ) - history.add_message(cmc) + cmc = await retry( + partial( + self.get_chat_completion_response, + kernel=kernel, + service=service, + execution_settings=settings_type(**execution_settings_kwargs), + chat_history=history, + stream=stream, + ), + retries=5, + ) + history.add_message(cmc) -async def execute_invoke(kernel: Kernel, history: ChatHistory, output: str, stream: bool) -> "ChatMessageContent": - if stream: - invocation = kernel.invoke_stream(function_name="chat", plugin_name="chat", chat_history=history) - parts = [part[0] async for part in invocation] - if parts: - response = reduce(lambda p, r: p + r, parts) - else: - raise AssertionError("No response") - else: - invocation = await kernel.invoke(function_name="chat", plugin_name="chat", chat_history=history) - assert invocation is not None - response = invocation.value[0] - print(response) - if isinstance(response, ChatMessageContent): - for item in response.items: - if isinstance(item, TextContent): - assert item.text is not None - assert output.lower() in item.text.lower() - if isinstance(item, FunctionCallContent): - assert item.arguments - assert 
kernel.get_function_from_fully_qualified_function_name(item.name) - return response - raise AssertionError(f"Unexpected output: response: {invocation}, type: {type(invocation)}") + self.evaluate(history.messages, inputs=inputs) diff --git a/python/tests/integration/completions/test_text_completion.py b/python/tests/integration/completions/test_text_completion.py index 506ea5b9c301..56b11ddc3fad 100644 --- a/python/tests/integration/completions/test_text_completion.py +++ b/python/tests/integration/completions/test_text_completion.py @@ -1,101 +1,76 @@ # Copyright (c) Microsoft. All rights reserved. +import os +import sys from functools import partial, reduce from typing import Any import pytest +from openai import AsyncAzureOpenAI -from semantic_kernel import Kernel -from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase from semantic_kernel.connectors.ai.google.google_ai.google_ai_prompt_execution_settings import ( GoogleAITextPromptExecutionSettings, ) from semantic_kernel.connectors.ai.google.google_ai.services.google_ai_text_completion import GoogleAITextCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_completion import VertexAITextCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAITextPromptExecutionSettings, +) from semantic_kernel.connectors.ai.hugging_face.hf_prompt_execution_settings import HuggingFacePromptExecutionSettings from semantic_kernel.connectors.ai.hugging_face.services.hf_text_completion import HuggingFaceTextCompletion +from semantic_kernel.connectors.ai.ollama.ollama_prompt_execution_settings import OllamaTextPromptExecutionSettings +from semantic_kernel.connectors.ai.ollama.services.ollama_text_completion import OllamaTextCompletion from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import ( OpenAITextPromptExecutionSettings, ) from semantic_kernel.connectors.ai.open_ai.services.azure_text_completion import AzureTextCompletion from semantic_kernel.connectors.ai.open_ai.services.open_ai_text_completion import OpenAITextCompletion -from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings import AzureOpenAISettings from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase -from semantic_kernel.contents import TextContent -from tests.integration.completions.test_utils import retry +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.text_content import TextContent +if sys.version_info >= (3, 12): + from typing import override # pragma: no cover +else: + from typing_extensions import override # pragma: no cover -def setup( - kernel: Kernel, - service: str, - execution_settings_kwargs: dict[str, Any], - services: dict[str, tuple[TextCompletionClientBase, type[PromptExecutionSettings]]], -): - kernel.add_service(services[service][0]) - kernel.add_function( - function_name="text", - plugin_name="text", - prompt="If someone asks how you are, always include the word 'well', " - "if you get a direct question, answer the question. 
{{$input}}", - prompt_execution_settings=services[service][1](**execution_settings_kwargs), - ) - - -@pytest.fixture(scope="module") -def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]]: - return { - "openai": (OpenAITextCompletion(), OpenAITextPromptExecutionSettings), - "azure": (AzureTextCompletion(), OpenAITextPromptExecutionSettings), - "hf_t2t": ( - HuggingFaceTextCompletion( - service_id="patrickvonplaten/t5-tiny-random", - ai_model_id="patrickvonplaten/t5-tiny-random", - task="text2text-generation", - ), - HuggingFacePromptExecutionSettings, - ), - "hf_summ": ( - HuggingFaceTextCompletion( - service_id="jotamunz/billsum_tiny_summarization", - ai_model_id="jotamunz/billsum_tiny_summarization", - task="summarization", - ), - HuggingFacePromptExecutionSettings, - ), - "hf_gen": ( - HuggingFaceTextCompletion( - service_id="HuggingFaceM4/tiny-random-LlamaForCausalLM", - ai_model_id="HuggingFaceM4/tiny-random-LlamaForCausalLM", - task="text-generation", - ), - HuggingFacePromptExecutionSettings, - ), - "google_ai": (GoogleAITextCompletion(), GoogleAITextPromptExecutionSettings), - } +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from tests.integration.completions.completion_test_base import CompletionTestBase, ServiceType +from tests.integration.completions.test_utils import retry + +ollama_setup: bool = False +try: + if os.environ["OLLAMA_MODEL"]: + ollama_setup = True +except KeyError: + ollama_setup = False pytestmark = pytest.mark.parametrize( - "service, execution_settings_kwargs, inputs, outputs", + "service_id, execution_settings_kwargs, inputs, kwargs", [ pytest.param( "openai", {}, ["Repeat the word Hello"], - ["Hello"], - id="openai_text_input", + {}, + id="openai_text_completion", ), pytest.param( "azure", {}, ["Repeat the word Hello"], - ["Hello"], - id="azure_text_input", + {}, + id="azure_text_completion", ), pytest.param( "hf_t2t", {}, ["translate English to Dutch: Hello"], - [""], - id="hf_t2", + {}, + id="huggingface_text_completion_translation", ), pytest.param( "hf_summ", @@ -109,70 +84,184 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution toothed predator on Earth. 
Several whale species exhibit sexual dimorphism, in that the females are larger than males.""" ], - ["whale"], - id="hf_summ", + {}, + id="huggingface_text_completion_summarization", ), pytest.param( "hf_gen", {}, ["Hello, I like sleeping and "], - [""], - id="hf_gen", + {}, + id="huggingface_text_completion_generation", + ), + pytest.param( + "ollama", + {}, + ["Repeat the word Hello"], + {}, + marks=pytest.mark.skipif(not ollama_setup, reason="Need local Ollama setup"), + id="ollama_text_completion", ), pytest.param( "google_ai", {}, ["Repeat the word Hello"], - ["Hello"], - id="google_ai_text_input", + {}, + marks=pytest.mark.skip(reason="Skipping due to 429s from Google AI."), + id="google_ai_text_completion", + ), + pytest.param( + "vertex_ai", + {}, + ["Repeat the word Hello"], + {}, + id="vertex_ai_text_completion", ), ], ) @pytest.mark.asyncio(scope="module") -async def test_text_completion( - kernel: Kernel, - service: str, - execution_settings_kwargs: dict[str, Any], - inputs: list[str], - outputs: list[str], - services: dict[str, tuple[TextCompletionClientBase, type[PromptExecutionSettings]]], -): - setup(kernel, service, execution_settings_kwargs, services) - for message, output in zip(inputs, outputs): - await retry(partial(execute_invoke, kernel=kernel, input=message, output=output, stream=False), retries=5) +class TestTextCompletion(CompletionTestBase): + """Test class for text completion""" + @override + @pytest.fixture(scope="class") + def services(self) -> dict[str, tuple[ServiceType, type[PromptExecutionSettings]]]: + azure_openai_settings = AzureOpenAISettings.create() + endpoint = azure_openai_settings.endpoint + deployment_name = azure_openai_settings.text_deployment_name + api_key = azure_openai_settings.api_key.get_secret_value() + api_version = azure_openai_settings.api_version + azure_custom_client = AzureTextCompletion( + async_client=AsyncAzureOpenAI( + azure_endpoint=endpoint, + azure_deployment=deployment_name, + api_key=api_key, + api_version=api_version, + default_headers={"Test-User-X-ID": "test"}, + ), + ) -@pytest.mark.asyncio(scope="module") -async def test_streaming_text_completion( - kernel: Kernel, - service: str, - execution_settings_kwargs: dict[str, Any], - inputs: list[str], - outputs: list[str], - services: dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]], -): - setup(kernel, service, execution_settings_kwargs, services) - for message, output in zip(inputs, outputs): - await retry(partial(execute_invoke, kernel=kernel, input=message, output=output, stream=True), retries=5) - - -async def execute_invoke(kernel: Kernel, input: str, output: str, stream: bool) -> None: - if stream: - invocation = kernel.invoke_stream(function_name="text", plugin_name="text", input=input) - parts = [part[0] async for part in invocation] - if parts: - response = reduce(lambda p, r: p + r, parts) + return { + "openai": (OpenAITextCompletion(), OpenAITextPromptExecutionSettings), + "azure": (AzureTextCompletion(), OpenAITextPromptExecutionSettings), + "azure_custom_client": (azure_custom_client, OpenAITextPromptExecutionSettings), + "ollama": (OllamaTextCompletion() if ollama_setup else None, OllamaTextPromptExecutionSettings), + "google_ai": (GoogleAITextCompletion(), GoogleAITextPromptExecutionSettings), + "vertex_ai": (VertexAITextCompletion(), VertexAITextPromptExecutionSettings), + "hf_t2t": ( + HuggingFaceTextCompletion( + service_id="patrickvonplaten/t5-tiny-random", + ai_model_id="patrickvonplaten/t5-tiny-random", + 
task="text2text-generation", + ), + HuggingFacePromptExecutionSettings, + ), + "hf_summ": ( + HuggingFaceTextCompletion( + service_id="jotamunz/billsum_tiny_summarization", + ai_model_id="jotamunz/billsum_tiny_summarization", + task="summarization", + ), + HuggingFacePromptExecutionSettings, + ), + "hf_gen": ( + HuggingFaceTextCompletion( + service_id="HuggingFaceM4/tiny-random-LlamaForCausalLM", + ai_model_id="HuggingFaceM4/tiny-random-LlamaForCausalLM", + task="text-generation", + ), + HuggingFacePromptExecutionSettings, + ), + } + + async def get_text_completion_response( + self, + service: TextCompletionClientBase, + execution_settings: PromptExecutionSettings, + prompt: str, + stream: bool, + ) -> Any: + """Get response from the service + + Args: + kernel (Kernel): Kernel instance. + service (ChatCompletionClientBase): Chat completion service. + execution_settings (PromptExecutionSettings): Execution settings. + prompt (str): Input string. + stream (bool): Stream flag. + """ + if stream: + response = service.get_streaming_text_content( + prompt=prompt, + settings=execution_settings, + ) + parts = [part async for part in response] + if parts: + response = reduce(lambda p, r: p + r, parts) + else: + raise AssertionError("No response") else: - raise AssertionError("No response") - else: - invocation = await kernel.invoke(function_name="text", plugin_name="text", input=input) - assert invocation is not None - response = invocation.value[0] - print(response) - if isinstance(response, TextContent): - assert response.text is not None - assert output in response.text - return - raise AssertionError(f"Unexpected output: response: {invocation}, type: {type(invocation)}") + response = await service.get_text_content( + prompt=prompt, + settings=execution_settings, + ) + + return response + + @override + async def test_completion( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[str | ChatMessageContent | list[ChatMessageContent]], + kwargs: dict[str, Any], + ) -> None: + await self._test_helper(service_id, services, execution_settings_kwargs, inputs, False) + + @override + async def test_streaming_completion( + self, + kernel: Kernel, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[str | ChatMessageContent | list[ChatMessageContent]], + kwargs: dict[str, Any], + ): + await self._test_helper(service_id, services, execution_settings_kwargs, inputs, True) + + @override + def evaluate(self, test_target: Any, **kwargs): + print(test_target) + if isinstance(test_target, TextContent): + # Test is considered successful if the test_target is not empty + assert test_target.text, "Error: Empty test target" + return + raise AssertionError(f"Unexpected output: {test_target}, type: {type(test_target)}") + + async def _test_helper( + self, + service_id: str, + services: dict[str, tuple[ServiceType, type[PromptExecutionSettings]]], + execution_settings_kwargs: dict[str, Any], + inputs: list[str], + stream: bool, + ): + service, settings_type = services[service_id] + + for test_input in inputs: + response = await retry( + partial( + self.get_text_completion_response, + service=service, + execution_settings=settings_type(**execution_settings_kwargs), + prompt=test_input, + stream=stream, + ), + retries=5, + ) + self.evaluate(response) diff --git 
a/python/tests/integration/connectors/memory/conftest.py b/python/tests/integration/connectors/memory/conftest.py index adbc03514e86..cd39b3fb46c8 100644 --- a/python/tests/integration/connectors/memory/conftest.py +++ b/python/tests/integration/connectors/memory/conftest.py @@ -1,15 +1,109 @@ # Copyright (c) Microsoft. All rights reserved. +from copy import deepcopy +from dataclasses import dataclass, field from datetime import datetime +from typing import Annotated, Any +from uuid import uuid4 import numpy as np -import pytest +from pytest import fixture +from semantic_kernel.data.vector_store_model_decorator import vectorstoremodel +from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition +from semantic_kernel.data.vector_store_record_fields import ( + VectorStoreRecordDataField, + VectorStoreRecordKeyField, + VectorStoreRecordVectorField, +) from semantic_kernel.memory.memory_record import MemoryRecord +raw_record = {"id": "testid", "content": "test content", "vector": [0.1, 0.2, 0.3, 0.4, 0.5]} -@pytest.fixture(scope="module") + +@fixture +def record(): + return deepcopy(raw_record) + + +def DataModelArray(record) -> tuple[type | None, VectorStoreRecordDefinition | None, Any]: + @vectorstoremodel + @dataclass + class MyDataModelArray: + vector: Annotated[ + np.ndarray | None, + VectorStoreRecordVectorField( + index_kind="hnsw", + dimensions=5, + distance_function="cosine", + property_type="float", + serialize_function=np.ndarray.tolist, + deserialize_function=np.array, + ), + ] = None + other: str | None = None + id: Annotated[str, VectorStoreRecordKeyField()] = field(default_factory=lambda: str(uuid4())) + content: Annotated[ + str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector", property_type="str") + ] = "content1" + + record["vector"] = np.array(record["vector"]) + + return MyDataModelArray, None, MyDataModelArray(**record) + + +def DataModelList(record) -> tuple[type | None, VectorStoreRecordDefinition | None, Any]: + @vectorstoremodel + @dataclass + class MyDataModelList: + vector: Annotated[ + list[float] | None, + VectorStoreRecordVectorField( + index_kind="hnsw", + dimensions=5, + distance_function="cosine", + property_type="float", + ), + ] = None + other: str | None = None + id: Annotated[str, VectorStoreRecordKeyField()] = field(default_factory=lambda: str(uuid4())) + content: Annotated[ + str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector", property_type="str") + ] = "content1" + + return MyDataModelList, None, MyDataModelList(**record) + + +def DataModelPandas(record) -> tuple[type | None, VectorStoreRecordDefinition | None, Any]: + import pandas as pd + + definition = VectorStoreRecordDefinition( + fields={ + "vector": VectorStoreRecordVectorField( + name="vector", + index_kind="hnsw", + dimensions=5, + distance_function="cosine", + property_type="float", + ), + "id": VectorStoreRecordKeyField(name="id"), + "content": VectorStoreRecordDataField( + name="content", has_embedding=True, embedding_property_name="vector", property_type="str" + ), + }, + container_mode=True, + ) + df = pd.DataFrame([record]) + return None, definition, df + + +@fixture(scope="module") +def models(record): + return [DataModelArray(record), DataModelList(record), DataModelPandas(record)] + + +@fixture(scope="module") def memory_record1(): return MemoryRecord( id="test_id1", @@ -23,7 +117,7 @@ def memory_record1(): ) -@pytest.fixture(scope="module") +@fixture(scope="module") def memory_record2(): 
return MemoryRecord( id="test_id2", @@ -37,7 +131,7 @@ def memory_record2(): ) -@pytest.fixture(scope="module") +@fixture(scope="module") def memory_record3(): return MemoryRecord( id="test_id3", diff --git a/python/tests/integration/connectors/memory/test_postgres.py b/python/tests/integration/connectors/memory/test_postgres.py index 84f463bba189..fa57e253b5a4 100644 --- a/python/tests/integration/connectors/memory/test_postgres.py +++ b/python/tests/integration/connectors/memory/test_postgres.py @@ -3,6 +3,7 @@ import time import pytest +from psycopg_pool import PoolTimeout from pydantic import ValidationError from semantic_kernel.connectors.memory.postgres import PostgresMemoryStore @@ -52,147 +53,162 @@ def test_constructor(connection_string): @pytest.mark.asyncio async def test_create_and_does_collection_exist(connection_string): memory = PostgresMemoryStore(connection_string, 2, 1, 5) - - await memory.create_collection("test_collection") - result = await memory.does_collection_exist("test_collection") - assert result is not None + try: + await memory.create_collection("test_collection") + result = await memory.does_collection_exist("test_collection") + assert result is not None + except PoolTimeout: + pytest.skip("PoolTimeout exception raised, skipping test.") @pytest.mark.asyncio async def test_get_collections(connection_string): memory = PostgresMemoryStore(connection_string, 2, 1, 5) - await memory.create_collection("test_collection") - result = await memory.get_collections() - assert "test_collection" in result + try: + await memory.create_collection("test_collection") + result = await memory.get_collections() + assert "test_collection" in result + except PoolTimeout: + pytest.skip("PoolTimeout exception raised, skipping test.") @pytest.mark.asyncio async def test_delete_collection(connection_string): memory = PostgresMemoryStore(connection_string, 2, 1, 5) + try: + await memory.create_collection("test_collection") - await memory.create_collection("test_collection") - - result = await memory.get_collections() - assert "test_collection" in result + result = await memory.get_collections() + assert "test_collection" in result - await memory.delete_collection("test_collection") - result = await memory.get_collections() - assert "test_collection" not in result + await memory.delete_collection("test_collection") + result = await memory.get_collections() + assert "test_collection" not in result + except PoolTimeout: + pytest.skip("PoolTimeout exception raised, skipping test.") @pytest.mark.asyncio async def test_does_collection_exist(connection_string): memory = PostgresMemoryStore(connection_string, 2, 1, 5) - - await memory.create_collection("test_collection") - result = await memory.does_collection_exist("test_collection") - assert result is True + try: + await memory.create_collection("test_collection") + result = await memory.does_collection_exist("test_collection") + assert result is True + except PoolTimeout: + pytest.skip("PoolTimeout exception raised, skipping test.") @pytest.mark.asyncio async def test_upsert_and_get(connection_string, memory_record1): memory = PostgresMemoryStore(connection_string, 2, 1, 5) - - await memory.create_collection("test_collection") - await memory.upsert("test_collection", memory_record1) - result = await memory.get("test_collection", memory_record1._id, with_embedding=True) - assert result is not None - assert result._id == memory_record1._id - assert result._text == memory_record1._text - assert result._timestamp == memory_record1._timestamp - 
for i in range(len(result._embedding)): - assert result._embedding[i] == memory_record1._embedding[i] + try: + await memory.create_collection("test_collection") + await memory.upsert("test_collection", memory_record1) + result = await memory.get("test_collection", memory_record1._id, with_embedding=True) + assert result is not None + assert result._id == memory_record1._id + assert result._text == memory_record1._text + assert result._timestamp == memory_record1._timestamp + for i in range(len(result._embedding)): + assert result._embedding[i] == memory_record1._embedding[i] + except PoolTimeout: + pytest.skip("PoolTimeout exception raised, skipping test.") -@pytest.mark.xfail(reason="Test failing with reason couldn't: get a connection after 30.00 sec") @pytest.mark.asyncio async def test_upsert_batch_and_get_batch(connection_string, memory_record1, memory_record2): memory = PostgresMemoryStore(connection_string, 2, 1, 5) + try: + await memory.create_collection("test_collection") + await memory.upsert_batch("test_collection", [memory_record1, memory_record2]) - await memory.create_collection("test_collection") - await memory.upsert_batch("test_collection", [memory_record1, memory_record2]) - - results = await memory.get_batch( - "test_collection", - [memory_record1._id, memory_record2._id], - with_embeddings=True, - ) - - assert len(results) == 2 - assert results[0]._id in [memory_record1._id, memory_record2._id] - assert results[1]._id in [memory_record1._id, memory_record2._id] + results = await memory.get_batch( + "test_collection", + [memory_record1._id, memory_record2._id], + with_embeddings=True, + ) + assert len(results) == 2 + assert results[0]._id in [memory_record1._id, memory_record2._id] + assert results[1]._id in [memory_record1._id, memory_record2._id] + except PoolTimeout: + pytest.skip("PoolTimeout exception raised, skipping test.") -@pytest.mark.xfail(reason="Test failing with reason couldn't: get a connection after 30.00 sec") @pytest.mark.asyncio async def test_remove(connection_string, memory_record1): memory = PostgresMemoryStore(connection_string, 2, 1, 5) + try: + await memory.create_collection("test_collection") + await memory.upsert("test_collection", memory_record1) - await memory.create_collection("test_collection") - await memory.upsert("test_collection", memory_record1) - - result = await memory.get("test_collection", memory_record1._id, with_embedding=True) - assert result is not None + result = await memory.get("test_collection", memory_record1._id, with_embedding=True) + assert result is not None - await memory.remove("test_collection", memory_record1._id) - with pytest.raises(ServiceResourceNotFoundError): - _ = await memory.get("test_collection", memory_record1._id, with_embedding=True) + await memory.remove("test_collection", memory_record1._id) + with pytest.raises(ServiceResourceNotFoundError): + await memory.get("test_collection", memory_record1._id, with_embedding=True) + except PoolTimeout: + pytest.skip("PoolTimeout exception raised, skipping test.") -@pytest.mark.xfail(reason="Test failing with reason couldn't: get a connection after 30.00 sec") @pytest.mark.asyncio async def test_remove_batch(connection_string, memory_record1, memory_record2): memory = PostgresMemoryStore(connection_string, 2, 1, 5) + try: + await memory.create_collection("test_collection") + await memory.upsert_batch("test_collection", [memory_record1, memory_record2]) + await memory.remove_batch("test_collection", [memory_record1._id, memory_record2._id]) + with 
pytest.raises(ServiceResourceNotFoundError): + _ = await memory.get("test_collection", memory_record1._id, with_embedding=True) - await memory.create_collection("test_collection") - await memory.upsert_batch("test_collection", [memory_record1, memory_record2]) - await memory.remove_batch("test_collection", [memory_record1._id, memory_record2._id]) - with pytest.raises(ServiceResourceNotFoundError): - _ = await memory.get("test_collection", memory_record1._id, with_embedding=True) - - with pytest.raises(ServiceResourceNotFoundError): - _ = await memory.get("test_collection", memory_record2._id, with_embedding=True) + with pytest.raises(ServiceResourceNotFoundError): + _ = await memory.get("test_collection", memory_record2._id, with_embedding=True) + except PoolTimeout: + pytest.skip("PoolTimeout exception raised, skipping test.") -@pytest.mark.xfail(reason="Test failing with reason couldn't: get a connection after 30.00 sec") @pytest.mark.asyncio async def test_get_nearest_match(connection_string, memory_record1, memory_record2): memory = PostgresMemoryStore(connection_string, 2, 1, 5) - - await memory.create_collection("test_collection") - await memory.upsert_batch("test_collection", [memory_record1, memory_record2]) - test_embedding = memory_record1.embedding.copy() - test_embedding[0] = test_embedding[0] + 0.01 - - result = await memory.get_nearest_match( - "test_collection", test_embedding, min_relevance_score=0.0, with_embedding=True - ) - assert result is not None - assert result[0]._id == memory_record1._id - assert result[0]._text == memory_record1._text - assert result[0]._timestamp == memory_record1._timestamp - for i in range(len(result[0]._embedding)): - assert result[0]._embedding[i] == memory_record1._embedding[i] + try: + await memory.create_collection("test_collection") + await memory.upsert_batch("test_collection", [memory_record1, memory_record2]) + test_embedding = memory_record1.embedding.copy() + test_embedding[0] = test_embedding[0] + 0.01 + + result = await memory.get_nearest_match( + "test_collection", test_embedding, min_relevance_score=0.0, with_embedding=True + ) + assert result is not None + assert result[0]._id == memory_record1._id + assert result[0]._text == memory_record1._text + assert result[0]._timestamp == memory_record1._timestamp + for i in range(len(result[0]._embedding)): + assert result[0]._embedding[i] == memory_record1._embedding[i] + except PoolTimeout: + pytest.skip("PoolTimeout exception raised, skipping test.") @pytest.mark.asyncio -@pytest.mark.xfail(reason="The test is failing due to a timeout.") async def test_get_nearest_matches(connection_string, memory_record1, memory_record2, memory_record3): memory = PostgresMemoryStore(connection_string, 2, 1, 5) - - await memory.create_collection("test_collection") - await memory.upsert_batch("test_collection", [memory_record1, memory_record2, memory_record3]) - test_embedding = memory_record2.embedding - test_embedding[0] = test_embedding[0] + 0.025 - - result = await memory.get_nearest_matches( - "test_collection", - test_embedding, - limit=2, - min_relevance_score=0.0, - with_embeddings=True, - ) - assert len(result) == 2 - assert result[0][0]._id in [memory_record3._id, memory_record2._id] - assert result[1][0]._id in [memory_record3._id, memory_record2._id] + try: + await memory.create_collection("test_collection") + await memory.upsert_batch("test_collection", [memory_record1, memory_record2, memory_record3]) + test_embedding = memory_record2.embedding + test_embedding[0] = test_embedding[0] + 
0.025 + + result = await memory.get_nearest_matches( + "test_collection", + test_embedding, + limit=2, + min_relevance_score=0.0, + with_embeddings=True, + ) + assert len(result) == 2 + assert result[0][0]._id in [memory_record3._id, memory_record2._id] + assert result[1][0]._id in [memory_record3._id, memory_record2._id] + except PoolTimeout: + pytest.skip("PoolTimeout exception raised, skipping test.") diff --git a/python/tests/integration/connectors/memory/test_vector_collections.py b/python/tests/integration/connectors/memory/test_vector_collections.py new file mode 100644 index 000000000000..711387068ea4 --- /dev/null +++ b/python/tests/integration/connectors/memory/test_vector_collections.py @@ -0,0 +1,175 @@ +# Copyright (c) Microsoft. All rights reserved. + + +from copy import deepcopy +from dataclasses import dataclass, field +from typing import Annotated +from uuid import uuid4 + +import numpy as np +import pytest +from pytest import fixture, mark, param + +from semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_store import AzureAISearchStore +from semantic_kernel.connectors.memory.qdrant.qdrant_store import QdrantStore +from semantic_kernel.connectors.memory.redis.const import RedisCollectionTypes +from semantic_kernel.connectors.memory.redis.redis_store import RedisStore +from semantic_kernel.data.vector_store_model_decorator import vectorstoremodel +from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition +from semantic_kernel.data.vector_store_record_fields import ( + VectorStoreRecordDataField, + VectorStoreRecordKeyField, + VectorStoreRecordVectorField, +) + +raw_record = { + "id": "e6103c03-487f-4d7d-9c23-4723651c17f4", + "content": "test content", + "vector": [0.1, 0.2, 0.3, 0.4, 0.5], +} + + +def record(): + return deepcopy(raw_record) + + +def DataModelArray(record) -> tuple: + @vectorstoremodel + @dataclass + class MyDataModelArray: + vector: Annotated[ + np.ndarray | None, + VectorStoreRecordVectorField( + index_kind="hnsw", + dimensions=5, + distance_function="cosine", + property_type="float", + serialize_function=np.ndarray.tolist, + deserialize_function=np.array, + ), + ] = None + other: str | None = None + id: Annotated[str, VectorStoreRecordKeyField()] = field(default_factory=lambda: str(uuid4())) + content: Annotated[ + str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector", property_type="str") + ] = "content1" + + record["vector"] = np.array(record["vector"]) + + return "array", MyDataModelArray, None, MyDataModelArray(**record) + + +def DataModelList(record) -> tuple: + @vectorstoremodel + @dataclass + class MyDataModelList: + vector: Annotated[ + list[float] | None, + VectorStoreRecordVectorField( + index_kind="hnsw", + dimensions=5, + distance_function="cosine", + property_type="float", + ), + ] = None + other: str | None = None + id: Annotated[str, VectorStoreRecordKeyField()] = field(default_factory=lambda: str(uuid4())) + content: Annotated[ + str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector", property_type="str") + ] = "content1" + + return "list", MyDataModelList, None, MyDataModelList(**record) + + +def DataModelPandas(record) -> tuple: + import pandas as pd + + definition = VectorStoreRecordDefinition( + fields={ + "vector": VectorStoreRecordVectorField( + name="vector", + index_kind="hnsw", + dimensions=5, + distance_function="cosine", + property_type="float", + ), + "id": VectorStoreRecordKeyField(name="id"), + "content": 
VectorStoreRecordDataField( + name="content", has_embedding=True, embedding_property_name="vector", property_type="str" + ), + }, + container_mode=True, + to_dict=lambda x: x.to_dict(orient="records"), + from_dict=lambda x, **_: pd.DataFrame(x), + ) + df = pd.DataFrame([record]) + return "pandas", pd.DataFrame, definition, df + + +@fixture +def collection_details(request): + match request.param: + case "array": + yield DataModelArray(record()) + case "list": + yield DataModelList(record()) + case "pandas": + yield DataModelPandas(record()) + + +@fixture +def store(request): + match request.param: + case "redis_json": + yield RedisStore(), {"collection_type": RedisCollectionTypes.JSON} + case "redis_hashset": + yield RedisStore(), {"collection_type": RedisCollectionTypes.HASHSET} + case "azure_ai_search": + yield AzureAISearchStore(), {} + case "qdrant": + yield QdrantStore(), {} + case "qdrant_in_memory": + yield QdrantStore(location=":memory:"), {} + case "qdrant_grpc": + yield QdrantStore(), {"prefer_grpc": True} + + +@fixture +@mark.asyncio +async def collection_and_data(store, collection_details): + vector_store, collection_options = store + collection_name, data_model_type, data_model_definition, data_record = collection_details + collection = vector_store.get_collection( + collection_name, data_model_type, data_model_definition, **collection_options + ) + try: + await collection.create_collection_if_not_exists() + except Exception as exc: + pytest.fail(f"Failed to create collection: {exc}") + yield collection, data_record + try: + await collection.delete_collection() + except Exception as exc: + pytest.fail(f"Failed to delete collection: {exc}") + + +@mark.asyncio +@mark.parametrize("collection_details", ["array", "list", "pandas"], indirect=True) +@mark.parametrize( + "store", + ["redis_json", "redis_hashset", "azure_ai_search", "qdrant", "qdrant_in_memory", "qdrant_grpc"], + indirect=True, +) +async def test_collections(collection_and_data): + compare_record = record() + async for collection, data_record in collection_and_data: + print("upserting record") + await collection.upsert(data_record) + print("getting record") + result = await collection.get(compare_record["id"]) + assert result is not None + print("deleting record") + await collection.delete(compare_record["id"]) + print("getting record again, expect None") + result = await collection.get(compare_record["id"]) + assert result is None diff --git a/python/tests/integration/embeddings/test_embedding_service.py b/python/tests/integration/embeddings/test_embedding_service.py new file mode 100644 index 000000000000..60917da23dca --- /dev/null +++ b/python/tests/integration/embeddings/test_embedding_service.py @@ -0,0 +1,79 @@ +# Copyright (c) Microsoft. All rights reserved. 
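# [Editor's sketch] test_vector_collections.py above drives its `store` and
# `collection_details` fixtures through pytest's indirect parametrization: each
# string in the parametrize list arrives at the fixture as request.param, and the
# test receives whatever the fixture yields. A minimal, self-contained
# illustration of that pattern, with hypothetical names:
import pytest


@pytest.fixture
def store(request):
    # request.param is one of the strings from the parametrize list below.
    match request.param:
        case "in_memory":
            yield {"kind": "in_memory"}
        case "remote":
            yield {"kind": "remote"}


@pytest.mark.parametrize("store", ["in_memory", "remote"], indirect=True)
def test_store_kind(store):
    # The test sees the fixture's yielded object, not the raw parameter string.
    assert store["kind"] in ("in_memory", "remote")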
+ +import os + +import pytest + +import semantic_kernel as sk +from semantic_kernel.connectors.ai.embeddings.embedding_generator_base import EmbeddingGeneratorBase +from semantic_kernel.connectors.ai.mistral_ai import MistralAITextEmbedding +from semantic_kernel.core_plugins.text_memory_plugin import TextMemoryPlugin +from semantic_kernel.kernel import Kernel +from semantic_kernel.memory.semantic_text_memory import SemanticTextMemory + +mistral_ai_setup: bool = False +try: + if os.environ["MISTRALAI_API_KEY"] and os.environ["MISTRALAI_EMBEDDING_MODEL_ID"]: + mistral_ai_setup = True +except KeyError: + mistral_ai_setup = False + + +pytestmark = pytest.mark.parametrize("embeddings_generator", + [ + pytest.param( + MistralAITextEmbedding() if mistral_ai_setup else None, + marks=pytest.mark.skipif(not mistral_ai_setup, reason="Mistral AI environment variables not set"), + id="MistralEmbeddings" + ) + ] +) + + +@pytest.mark.asyncio(scope="module") +async def test_embedding_service(kernel: Kernel, embeddings_generator: EmbeddingGeneratorBase): + kernel.add_service(embeddings_generator) + + memory = SemanticTextMemory(storage=sk.memory.VolatileMemoryStore(), embeddings_generator=embeddings_generator) + kernel.add_plugin(TextMemoryPlugin(memory), "TextMemoryPlugin") + + await memory.save_reference( + "test", + external_id="info1", + text="this is a test", + external_source_name="external source", + ) + + # Add some documents to the semantic memory + await memory.save_information("test", id="info1", text="Sharks are fish.") + await memory.save_information("test", id="info2", text="Whales are mammals.") + await memory.save_information("test", id="info3", text="Penguins are birds.") + await memory.save_information("test", id="info4", text="Dolphins are mammals.") + await memory.save_information("test", id="info5", text="Flies are insects.") + + # Search for documents + query = "What are mammals?" + result = await memory.search("test", query, limit=2, min_relevance_score=0.0) + print(f"Query: {query}") + print(f"\tAnswer 1: {result[0].text}") + print(f"\tAnswer 2: {result[1].text}\n") + assert "mammals." in result[0].text + assert "mammals." in result[1].text + + query = "What are fish?" + result = await memory.search("test", query, limit=1, min_relevance_score=0.0) + print(f"Query: {query}") + print(f"\tAnswer: {result[0].text}\n") + assert result[0].text == "Sharks are fish." + + query = "What are insects?" + result = await memory.search("test", query, limit=1, min_relevance_score=0.0) + print(f"Query: {query}") + print(f"\tAnswer: {result[0].text}\n") + assert result[0].text == "Flies are insects." + + query = "What are birds?" + result = await memory.search("test", query, limit=1, min_relevance_score=0.0) + print(f"Query: {query}") + print(f"\tAnswer: {result[0].text}\n") + assert result[0].text == "Penguins are birds." diff --git a/python/tests/integration/embeddings/test_vertex_ai_embedding_service.py b/python/tests/integration/embeddings/test_vertex_ai_embedding_service.py new file mode 100644 index 000000000000..c4226d6d713e --- /dev/null +++ b/python/tests/integration/embeddings/test_vertex_ai_embedding_service.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft. All rights reserved. 
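# [Editor's note] The MISTRALAI_* availability check in test_embedding_service.py
# above wraps os.environ[...] in try/except KeyError. An equivalent formulation
# with os.environ.get, shown only for comparison (same semantics: both variables
# must be present and non-empty for the parametrized case to run):
import os

mistral_ai_setup: bool = bool(
    os.environ.get("MISTRALAI_API_KEY") and os.environ.get("MISTRALAI_EMBEDDING_MODEL_ID")
)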
+ + +import pytest + +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_embedding import VertexAITextEmbedding +from semantic_kernel.core_plugins.text_memory_plugin import TextMemoryPlugin +from semantic_kernel.kernel import Kernel +from semantic_kernel.memory.semantic_text_memory import SemanticTextMemory +from semantic_kernel.memory.volatile_memory_store import VolatileMemoryStore + + +@pytest.mark.asyncio +async def test_vertex_ai_embedding_service(kernel: Kernel): + embeddings_gen = VertexAITextEmbedding() + + kernel.add_service(embeddings_gen) + + memory = SemanticTextMemory(storage=VolatileMemoryStore(), embeddings_generator=embeddings_gen) + kernel.add_plugin(TextMemoryPlugin(memory), "TextMemoryPlugin") + + await memory.save_information(collection="generic", id="info1", text="My budget for 2024 is $100,000") + await memory.save_reference( + "test", + external_id="info1", + text="this is a test", + external_source_name="external source", + ) diff --git a/python/tests/samples/test_concepts.py b/python/tests/samples/test_concepts.py index 64f6f2c6820c..74ef57f4030f 100644 --- a/python/tests/samples/test_concepts.py +++ b/python/tests/samples/test_concepts.py @@ -5,8 +5,6 @@ import pytest from pytest import mark, param -from samples.concepts.agents.step1_agent import main as step1_agent -from samples.concepts.agents.step2_plugins import main as step2_plugins from samples.concepts.auto_function_calling.azure_python_code_interpreter_function_calling import ( main as azure_python_code_interpreter_function_calling, ) @@ -28,6 +26,7 @@ from samples.concepts.filtering.prompt_filters import main as prompt_filters from samples.concepts.functions.kernel_arguments import main as kernel_arguments from samples.concepts.grounding.grounded import main as grounded +from samples.concepts.images.image_generation import main as image_generation from samples.concepts.local_models.lm_studio_chat_completion import main as lm_studio_chat_completion from samples.concepts.local_models.lm_studio_text_embedding import main as lm_studio_text_embedding from samples.concepts.local_models.ollama_chat_completion import main as ollama_chat_completion @@ -54,6 +53,10 @@ from samples.concepts.rag.rag_with_text_memory_plugin import main as rag_with_text_memory_plugin from samples.concepts.search.bing_search_plugin import main as bing_search_plugin from samples.concepts.service_selector.custom_service_selector import main as custom_service_selector +from samples.getting_started_with_agents.step1_agent import main as step1_agent +from samples.getting_started_with_agents.step2_plugins import main as step2_plugins +from samples.getting_started_with_agents.step3_chat import main as step3_chat +from samples.getting_started_with_agents.step7_assistant import main as step7_assistant from tests.samples.samples_utils import retry concepts = [ @@ -106,6 +109,8 @@ param(function_defined_in_yaml_prompt, ["What is 3+3?", "exit"], id="function_defined_in_yaml_prompt"), param(step1_agent, [], id="step1_agent"), param(step2_plugins, [], id="step2_agent_plugins"), + param(step3_chat, [], id="step3_chat"), + param(step7_assistant, [], id="step7_assistant"), param( ollama_chat_completion, ["Why is the sky blue?", "exit"], @@ -124,6 +129,7 @@ id="lm_studio_text_embedding", marks=pytest.mark.skip(reason="Need to set up LM Studio locally. 
Check out the module for more details."), ), + param(image_generation, [], id="image_generation"), ] diff --git a/python/tests/unit/agents/test_agent.py b/python/tests/unit/agents/test_agent.py index 6094b649e1e7..829ed165874e 100644 --- a/python/tests/unit/agents/test_agent.py +++ b/python/tests/unit/agents/test_agent.py @@ -5,8 +5,8 @@ import pytest -from semantic_kernel.agents.agent import Agent -from semantic_kernel.agents.agent_channel import AgentChannel +from semantic_kernel.agents import Agent +from semantic_kernel.agents.channels.agent_channel import AgentChannel class MockAgent(Agent): @@ -62,3 +62,40 @@ async def test_create_channel(): channel = await agent.create_channel() assert isinstance(channel, AgentChannel) + + +@pytest.mark.asyncio +async def test_agent_equality(): + id_value = str(uuid.uuid4()) + + agent1 = MockAgent(name="Test Agent", description="A test agent", id=id_value) + agent2 = MockAgent(name="Test Agent", description="A test agent", id=id_value) + + assert agent1 == agent2 + + agent3 = MockAgent(name="Test Agent", description="A different description", id=id_value) + assert agent1 != agent3 + + agent4 = MockAgent(name="Another Agent", description="A test agent", id=id_value) + assert agent1 != agent4 + + +@pytest.mark.asyncio +async def test_agent_equality_different_type(): + agent = MockAgent(name="Test Agent", description="A test agent", id=str(uuid.uuid4())) + non_agent = "Not an agent" + + assert agent != non_agent + + +@pytest.mark.asyncio +async def test_agent_hash(): + id_value = str(uuid.uuid4()) + + agent1 = MockAgent(name="Test Agent", description="A test agent", id=id_value) + agent2 = MockAgent(name="Test Agent", description="A test agent", id=id_value) + + assert hash(agent1) == hash(agent2) + + agent3 = MockAgent(name="Test Agent", description="A different description", id=id_value) + assert hash(agent1) != hash(agent3) diff --git a/python/tests/unit/agents/test_agent_channel.py b/python/tests/unit/agents/test_agent_channel.py index 20b61d956686..33b24b743b54 100644 --- a/python/tests/unit/agents/test_agent_channel.py +++ b/python/tests/unit/agents/test_agent_channel.py @@ -5,8 +5,8 @@ import pytest -from semantic_kernel.agents.agent import Agent -from semantic_kernel.agents.agent_channel import AgentChannel +from semantic_kernel.agents import Agent +from semantic_kernel.agents.channels.agent_channel import AgentChannel from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.utils.author_role import AuthorRole diff --git a/python/tests/unit/agents/test_agent_chat.py b/python/tests/unit/agents/test_agent_chat.py new file mode 100644 index 000000000000..622c3654f853 --- /dev/null +++ b/python/tests/unit/agents/test_agent_chat.py @@ -0,0 +1,235 @@ +# Copyright (c) Microsoft. All rights reserved. 
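# [Editor's sketch] The equality and hash tests added to test_agent.py above
# exercise a contract where Agent compares and hashes by its identity fields
# (id, name, description). A minimal dataclass restatement of that contract,
# not the actual Agent implementation:
from dataclasses import dataclass


@dataclass(frozen=True)
class AgentIdentity:
    id: str
    name: str
    description: str


a = AgentIdentity(id="1", name="Test Agent", description="A test agent")
b = AgentIdentity(id="1", name="Test Agent", description="A test agent")
assert a == b and hash(a) == hash(b)
assert a != AgentIdentity(id="1", name="Test Agent", description="different")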
+ +from unittest import mock +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.group_chat.agent_chat import AgentChat +from semantic_kernel.agents.group_chat.broadcast_queue import ChannelReference +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.agent_exceptions import AgentChatException + + +@pytest.fixture +def agent_chat(): + return AgentChat() + + +@pytest.fixture +def agent(): + mock_agent = MagicMock() + mock_agent.name = "TestAgent" + return mock_agent + + +@pytest.fixture +def chat_message(): + mock_chat_message = MagicMock(spec=ChatMessageContent) + mock_chat_message.role = "user" + return mock_chat_message + + +@pytest.mark.asyncio +async def test_set_activity_or_throw_when_inactive(agent_chat): + agent_chat._is_active = False + agent_chat.set_activity_or_throw() + assert agent_chat.is_active + + +@pytest.mark.asyncio +async def test_set_activity_or_throw_when_active(agent_chat): + agent_chat._is_active = True + with pytest.raises(Exception, match="Unable to proceed while another agent is active."): + agent_chat.set_activity_or_throw() + + +@pytest.mark.asyncio +async def test_clear_activity_signal(agent_chat): + agent_chat._is_active = True + agent_chat.clear_activity_signal() + assert not agent_chat.is_active + + +@pytest.mark.asyncio +async def test_get_messages_in_descending_order(agent_chat, chat_message): + agent_chat.history.messages = [chat_message, chat_message, chat_message] + messages = [] + async for message in agent_chat.get_messages_in_descending_order(): + messages.append(message) + assert len(messages) == 3 + + +@pytest.mark.asyncio +async def test_get_chat_messages_without_agent(agent_chat, chat_message): + agent_chat.history.messages = [chat_message] + with patch( + "semantic_kernel.agents.group_chat.agent_chat.AgentChat.get_messages_in_descending_order", + return_value=AsyncMock(), + ) as mock_get_messages: + async for _ in agent_chat.get_chat_messages(): + pass + mock_get_messages.assert_called_once() + + +@pytest.mark.asyncio +async def test_get_chat_messages_with_agent(agent_chat, agent, chat_message): + agent_chat.channel_map[agent] = "test_channel" + + mock_channel = mock.MagicMock(spec=AgentChannel) + agent_chat.agent_channels["test_channel"] = mock_channel + + with ( + patch("semantic_kernel.agents.group_chat.agent_chat.AgentChat._get_agent_hash", return_value="test_channel"), + patch("semantic_kernel.agents.group_chat.agent_chat.AgentChat._synchronize_channel", return_value=mock_channel), + patch.object(mock_channel, "get_history", return_value=AsyncMock()), + ): + async for _ in agent_chat.get_chat_messages(agent): + pass + + +@pytest.mark.asyncio +async def test_add_chat_message(agent_chat, chat_message): + with patch( + "semantic_kernel.agents.group_chat.agent_chat.AgentChat.add_chat_messages", + return_value=AsyncMock(), + ) as mock_add_chat_messages: + await agent_chat.add_chat_message(chat_message) + mock_add_chat_messages.assert_called_once_with([chat_message]) + + +@pytest.mark.asyncio +async def test_add_chat_messages(agent_chat, chat_message): + with patch("semantic_kernel.agents.group_chat.broadcast_queue.BroadcastQueue.enqueue", return_value=AsyncMock()): + await agent_chat.add_chat_messages([chat_message]) + assert chat_message in agent_chat.history.messages + + +@pytest.mark.asyncio +async 
def test_invoke_agent(agent_chat, agent, chat_message): + mock_channel = mock.MagicMock(spec=AgentChannel) + + async def mock_invoke(*args, **kwargs): + yield True, chat_message + + mock_channel.invoke.side_effect = mock_invoke + + with ( + patch( + "semantic_kernel.agents.group_chat.agent_chat.AgentChat._get_or_create_channel", return_value=mock_channel + ), + patch( + "semantic_kernel.agents.group_chat.broadcast_queue.BroadcastQueue.enqueue", + return_value=AsyncMock(), + ), + ): + async for _ in agent_chat.invoke_agent(agent): + pass + + mock_channel.invoke.assert_called_once_with(agent) + await agent_chat.reset() + + +@pytest.mark.asyncio +async def test_synchronize_channel_with_existing_channel(agent_chat): + mock_channel = MagicMock(spec=AgentChannel) + channel_key = "test_channel_key" + agent_chat.agent_channels[channel_key] = mock_channel + + with patch( + "semantic_kernel.agents.group_chat.broadcast_queue.BroadcastQueue.ensure_synchronized", return_value=AsyncMock() + ) as mock_ensure_synchronized: + result = await agent_chat._synchronize_channel(channel_key) + + assert result == mock_channel + mock_ensure_synchronized.assert_called_once_with(ChannelReference(channel=mock_channel, hash=channel_key)) + + +@pytest.mark.asyncio +async def test_synchronize_channel_with_nonexistent_channel(agent_chat): + channel_key = "test_channel_key" + + result = await agent_chat._synchronize_channel(channel_key) + + assert result is None + + +def test_get_agent_hash_with_existing_hash(agent_chat, agent): + expected_hash = "existing_hash" + agent_chat.channel_map[agent] = expected_hash + + result = agent_chat._get_agent_hash(agent) + + assert result == expected_hash + + +def test_get_agent_hash_generates_new_hash(agent_chat, agent): + expected_hash = "new_hash" + agent.get_channel_keys = MagicMock(return_value=["key1", "key2"]) + + with patch( + "semantic_kernel.agents.group_chat.agent_chat.KeyEncoder.generate_hash", return_value=expected_hash + ) as mock_generate_hash: + result = agent_chat._get_agent_hash(agent) + + assert result == expected_hash + mock_generate_hash.assert_called_once_with(["key1", "key2"]) + assert agent_chat.channel_map[agent] == expected_hash + + +@pytest.mark.asyncio +async def test_add_chat_messages_throws_exception_for_system_role(agent_chat): + system_message = MagicMock(spec=ChatMessageContent) + system_message.role = AuthorRole.SYSTEM + + with pytest.raises(AgentChatException, match="System messages cannot be added to the chat history."): + await agent_chat.add_chat_messages([system_message]) + + +@pytest.mark.asyncio +async def test_get_or_create_channel_creates_new_channel(agent_chat, agent): + agent_chat.history.messages = [MagicMock(spec=ChatMessageContent)] + channel_key = "test_channel_key" + mock_channel = AsyncMock(spec=AgentChannel) + + with ( + patch( + "semantic_kernel.agents.group_chat.agent_chat.AgentChat._get_agent_hash", return_value=channel_key + ) as mock_get_agent_hash, + patch( + "semantic_kernel.agents.group_chat.agent_chat.AgentChat._synchronize_channel", return_value=None + ) as mock_synchronize_channel, + ): + agent.create_channel = AsyncMock(return_value=mock_channel) + with patch.object(mock_channel, "receive", return_value=AsyncMock()) as mock_receive: + result = await agent_chat._get_or_create_channel(agent) + + assert result == mock_channel + mock_get_agent_hash.assert_called_once_with(agent) + mock_synchronize_channel.assert_called_once_with(channel_key) + agent.create_channel.assert_called_once() + 
mock_receive.assert_called_once_with(agent_chat.history.messages) + assert agent_chat.agent_channels[channel_key] == mock_channel + + +@pytest.mark.asyncio +async def test_get_or_create_channel_reuses_existing_channel(agent_chat, agent): + channel_key = "test_channel_key" + mock_channel = MagicMock(spec=AgentChannel) + + with ( + patch( + "semantic_kernel.agents.group_chat.agent_chat.AgentChat._get_agent_hash", return_value=channel_key + ) as mock_get_agent_hash, + patch( + "semantic_kernel.agents.group_chat.agent_chat.AgentChat._synchronize_channel", return_value=mock_channel + ) as mock_synchronize_channel, + ): + result = await agent_chat._get_or_create_channel(agent) + + assert result == mock_channel + mock_get_agent_hash.assert_called_once_with(agent) + mock_synchronize_channel.assert_called_once_with(channel_key) + agent.create_channel.assert_not_called() diff --git a/python/tests/unit/agents/test_agent_chat_utils.py b/python/tests/unit/agents/test_agent_chat_utils.py new file mode 100644 index 000000000000..fb440a7ad896 --- /dev/null +++ b/python/tests/unit/agents/test_agent_chat_utils.py @@ -0,0 +1,27 @@ +# Copyright (c) Microsoft. All rights reserved. + +import base64 +from hashlib import sha256 + +import pytest + +from semantic_kernel.agents.group_chat.agent_chat_utils import KeyEncoder +from semantic_kernel.exceptions.agent_exceptions import AgentExecutionException + + +def test_generate_hash_valid_keys(): + keys = ["key1", "key2", "key3"] + expected_joined_keys = ":".join(keys).encode("utf-8") + expected_hash = sha256(expected_joined_keys).digest() + expected_base64 = base64.b64encode(expected_hash).decode("utf-8") + + result = KeyEncoder.generate_hash(keys) + + assert result == expected_base64 + + +def test_generate_hash_empty_keys(): + with pytest.raises( + AgentExecutionException, match="Channel Keys must not be empty. Unable to generate channel hash." + ): + KeyEncoder.generate_hash([]) diff --git a/python/tests/unit/agents/test_agent_group_chat.py b/python/tests/unit/agents/test_agent_group_chat.py new file mode 100644 index 000000000000..e4f607098527 --- /dev/null +++ b/python/tests/unit/agents/test_agent_group_chat.py @@ -0,0 +1,208 @@ +# Copyright (c) Microsoft. All rights reserved. 
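# [Editor's note] test_agent_chat_utils.py above verifies KeyEncoder.generate_hash
# by recomputing the expected value inline. The derivation it encodes, standalone:
# join the channel keys with ":", take the SHA-256 digest, then base64-encode it.
import base64
from hashlib import sha256

keys = ["key1", "key2", "key3"]
digest = sha256(":".join(keys).encode("utf-8")).digest()
channel_hash = base64.b64encode(digest).decode("utf-8")
print(channel_hash)  # deterministic for a given key list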
+ +from unittest import mock +from unittest.mock import AsyncMock, MagicMock + +import pytest + +from semantic_kernel.agents.agent import Agent +from semantic_kernel.agents.group_chat.agent_chat import AgentChat +from semantic_kernel.agents.group_chat.agent_group_chat import AgentGroupChat +from semantic_kernel.agents.strategies.selection.selection_strategy import SelectionStrategy +from semantic_kernel.agents.strategies.selection.sequential_selection_strategy import SequentialSelectionStrategy +from semantic_kernel.agents.strategies.termination.default_termination_strategy import DefaultTerminationStrategy +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.agent_exceptions import AgentChatException + + +@pytest.fixture +def agents(): + """Fixture that provides a list of mock agents.""" + return [MagicMock(spec=Agent, id=f"agent-{i}") for i in range(3)] + + +@pytest.fixture +def termination_strategy(): + """Fixture that provides a mock termination strategy.""" + return AsyncMock(spec=TerminationStrategy) + + +@pytest.fixture +def selection_strategy(): + """Fixture that provides a mock selection strategy.""" + return AsyncMock(spec=SelectionStrategy) + + +def test_agent_group_chat_initialization(agents, termination_strategy, selection_strategy): + group_chat = AgentGroupChat( + agents=agents, termination_strategy=termination_strategy, selection_strategy=selection_strategy + ) + + assert group_chat.agents == agents + assert group_chat.agent_ids == {agent.id for agent in agents} + assert group_chat.termination_strategy == termination_strategy + assert group_chat.selection_strategy == selection_strategy + + +def test_agent_group_chat_initialization_defaults(): + group_chat = AgentGroupChat() + + assert group_chat.agents == [] + assert group_chat.agent_ids == set() + assert isinstance(group_chat.termination_strategy, DefaultTerminationStrategy) + assert isinstance(group_chat.selection_strategy, SequentialSelectionStrategy) + + +def test_add_agent(agents): + group_chat = AgentGroupChat() + + group_chat.add_agent(agents[0]) + + assert agents[0] in group_chat.agents + assert agents[0].id in group_chat.agent_ids + + +def test_add_duplicate_agent(agents): + group_chat = AgentGroupChat(agents=[agents[0]]) + + group_chat.add_agent(agents[0]) + + assert len(group_chat.agents) == 1 + assert len(group_chat.agent_ids) == 1 + + +@pytest.mark.asyncio +async def test_invoke_single_turn(agents, termination_strategy): + group_chat = AgentGroupChat(termination_strategy=termination_strategy) + + async def mock_invoke(agent, is_joining=True): + yield MagicMock(role=AuthorRole.ASSISTANT) + + with mock.patch.object(AgentGroupChat, "invoke", side_effect=mock_invoke): + termination_strategy.should_terminate.return_value = False + + async for message in group_chat.invoke_single_turn(agents[0]): + assert message.role == AuthorRole.ASSISTANT + + termination_strategy.should_terminate.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_invoke_single_turn_sets_complete(agents, termination_strategy): + group_chat = AgentGroupChat(termination_strategy=termination_strategy) + + async def mock_invoke(agent, is_joining=True): + yield MagicMock(role=AuthorRole.ASSISTANT) + + with mock.patch.object(AgentGroupChat, "invoke", side_effect=mock_invoke): + termination_strategy.should_terminate.return_value = True + + async for _ in group_chat.invoke_single_turn(agents[0]): + 
pass + + assert group_chat.is_complete is True + termination_strategy.should_terminate.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_invoke_with_agent_joining(agents, termination_strategy): + for agent in agents: + agent.name = f"Agent {agent.id}" + agent.id = f"agent-{agent.id}" + + group_chat = AgentGroupChat(termination_strategy=termination_strategy) + + with ( + mock.patch.object(AgentGroupChat, "add_agent", autospec=True) as mock_add_agent, + mock.patch.object(AgentChat, "invoke_agent", autospec=True) as mock_invoke_agent, + ): + + async def mock_invoke_gen(*args, **kwargs): + yield MagicMock(role=AuthorRole.ASSISTANT) + + mock_invoke_agent.side_effect = mock_invoke_gen + + async for _ in group_chat.invoke(agents[0], is_joining=True): + pass + + mock_add_agent.assert_called_once_with(group_chat, agents[0]) + + +@pytest.mark.asyncio +async def test_invoke_with_complete_chat(agents, termination_strategy): + termination_strategy.automatic_reset = False + group_chat = AgentGroupChat(termination_strategy=termination_strategy) + group_chat.is_complete = True + + with pytest.raises(AgentChatException, match="Chat is already complete"): + async for _ in group_chat.invoke(): + pass + + +@pytest.mark.asyncio +async def test_invoke_selection_strategy_error(agents, selection_strategy): + group_chat = AgentGroupChat(agents=agents, selection_strategy=selection_strategy) + + selection_strategy.next.side_effect = Exception("Selection failed") + + with pytest.raises(AgentChatException, match="Failed to select agent"): + async for _ in group_chat.invoke(): + pass + + +@pytest.mark.asyncio +async def test_invoke_iterations(agents, termination_strategy, selection_strategy): + for agent in agents: + agent.name = f"Agent {agent.id}" + agent.id = f"agent-{agent.id}" + + termination_strategy.maximum_iterations = 2 + + group_chat = AgentGroupChat( + agents=agents, termination_strategy=termination_strategy, selection_strategy=selection_strategy + ) + + selection_strategy.next.side_effect = lambda agents, history: agents[0] + + async def mock_invoke_agent(*args, **kwargs): + yield MagicMock(role=AuthorRole.ASSISTANT) + + with mock.patch.object(AgentChat, "invoke_agent", side_effect=mock_invoke_agent): + termination_strategy.should_terminate.return_value = False + + iteration_count = 0 + async for _ in group_chat.invoke(): + iteration_count += 1 + + assert iteration_count == 2 + + +@pytest.mark.asyncio +async def test_invoke_is_complete_then_reset(agents, termination_strategy, selection_strategy): + for agent in agents: + agent.name = f"Agent {agent.id}" + agent.id = f"agent-{agent.id}" + + termination_strategy.maximum_iterations = 2 + termination_strategy.automatic_reset = True + + group_chat = AgentGroupChat( + agents=agents, termination_strategy=termination_strategy, selection_strategy=selection_strategy + ) + + group_chat.is_complete = True + + selection_strategy.next.side_effect = lambda agents, history: agents[0] + + async def mock_invoke_agent(*args, **kwargs): + yield MagicMock(role=AuthorRole.ASSISTANT) + + with mock.patch.object(AgentChat, "invoke_agent", side_effect=mock_invoke_agent): + termination_strategy.should_terminate.return_value = False + + iteration_count = 0 + async for _ in group_chat.invoke(): + iteration_count += 1 + + assert iteration_count == 2 diff --git a/python/tests/unit/agents/test_aggregator_termination_strategy.py b/python/tests/unit/agents/test_aggregator_termination_strategy.py new file mode 100644 index 000000000000..e729dc8acd2f --- /dev/null +++ 
b/python/tests/unit/agents/test_aggregator_termination_strategy.py @@ -0,0 +1,125 @@ +# Copyright (c) Microsoft. All rights reserved. + +from unittest.mock import AsyncMock, MagicMock + +import pytest + +from semantic_kernel.agents.agent import Agent +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.strategies.termination.aggregator_termination_strategy import ( + AggregateTerminationCondition, + AggregatorTerminationStrategy, +) +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.contents.chat_message_content import ChatMessageContent + + +class MockAgent(Agent): + """A mock agent for testing purposes.""" + + def __init__(self, id: str = None, name: str = "Test Agent", description: str = "A test agent"): + args = { + "name": name, + "description": description, + } + if id is not None: + args["id"] = id + super().__init__(**args) + + def get_channel_keys(self) -> list[str]: + return ["key1", "key2"] + + async def create_channel(self) -> AgentChannel: + return AsyncMock(spec=AgentChannel) + + +@pytest.mark.asyncio +async def test_aggregate_termination_condition_all_true(): + agent = MockAgent(id="test-agent-id") + history = [MagicMock(spec=ChatMessageContent)] + + # Mocking two strategies that return True + strategy1 = AsyncMock(spec=TerminationStrategy) + strategy1.should_terminate.return_value = True + + strategy2 = AsyncMock(spec=TerminationStrategy) + strategy2.should_terminate.return_value = True + + strategy = AggregatorTerminationStrategy( + strategies=[strategy1, strategy2], condition=AggregateTerminationCondition.ALL + ) + + result = await strategy.should_terminate_async(agent, history) + + assert result is True + strategy1.should_terminate.assert_awaited_once() + strategy2.should_terminate.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_aggregate_termination_condition_all_false(): + agent = MockAgent(id="test-agent-id") + history = [MagicMock(spec=ChatMessageContent)] + + # Mocking two strategies, one returns True, the other False + strategy1 = AsyncMock(spec=TerminationStrategy) + strategy1.should_terminate.return_value = True + + strategy2 = AsyncMock(spec=TerminationStrategy) + strategy2.should_terminate.return_value = False + + strategy = AggregatorTerminationStrategy( + strategies=[strategy1, strategy2], condition=AggregateTerminationCondition.ALL + ) + + result = await strategy.should_terminate_async(agent, history) + + assert result is False + strategy1.should_terminate.assert_awaited_once() + strategy2.should_terminate.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_aggregate_termination_condition_any_true(): + agent = MockAgent(id="test-agent-id") + history = [MagicMock(spec=ChatMessageContent)] + + # Mocking two strategies, one returns False, the other True + strategy1 = AsyncMock(spec=TerminationStrategy) + strategy1.should_terminate.return_value = False + + strategy2 = AsyncMock(spec=TerminationStrategy) + strategy2.should_terminate.return_value = True + + strategy = AggregatorTerminationStrategy( + strategies=[strategy1, strategy2], condition=AggregateTerminationCondition.ANY + ) + + result = await strategy.should_terminate_async(agent, history) + + assert result is True + strategy1.should_terminate.assert_awaited_once() + strategy2.should_terminate.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_aggregate_termination_condition_any_false(): + agent = MockAgent(id="test-agent-id") + history = 
[MagicMock(spec=ChatMessageContent)] + + # Mocking two strategies that return False + strategy1 = AsyncMock(spec=TerminationStrategy) + strategy1.should_terminate.return_value = False + + strategy2 = AsyncMock(spec=TerminationStrategy) + strategy2.should_terminate.return_value = False + + strategy = AggregatorTerminationStrategy( + strategies=[strategy1, strategy2], condition=AggregateTerminationCondition.ANY + ) + + result = await strategy.should_terminate_async(agent, history) + + assert result is False + strategy1.should_terminate.assert_awaited_once() + strategy2.should_terminate.assert_awaited_once() diff --git a/python/tests/unit/agents/test_azure_assistant_agent.py b/python/tests/unit/agents/test_azure_assistant_agent.py new file mode 100644 index 000000000000..4dc6cf8090c3 --- /dev/null +++ b/python/tests/unit/agents/test_azure_assistant_agent.py @@ -0,0 +1,365 @@ +# Copyright (c) Microsoft. All rights reserved. + +from unittest.mock import AsyncMock, MagicMock, mock_open, patch + +import pytest +from openai import AsyncAzureOpenAI +from openai.resources.beta.assistants import Assistant +from openai.types.beta.assistant import ToolResources, ToolResourcesCodeInterpreter, ToolResourcesFileSearch +from pydantic import ValidationError + +from semantic_kernel.agents.open_ai import AzureAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_base import OpenAIAssistantBase +from semantic_kernel.exceptions.agent_exceptions import AgentInitializationException +from semantic_kernel.kernel import Kernel + + +@pytest.fixture +def azure_openai_assistant_agent(kernel: Kernel, azure_openai_unit_test_env): + return AzureAssistantAgent( + kernel=kernel, + service_id="test_service", + name="test_name", + instructions="test_instructions", + api_key="test_api_key", + endpoint="https://test.endpoint", + ai_model_id="test_model", + api_version="2024-05-01-preview", + default_headers={"User-Agent": "test-agent"}, + ) + + +@pytest.fixture +def mock_assistant(): + return Assistant( + created_at=123456789, + object="assistant", + metadata={ + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + model="test_model", + description="test_description", + id="test_id", + instructions="test_instructions", + name="test_name", + tools=[{"type": "code_interpreter"}, {"type": "file_search"}], + temperature=0.7, + top_p=0.9, + response_format={"type": "json_object"}, + tool_resources=ToolResources( + code_interpreter=ToolResourcesCodeInterpreter(code_interpreter_file_ids=["file1", "file2"]), + file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]), + ), + ) + + +def test_initialization(azure_openai_assistant_agent: AzureAssistantAgent, azure_openai_unit_test_env): + agent = azure_openai_assistant_agent + assert agent is not None + + +def test_create_client(azure_openai_assistant_agent, azure_openai_unit_test_env): + assert isinstance(azure_openai_assistant_agent.client, AsyncAzureOpenAI) + + +def test_create_client_from_configuration(azure_openai_assistant_agent, azure_openai_unit_test_env): + assert isinstance(azure_openai_assistant_agent.client, AsyncAzureOpenAI) + assert azure_openai_assistant_agent.client.api_key == "test_api_key" + + +def test_create_client_from_configuration_missing_api_key(): + with pytest.raises( + AgentInitializationException, + match="Please provide either AzureOpenAI api_key, an ad_token or an ad_token_provider or a client.", + ): + 
AzureAssistantAgent._create_client(None) + + +def test_create_client_from_configuration_missing_endpoint(): + with pytest.raises( + AgentInitializationException, + match="Please provide an AzureOpenAI endpoint.", + ): + AzureAssistantAgent._create_client(api_key="test") + + +@pytest.mark.asyncio +async def test_create_agent(kernel: Kernel, azure_openai_unit_test_env): + with patch.object(AzureAssistantAgent, "create_assistant", new_callable=AsyncMock) as mock_create_assistant: + mock_create_assistant.return_value = MagicMock(spec=Assistant) + agent = await AzureAssistantAgent.create( + kernel=kernel, service_id="test_service", name="test_name", api_key="test_api_key", api_version="2024-05-01" + ) + assert agent.assistant is not None + mock_create_assistant.assert_called_once() + await agent.client.close() + + +@pytest.mark.asyncio +async def test_create_agent_with_files(kernel: Kernel, azure_openai_unit_test_env): + mock_open_file = mock_open(read_data="file_content") + with ( + patch("builtins.open", mock_open_file), + patch( + "semantic_kernel.agents.open_ai.open_ai_assistant_base.OpenAIAssistantBase.add_file", + return_value="test_file_id", + ), + patch( + "semantic_kernel.agents.open_ai.open_ai_assistant_base.OpenAIAssistantBase.create_vector_store", + return_value="vector_store_id", + ), + patch.object(AzureAssistantAgent, "create_assistant", new_callable=AsyncMock) as mock_create_assistant, + ): + mock_create_assistant.return_value = MagicMock(spec=Assistant) + agent = await AzureAssistantAgent.create( + kernel=kernel, + service_id="test_service", + name="test_name", + api_key="test_api_key", + api_version="2024-05-01", + code_interpreter_filenames=["file1", "file2"], + vector_store_filenames=["file3", "file4"], + enable_code_interpreter=True, + enable_file_search=True, + ) + assert agent.assistant is not None + mock_create_assistant.assert_called_once() + + +@pytest.mark.asyncio +async def test_create_agent_with_code_files_not_found_raises_exception(kernel: Kernel, azure_openai_unit_test_env): + mock_open_file = mock_open(read_data="file_content") + with ( + patch("builtins.open", mock_open_file), + patch( + "semantic_kernel.agents.open_ai.open_ai_assistant_base.OpenAIAssistantBase.add_file", + side_effect=FileNotFoundError("File not found"), + ), + patch.object(AzureAssistantAgent, "create_assistant", new_callable=AsyncMock) as mock_create_assistant, + ): + mock_create_assistant.return_value = MagicMock(spec=Assistant) + with pytest.raises(AgentInitializationException, match="Failed to upload code interpreter files."): + _ = await AzureAssistantAgent.create( + kernel=kernel, + service_id="test_service", + deployment_name="test_deployment_name", + name="test_name", + api_key="test_api_key", + api_version="2024-05-01", + code_interpreter_filenames=["file1", "file2"], + ) + + +@pytest.mark.asyncio +async def test_create_agent_with_search_files_not_found_raises_exception(kernel: Kernel, azure_openai_unit_test_env): + mock_open_file = mock_open(read_data="file_content") + with ( + patch("builtins.open", mock_open_file), + patch( + "semantic_kernel.agents.open_ai.open_ai_assistant_base.OpenAIAssistantBase.add_file", + side_effect=FileNotFoundError("File not found"), + ), + patch.object(AzureAssistantAgent, "create_assistant", new_callable=AsyncMock) as mock_create_assistant, + ): + mock_create_assistant.return_value = MagicMock(spec=Assistant) + with pytest.raises(AgentInitializationException, match="Failed to upload file search files."): + _ = await AzureAssistantAgent.create( + 
kernel=kernel, + service_id="test_service", + deployment_name="test_deployment_name", + name="test_name", + api_key="test_api_key", + api_version="2024-05-01", + vector_store_filenames=["file3", "file4"], + ) + + +@pytest.mark.asyncio +async def test_list_definitions(kernel: Kernel, mock_assistant, azure_openai_unit_test_env): + agent = AzureAssistantAgent( + kernel=kernel, service_id="test_service", name="test_name", instructions="test_instructions", id="test_id" + ) + + with patch.object( + AzureAssistantAgent, "_create_client", return_value=MagicMock(spec=AsyncAzureOpenAI) + ) as mock_create_client: + mock_client_instance = mock_create_client.return_value + mock_client_instance.beta = MagicMock() + mock_client_instance.beta.assistants = MagicMock() + mock_client_instance.beta.assistants.list = AsyncMock(return_value=MagicMock(data=[mock_assistant])) + + agent.client = mock_client_instance + + definitions = [] + async for definition in agent.list_definitions(): + definitions.append(definition) + + mock_client_instance.beta.assistants.list.assert_called() + + assert len(definitions) == 1 + assert definitions[0] == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "code_interpreter_file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + + +@pytest.mark.asyncio +async def test_retrieve_agent(kernel, azure_openai_unit_test_env): + with patch.object( + AzureAssistantAgent, "_create_client", return_value=MagicMock(spec=AsyncAzureOpenAI) + ) as mock_create_client: + mock_client_instance = mock_create_client.return_value + mock_client_instance.beta = MagicMock() + mock_client_instance.beta.assistants = MagicMock() + + mock_client_instance.beta.assistants.retrieve = AsyncMock(return_value=AsyncMock()) + + OpenAIAssistantBase._create_open_ai_assistant_definition = MagicMock( + return_value={ + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "code_interpreter_file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + ) + + retrieved_agent = await AzureAssistantAgent.retrieve(id="test_id", api_key="test_api_key", kernel=kernel) + assert retrieved_agent.model_dump( + include={ + "ai_model_id", + "description", + "id", + "instructions", + "name", + "enable_code_interpreter", + "enable_file_search", + "enable_json_response", + "code_interpreter_file_ids", + "temperature", + "top_p", + "vector_store_id", + "metadata", + "max_completion_tokens", + "max_prompt_tokens", + 
"parallel_tool_calls_enabled", + "truncation_message_count", + } + ) == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "code_interpreter_file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + mock_client_instance.beta.assistants.retrieve.assert_called_once_with("test_id") + OpenAIAssistantBase._create_open_ai_assistant_definition.assert_called_once() + + +@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"]], indirect=True) +@pytest.mark.asyncio +async def test_retrieve_agent_missing_chat_deployment_name_throws(kernel, azure_openai_unit_test_env): + with pytest.raises(AgentInitializationException, match="The Azure OpenAI chat_deployment_name is required."): + _ = await AzureAssistantAgent.retrieve( + id="test_id", api_key="test_api_key", kernel=kernel, env_file_path="test.env" + ) + + +@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_KEY"]], indirect=True) +@pytest.mark.asyncio +async def test_retrieve_agent_missing_api_key_throws(kernel, azure_openai_unit_test_env): + with pytest.raises( + AgentInitializationException, match="Please provide either api_key, ad_token or ad_token_provider." + ): + _ = await AzureAssistantAgent.retrieve(id="test_id", kernel=kernel, env_file_path="test.env") + + +def test_open_ai_settings_create_throws(azure_openai_unit_test_env): + with patch( + "semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings.AzureOpenAISettings.create" + ) as mock_create: + mock_create.side_effect = ValidationError.from_exception_data("test", line_errors=[], input_type="python") + + with pytest.raises(AgentInitializationException, match="Failed to create Azure OpenAI settings."): + AzureAssistantAgent(service_id="test", api_key="test_api_key", deployment_name="test_deployment_name") + + +@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"]], indirect=True) +def test_azure_openai_agent_create_missing_deployment_name(azure_openai_unit_test_env): + with pytest.raises(AgentInitializationException, match="The Azure OpenAI chat_deployment_name is required."): + AzureAssistantAgent( + service_id="test_service", api_key="test_key", endpoint="https://example.com", env_file_path="test.env" + ) + + +@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_KEY"]], indirect=True) +def test_azure_openai_agent_create_missing_api_key(azure_openai_unit_test_env): + with pytest.raises( + AgentInitializationException, match="Please provide either api_key, ad_token or ad_token_provider." + ): + AzureAssistantAgent(service_id="test_service", endpoint="https://example.com", env_file_path="test.env") diff --git a/python/tests/unit/agents/test_broadcast_queue.py b/python/tests/unit/agents/test_broadcast_queue.py new file mode 100644 index 000000000000..3dc60923584c --- /dev/null +++ b/python/tests/unit/agents/test_broadcast_queue.py @@ -0,0 +1,181 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import asyncio +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.group_chat.broadcast_queue import BroadcastQueue, ChannelReference, QueueReference +from semantic_kernel.contents.chat_message_content import ChatMessageContent + + +@pytest.fixture +def channel_ref(): + """Fixture that provides a mock ChannelReference.""" + mock_channel = AsyncMock(spec=AgentChannel) + return ChannelReference(channel=mock_channel, hash="test-hash") + + +@pytest.fixture +def message(): + """Fixture that provides a mock ChatMessageContent.""" + return MagicMock(spec=ChatMessageContent) + + +# region QueueReference Tests + + +def test_queue_reference_is_empty_true(): + queue_ref = QueueReference() + assert queue_ref.is_empty is True + + +def test_queue_reference_is_empty_false(): + queue_ref = QueueReference() + queue_ref.queue.append(MagicMock()) + assert queue_ref.is_empty is False + + +# endregion + +# region BroadcastQueue Tests + + +@pytest.mark.asyncio +async def test_enqueue_new_channel(channel_ref, message): + broadcast_queue = BroadcastQueue() + + await broadcast_queue.enqueue([channel_ref], [message]) + + assert channel_ref.hash in broadcast_queue.queues + queue_ref = broadcast_queue.queues[channel_ref.hash] + assert queue_ref.queue[0] == [message] + assert queue_ref.receive_task is not None + assert not queue_ref.receive_task.done() + + +@pytest.mark.asyncio +async def test_enqueue_existing_channel(channel_ref, message): + broadcast_queue = BroadcastQueue() + + await broadcast_queue.enqueue([channel_ref], [message]) + + await broadcast_queue.enqueue([channel_ref], [message]) + + queue_ref = broadcast_queue.queues[channel_ref.hash] + assert len(queue_ref.queue) == 2 + assert queue_ref.queue[1] == [message] + assert queue_ref.receive_task is not None + assert not queue_ref.receive_task.done() + + +@pytest.mark.asyncio +async def test_ensure_synchronized_channel_empty(channel_ref): + broadcast_queue = BroadcastQueue() + + await broadcast_queue.ensure_synchronized(channel_ref) + assert channel_ref.hash not in broadcast_queue.queues + + +@pytest.mark.asyncio +async def test_ensure_synchronized_with_messages(channel_ref, message): + broadcast_queue = BroadcastQueue() + + await broadcast_queue.enqueue([channel_ref], [message]) + + await broadcast_queue.ensure_synchronized(channel_ref) + + queue_ref = broadcast_queue.queues[channel_ref.hash] + assert queue_ref.is_empty is True + + +@pytest.mark.asyncio +async def test_ensure_synchronized_with_failure(channel_ref, message): + broadcast_queue = BroadcastQueue() + + await broadcast_queue.enqueue([channel_ref], [message]) + + queue_ref = broadcast_queue.queues[channel_ref.hash] + queue_ref.receive_failure = Exception("Simulated failure") + + with pytest.raises(Exception, match="Unexpected failure broadcasting to channel"): + await broadcast_queue.ensure_synchronized(channel_ref) + + assert queue_ref.receive_failure is None + + +@pytest.mark.asyncio +async def test_ensure_synchronized_creates_new_task(channel_ref, message): + broadcast_queue = BroadcastQueue() + + await broadcast_queue.enqueue([channel_ref], [message]) + + queue_ref = broadcast_queue.queues[channel_ref.hash] + + queue_ref.receive_task = None + + with patch( + "semantic_kernel.agents.group_chat.broadcast_queue.BroadcastQueue.receive", new_callable=AsyncMock + ) as mock_receive: + mock_receive.return_value = await asyncio.sleep(0.1) + + await 
broadcast_queue.ensure_synchronized(channel_ref) + + assert queue_ref.receive_task is None + + +@pytest.mark.asyncio +async def test_receive_processes_queue(channel_ref, message): + broadcast_queue = BroadcastQueue() + + await broadcast_queue.enqueue([channel_ref], [message]) + + queue_ref = broadcast_queue.queues[channel_ref.hash] + + await broadcast_queue.receive(channel_ref, queue_ref) + + assert queue_ref.is_empty is True + + assert channel_ref.channel.receive.await_count >= 1 + channel_ref.channel.receive.assert_any_await([message]) + + +@pytest.mark.asyncio +async def test_receive_handles_failure(channel_ref, message): + broadcast_queue = BroadcastQueue() + + await broadcast_queue.enqueue([channel_ref], [message]) + + channel_ref.channel.receive.side_effect = Exception("Simulated failure") + + queue_ref = broadcast_queue.queues[channel_ref.hash] + + await broadcast_queue.receive(channel_ref, queue_ref) + + assert queue_ref.receive_failure is not None + assert str(queue_ref.receive_failure) == "Simulated failure" + + +@pytest.mark.asyncio +async def test_receive_breaks_when_queue_is_empty(channel_ref, message): + broadcast_queue = BroadcastQueue() + + await broadcast_queue.enqueue([channel_ref], [message]) + + queue_ref = broadcast_queue.queues[channel_ref.hash] + + assert not queue_ref.is_empty + + channel_ref.channel.receive = AsyncMock() + + queue_ref.queue.clear() + + await broadcast_queue.receive(channel_ref, queue_ref) + + channel_ref.channel.receive.assert_not_awaited() + + assert queue_ref.is_empty + + +# endregion diff --git a/python/tests/unit/agents/test_chat_completion_agent.py b/python/tests/unit/agents/test_chat_completion_agent.py index 7b40176cbfd1..a52e89129cef 100644 --- a/python/tests/unit/agents/test_chat_completion_agent.py +++ b/python/tests/unit/agents/test_chat_completion_agent.py @@ -4,8 +4,8 @@ import pytest -from semantic_kernel.agents.chat_completion_agent import ChatCompletionAgent -from semantic_kernel.agents.chat_history_channel import ChatHistoryChannel +from semantic_kernel.agents import ChatCompletionAgent +from semantic_kernel.agents.channels.chat_history_channel import ChatHistoryChannel from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent @@ -206,8 +206,9 @@ def test_get_channel_keys(): assert keys == [ChatHistoryChannel.__name__] -def test_create_channel(): +@pytest.mark.asyncio +async def test_create_channel(): agent = ChatCompletionAgent() - channel = agent.create_channel() + channel = await agent.create_channel() assert isinstance(channel, ChatHistoryChannel) diff --git a/python/tests/unit/agents/test_chat_history_channel.py b/python/tests/unit/agents/test_chat_history_channel.py index b3160cb91ebf..4d0bb84997b1 100644 --- a/python/tests/unit/agents/test_chat_history_channel.py +++ b/python/tests/unit/agents/test_chat_history_channel.py @@ -1,12 +1,13 @@ # Copyright (c) Microsoft. All rights reserved. 
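The test_chat_completion_agent.py hunk above captures an API change worth noting: create_channel is now a coroutine, so call sites must await it. A short usage sketch of the new shape (assuming, as the updated test does, that ChatCompletionAgent can be constructed with no arguments):

import asyncio

from semantic_kernel.agents import ChatCompletionAgent


async def main() -> None:
    agent = ChatCompletionAgent()
    # create_channel is async after this change and must be awaited.
    channel = await agent.create_channel()
    print(type(channel).__name__)  # expected: ChatHistoryChannel


asyncio.run(main())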
from collections.abc import AsyncIterable +from unittest.mock import AsyncMock import pytest -from semantic_kernel.agents.chat_history_channel import ChatHistoryAgentProtocol, ChatHistoryChannel +from semantic_kernel.agents.channels.chat_history_channel import ChatHistoryAgentProtocol, ChatHistoryChannel from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent +from semantic_kernel.contents.function_result_content import FunctionResultContent from semantic_kernel.contents.utils.author_role import AuthorRole from semantic_kernel.exceptions import ServiceInvalidTypeError @@ -18,9 +19,6 @@ async def invoke(self, history: list[ChatMessageContent]) -> AsyncIterable[ChatM for message in history: yield ChatMessageContent(role=AuthorRole.SYSTEM, content=f"Processed: {message.content}") - async def invoke_stream(self, history: list[ChatMessageContent]) -> AsyncIterable["StreamingChatMessageContent"]: - pass - class MockNonChatHistoryHandler: """Mock agent to test incorrect instance handling.""" @@ -31,23 +29,77 @@ class MockNonChatHistoryHandler: ChatHistoryAgentProtocol.register(MockChatHistoryHandler) +class AsyncIterableMock: + def __init__(self, async_gen): + self.async_gen = async_gen + + def __aiter__(self): + return self.async_gen() + + @pytest.mark.asyncio async def test_invoke(): channel = ChatHistoryChannel() - agent = MockChatHistoryHandler() + agent = AsyncMock(spec=MockChatHistoryHandler) + + async def mock_invoke(history: list[ChatMessageContent]): + for message in history: + yield ChatMessageContent(role=AuthorRole.SYSTEM, content=f"Processed: {message.content}") + + agent.invoke.return_value = AsyncIterableMock( + lambda: mock_invoke([ChatMessageContent(role=AuthorRole.USER, content="Initial message")]) + ) initial_message = ChatMessageContent(role=AuthorRole.USER, content="Initial message") channel.messages.append(initial_message) received_messages = [] - async for message in channel.invoke(agent): + async for is_visible, message in channel.invoke(agent): received_messages.append(message) - break # only process one message for the test + assert is_visible assert len(received_messages) == 1 assert "Processed: Initial message" in received_messages[0].content +@pytest.mark.asyncio +async def test_invoke_leftover_in_queue(): + channel = ChatHistoryChannel() + agent = AsyncMock(spec=MockChatHistoryHandler) + + async def mock_invoke(history: list[ChatMessageContent]): + for message in history: + yield ChatMessageContent(role=AuthorRole.SYSTEM, content=f"Processed: {message.content}") + yield ChatMessageContent( + role=AuthorRole.SYSTEM, content="Final message", items=[FunctionResultContent(id="test_id", result="test")] + ) + + agent.invoke.return_value = AsyncIterableMock( + lambda: mock_invoke([ + ChatMessageContent( + role=AuthorRole.USER, + content="Initial message", + items=[FunctionResultContent(id="test_id", result="test")], + ) + ]) + ) + + initial_message = ChatMessageContent(role=AuthorRole.USER, content="Initial message") + channel.messages.append(initial_message) + + received_messages = [] + async for is_visible, message in channel.invoke(agent): + received_messages.append(message) + assert is_visible + if len(received_messages) >= 3: + break + + assert len(received_messages) == 3 + assert "Processed: Initial message" in received_messages[0].content + assert "Final message" in received_messages[2].content + assert received_messages[2].items[0].id == "test_id" + 
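The AsyncIterableMock helper above exists because an AsyncMock return_value cannot be an async generator function directly; the tests instead wrap a generator factory in an object that exposes __aiter__. A standalone illustration of the same pattern (the numbers generator is hypothetical, not part of the test file):

import asyncio


class AsyncIterableMock:
    def __init__(self, async_gen):
        self.async_gen = async_gen

    def __aiter__(self):
        # Return a fresh async iterator each time iteration begins.
        return self.async_gen()


async def numbers():
    for i in range(3):
        yield i


async def main() -> None:
    async for n in AsyncIterableMock(numbers):
        print(n)  # prints 0, 1, 2


asyncio.run(main())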
+ @pytest.mark.asyncio +async def test_invoke_incorrect_instance_throws(): + channel = ChatHistoryChannel() @@ -91,3 +143,25 @@ async def test_get_history(): assert messages[0].role == AuthorRole.USER assert messages[1].content == "test message 1" assert messages[1].role == AuthorRole.SYSTEM + + +@pytest.mark.asyncio +async def test_reset_history(): + channel = ChatHistoryChannel() + history = [ + ChatMessageContent(role=AuthorRole.SYSTEM, content="test message 1"), + ChatMessageContent(role=AuthorRole.USER, content="test message 2"), + ] + channel.messages.extend(history) + + messages = [message async for message in channel.get_history()] + + assert len(messages) == 2 + assert messages[0].content == "test message 2" + assert messages[0].role == AuthorRole.USER + assert messages[1].content == "test message 1" + assert messages[1].role == AuthorRole.SYSTEM + + await channel.reset() + + assert len(channel.messages) == 0 diff --git a/python/tests/unit/agents/test_default_termination_strategy.py b/python/tests/unit/agents/test_default_termination_strategy.py new file mode 100644 index 000000000000..a28798ff1ae8 --- /dev/null +++ b/python/tests/unit/agents/test_default_termination_strategy.py @@ -0,0 +1,12 @@ +# Copyright (c) Microsoft. All rights reserved. + +import pytest + +from semantic_kernel.agents.strategies.termination.default_termination_strategy import DefaultTerminationStrategy + + +@pytest.mark.asyncio +async def test_should_agent_terminate(): + strategy = DefaultTerminationStrategy(maximum_iterations=2) + result = await strategy.should_agent_terminate(None, []) + assert not result diff --git a/python/tests/unit/agents/test_kernel_function_selection_strategy.py b/python/tests/unit/agents/test_kernel_function_selection_strategy.py new file mode 100644 index 000000000000..00853e7cba55 --- /dev/null +++ b/python/tests/unit/agents/test_kernel_function_selection_strategy.py @@ -0,0 +1,131 @@ +# Copyright (c) Microsoft. All rights reserved.
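The selection-strategy tests that follow rely on a simple contract: the strategy invokes a kernel function, applies result_parser to the invocation result, and matches the parsed value against agent names, failing when nothing matches. A compact sketch of that matching step (a simplification under stated assumptions; pick_agent is a hypothetical name, and the real strategy raises AgentExecutionException rather than RuntimeError):

def pick_agent(agents, parsed_name: str):
    """Return the first agent whose name equals the parsed result."""
    matches = [agent for agent in agents if agent.name == parsed_name]
    if not matches:
        # Stand-in for the library's AgentExecutionException.
        raise RuntimeError(f"Strategy unable to select next agent: {parsed_name}")
    return matches[0]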
+ +from unittest.mock import AsyncMock, MagicMock + +import pytest + +from semantic_kernel.agents.agent import Agent +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.strategies.selection.kernel_function_selection_strategy import ( + KernelFunctionSelectionStrategy, +) +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.exceptions.agent_exceptions import AgentExecutionException +from semantic_kernel.functions.kernel_function import KernelFunction +from semantic_kernel.kernel import Kernel + + +class MockAgent(Agent): + """A mock agent for testing purposes.""" + + def __init__(self, id: str = None, name: str = "Test Agent", description: str = "A test agent"): + args = { + "name": name, + "description": description, + } + if id is not None: + args["id"] = id + super().__init__(**args) + + def get_channel_keys(self) -> list[str]: + return ["key1", "key2"] + + async def create_channel(self) -> AgentChannel: + return AsyncMock(spec=AgentChannel) + + +@pytest.fixture +def agents(): + """Fixture that provides a list of mock agents.""" + return [MockAgent(id=f"agent-{i}", name=f"Agent-{i}") for i in range(3)] + + +@pytest.mark.asyncio +async def test_kernel_function_selection_next_success(agents): + history = [MagicMock(spec=ChatMessageContent)] + mock_function = AsyncMock(spec=KernelFunction) + mock_function.invoke.return_value = MagicMock(value="Agent-1") + mock_kernel = MagicMock(spec=Kernel) + + strategy = KernelFunctionSelectionStrategy( + function=mock_function, kernel=mock_kernel, result_parser=lambda result: result.value + ) + + selected_agent = await strategy.next(agents, history) + + assert selected_agent.name == "Agent-1" + mock_function.invoke.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_kernel_function_selection_next_agent_not_found(agents): + history = [MagicMock(spec=ChatMessageContent)] + mock_function = AsyncMock(spec=KernelFunction) + mock_function.invoke.return_value = MagicMock(value="Nonexistent-Agent") + mock_kernel = MagicMock(spec=Kernel) + + strategy = KernelFunctionSelectionStrategy( + function=mock_function, kernel=mock_kernel, result_parser=lambda result: result.value + ) + + with pytest.raises(AgentExecutionException) as excinfo: + await strategy.next(agents, history) + + assert "Strategy unable to select next agent" in str(excinfo.value) + mock_function.invoke.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_kernel_function_selection_next_result_is_none(agents): + history = [MagicMock(spec=ChatMessageContent)] + mock_function = AsyncMock(spec=KernelFunction) + mock_function.invoke.return_value = None + mock_kernel = MagicMock(spec=Kernel) + + strategy = KernelFunctionSelectionStrategy( + function=mock_function, kernel=mock_kernel, result_parser=lambda result: result.value if result else None + ) + + with pytest.raises(AgentExecutionException) as excinfo: + await strategy.next(agents, history) + + assert "Strategy unable to determine next agent" in str(excinfo.value) + mock_function.invoke.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_kernel_function_selection_next_exception_during_invoke(agents): + history = [MagicMock(spec=ChatMessageContent)] + mock_function = AsyncMock(spec=KernelFunction) + mock_function.invoke.side_effect = Exception("Test exception") + mock_kernel = MagicMock(spec=Kernel) + + strategy = KernelFunctionSelectionStrategy( + function=mock_function, kernel=mock_kernel, result_parser=lambda result: 
result.value + ) + + with pytest.raises(AgentExecutionException) as excinfo: + await strategy.next(agents, history) + + assert "Strategy failed to execute function" in str(excinfo.value) + mock_function.invoke.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_kernel_function_selection_result_parser_is_async(agents): + history = [MagicMock(spec=ChatMessageContent)] + mock_function = AsyncMock(spec=KernelFunction) + mock_function.invoke.return_value = MagicMock(value="Agent-2") + mock_kernel = MagicMock(spec=Kernel) + + async def async_result_parser(result): + return result.value + + strategy = KernelFunctionSelectionStrategy( + function=mock_function, kernel=mock_kernel, result_parser=async_result_parser + ) + + selected_agent = await strategy.next(agents, history) + + assert selected_agent.name == "Agent-2" + mock_function.invoke.assert_awaited_once() diff --git a/python/tests/unit/agents/test_kernel_function_termination_strategy.py b/python/tests/unit/agents/test_kernel_function_termination_strategy.py new file mode 100644 index 000000000000..fa3860d34a3f --- /dev/null +++ b/python/tests/unit/agents/test_kernel_function_termination_strategy.py @@ -0,0 +1,141 @@ +# Copyright (c) Microsoft. All rights reserved. + +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +from semantic_kernel.agents.agent import Agent +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.strategies import KernelFunctionTerminationStrategy +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.functions.kernel_function import KernelFunction +from semantic_kernel.kernel import Kernel + + +class MockAgent(Agent): + """A mock agent for testing purposes.""" + + def __init__(self, id: str = None, name: str = "Test Agent", description: str = "A test agent"): + args = { + "name": name, + "description": description, + } + if id is not None: + args["id"] = id + super().__init__(**args) + + def get_channel_keys(self) -> list[str]: + return ["key1", "key2"] + + async def create_channel(self) -> AgentChannel: + return AsyncMock(spec=AgentChannel) + + +@pytest.mark.asyncio +async def test_should_agent_terminate_with_result_true(): + agent = MockAgent(id="test-agent-id") + history = [MagicMock(spec=ChatMessageContent)] + + mock_function = AsyncMock(spec=KernelFunction) + mock_function.invoke.return_value = MagicMock(value=True) + mock_kernel = MagicMock(spec=Kernel) + + strategy = KernelFunctionTerminationStrategy( + agents=[agent], function=mock_function, kernel=mock_kernel, result_parser=lambda result: result.value + ) + + result = await strategy.should_agent_terminate(agent, history) + + assert result is True + mock_function.invoke.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_should_agent_terminate_with_result_false(): + agent = MockAgent(id="test-agent-id") + history = [MagicMock(spec=ChatMessageContent)] + + mock_function = AsyncMock(spec=KernelFunction) + mock_function.invoke.return_value = MagicMock(value=False) + mock_kernel = MagicMock(spec=Kernel) + + strategy = KernelFunctionTerminationStrategy( + agents=[agent], function=mock_function, kernel=mock_kernel, result_parser=lambda result: result.value + ) + + result = await strategy.should_agent_terminate(agent, history) + + assert result is False + mock_function.invoke.assert_awaited_once() + + +@pytest.mark.asyncio +async def 
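The termination tests below hand the strategy both plain lambdas and coroutine functions as result_parser, so the strategy must tolerate either. A minimal sketch of how such dual support can work (an assumption about the mechanism, not a copy of the semantic_kernel source; apply_result_parser is a hypothetical helper):

import asyncio
import inspect


async def apply_result_parser(result_parser, result):
    # Call the parser, then await the outcome only if it is awaitable.
    parsed = result_parser(result)
    if inspect.isawaitable(parsed):
        parsed = await parsed
    return parsed


async def main() -> None:
    print(await apply_result_parser(lambda r: r, True))  # sync parser

    async def async_parser(r):
        return r

    print(await apply_result_parser(async_parser, False))  # async parser


asyncio.run(main())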
test_should_agent_terminate_with_none_result(): + agent = MockAgent(id="test-agent-id") + history = [MagicMock(spec=ChatMessageContent)] + + mock_function = AsyncMock(spec=KernelFunction) + mock_function.invoke.return_value = None + mock_kernel = MagicMock(spec=Kernel) + + strategy = KernelFunctionTerminationStrategy( + agents=[agent], + function=mock_function, + kernel=mock_kernel, + result_parser=lambda result: result.value if result else False, + ) + + result = await strategy.should_agent_terminate(agent, history) + + assert result is False + mock_function.invoke.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_should_agent_terminate_custom_arguments(): + agent = MockAgent(id="test-agent-id") + history = [MagicMock(spec=ChatMessageContent)] + + mock_function = AsyncMock(spec=KernelFunction) + mock_function.invoke.return_value = MagicMock(value=True) + mock_kernel = MagicMock(spec=Kernel) + + custom_args = KernelArguments(execution_settings={"some_setting": MagicMock(model_dump=lambda: {"key": "value"})}) + + strategy = KernelFunctionTerminationStrategy( + agents=[agent], + function=mock_function, + kernel=mock_kernel, + arguments=custom_args, + result_parser=lambda result: result.value, + ) + + with patch.object(KernelArguments, "__init__", return_value=None) as mock_init: + result = await strategy.should_agent_terminate(agent, history) + mock_init.assert_called_once() + + assert result is True + mock_function.invoke.assert_awaited_once() + + +@pytest.mark.asyncio +async def test_should_agent_terminate_result_parser_awaitable(): + agent = MockAgent(id="test-agent-id") + history = [MagicMock(spec=ChatMessageContent)] + + mock_function = AsyncMock(spec=KernelFunction) + mock_function.invoke.return_value = MagicMock(value=True) + mock_kernel = MagicMock(spec=Kernel) + + async def mock_result_parser(result): + return result.value + + strategy = KernelFunctionTerminationStrategy( + agents=[agent], function=mock_function, kernel=mock_kernel, result_parser=mock_result_parser + ) + + result = await strategy.should_agent_terminate(agent, history) + + assert result is True + mock_function.invoke.assert_awaited_once() diff --git a/python/tests/unit/agents/test_open_ai_assistant_agent.py b/python/tests/unit/agents/test_open_ai_assistant_agent.py new file mode 100644 index 000000000000..18096b90ab15 --- /dev/null +++ b/python/tests/unit/agents/test_open_ai_assistant_agent.py @@ -0,0 +1,503 @@ +# Copyright (c) Microsoft. All rights reserved. 
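The fixtures in this file store run options under the "__run_options" metadata key in two encodings, once as a plain dict and once JSON-serialized, and the definition tests expect both to surface the same values. A sketch of decoding that tolerates both forms (load_run_options is a hypothetical helper, not the library's API):

import json


def load_run_options(metadata: dict) -> dict:
    """Accept "__run_options" stored either as a dict or as a JSON string."""
    raw = metadata.get("__run_options", {})
    return json.loads(raw) if isinstance(raw, str) else raw


options = {"max_prompt_tokens": 50}
assert load_run_options({"__run_options": options}) == options
assert load_run_options({"__run_options": json.dumps(options)}) == options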
+ +import json +from unittest.mock import AsyncMock, MagicMock, mock_open, patch + +import pytest +from openai import AsyncOpenAI +from openai.resources.beta.assistants import Assistant +from openai.types.beta.assistant import ( + ToolResources, + ToolResourcesCodeInterpreter, + ToolResourcesFileSearch, +) +from pydantic import ValidationError + +from semantic_kernel.agents.open_ai import OpenAIAssistantAgent +from semantic_kernel.agents.open_ai.open_ai_assistant_base import OpenAIAssistantBase +from semantic_kernel.exceptions.agent_exceptions import AgentInitializationException +from semantic_kernel.kernel import Kernel + + +@pytest.fixture +def openai_assistant_agent(kernel: Kernel, openai_unit_test_env): + return OpenAIAssistantAgent( + kernel=kernel, + service_id="test_service", + name="test_name", + instructions="test_instructions", + api_key="test_api_key", + kwargs={"temperature": 0.1}, + max_completion_tokens=100, + max_prompt_tokens=100, + parallel_tool_calls_enabled=True, + truncation_message_count=2, + ) + + +@pytest.fixture +def mock_assistant(): + return Assistant( + created_at=123456789, + object="assistant", + metadata={ + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + model="test_model", + description="test_description", + id="test_id", + instructions="test_instructions", + name="test_name", + tools=[{"type": "code_interpreter"}, {"type": "file_search"}], + temperature=0.7, + top_p=0.9, + response_format={"type": "json_object"}, + tool_resources=ToolResources( + code_interpreter=ToolResourcesCodeInterpreter(code_interpreter_file_ids=["file1", "file2"]), + file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]), + ), + ) + + +@pytest.fixture +def mock_assistant_json(): + return Assistant( + created_at=123456789, + object="assistant", + metadata={ + "__run_options": json.dumps({ + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + }) + }, + model="test_model", + description="test_description", + id="test_id", + instructions="test_instructions", + name="test_name", + tools=[{"type": "code_interpreter"}, {"type": "file_search"}], + temperature=0.7, + top_p=0.9, + response_format={"type": "json_object"}, + tool_resources=ToolResources( + code_interpreter=ToolResourcesCodeInterpreter(code_interpreter_file_ids=["file1", "file2"]), + file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]), + ), + ) + + +def test_initialization(openai_assistant_agent: OpenAIAssistantAgent, openai_unit_test_env): + agent = openai_assistant_agent + assert agent is not None + assert agent.kernel is not None + + +def test_create_client(openai_unit_test_env): + client = OpenAIAssistantAgent._create_client(api_key="test_api_key", default_headers={"User-Agent": "test-agent"}) + assert isinstance(client, AsyncOpenAI) + assert client.api_key == "test_api_key" + + +def test_create_client_from_configuration_missing_api_key(): + with pytest.raises( + AgentInitializationException, + match="Please provide an OpenAI api_key", + ): + OpenAIAssistantAgent._create_client(None) + + +@pytest.mark.asyncio +async def test_create_agent(kernel: Kernel, openai_unit_test_env): + with patch.object(OpenAIAssistantAgent, "create_assistant", new_callable=AsyncMock) as mock_create_assistant: + mock_create_assistant.return_value = MagicMock(spec=Assistant) + agent = await OpenAIAssistantAgent.create(
kernel=kernel, + ai_model_id="test_model_id", + service_id="test_service", + name="test_name", + api_key="test_api_key", + ) + assert agent.assistant is not None + mock_create_assistant.assert_called_once() + + +@pytest.mark.asyncio +async def test_create_agent_with_files(kernel: Kernel, openai_unit_test_env): + mock_open_file = mock_open(read_data="file_content") + with ( + patch("builtins.open", mock_open_file), + patch( + "semantic_kernel.agents.open_ai.open_ai_assistant_base.OpenAIAssistantBase.add_file", + return_value="test_file_id", + ), + patch( + "semantic_kernel.agents.open_ai.open_ai_assistant_base.OpenAIAssistantBase.create_vector_store", + return_value="vector_store_id", + ), + patch.object(OpenAIAssistantAgent, "create_assistant", new_callable=AsyncMock) as mock_create_assistant, + ): + mock_create_assistant.return_value = MagicMock(spec=Assistant) + agent = await OpenAIAssistantAgent.create( + kernel=kernel, + ai_model_id="test_model_id", + service_id="test_service", + name="test_name", + api_key="test_api_key", + code_interpreter_filenames=["file1", "file2"], + vector_store_filenames=["file3", "file4"], + enable_code_interpreter=True, + enable_file_search=True, + ) + assert agent.assistant is not None + mock_create_assistant.assert_called_once() + + +@pytest.mark.asyncio +async def test_create_agent_with_code_files_not_found_raises_exception(kernel: Kernel, openai_unit_test_env): + mock_open_file = mock_open(read_data="file_content") + with ( + patch("builtins.open", mock_open_file), + patch( + "semantic_kernel.agents.open_ai.open_ai_assistant_base.OpenAIAssistantBase.add_file", + side_effect=FileNotFoundError("File not found"), + ), + patch.object(OpenAIAssistantAgent, "create_assistant", new_callable=AsyncMock) as mock_create_assistant, + ): + mock_create_assistant.return_value = MagicMock(spec=Assistant) + with pytest.raises(AgentInitializationException, match="Failed to upload code interpreter files."): + _ = await OpenAIAssistantAgent.create( + kernel=kernel, + service_id="test_service", + ai_model_id="test_model_id", + name="test_name", + api_key="test_api_key", + api_version="2024-05-01", + code_interpreter_filenames=["file1", "file2"], + ) + + +@pytest.mark.asyncio +async def test_create_agent_with_search_files_not_found_raises_exception(kernel: Kernel, openai_unit_test_env): + mock_open_file = mock_open(read_data="file_content") + with ( + patch("builtins.open", mock_open_file), + patch( + "semantic_kernel.agents.open_ai.open_ai_assistant_base.OpenAIAssistantBase.add_file", + side_effect=FileNotFoundError("File not found"), + ), + patch.object(OpenAIAssistantAgent, "create_assistant", new_callable=AsyncMock) as mock_create_assistant, + ): + mock_create_assistant.return_value = MagicMock(spec=Assistant) + with pytest.raises(AgentInitializationException, match="Failed to upload file search files."): + _ = await OpenAIAssistantAgent.create( + kernel=kernel, + service_id="test_service", + ai_model_id="test_model_id", + name="test_name", + api_key="test_api_key", + api_version="2024-05-01", + vector_store_filenames=["file3", "file4"], + ) + + +@pytest.mark.asyncio +async def test_create_agent_second_way(kernel: Kernel, mock_assistant, openai_unit_test_env): + agent = OpenAIAssistantAgent( + kernel=kernel, + ai_model_id="test_model_id", + service_id="test_service", + name="test_name", + api_key="test_api_key", + max_completion_tokens=100, + max_prompt_tokens=100, + parallel_tool_calls_enabled=True, + truncation_message_count=2, + ) + + with patch.object( + 
OpenAIAssistantAgent, "_create_client", return_value=MagicMock(spec=AsyncOpenAI) + ) as mock_create_client: + mock_client_instance = mock_create_client.return_value + mock_client_instance.beta = MagicMock() + mock_client_instance.beta.assistants.create = AsyncMock(return_value=mock_assistant) + + agent.client = mock_client_instance + + assistant = await agent.create_assistant() + + mock_client_instance.beta.assistants.create.assert_called_once() + + assert assistant == mock_assistant + + assert json.loads( + mock_client_instance.beta.assistants.create.call_args[1]["metadata"][agent._options_metadata_key] + ) == { + "max_completion_tokens": 100, + "max_prompt_tokens": 100, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 2, + } + + +@pytest.mark.asyncio +async def test_list_definitions(kernel: Kernel, mock_assistant, openai_unit_test_env): + agent = OpenAIAssistantAgent( + kernel=kernel, service_id="test_service", name="test_name", instructions="test_instructions", id="test_id" + ) + + with patch.object( + OpenAIAssistantAgent, "_create_client", return_value=MagicMock(spec=AsyncOpenAI) + ) as mock_create_client: + mock_client_instance = mock_create_client.return_value + mock_client_instance.beta = MagicMock() + mock_client_instance.beta.assistants = MagicMock() + mock_client_instance.beta.assistants.list = AsyncMock(return_value=MagicMock(data=[mock_assistant])) + + agent.client = mock_client_instance + + definitions = [] + async for definition in agent.list_definitions(): + definitions.append(definition) + + mock_client_instance.beta.assistants.list.assert_called() + + assert len(definitions) == 1 + assert definitions[0] == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "code_interpreter_file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + + +@pytest.mark.asyncio +async def test_retrieve_agent(kernel, openai_unit_test_env): + with patch.object( + OpenAIAssistantAgent, "_create_client", return_value=MagicMock(spec=AsyncOpenAI) + ) as mock_create_client: + mock_client_instance = mock_create_client.return_value + mock_client_instance.beta = MagicMock() + mock_client_instance.beta.assistants = MagicMock() + + mock_client_instance.beta.assistants.retrieve = AsyncMock(return_value=AsyncMock()) + + agent = OpenAIAssistantAgent( + kernel=kernel, service_id="test_service", name="test_name", instructions="test_instructions", id="test_id" + ) + OpenAIAssistantBase._create_open_ai_assistant_definition = MagicMock( + return_value={ + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "code_interpreter_file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + 
"parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + ) + + retrieved_agent = await agent.retrieve(id="test_id", api_key="test_api_key", kernel=kernel) + assert retrieved_agent.model_dump( + include={ + "ai_model_id", + "description", + "id", + "instructions", + "name", + "enable_code_interpreter", + "enable_file_search", + "enable_json_response", + "code_interpreter_file_ids", + "temperature", + "top_p", + "vector_store_id", + "metadata", + "max_completion_tokens", + "max_prompt_tokens", + "parallel_tool_calls_enabled", + "truncation_message_count", + } + ) == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "code_interpreter_file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + mock_client_instance.beta.assistants.retrieve.assert_called_once_with("test_id") + OpenAIAssistantBase._create_open_ai_assistant_definition.assert_called_once() + + +@pytest.mark.parametrize("exclude_list", [["OPENAI_CHAT_MODEL_ID"]], indirect=True) +@pytest.mark.asyncio +async def test_retrieve_agent_missing_chat_model_id_throws(kernel, openai_unit_test_env): + with pytest.raises(AgentInitializationException, match="The OpenAI chat model ID is required."): + _ = await OpenAIAssistantAgent.retrieve( + id="test_id", api_key="test_api_key", kernel=kernel, env_file_path="test.env" + ) + + +@pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) +@pytest.mark.asyncio +async def test_retrieve_agent_missing_api_key_throws(kernel, openai_unit_test_env): + with pytest.raises( + AgentInitializationException, match="The OpenAI API key is required, if a client is not provided." 
+ ): + _ = await OpenAIAssistantAgent.retrieve(id="test_id", kernel=kernel, env_file_path="test.env") + + +def test_open_ai_settings_create_throws(openai_unit_test_env): + with patch("semantic_kernel.connectors.ai.open_ai.settings.open_ai_settings.OpenAISettings.create") as mock_create: + mock_create.side_effect = ValidationError.from_exception_data("test", line_errors=[], input_type="python") + + with pytest.raises(AgentInitializationException, match="Failed to create OpenAI settings."): + OpenAIAssistantAgent( + service_id="test", api_key="test_api_key", org_id="test_org_id", ai_model_id="test_model_id" + ) + + +@pytest.mark.parametrize("exclude_list", [["OPENAI_CHAT_MODEL_ID"]], indirect=True) +def test_openai_agent_create_missing_chat_model_id_throws(openai_unit_test_env): + with pytest.raises(AgentInitializationException, match="The OpenAI chat model ID is required."): + OpenAIAssistantAgent(service_id="test_service", env_file_path="test.env") + + +@pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) +def test_openai_agent_create_missing_api_key_throws(openai_unit_test_env): + with pytest.raises( + AgentInitializationException, match="The OpenAI API key is required, if a client is not provided." + ): + OpenAIAssistantAgent(env_file_path="test.env") + + +def test_create_open_ai_assistant_definition(mock_assistant, openai_unit_test_env): + agent = OpenAIAssistantAgent( + kernel=None, service_id="test_service", name="test_name", instructions="test_instructions", id="test_id" + ) + + definition = agent._create_open_ai_assistant_definition(mock_assistant) + + assert definition == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "code_interpreter_file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + + +def test_create_open_ai_assistant_definition_with_json_metadata(mock_assistant_json, openai_unit_test_env): + agent = OpenAIAssistantAgent( + kernel=None, service_id="test_service", name="test_name", instructions="test_instructions", id="test_id" + ) + + definition = agent._create_open_ai_assistant_definition(mock_assistant_json) + + assert definition == { + "ai_model_id": "test_model", + "description": "test_description", + "id": "test_id", + "instructions": "test_instructions", + "name": "test_name", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "code_interpreter_file_ids": ["file1", "file2"], + "temperature": 0.7, + "top_p": 0.9, + "vector_store_id": "vector_store1", + "metadata": { + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } diff --git a/python/tests/unit/agents/test_open_ai_assistant_base.py b/python/tests/unit/agents/test_open_ai_assistant_base.py new file mode 100644 index
000000000000..815ca2b2208e --- /dev/null +++ b/python/tests/unit/agents/test_open_ai_assistant_base.py @@ -0,0 +1,1180 @@ +# Copyright (c) Microsoft. All rights reserved. + +from datetime import datetime, timedelta, timezone +from typing import Any +from unittest.mock import AsyncMock, MagicMock, mock_open, patch + +import pytest +from openai import AsyncAzureOpenAI, AsyncOpenAI +from openai.resources.beta.threads.runs.runs import Run +from openai.types.beta.assistant import Assistant, ToolResources, ToolResourcesCodeInterpreter, ToolResourcesFileSearch +from openai.types.beta.assistant_tool import CodeInterpreterTool, FileSearchTool +from openai.types.beta.threads.annotation import FileCitationAnnotation, FilePathAnnotation +from openai.types.beta.threads.file_citation_annotation import FileCitation +from openai.types.beta.threads.file_path_annotation import FilePath +from openai.types.beta.threads.image_file import ImageFile +from openai.types.beta.threads.image_file_content_block import ImageFileContentBlock +from openai.types.beta.threads.required_action_function_tool_call import Function +from openai.types.beta.threads.required_action_function_tool_call import Function as RequiredActionFunction +from openai.types.beta.threads.run import ( + RequiredAction, + RequiredActionFunctionToolCall, + RequiredActionSubmitToolOutputs, +) +from openai.types.beta.threads.runs import RunStep +from openai.types.beta.threads.runs.code_interpreter_tool_call import ( + CodeInterpreter, + CodeInterpreterToolCall, +) +from openai.types.beta.threads.runs.function_tool_call import Function as RunsFunction +from openai.types.beta.threads.runs.function_tool_call import FunctionToolCall +from openai.types.beta.threads.runs.message_creation_step_details import MessageCreation, MessageCreationStepDetails +from openai.types.beta.threads.runs.tool_calls_step_details import ToolCallsStepDetails +from openai.types.beta.threads.text import Text +from openai.types.beta.threads.text_content_block import TextContentBlock +from openai.types.shared.response_format_json_object import ResponseFormatJSONObject + +from semantic_kernel.agents.open_ai.assistant_content_generation import ( + generate_function_call_content, + generate_function_result_content, + generate_message_content, + get_function_call_contents, + get_message_contents, +) +from semantic_kernel.agents.open_ai.azure_assistant_agent import AzureAssistantAgent +from semantic_kernel.contents.annotation_content import AnnotationContent +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.file_reference_content import FileReferenceContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.agent_exceptions import ( + AgentExecutionException, + AgentFileNotFoundException, + AgentInitializationException, + AgentInvokeException, +) +from semantic_kernel.functions.kernel_function_decorator import kernel_function +from semantic_kernel.functions.kernel_function_from_method import KernelFunctionFromMethod +from semantic_kernel.kernel import Kernel + +# region Test Fixtures + + +@pytest.fixture +def 
azure_openai_assistant_agent(kernel: Kernel, azure_openai_unit_test_env): + return AzureAssistantAgent( + kernel=kernel, + service_id="test_service", + name="test_name", + instructions="test_instructions", + api_key="test", + metadata={"key": "value"}, + api_version="2024-05-01", + description="test_description", + ai_model_id="test_model", + enable_code_interpreter=True, + enable_file_search=True, + vector_store_id="vector_store1", + file_ids=["file1", "file2"], + temperature=0.7, + top_p=0.9, + enable_json_response=True, + ) + + +@pytest.fixture +def mock_assistant(): + return Assistant( + created_at=123456789, + object="assistant", + metadata={ + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + model="test_model", + description="test_description", + id="test_id", + instructions="test_instructions", + name="test_name", + tools=[{"type": "code_interpreter"}, {"type": "file_search"}], + temperature=0.7, + top_p=0.9, + response_format={"type": "json_object"}, + tool_resources=ToolResources( + code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]), + file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]), + ), + ) + + +@pytest.fixture +def mock_thread(): + class MockThread: + id = "test_thread_id" + + return MockThread() + + +@pytest.fixture +def mock_chat_message_content(): + return ChatMessageContent(role=AuthorRole.USER, content="test message", metadata={"key": "value"}) + + +@pytest.fixture +def mock_message(): + class MockMessage: + id = "test_message_id" + role = "user" + + return MockMessage() + + +@pytest.fixture +def mock_thread_messages(): + class MockMessage: + def __init__(self, role, content, assistant_id=None): + self.role = role + self.content = content + self.assistant_id = assistant_id + + return [ + MockMessage( + role="user", + content=[ + TextContentBlock( + type="text", + text=Text( + value="Hello", + annotations=[ + FilePathAnnotation( + type="file_path", + file_path=FilePath(file_id="test_file_id"), + end_index=5, + start_index=0, + text="Hello", + ), + FileCitationAnnotation( + type="file_citation", + file_citation=FileCitation(file_id="test_file_id", quote="test quote"), + text="Hello", + start_index=0, + end_index=5, + ), + ], + ), + ) + ], + ), + MockMessage( + role="assistant", + content=[ + ImageFileContentBlock(type="image_file", image_file=ImageFile(file_id="test_file_id", detail="auto")) + ], + assistant_id="assistant_1", + ), + ] + + +@pytest.fixture +def mock_run_failed(): + return Run( + id="run_id", + status="failed", + assistant_id="assistant_id", + created_at=123456789, + instructions="instructions", + model="model", + object="thread.run", + thread_id="thread_id", + tools=[], + parallel_tool_calls=True, + ) + + +@pytest.fixture +def mock_run_required_action(): + return Run( + id="run_id", + status="requires_action", + assistant_id="assistant_id", + created_at=123456789, + instructions="instructions", + model="model", + object="thread.run", + thread_id="thread_id", + tools=[], + required_action=RequiredAction( + type="submit_tool_outputs", + submit_tool_outputs=RequiredActionSubmitToolOutputs( + tool_calls=[ + RequiredActionFunctionToolCall( + id="tool_call_id", + type="function", + function=RequiredActionFunction(arguments="{}", name="function_name"), + ) + ] + ), + ), + parallel_tool_calls=True, + ) + + +@pytest.fixture +def mock_run_completed(): + return Run( + id="run_id", + status="completed", + 
assistant_id="assistant_id", + created_at=123456789, + instructions="instructions", + model="model", + object="thread.run", + thread_id="thread_id", + tools=[], + required_action=RequiredAction( + type="submit_tool_outputs", + submit_tool_outputs=RequiredActionSubmitToolOutputs( + tool_calls=[ + RequiredActionFunctionToolCall( + id="tool_call_id", type="function", function=Function(arguments="{}", name="function_name") + ) + ] + ), + ), + parallel_tool_calls=True, + ) + + +@pytest.fixture +def mock_function_call_content(): + return FunctionCallContent(id="function_call_id", name="function_name", arguments={}) + + +@pytest.fixture +def mock_run_in_progress(): + class MockRun: + def __init__(self): + self.id = "run_id" + self.status = "requires_action" + self.assistant_id = "assistant_id" + self.created_at = int(datetime.now(timezone.utc).timestamp()) + self.instructions = "instructions" + self.model = "model" + self.object = "run" + self.thread_id = "thread_id" + self.tools = [] + self.poll_count = 0 + self.required_action = RequiredAction( + type="submit_tool_outputs", + submit_tool_outputs=RequiredActionSubmitToolOutputs( + tool_calls=[ + RequiredActionFunctionToolCall( + id="tool_call_id", + type="function", + function=Function(arguments="{}", name="function_name"), + ) + ] + ), + ) + + def update_status(self): + self.poll_count += 1 + if self.poll_count > 2: + self.status = "completed" + + return MockRun() + + +@pytest.fixture +def mock_run_step_tool_call(): + class MockToolCall: + def __init__(self): + self.type = "code_interpreter" + self.code_interpreter = MagicMock(input="print('Hello, world!')") + + return RunStep( + id="step_id_1", + type="tool_calls", + completed_at=int(datetime.now(timezone.utc).timestamp()), + created_at=int((datetime.now(timezone.utc) - timedelta(minutes=1)).timestamp()), + step_details=ToolCallsStepDetails( + tool_calls=[ + CodeInterpreterToolCall( + type="code_interpreter", + id="tool_call_id", + code_interpreter=CodeInterpreter(input="test code", outputs=[]), + ), + FunctionToolCall( + type="function", + id="tool_call_id", + function=RunsFunction(arguments="{}", name="function_name", outpt="test output"), + ), + ], + type="tool_calls", + ), + assistant_id="assistant_id", + object="thread.run.step", + run_id="run_id", + status="completed", + thread_id="thread_id", + ) + + +@pytest.fixture +def mock_run_step_message_creation(): + class MockMessageCreation: + def __init__(self): + self.message_id = "message_id" + + class MockStepDetails: + def __init__(self): + self.message_creation = MockMessageCreation() + + return RunStep( + id="step_id_2", + type="message_creation", + completed_at=int(datetime.now(timezone.utc).timestamp()), + created_at=int((datetime.now(timezone.utc) - timedelta(minutes=2)).timestamp()), + step_details=MessageCreationStepDetails( + type="message_creation", message_creation=MessageCreation(message_id="test") + ), + assistant_id="assistant_id", + object="thread.run.step", + run_id="run_id", + status="completed", + thread_id="thread_id", + ) + + +# endregion + +# region Tests + + +@pytest.mark.asyncio +async def test_create_assistant( + azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + + assistant = await azure_openai_assistant_agent.create_assistant( + 
ai_model_id="test_model", + description="test_description", + instructions="test_instructions", + name="test_name", + enable_code_interpreter=True, + enable_file_search=True, + vector_store_id="vector_store1", + code_interpreter_file_ids=["file1", "file2"], + metadata={"key": "value"}, + ) + + assert assistant.model == "test_model" + assert assistant.description == "test_description" + assert assistant.id == "test_id" + assert assistant.instructions == "test_instructions" + assert assistant.name == "test_name" + assert assistant.tools == [CodeInterpreterTool(type="code_interpreter"), FileSearchTool(type="file_search")] + assert assistant.temperature == 0.7 + assert assistant.top_p == 0.9 + assert assistant.response_format == ResponseFormatJSONObject(type="json_object") + assert assistant.tool_resources == ToolResources( + code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]), + file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]), + ) + + +@pytest.mark.asyncio +async def test_modify_assistant( + azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + + assistant = await azure_openai_assistant_agent.create_assistant( + ai_model_id="test_model", + description="test_description", + instructions="test_instructions", + name="test_name", + enable_code_interpreter=True, + enable_file_search=True, + vector_store_id="vector_store1", + code_interpreter_file_ids=["file1", "file2"], + metadata={"key": "value"}, + ) + + mock_client.beta.assistants.update = AsyncMock(return_value=mock_assistant) + + assistant = await azure_openai_assistant_agent.modify_assistant( + assistant_id=assistant.id, + ai_model_id="test_model", + description="test_description", + instructions="test_instructions", + name="test_name", + enable_code_interpreter=True, + enable_file_search=True, + vector_store_id="vector_store1", + code_interpreter_file_ids=["file1", "file2"], + metadata={"key": "value"}, + ) + + assert assistant.model == "test_model" + assert assistant.description == "test_description" + assert assistant.id == "test_id" + assert assistant.instructions == "test_instructions" + assert assistant.name == "test_name" + assert assistant.tools == [CodeInterpreterTool(type="code_interpreter"), FileSearchTool(type="file_search")] + assert assistant.temperature == 0.7 + assert assistant.top_p == 0.9 + assert assistant.response_format == ResponseFormatJSONObject(type="json_object") + assert assistant.tool_resources == ToolResources( + code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]), + file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]), + ) + + +@pytest.mark.asyncio +async def test_modify_assistant_not_initialized_throws( + azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env +): + with pytest.raises(AgentInitializationException, match="The assistant has not been created."): + _ = await azure_openai_assistant_agent.modify_assistant( + assistant_id="id", + ai_model_id="test_model", + description="test_description", + instructions="test_instructions", + name="test_name", + enable_code_interpreter=True, + enable_file_search=True, + vector_store_id="vector_store1", + code_interpreter_file_ids=["file1", "file2"], + 
metadata={"key": "value"}, + ) + + +@pytest.mark.asyncio +async def test_create_assistant_with_model_attributes( + azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + + assistant = await azure_openai_assistant_agent.create_assistant( + ai_model_id="test_model", + description="test_description", + instructions="test_instructions", + name="test_name", + enable_code_interpreter=True, + enable_file_search=True, + vector_store_id="vector_store1", + code_interpreter_file_ids=["file1", "file2"], + metadata={"key": "value"}, + kwargs={"temperature": 0.1}, + ) + + assert assistant.model == "test_model" + assert assistant.description == "test_description" + assert assistant.id == "test_id" + assert assistant.instructions == "test_instructions" + assert assistant.name == "test_name" + assert assistant.tools == [CodeInterpreterTool(type="code_interpreter"), FileSearchTool(type="file_search")] + assert assistant.temperature == 0.7 + assert assistant.top_p == 0.9 + assert assistant.response_format == ResponseFormatJSONObject(type="json_object") + assert assistant.tool_resources == ToolResources( + code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]), + file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]), + ) + + +@pytest.mark.asyncio +async def test_create_assistant_delete_and_recreate( + azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + mock_client.beta.assistants.delete = AsyncMock() + + assistant = await azure_openai_assistant_agent.create_assistant() + + assert assistant is not None + + await azure_openai_assistant_agent.delete() + + assert azure_openai_assistant_agent._is_deleted + + assistant = await azure_openai_assistant_agent.create_assistant() + + assert azure_openai_assistant_agent._is_deleted is False + + +@pytest.mark.asyncio +async def test_get_channel_keys(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + keys = azure_openai_assistant_agent.get_channel_keys() + for key in keys: + assert isinstance(key, str) + + +@pytest.mark.asyncio +async def test_create_channel( + azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, mock_thread, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.create = AsyncMock(return_value=mock_thread) + + channel = await azure_openai_assistant_agent.create_channel() + + assert channel is not None + + +@pytest.mark.asyncio +async def test_get_assistant_metadata( + azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = 
MagicMock()
+        mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant)
+
+        assistant = await azure_openai_assistant_agent.create_assistant()
+
+        assert assistant.metadata is not None
+
+
+@pytest.mark.asyncio
+async def test_get_agent_tools(azure_openai_assistant_agent, mock_assistant, openai_unit_test_env):
+    with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client:
+        mock_client.beta = MagicMock()
+        mock_client.beta.assistants = MagicMock()
+        mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant)
+
+        func = KernelFunctionFromMethod(method=kernel_function(lambda x: x**2, name="square"), plugin_name="math")
+        azure_openai_assistant_agent.kernel.add_function(plugin_name="test", function=func)
+
+        assistant = await azure_openai_assistant_agent.create_assistant()
+
+        assert assistant.tools is not None
+        assert len(assistant.tools) == 2
+        tools = azure_openai_assistant_agent.tools
+        assert len(tools) == 3
+        assert tools[0] == {"type": "code_interpreter"}
+        assert tools[1] == {"type": "file_search"}
+        assert tools[2]["type"].startswith("function")
+
+
+@pytest.mark.asyncio
+async def test_get_assistant_tools_throws_when_no_assistant(
+    azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env
+):
+    with pytest.raises(AgentInitializationException, match="The assistant has not been created."):
+        _ = azure_openai_assistant_agent.tools
+
+
+@pytest.mark.asyncio
+async def test_create_thread(azure_openai_assistant_agent, mock_thread, openai_unit_test_env):
+    with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client:
+        mock_client.beta = MagicMock()
+        mock_client.beta.threads = MagicMock()
+        mock_client.beta.threads.create = AsyncMock(return_value=mock_thread)
+
+        thread_id = await azure_openai_assistant_agent.create_thread(
+            code_interpreter_file_ids=["file1", "file2"],
+            vector_store_id="vector_store1",
+            messages=[
+                ChatMessageContent(role=AuthorRole.USER, content="test message"),
+            ],
+            metadata={"key": "value"},
+        )
+
+        assert thread_id == "test_thread_id"
+        mock_client.beta.threads.create.assert_called_once()
+        _, called_kwargs = mock_client.beta.threads.create.call_args
+        assert "tool_resources" in called_kwargs
+        assert called_kwargs["tool_resources"] == {
+            "code_interpreter": {"file_ids": ["file1", "file2"]},
+            "file_search": {"vector_store_ids": ["vector_store1"]},
+        }
+        assert "messages" in called_kwargs
+        assert called_kwargs["messages"] == [{"role": "user", "content": {"type": "text", "text": "test message"}}]
+        assert "metadata" in called_kwargs
+        assert called_kwargs["metadata"] == {"key": "value"}
+
+
+@pytest.mark.asyncio
+async def test_create_thread_throws_with_invalid_role(azure_openai_assistant_agent, mock_thread, openai_unit_test_env):
+    with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client:
+        mock_client.beta = MagicMock()
+        mock_client.beta.threads = MagicMock()
+        mock_client.beta.threads.create = AsyncMock(return_value=mock_thread)
+
+        with pytest.raises(
+            AgentExecutionException,
+            match="Invalid message role `tool`",
+        ):
+            _ = await azure_openai_assistant_agent.create_thread(
+                messages=[ChatMessageContent(role=AuthorRole.TOOL, content="test message")]
+            )
+
+
+@pytest.mark.asyncio
+async def test_delete_thread(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env):
+    with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client:
+        mock_client.beta =
MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.delete = AsyncMock() + + await azure_openai_assistant_agent.delete_thread("test_thread_id") + + mock_client.beta.threads.delete.assert_called_once_with("test_thread_id") + + +@pytest.mark.asyncio +async def test_delete(azure_openai_assistant_agent, mock_assistant, openai_unit_test_env): + azure_openai_assistant_agent.assistant = mock_assistant + + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.delete = AsyncMock() + + azure_openai_assistant_agent._is_deleted = False + result = await azure_openai_assistant_agent.delete() + + assert result == azure_openai_assistant_agent._is_deleted + mock_client.beta.assistants.delete.assert_called_once_with(mock_assistant.id) + + +@pytest.mark.asyncio +async def test_add_file(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.files = MagicMock() + mock_client.files.create = AsyncMock(return_value=MagicMock(id="test_file_id")) + + mock_open_file = mock_open(read_data="file_content") + with patch("builtins.open", mock_open_file): + file_id = await azure_openai_assistant_agent.add_file("test_file_path", "assistants") + + assert file_id == "test_file_id" + mock_open_file.assert_called_once_with("test_file_path", "rb") + mock_client.files.create.assert_called_once() + + +@pytest.mark.asyncio +async def test_add_file_not_found(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.files = MagicMock() + + with patch("builtins.open", mock_open(read_data="file_content")) as mock_open_file: + mock_open_file.side_effect = FileNotFoundError + + with pytest.raises(AgentFileNotFoundException, match="File not found: test_file_path"): + await azure_openai_assistant_agent.add_file("test_file_path", "assistants") + + +@pytest.mark.asyncio +async def test_delete_file(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.files = MagicMock() + mock_client.files.delete = AsyncMock() + + await azure_openai_assistant_agent.delete_file("test_file_id") + + mock_client.files.delete.assert_called_once_with("test_file_id") + + +@pytest.mark.asyncio +async def test_delete_file_raises_exception(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.files = MagicMock() + mock_client.files.delete = AsyncMock(side_effect=Exception("Deletion failed")) + + with pytest.raises(AgentExecutionException, match="Error deleting file."): + await azure_openai_assistant_agent.delete_file("test_file_id") + + mock_client.files.delete.assert_called_once_with("test_file_id") + + +@pytest.mark.asyncio +async def test_create_vector_store(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.vector_stores = MagicMock() + mock_client.beta.vector_stores.create = 
AsyncMock(return_value=MagicMock(id="test_vector_store_id")) + + vector_store_id = await azure_openai_assistant_agent.create_vector_store(["file_id1", "file_id2"]) + + assert vector_store_id == "test_vector_store_id" + mock_client.beta.vector_stores.create.assert_called_once_with(file_ids=["file_id1", "file_id2"]) + + +@pytest.mark.asyncio +async def test_create_vector_store_single_file_id( + azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.vector_stores = MagicMock() + mock_client.beta.vector_stores.create = AsyncMock(return_value=MagicMock(id="test_vector_store_id")) + + vector_store_id = await azure_openai_assistant_agent.create_vector_store("file_id1") + + assert vector_store_id == "test_vector_store_id" + mock_client.beta.vector_stores.create.assert_called_once_with(file_ids=["file_id1"]) + + +@pytest.mark.asyncio +async def test_create_vector_store_raises_exception( + azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.vector_stores = MagicMock() + mock_client.beta.vector_stores.create = AsyncMock(side_effect=Exception("Creation failed")) + + with pytest.raises(AgentExecutionException, match="Error creating vector store."): + await azure_openai_assistant_agent.create_vector_store("file_id1") + + mock_client.beta.vector_stores.create.assert_called_once_with(file_ids=["file_id1"]) + + +@pytest.mark.asyncio +async def test_delete_vector_store(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.vector_stores = MagicMock() + mock_client.beta.vector_stores.delete = AsyncMock() + + await azure_openai_assistant_agent.delete_vector_store("test_vector_store_id") + + mock_client.beta.vector_stores.delete.assert_called_once_with("test_vector_store_id") + + +@pytest.mark.asyncio +async def test_delete_vector_store_raises_exception( + azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.vector_stores = MagicMock() + mock_client.beta.vector_stores.delete = AsyncMock(side_effect=Exception("Deletion failed")) + + with pytest.raises(AgentExecutionException, match="Error deleting vector store."): + await azure_openai_assistant_agent.delete_vector_store("test_vector_store_id") + + mock_client.beta.vector_stores.delete.assert_called_once_with("test_vector_store_id") + + +@pytest.mark.asyncio +async def test_add_chat_message( + azure_openai_assistant_agent, mock_chat_message_content, mock_message, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.messages = MagicMock() + mock_client.beta.threads.messages.create = AsyncMock(return_value=mock_message) + + result = await azure_openai_assistant_agent.add_chat_message("test_thread_id", mock_chat_message_content) + + assert result.id == "test_message_id" + 
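+        # The agent should pass the message to the threads API as a single text content block.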
mock_client.beta.threads.messages.create.assert_called_once_with( + thread_id="test_thread_id", + role="user", + content=[{"type": "text", "text": "test message"}], + ) + + +@pytest.mark.asyncio +async def test_add_chat_message_invalid_role( + azure_openai_assistant_agent, mock_chat_message_content, openai_unit_test_env +): + mock_chat_message_content.role = AuthorRole.TOOL + + with pytest.raises(AgentExecutionException, match="Invalid message role `tool`"): + await azure_openai_assistant_agent.add_chat_message("test_thread_id", mock_chat_message_content) + + +@pytest.mark.asyncio +async def test_get_thread_messages( + azure_openai_assistant_agent, mock_thread_messages, mock_assistant, openai_unit_test_env +): + async def mock_list_messages(*args, **kwargs) -> Any: + return MagicMock(data=mock_thread_messages) + + async def mock_retrieve_assistant(*args, **kwargs) -> Any: + return mock_assistant + + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.messages = MagicMock() + mock_client.beta.threads.messages.list = AsyncMock(side_effect=mock_list_messages) + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.retrieve = AsyncMock(side_effect=mock_retrieve_assistant) + + messages = [message async for message in azure_openai_assistant_agent.get_thread_messages("test_thread_id")] + + assert len(messages) == 2 + assert len(messages[0].items) == 3 + assert isinstance(messages[0].items[0], TextContent) + assert isinstance(messages[0].items[1], AnnotationContent) + assert isinstance(messages[0].items[2], AnnotationContent) + assert messages[0].items[0].text == "Hello" + + assert len(messages[1].items) == 1 + assert isinstance(messages[1].items[0], FileReferenceContent) + assert str(messages[1].items[0].file_id) == "test_file_id" + + +@pytest.mark.asyncio +async def test_invoke( + azure_openai_assistant_agent, + mock_assistant, + mock_run_in_progress, + mock_run_required_action, + mock_chat_message_content, + mock_run_step_tool_call, + mock_run_step_message_creation, + mock_thread_messages, + mock_function_call_content, + openai_unit_test_env, +): + async def mock_poll_run_status(run, thread_id): + run.update_status() + return run + + def mock_get_function_call_contents(run, function_steps): + function_call_content = mock_function_call_content + function_call_content.id = "tool_call_id" # Set expected ID + function_steps[function_call_content.id] = function_call_content + return [function_call_content] + + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + mock_client.beta.threads.runs = MagicMock() + mock_client.beta.threads.runs.create = AsyncMock(return_value=mock_run_in_progress) + mock_client.beta.threads.runs.submit_tool_outputs = AsyncMock() + mock_client.beta.threads.runs.steps = MagicMock() + mock_client.beta.threads.runs.steps.list = AsyncMock( + return_value=MagicMock(data=[mock_run_step_message_creation, mock_run_step_tool_call]) + ) + + azure_openai_assistant_agent.assistant = await azure_openai_assistant_agent.create_assistant() + azure_openai_assistant_agent._get_tools = MagicMock(return_value=["tool"]) + azure_openai_assistant_agent._poll_run_status = 
AsyncMock(side_effect=mock_poll_run_status) + azure_openai_assistant_agent._invoke_function_calls = AsyncMock() + azure_openai_assistant_agent._format_tool_outputs = MagicMock( + return_value=[{"tool_call_id": "id", "output": "output"}] + ) + azure_openai_assistant_agent._retrieve_message = AsyncMock(return_value=mock_thread_messages[0]) + + with patch( + "semantic_kernel.agents.open_ai.assistant_content_generation.get_function_call_contents", + side_effect=mock_get_function_call_contents, + ): + _ = [message async for message in azure_openai_assistant_agent.invoke("thread_id")] + + +@pytest.mark.asyncio +async def test_invoke_assistant_not_initialized_throws(azure_openai_assistant_agent, openai_unit_test_env): + with pytest.raises(AgentInitializationException, match="The assistant has not been created."): + _ = [message async for message in azure_openai_assistant_agent.invoke("thread_id")] + + +@pytest.mark.asyncio +async def test_invoke_agent_deleted_throws(azure_openai_assistant_agent, mock_assistant, openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + + azure_openai_assistant_agent.assistant = await azure_openai_assistant_agent.create_assistant() + azure_openai_assistant_agent._is_deleted = True + + with pytest.raises(AgentInitializationException, match="The assistant has been deleted."): + _ = [message async for message in azure_openai_assistant_agent.invoke("thread_id")] + + +@pytest.mark.asyncio +async def test_invoke_raises_error( + azure_openai_assistant_agent, + mock_assistant, + mock_run_in_progress, + mock_run_step_tool_call, + mock_run_step_message_creation, + openai_unit_test_env, +): + async def mock_poll_run_status(run, thread_id): + run.status = "failed" + return run + + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant) + mock_client.beta.threads.runs = MagicMock() + mock_client.beta.threads.runs.create = AsyncMock(return_value=mock_run_in_progress) + mock_client.beta.threads.runs.submit_tool_outputs = AsyncMock() + mock_client.beta.threads.runs.steps = MagicMock() + mock_client.beta.threads.runs.steps.list = AsyncMock( + return_value=MagicMock(data=[mock_run_step_tool_call, mock_run_step_message_creation]) + ) + + azure_openai_assistant_agent.assistant = await azure_openai_assistant_agent.create_assistant() + azure_openai_assistant_agent._get_tools = MagicMock(return_value=["tool"]) + azure_openai_assistant_agent._poll_run_status = AsyncMock(side_effect=mock_poll_run_status) + + with pytest.raises( + AgentInvokeException, match="Run failed with status: `failed` for agent `test_name` and thread `thread_id`" + ): + _ = [message async for message in azure_openai_assistant_agent.invoke("thread_id")] + + +def test_format_tool_outputs(azure_openai_assistant_agent, openai_unit_test_env): + chat_history = ChatHistory() + fcc = FunctionCallContent( + id="test", name="test-function", arguments='{"input": "world"}', metadata={"test": "test"} + ) + frc = FunctionResultContent.from_function_call_content_and_result(fcc, 123, {"test2": "test2"}) + 
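+    # Only the FunctionResultContent item in the history should be mapped to a tool output.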
chat_history.add_message(message=frc.to_chat_message_content())
+
+    tool_outputs = azure_openai_assistant_agent._format_tool_outputs(chat_history)
+    assert tool_outputs[0] == {"tool_call_id": "test", "output": 123}
+
+
+@pytest.mark.asyncio
+async def test_invoke_function_calls(azure_openai_assistant_agent, openai_unit_test_env):
+    chat_history = ChatHistory()
+    fcc = FunctionCallContent(
+        id="test", name="test-function", arguments='{"input": "world"}', metadata={"test": "test"}
+    )
+
+    with patch(
+        "semantic_kernel.kernel.Kernel.invoke_function_call", new_callable=AsyncMock
+    ) as mock_invoke_function_call:
+        mock_invoke_function_call.return_value = "mocked_result"
+        results = await azure_openai_assistant_agent._invoke_function_calls([fcc], chat_history)
+        assert results == ["mocked_result"]
+        mock_invoke_function_call.assert_called_once_with(function_call=fcc, chat_history=chat_history)
+
+
+def test_get_function_call_contents(azure_openai_assistant_agent, mock_run_required_action, openai_unit_test_env):
+    result = get_function_call_contents(run=mock_run_required_action, function_steps={})
+    assert result is not None
+
+
+def test_get_function_call_contents_no_action_required(
+    azure_openai_assistant_agent, mock_run_required_action, openai_unit_test_env
+):
+    mock_run_required_action.required_action = None
+    result = get_function_call_contents(run=mock_run_required_action, function_steps={})
+    assert result == []
+
+
+@pytest.mark.asyncio
+async def test_get_tools(azure_openai_assistant_agent: AzureAssistantAgent, mock_assistant, openai_unit_test_env):
+    with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client:
+        mock_client.beta = MagicMock()
+        mock_client.beta.threads = MagicMock()
+        mock_client.beta.assistants = MagicMock()
+        mock_client.beta.assistants.create = AsyncMock(return_value=mock_assistant)
+
+        azure_openai_assistant_agent.assistant = await azure_openai_assistant_agent.create_assistant()
+        tools = azure_openai_assistant_agent._get_tools()
+        assert tools is not None
+
+
+@pytest.mark.asyncio
+async def test_get_tools_no_assistant_throws(
+    azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env
+):
+    with pytest.raises(AgentInitializationException, match="The assistant has not been created."):
+        _ = azure_openai_assistant_agent._get_tools()
+
+
+def test_generate_message_content(azure_openai_assistant_agent, mock_thread_messages, openai_unit_test_env):
+    for message in mock_thread_messages:
+        result = generate_message_content(assistant_name="test", message=message)
+        assert result is not None
+
+
+def test_check_if_deleted_throws(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env):
+    azure_openai_assistant_agent._is_deleted = True
+    with pytest.raises(AgentInitializationException, match="The assistant has been deleted."):
+        azure_openai_assistant_agent._check_if_deleted()
+
+
+def test_get_message_contents(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env):
+    message = ChatMessageContent(role=AuthorRole.USER, content="test message")
+    message.items = [
+        ImageContent(role=AuthorRole.ASSISTANT, content="test message", uri="http://image.url"),
+        TextContent(role=AuthorRole.ASSISTANT, text="test message"),
+        FileReferenceContent(role=AuthorRole.ASSISTANT, file_id="test_file_id"),
+    ]
+
+    result = get_message_contents(message)
+    assert result is not None
+
+
+@pytest.mark.asyncio
+async def test_retrieve_message(azure_openai_assistant_agent, mock_thread_messages,
openai_unit_test_env): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + + mock_client.beta.threads.messages.retrieve = AsyncMock(side_effect=mock_thread_messages) + + message = await azure_openai_assistant_agent._retrieve_message( + thread_id="test_thread_id", message_id="test_message_id" + ) + assert message is not None + + +@pytest.mark.asyncio +async def test_retrieve_message_fails_polls_again( + azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env +): + with ( + patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client, + patch("semantic_kernel.agents.open_ai.open_ai_assistant_agent.logger", autospec=True), + ): + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + + mock_client.beta.threads.messages.retrieve = AsyncMock(side_effect=Exception("Unable to retrieve message")) + + message = await azure_openai_assistant_agent._retrieve_message( + thread_id="test_thread_id", message_id="test_message_id" + ) + assert message is None + + +@pytest.mark.asyncio +async def test_poll_run_status( + azure_openai_assistant_agent, mock_run_required_action, mock_run_completed, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + + mock_client.beta.threads.runs.retrieve = AsyncMock(return_value=mock_run_completed) + + run = await azure_openai_assistant_agent._poll_run_status( + run=mock_run_required_action, thread_id="test_thread_id" + ) + assert run.status == "completed" + + +@pytest.mark.asyncio +async def test_poll_run_status_exception_polls_again( + azure_openai_assistant_agent, mock_run_required_action, mock_run_completed, openai_unit_test_env +): + with patch.object(azure_openai_assistant_agent, "client", spec=AsyncAzureOpenAI) as mock_client: + mock_client.beta = MagicMock() + mock_client.beta.assistants = MagicMock() + + mock_client.beta.threads.runs.retrieve = AsyncMock( + side_effect=[Exception("Failed to retrieve message"), mock_run_completed] + ) + + run = await azure_openai_assistant_agent._poll_run_status( + run=mock_run_required_action, thread_id="test_thread_id" + ) + assert run.status == "requires_action" + + +def test_generate_function_result_content( + azure_openai_assistant_agent, mock_function_call_content, openai_unit_test_env +): + mock_tool_call = RequiredActionFunctionToolCall( + id="tool_call_id", type="function", function=Function(arguments="{}", name="function_name", output="result") + ) + + message = generate_function_result_content( + agent_name="test", function_step=mock_function_call_content, tool_call=mock_tool_call + ) + assert message is not None + assert isinstance(message.items[0], FunctionResultContent) + + +def test_generate_function_call_content(azure_openai_assistant_agent, mock_function_call_content, openai_unit_test_env): + message = generate_function_call_content(agent_name="test", fccs=[mock_function_call_content]) + assert message is not None + assert isinstance(message, ChatMessageContent) + assert isinstance(message.items[0], FunctionCallContent) + + +def test_merge_options(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + merged_options = azure_openai_assistant_agent._merge_options( + ai_model_id="model-id", + enable_json_response=True, + enable_code_interpreter=True, + 
enable_file_search=True, + max_completion_tokens=150, + parallel_tool_calls_enabled=True, + ) + + expected_options = { + "ai_model_id": "model-id", + "enable_code_interpreter": True, + "enable_file_search": True, + "enable_json_response": True, + "max_completion_tokens": 150, + "max_prompt_tokens": None, + "parallel_tool_calls_enabled": True, + "truncation_message_count": None, + "temperature": 0.7, + "top_p": 0.9, + "metadata": {}, + } + + assert merged_options == expected_options, f"Expected {expected_options}, but got {merged_options}" + + +def test_generate_options(azure_openai_assistant_agent: AzureAssistantAgent, openai_unit_test_env): + options = azure_openai_assistant_agent._generate_options( + ai_model_id="model-id", max_completion_tokens=150, metadata={"key1": "value1"} + ) + + expected_options = { + "max_completion_tokens": 150, + "max_prompt_tokens": None, + "model": "model-id", + "top_p": 0.9, + "response_format": None, + "temperature": 0.7, + "truncation_strategy": None, + "metadata": {"key1": "value1"}, + } + + assert options == expected_options, f"Expected {expected_options}, but got {options}" + + +# endregion diff --git a/python/tests/unit/agents/test_open_ai_assistant_channel.py b/python/tests/unit/agents/test_open_ai_assistant_channel.py new file mode 100644 index 000000000000..899ae3799b2c --- /dev/null +++ b/python/tests/unit/agents/test_open_ai_assistant_channel.py @@ -0,0 +1,262 @@ +# Copyright (c) Microsoft. All rights reserved. +from typing import Any +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest +from openai import AsyncOpenAI +from openai.types.beta.assistant import Assistant, ToolResources, ToolResourcesCodeInterpreter, ToolResourcesFileSearch +from openai.types.beta.threads.annotation import FileCitationAnnotation, FilePathAnnotation +from openai.types.beta.threads.file_citation_annotation import FileCitation +from openai.types.beta.threads.file_path_annotation import FilePath +from openai.types.beta.threads.image_file import ImageFile +from openai.types.beta.threads.image_file_content_block import ImageFileContentBlock +from openai.types.beta.threads.text import Text +from openai.types.beta.threads.text_content_block import TextContentBlock + +from semantic_kernel.agents.open_ai.open_ai_assistant_base import OpenAIAssistantBase +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.agent_exceptions import AgentChatException + + +@pytest.fixture +def mock_thread_messages(): + class MockMessage: + def __init__(self, role, content, assistant_id=None): + self.role = role + self.content = content + self.assistant_id = assistant_id + + return [ + MockMessage( + role="user", + content=[ + TextContentBlock( + type="text", + text=Text( + value="Hello", + annotations=[ + FilePathAnnotation( + type="file_path", + file_path=FilePath(file_id="test_file_id"), + end_index=5, + start_index=0, + text="Hello", + ), + FileCitationAnnotation( + type="file_citation", + file_citation=FileCitation(file_id="test_file_id", quote="test quote"), + text="Hello", + start_index=0, + end_index=5, + ), + ], + ), + ) + ], + ), + MockMessage( + role="assistant", + content=[ + ImageFileContentBlock(type="image_file", image_file=ImageFile(file_id="test_file_id", detail="auto")) + ], + assistant_id="assistant_1", + ), + ] + + +@pytest.fixture +def mock_assistant(): + return Assistant( + 
created_at=123456789, + object="assistant", + metadata={ + "__run_options": { + "max_completion_tokens": 100, + "max_prompt_tokens": 50, + "parallel_tool_calls_enabled": True, + "truncation_message_count": 10, + } + }, + model="test_model", + description="test_description", + id="test_id", + instructions="test_instructions", + name="test_name", + tools=[{"type": "code_interpreter"}, {"type": "file_search"}], + temperature=0.7, + top_p=0.9, + response_format={"type": "json_object"}, + tool_resources=ToolResources( + code_interpreter=ToolResourcesCodeInterpreter(file_ids=["file1", "file2"]), + file_search=ToolResourcesFileSearch(vector_store_ids=["vector_store1"]), + ), + ) + + +@pytest.mark.asyncio +async def test_receive_messages(): + from semantic_kernel.agents.channels.open_ai_assistant_channel import OpenAIAssistantChannel + + client = MagicMock(spec=AsyncOpenAI) + client.beta = AsyncMock() + thread_id = "test_thread" + channel = OpenAIAssistantChannel(client=client, thread_id=thread_id) + history = [ + MagicMock(spec=ChatMessageContent, role=AuthorRole.USER, items=[TextContent(text="test")]) for _ in range(3) + ] + + with patch("semantic_kernel.agents.open_ai.assistant_content_generation.create_chat_message"): + await channel.receive(history) + + +@pytest.mark.asyncio +async def test_invoke_agent(): + from semantic_kernel.agents.channels.open_ai_assistant_channel import OpenAIAssistantChannel + + client = MagicMock(spec=AsyncOpenAI) + thread_id = "test_thread" + agent = MagicMock(spec=OpenAIAssistantBase) + agent._is_deleted = False + channel = OpenAIAssistantChannel(client=client, thread_id=thread_id) + + async def mock_invoke_internal(*args, **kwargs): + for _ in range(3): + yield True, MagicMock(spec=ChatMessageContent) + + agent._invoke_internal.side_effect = mock_invoke_internal + + results = [] + async for is_visible, message in channel.invoke(agent): + results.append((is_visible, message)) + + assert len(results) == 3 + for is_visible, message in results: + assert is_visible is True + assert isinstance(message, ChatMessageContent) + + +@pytest.mark.asyncio +async def test_invoke_agent_deleted(): + from semantic_kernel.agents.channels.open_ai_assistant_channel import OpenAIAssistantChannel + + client = MagicMock(spec=AsyncOpenAI) + thread_id = "test_thread" + agent = MagicMock(spec=OpenAIAssistantBase) + agent._is_deleted = True + channel = OpenAIAssistantChannel(client=client, thread_id=thread_id) + + with pytest.raises(AgentChatException, match="Agent is deleted"): + async for _ in channel.invoke(agent): + pass + + +@pytest.mark.asyncio +async def test_invoke_agent_wrong_type(): + from semantic_kernel.agents.channels.open_ai_assistant_channel import OpenAIAssistantChannel + + client = MagicMock(spec=AsyncOpenAI) + thread_id = "test_thread" + agent = MagicMock() + channel = OpenAIAssistantChannel(client=client, thread_id=thread_id) + + with pytest.raises(AgentChatException, match="Agent is not of the expected type"): + async for _ in channel.invoke(agent): + pass + + +@pytest.mark.asyncio +async def test_get_history(mock_thread_messages, mock_assistant, openai_unit_test_env): + from semantic_kernel.agents.channels.open_ai_assistant_channel import OpenAIAssistantChannel + + async def mock_list_messages(*args, **kwargs) -> Any: + return MagicMock(data=mock_thread_messages) + + async def mock_retrieve_assistant(*args, **kwargs) -> Any: + return mock_assistant + + mock_client = MagicMock(spec=AsyncOpenAI) + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + 
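+    # Stub out the nested client attributes that get_history reads from.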
mock_client.beta.threads.messages = MagicMock() + mock_client.beta.threads.messages.list = AsyncMock(side_effect=mock_list_messages) + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.retrieve = AsyncMock(side_effect=mock_retrieve_assistant) + + thread_id = "test_thread" + channel = OpenAIAssistantChannel(client=mock_client, thread_id=thread_id) + + results = [] + async for content in channel.get_history(): + results.append(content) + + assert len(results) == 2 + mock_client.beta.threads.messages.list.assert_awaited_once_with(thread_id=thread_id, limit=100, order="desc") + + +@pytest.mark.asyncio +async def test_reset_channel(mock_thread_messages, mock_assistant, openai_unit_test_env): + from semantic_kernel.agents.channels.open_ai_assistant_channel import OpenAIAssistantChannel + + async def mock_list_messages(*args, **kwargs) -> Any: + return MagicMock(data=mock_thread_messages) + + async def mock_retrieve_assistant(*args, **kwargs) -> Any: + return mock_assistant + + mock_client = MagicMock(spec=AsyncOpenAI) + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.messages = MagicMock() + mock_client.beta.threads.messages.list = AsyncMock(side_effect=mock_list_messages) + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.retrieve = AsyncMock(side_effect=mock_retrieve_assistant) + mock_client.beta.threads.delete = AsyncMock() + + thread_id = "test_thread" + channel = OpenAIAssistantChannel(client=mock_client, thread_id=thread_id) + + results = [] + async for content in channel.get_history(): + results.append(content) + + assert len(results) == 2 + mock_client.beta.threads.messages.list.assert_awaited_once_with(thread_id=thread_id, limit=100, order="desc") + + await channel.reset() + + assert channel.thread_id is not None + + +@pytest.mark.asyncio +async def test_reset_channel_error_throws_exception(mock_thread_messages, mock_assistant, openai_unit_test_env): + from semantic_kernel.agents.channels.open_ai_assistant_channel import OpenAIAssistantChannel + + async def mock_list_messages(*args, **kwargs) -> Any: + return MagicMock(data=mock_thread_messages) + + async def mock_retrieve_assistant(*args, **kwargs) -> Any: + return mock_assistant + + mock_client = MagicMock(spec=AsyncOpenAI) + mock_client.beta = MagicMock() + mock_client.beta.threads = MagicMock() + mock_client.beta.threads.messages = MagicMock() + mock_client.beta.threads.messages.list = AsyncMock(side_effect=mock_list_messages) + mock_client.beta.assistants = MagicMock() + mock_client.beta.assistants.retrieve = AsyncMock(side_effect=mock_retrieve_assistant) + mock_client.beta.threads.delete = AsyncMock(side_effect=Exception("Test error")) + + thread_id = "test_thread" + channel = OpenAIAssistantChannel(client=mock_client, thread_id=thread_id) + + results = [] + async for content in channel.get_history(): + results.append(content) + + assert len(results) == 2 + mock_client.beta.threads.messages.list.assert_awaited_once_with(thread_id=thread_id, limit=100, order="desc") + + with pytest.raises(Exception, match="Test error"): + await channel.reset() diff --git a/python/tests/unit/agents/test_run_polling_options.py b/python/tests/unit/agents/test_run_polling_options.py new file mode 100644 index 000000000000..bcec2ed9932e --- /dev/null +++ b/python/tests/unit/agents/test_run_polling_options.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft. All rights reserved. 
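+# These tests pin down the RunPollingOptions schedule: the default interval applies
+# until the backoff threshold is exceeded, after which the backoff interval is used.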
+ +from datetime import timedelta + +from semantic_kernel.agents.open_ai.run_polling_options import RunPollingOptions + + +def test_get_polling_interval_below_threshold(): + options = RunPollingOptions() + iteration_count = 1 + expected_interval = timedelta(milliseconds=250) + assert options.get_polling_interval(iteration_count) == expected_interval + + +def test_get_polling_interval_at_threshold(): + options = RunPollingOptions() + iteration_count = 2 + expected_interval = timedelta(milliseconds=250) + assert options.get_polling_interval(iteration_count) == expected_interval + + +def test_get_polling_interval_above_threshold(): + options = RunPollingOptions() + iteration_count = 3 + expected_interval = timedelta(seconds=1) + assert options.get_polling_interval(iteration_count) == expected_interval + + +def test_get_polling_interval_custom_threshold(): + options = RunPollingOptions(run_polling_backoff_threshold=5) + iteration_count = 4 + expected_interval = timedelta(milliseconds=250) + assert options.get_polling_interval(iteration_count) == expected_interval + + iteration_count = 6 + expected_interval = timedelta(seconds=1) + assert options.get_polling_interval(iteration_count) == expected_interval + + +def test_get_polling_interval_custom_intervals(): + options = RunPollingOptions( + run_polling_interval=timedelta(milliseconds=500), run_polling_backoff=timedelta(seconds=2) + ) + iteration_count = 1 + expected_interval = timedelta(milliseconds=500) + assert options.get_polling_interval(iteration_count) == expected_interval + + iteration_count = 3 + expected_interval = timedelta(seconds=2) + assert options.get_polling_interval(iteration_count) == expected_interval diff --git a/python/tests/unit/agents/test_sequential_strategy_selection.py b/python/tests/unit/agents/test_sequential_strategy_selection.py new file mode 100644 index 000000000000..930b071c11aa --- /dev/null +++ b/python/tests/unit/agents/test_sequential_strategy_selection.py @@ -0,0 +1,95 @@ +# Copyright (c) Microsoft. All rights reserved. 
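+# These tests pin down SequentialSelectionStrategy: agents are selected in order,
+# the index wraps around past the end, and reset() restarts from the first agent.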
+ +from unittest.mock import AsyncMock + +import pytest + +from semantic_kernel.agents.agent import Agent +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.strategies.selection.sequential_selection_strategy import SequentialSelectionStrategy + + +class MockAgent(Agent): + """A mock agent for testing purposes.""" + + def __init__(self, id: str = None, name: str = "Test Agent", description: str = "A test agent"): + args = { + "name": name, + "description": description, + } + if id is not None: + args["id"] = id + super().__init__(**args) + + def get_channel_keys(self) -> list[str]: + return ["key1", "key2"] + + async def create_channel(self) -> AgentChannel: + return AsyncMock(spec=AgentChannel) + + +@pytest.fixture +def agents(): + """Fixture that provides a list of mock agents.""" + return [MockAgent(id=f"agent-{i}") for i in range(3)] + + +@pytest.mark.asyncio +async def test_sequential_selection_next(agents): + strategy = SequentialSelectionStrategy() + + # Test the sequence of selections + selected_agent_1 = await strategy.next(agents, []) + selected_agent_2 = await strategy.next(agents, []) + selected_agent_3 = await strategy.next(agents, []) + + assert selected_agent_1.id == "agent-0" + assert selected_agent_2.id == "agent-1" + assert selected_agent_3.id == "agent-2" + + +@pytest.mark.asyncio +async def test_sequential_selection_wraps_around(agents): + strategy = SequentialSelectionStrategy() + + for _ in range(3): + await strategy.next(agents, []) + + selected_agent = await strategy.next(agents, []) + assert selected_agent.id == "agent-0" + + +@pytest.mark.asyncio +async def test_sequential_selection_reset(agents): + strategy = SequentialSelectionStrategy() + + # Move the index to the middle of the list + await strategy.next(agents, []) + await strategy.next(agents, []) + + strategy.reset() + + selected_agent = await strategy.next(agents, []) + assert selected_agent.id == "agent-0" + + +@pytest.mark.asyncio +async def test_sequential_selection_exceeds_length(agents): + strategy = SequentialSelectionStrategy() + + strategy._index = len(agents) + + selected_agent = await strategy.next(agents, []) + + assert selected_agent.id == "agent-0" + assert strategy._index == 1 + + +@pytest.mark.asyncio +async def test_sequential_selection_empty_agents(): + strategy = SequentialSelectionStrategy() + + with pytest.raises(ValueError) as excinfo: + await strategy.next([], []) + + assert "No agents to select from" in str(excinfo.value) diff --git a/python/tests/unit/agents/test_termination_strategy.py b/python/tests/unit/agents/test_termination_strategy.py new file mode 100644 index 000000000000..0d2b34308f3b --- /dev/null +++ b/python/tests/unit/agents/test_termination_strategy.py @@ -0,0 +1,86 @@ +# Copyright (c) Microsoft. All rights reserved. 
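+# These tests pin down TerminationStrategy scoping: only agents registered with the
+# strategy are evaluated; with no agents configured, every agent is evaluated.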
+ +from unittest.mock import AsyncMock, MagicMock + +import pytest + +from semantic_kernel.agents import Agent +from semantic_kernel.agents.channels.agent_channel import AgentChannel +from semantic_kernel.agents.strategies.termination.termination_strategy import TerminationStrategy +from semantic_kernel.contents.chat_message_content import ChatMessageContent + + +class MockAgent(Agent): + """A mock agent for testing purposes.""" + + def __init__(self, id: str = None, name: str = "Test Agent", description: str = "A test agent"): + args = { + "name": name, + "description": description, + } + if id is not None: + args["id"] = id + super().__init__(**args) + + def get_channel_keys(self) -> list[str]: + return ["key1", "key2"] + + async def create_channel(self) -> AgentChannel: + return AsyncMock(spec=AgentChannel) + + +class TestTerminationStrategy(TerminationStrategy): + """A test implementation of TerminationStrategy for testing purposes.""" + + async def should_agent_terminate(self, agent: "Agent", history: list[ChatMessageContent]) -> bool: + """Simple test implementation that always returns True.""" + return True + + +@pytest.mark.asyncio +async def test_should_terminate_with_matching_agent(): + agent = MockAgent(id="test-agent-id") + strategy = TestTerminationStrategy(agents=[agent]) + + # Assuming history is a list of ChatMessageContent; can be mocked or made minimal + history = [MagicMock(spec=ChatMessageContent)] + + result = await strategy.should_terminate(agent, history) + assert result is True + + +@pytest.mark.asyncio +async def test_should_terminate_with_non_matching_agent(): + agent = MockAgent(id="test-agent-id") + non_matching_agent = MockAgent(id="non-matching-agent-id") + strategy = TestTerminationStrategy(agents=[non_matching_agent]) + + # Assuming history is a list of ChatMessageContent; can be mocked or made minimal + history = [MagicMock(spec=ChatMessageContent)] + + result = await strategy.should_terminate(agent, history) + assert result is False + + +@pytest.mark.asyncio +async def test_should_terminate_no_agents_in_strategy(): + agent = MockAgent(id="test-agent-id") + strategy = TestTerminationStrategy() + + # Assuming history is a list of ChatMessageContent; can be mocked or made minimal + history = [MagicMock(spec=ChatMessageContent)] + + result = await strategy.should_terminate(agent, history) + assert result is True + + +@pytest.mark.asyncio +async def test_should_agent_terminate_not_implemented(): + agent = MockAgent(id="test-agent-id") + strategy = TerminationStrategy(agents=[agent]) + + # Assuming history is a list of ChatMessageContent; can be mocked or made minimal + history = [MagicMock(spec=ChatMessageContent)] + + with pytest.raises(NotImplementedError): + await strategy.should_agent_terminate(agent, history) diff --git a/python/tests/unit/connectors/anthropic/services/test_anthropic_chat_completion.py b/python/tests/unit/connectors/anthropic/services/test_anthropic_chat_completion.py new file mode 100644 index 000000000000..11c7882b49df --- /dev/null +++ b/python/tests/unit/connectors/anthropic/services/test_anthropic_chat_completion.py @@ -0,0 +1,222 @@ +# Copyright (c) Microsoft. All rights reserved. 
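+# These tests exercise AnthropicChatCompletion against mocked AsyncAnthropic clients,
+# so no network access is required.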
+from unittest.mock import AsyncMock, MagicMock + +import pytest +from anthropic import AsyncAnthropic + +from semantic_kernel.connectors.ai.anthropic.prompt_execution_settings.anthropic_prompt_execution_settings import ( + AnthropicChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.anthropic.services.anthropic_chat_completion import AnthropicChatCompletion +from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase +from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import ( + OpenAIChatPromptExecutionSettings, +) +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.exceptions import ServiceInitializationError, ServiceResponseException +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.kernel import Kernel + + +@pytest.fixture +def mock_settings() -> AnthropicChatPromptExecutionSettings: + return AnthropicChatPromptExecutionSettings() + + +@pytest.fixture +def mock_anthropic_client_completion() -> AsyncAnthropic: + client = MagicMock(spec=AsyncAnthropic) + chat_completion_response = AsyncMock() + + content = [MagicMock(finish_reason="stop", message=MagicMock(role="assistant", content="Test"))] + chat_completion_response.content = content + + # Create a MagicMock for the messages attribute + messages_mock = MagicMock() + messages_mock.create = chat_completion_response + + # Assign the messages_mock to the client.messages attribute + client.messages = messages_mock + return client + + +@pytest.fixture +def mock_anthropic_client_completion_stream() -> AsyncAnthropic: + client = MagicMock(spec=AsyncAnthropic) + chat_completion_response = MagicMock() + + content = [ + MagicMock(finish_reason="stop", delta=MagicMock(role="assistant", content="Test")), + MagicMock(finish_reason="stop", delta=MagicMock(role="assistant", content="Test", tool_calls=None)), + ] + chat_completion_response.content = content + + chat_completion_response_empty = MagicMock() + chat_completion_response_empty.content = [] + + # Create a MagicMock for the messages attribute + messages_mock = MagicMock() + messages_mock.stream = chat_completion_response + + generator_mock = MagicMock() + generator_mock.__aiter__.return_value = [chat_completion_response_empty, chat_completion_response] + + client.messages = messages_mock + + return client + + +@pytest.mark.asyncio +async def test_complete_chat_contents( + kernel: Kernel, + mock_settings: AnthropicChatPromptExecutionSettings, + mock_anthropic_client_completion: AsyncAnthropic, +): + chat_history = MagicMock() + arguments = KernelArguments() + chat_completion_base = AnthropicChatCompletion( + ai_model_id="test_model_id", service_id="test", api_key="", async_client=mock_anthropic_client_completion + ) + + content: list[ChatMessageContent] = await chat_completion_base.get_chat_message_contents( + chat_history=chat_history, settings=mock_settings, kernel=kernel, arguments=arguments + ) + assert content is not None + + +@pytest.mark.asyncio +async def test_complete_chat_stream_contents( + kernel: Kernel, + mock_settings: AnthropicChatPromptExecutionSettings, + mock_anthropic_client_completion_stream: AsyncAnthropic, +): + chat_history = MagicMock() + arguments = KernelArguments() + + chat_completion_base = AnthropicChatCompletion( + ai_model_id="test_model_id", + service_id="test", + api_key="", + async_client=mock_anthropic_client_completion_stream, + ) + + async for content in 
chat_completion_base.get_streaming_chat_message_contents( + chat_history, mock_settings, kernel=kernel, arguments=arguments + ): + assert content is not None + + +@pytest.mark.asyncio +async def test_anthropic_sdk_exception(kernel: Kernel, mock_settings: AnthropicChatPromptExecutionSettings): + chat_history = MagicMock() + arguments = KernelArguments() + client = MagicMock(spec=AsyncAnthropic) + + # Create a MagicMock for the messages attribute + messages_mock = MagicMock() + messages_mock.create.side_effect = Exception("Test Exception") + + # Assign the messages_mock to the client.messages attribute + client.messages = messages_mock + + chat_completion_base = AnthropicChatCompletion( + ai_model_id="test_model_id", service_id="test", api_key="", async_client=client + ) + + with pytest.raises(ServiceResponseException): + await chat_completion_base.get_chat_message_contents( + chat_history=chat_history, settings=mock_settings, kernel=kernel, arguments=arguments + ) + + +@pytest.mark.asyncio +async def test_anthropic_sdk_exception_streaming(kernel: Kernel, mock_settings: AnthropicChatPromptExecutionSettings): + chat_history = MagicMock() + arguments = KernelArguments() + client = MagicMock(spec=AsyncAnthropic) + + # Create a MagicMock for the messages attribute + messages_mock = MagicMock() + messages_mock.stream.side_effect = Exception("Test Exception") + + client.messages = messages_mock + + chat_completion_base = AnthropicChatCompletion( + ai_model_id="test_model_id", service_id="test", api_key="", async_client=client + ) + + with pytest.raises(ServiceResponseException): + async for content in chat_completion_base.get_streaming_chat_message_contents( + chat_history, mock_settings, kernel=kernel, arguments=arguments + ): + assert content is not None + + +def test_anthropic_chat_completion_init(anthropic_unit_test_env) -> None: + # Test successful initialization + anthropic_chat_completion = AnthropicChatCompletion() + + assert anthropic_chat_completion.ai_model_id == anthropic_unit_test_env["ANTHROPIC_CHAT_MODEL_ID"] + assert isinstance(anthropic_chat_completion, ChatCompletionClientBase) + + +@pytest.mark.parametrize("exclude_list", [["ANTHROPIC_API_KEY"]], indirect=True) +def test_anthropic_chat_completion_init_with_empty_api_key(anthropic_unit_test_env) -> None: + ai_model_id = "test_model_id" + + with pytest.raises(ServiceInitializationError): + AnthropicChatCompletion( + ai_model_id=ai_model_id, + env_file_path="test.env", + ) + + +@pytest.mark.parametrize("exclude_list", [["ANTHROPIC_CHAT_MODEL_ID"]], indirect=True) +def test_anthropic_chat_completion_init_with_empty_model_id(anthropic_unit_test_env) -> None: + with pytest.raises(ServiceInitializationError): + AnthropicChatCompletion( + env_file_path="test.env", + ) + + +def test_prompt_execution_settings_class(anthropic_unit_test_env): + anthropic_chat_completion = AnthropicChatCompletion() + prompt_execution_settings = anthropic_chat_completion.get_prompt_execution_settings_class() + assert prompt_execution_settings == AnthropicChatPromptExecutionSettings + + +@pytest.mark.asyncio +async def test_with_different_execution_settings(kernel: Kernel, mock_anthropic_client_completion: MagicMock): + chat_history = MagicMock() + settings = OpenAIChatPromptExecutionSettings(temperature=0.2) + arguments = KernelArguments() + chat_completion_base = AnthropicChatCompletion( + ai_model_id="test_model_id", service_id="test", api_key="", async_client=mock_anthropic_client_completion + ) + + await chat_completion_base.get_chat_message_contents( + 
chat_history=chat_history, settings=settings, kernel=kernel, arguments=arguments
+    )
+
+    assert mock_anthropic_client_completion.messages.create.call_args.kwargs["temperature"] == 0.2
+
+
+@pytest.mark.asyncio
+async def test_with_different_execution_settings_stream(
+    kernel: Kernel, mock_anthropic_client_completion_stream: MagicMock
+):
+    chat_history = MagicMock()
+    settings = OpenAIChatPromptExecutionSettings(temperature=0.2, seed=2)
+    arguments = KernelArguments()
+    chat_completion_base = AnthropicChatCompletion(
+        ai_model_id="test_model_id",
+        service_id="test",
+        api_key="",
+        async_client=mock_anthropic_client_completion_stream,
+    )
+
+    async for chunk in chat_completion_base.get_streaming_chat_message_contents(
+        chat_history, settings, kernel=kernel, arguments=arguments
+    ):
+        continue
+    assert mock_anthropic_client_completion_stream.messages.stream.call_args.kwargs["temperature"] == 0.2
diff --git a/python/tests/unit/connectors/anthropic/test_anthropic_request_settings.py b/python/tests/unit/connectors/anthropic/test_anthropic_request_settings.py
new file mode 100644
index 000000000000..2cfc2327e655
--- /dev/null
+++ b/python/tests/unit/connectors/anthropic/test_anthropic_request_settings.py
@@ -0,0 +1,126 @@
+# Copyright (c) Microsoft. All rights reserved.
+import pytest
+
+from semantic_kernel.connectors.ai.anthropic.prompt_execution_settings.anthropic_prompt_execution_settings import (
+    AnthropicChatPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
+
+
+def test_default_anthropic_chat_prompt_execution_settings():
+    settings = AnthropicChatPromptExecutionSettings()
+    assert settings.temperature is None
+    assert settings.top_p is None
+    assert settings.max_tokens is None
+    assert settings.messages is None
+
+
+def test_custom_anthropic_chat_prompt_execution_settings():
+    settings = AnthropicChatPromptExecutionSettings(
+        temperature=0.5,
+        top_p=0.5,
+        max_tokens=128,
+        messages=[{"role": "system", "content": "Hello"}],
+    )
+    assert settings.temperature == 0.5
+    assert settings.top_p == 0.5
+    assert settings.max_tokens == 128
+    assert settings.messages == [{"role": "system", "content": "Hello"}]
+
+
+def test_anthropic_chat_prompt_execution_settings_from_default_completion_config():
+    settings = PromptExecutionSettings(service_id="test_service")
+    chat_settings = AnthropicChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+    assert chat_settings.service_id == "test_service"
+    assert chat_settings.temperature is None
+    assert chat_settings.top_p is None
+    assert chat_settings.max_tokens is None
+
+
+def test_anthropic_chat_prompt_execution_settings_update_from_prompt_execution_settings():
+    chat_settings = AnthropicChatPromptExecutionSettings(service_id="test_service", temperature=1.0)
+    new_settings = AnthropicChatPromptExecutionSettings(service_id="test_2", temperature=0.0)
+    chat_settings.update_from_prompt_execution_settings(new_settings)
+    assert chat_settings.service_id == "test_2"
+    assert chat_settings.temperature == 0.0
+
+
+def test_anthropic_chat_prompt_execution_settings_from_custom_completion_config():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "max_tokens": 128,
+            "messages": [{"role": "system", "content": "Hello"}],
+        },
+    )
+    chat_settings = AnthropicChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+    assert chat_settings.temperature == 0.5
+    assert chat_settings.top_p == 0.5
+    assert chat_settings.max_tokens == 128
+
+
+def test_anthropic_chat_prompt_execution_settings_from_custom_completion_config_with_none():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "max_tokens": 128,
+            "messages": [{"role": "system", "content": "Hello"}],
+        },
+    )
+    chat_settings = AnthropicChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+    assert chat_settings.temperature == 0.5
+    assert chat_settings.top_p == 0.5
+    assert chat_settings.max_tokens == 128
+
+
+def test_anthropic_chat_prompt_execution_settings_from_custom_completion_config_with_functions():
+    settings = PromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "max_tokens": 128,
+            "tools": [{}],
+            "messages": [{"role": "system", "content": "Hello"}],
+        },
+    )
+    chat_settings = AnthropicChatPromptExecutionSettings.from_prompt_execution_settings(settings)
+    assert chat_settings.temperature == 0.5
+    assert chat_settings.top_p == 0.5
+    assert chat_settings.max_tokens == 128
+
+
+def test_create_options():
+    settings = AnthropicChatPromptExecutionSettings(
+        service_id="test_service",
+        extension_data={
+            "temperature": 0.5,
+            "top_p": 0.5,
+            "max_tokens": 128,
+            "tools": [{}],
+            "messages": [{"role": "system", "content": "Hello"}],
+        },
+    )
+    options = settings.prepare_settings_dict()
+    assert options["temperature"] == 0.5
+    assert options["top_p"] == 0.5
+    assert options["max_tokens"] == 128
+
+
+def test_create_options_with_function_choice_behavior():
+    with pytest.raises(NotImplementedError):
+        AnthropicChatPromptExecutionSettings(
+            service_id="test_service",
+            function_choice_behavior="auto",
+            extension_data={
+                "temperature": 0.5,
+                "top_p": 0.5,
+                "max_tokens": 128,
+                "tools": [{}],
+                "messages": [{"role": "system", "content": "Hello"}],
+            },
+        )
diff --git a/python/tests/unit/connectors/google/conftest.py b/python/tests/unit/connectors/google/conftest.py
new file mode 100644
index 000000000000..65f2888f4280
--- /dev/null
+++ b/python/tests/unit/connectors/google/conftest.py
@@ -0,0 +1,22 @@
+# Copyright (c) Microsoft. All rights reserved.
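+#
+# Fixtures shared by the Google AI and Vertex AI connector unit tests.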
+ +import pytest + +from semantic_kernel.contents.chat_history import ChatHistory + + +@pytest.fixture() +def service_id() -> str: + return "test_service_id" + + +@pytest.fixture() +def chat_history() -> ChatHistory: + chat_history = ChatHistory() + chat_history.add_user_message("test_prompt") + return chat_history + + +@pytest.fixture() +def prompt() -> str: + return "test_prompt" diff --git a/python/tests/unit/connectors/google/google_ai/conftest.py b/python/tests/unit/connectors/google/google_ai/conftest.py index 40abd9c169cd..7f27c217713b 100644 --- a/python/tests/unit/connectors/google/google_ai/conftest.py +++ b/python/tests/unit/connectors/google/google_ai/conftest.py @@ -8,25 +8,6 @@ from google.generativeai import protos from google.generativeai.types import AsyncGenerateContentResponse -from semantic_kernel.contents.chat_history import ChatHistory - - -@pytest.fixture() -def service_id() -> str: - return "test_service_id" - - -@pytest.fixture() -def chat_history() -> ChatHistory: - chat_history = ChatHistory() - chat_history.add_user_message("test_prompt") - return chat_history - - -@pytest.fixture() -def prompt() -> str: - return "test_prompt" - @pytest.fixture() def google_ai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): @@ -78,6 +59,40 @@ def mock_google_ai_chat_completion_response() -> AsyncGenerateContentResponse: ) +@pytest.fixture() +def mock_google_ai_chat_completion_response_with_tool_call() -> AsyncGenerateContentResponse: + """Mock Google AI Chat Completion response.""" + candidate = protos.Candidate() + candidate.index = 0 + candidate.content = protos.Content( + role="user", + parts=[ + protos.Part( + function_call=protos.FunctionCall( + name="test_function", + args={"test_arg": "test_value"}, + ) + ) + ], + ) + candidate.finish_reason = protos.Candidate.FinishReason.STOP + + response = protos.GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = protos.GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + cached_content_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + return AsyncGenerateContentResponse( + done=True, + iterator=None, + result=response, + ) + + @pytest_asyncio.fixture() async def mock_google_ai_streaming_chat_completion_response() -> AsyncGenerateContentResponse: """Mock Google AI streaming Chat Completion response.""" @@ -103,6 +118,41 @@ async def mock_google_ai_streaming_chat_completion_response() -> AsyncGenerateCo ) +@pytest_asyncio.fixture() +async def mock_google_ai_streaming_chat_completion_response_with_tool_call() -> AsyncGenerateContentResponse: + """Mock Google AI streaming Chat Completion response with tool call.""" + candidate = protos.Candidate() + candidate.index = 0 + candidate.content = protos.Content( + role="user", + parts=[ + protos.Part( + function_call=protos.FunctionCall( + name="test_function", + args={"test_arg": "test_value"}, + ) + ) + ], + ) + candidate.finish_reason = protos.Candidate.FinishReason.STOP + + response = protos.GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = protos.GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + cached_content_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + iterable = MagicMock(spec=AsyncGenerator) + iterable.__aiter__.return_value = [response] + + return await AsyncGenerateContentResponse.from_aiterator( + iterator=iterable, + ) + + @pytest.fixture() def mock_google_ai_text_completion_response() -> 
AsyncGenerateContentResponse: """Mock Google AI Text Completion response.""" diff --git a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_chat_completion.py b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_chat_completion.py index 17598d198e61..bc24e98d43c0 100644 --- a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_chat_completion.py +++ b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_chat_completion.py @@ -7,6 +7,7 @@ from google.generativeai.protos import Content from google.generativeai.types import GenerationConfig +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior from semantic_kernel.connectors.ai.google.google_ai.google_ai_prompt_execution_settings import ( GoogleAIChatPromptExecutionSettings, ) @@ -15,7 +16,10 @@ from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.utils.finish_reason import FinishReason -from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError +from semantic_kernel.exceptions.service_exceptions import ( + ServiceInitializationError, + ServiceInvalidExecutionSettingsError, +) # region init @@ -74,7 +78,7 @@ def test_prompt_execution_settings_class(google_ai_unit_test_env) -> None: @pytest.mark.asyncio @patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock) async def test_google_ai_chat_completion( - mock_google_model_generate_content_async, + mock_google_ai_model_generate_content_async, google_ai_unit_test_env, chat_history: ChatHistory, mock_google_ai_chat_completion_response, @@ -82,16 +86,18 @@ async def test_google_ai_chat_completion( """Test chat completion with GoogleAIChatCompletion""" settings = GoogleAIChatPromptExecutionSettings() - mock_google_model_generate_content_async.return_value = mock_google_ai_chat_completion_response + mock_google_ai_model_generate_content_async.return_value = mock_google_ai_chat_completion_response google_ai_chat_completion = GoogleAIChatCompletion() responses: list[ChatMessageContent] = await google_ai_chat_completion.get_chat_message_contents( chat_history, settings ) - mock_google_model_generate_content_async.assert_called_once_with( + mock_google_ai_model_generate_content_async.assert_called_once_with( contents=google_ai_chat_completion._prepare_chat_history_for_request(chat_history), generation_config=GenerationConfig(**settings.prepare_settings_dict()), + tools=None, + tool_config=None, ) assert len(responses) == 1 assert responses[0].role == "assistant" @@ -102,6 +108,102 @@ async def test_google_ai_chat_completion( assert responses[0].inner_content == mock_google_ai_chat_completion_response +@pytest.mark.asyncio +async def test_google_ai_chat_completion_with_function_choice_behavior_fail_verification( + chat_history: ChatHistory, + google_ai_unit_test_env, +) -> None: + """Test completion of GoogleAIChatCompletion with function choice behavior expect verification failure""" + + # Missing kernel + with pytest.raises(ServiceInvalidExecutionSettingsError): + settings = GoogleAIChatPromptExecutionSettings( + function_choice_behavior=FunctionChoiceBehavior.Auto(), + ) + + google_ai_chat_completion = GoogleAIChatCompletion() + + await google_ai_chat_completion.get_chat_message_contents( + chat_history=chat_history, + settings=settings, + ) + + +@pytest.mark.asyncio +@patch.object(GenerativeModel, "generate_content_async", 
new_callable=AsyncMock)
+async def test_google_ai_chat_completion_with_function_choice_behavior(
+    mock_google_ai_model_generate_content_async,
+    google_ai_unit_test_env,
+    kernel,
+    chat_history: ChatHistory,
+    mock_google_ai_chat_completion_response_with_tool_call,
+) -> None:
+    """Test completion of GoogleAIChatCompletion with function choice behavior"""
+    mock_google_ai_model_generate_content_async.return_value = mock_google_ai_chat_completion_response_with_tool_call
+
+    settings = GoogleAIChatPromptExecutionSettings(
+        function_choice_behavior=FunctionChoiceBehavior.Auto(),
+    )
+    settings.function_choice_behavior.maximum_auto_invoke_attempts = 1
+
+    google_ai_chat_completion = GoogleAIChatCompletion()
+
+    responses = await google_ai_chat_completion.get_chat_message_contents(
+        chat_history=chat_history,
+        settings=settings,
+        kernel=kernel,
+    )
+
+    # The function should be called twice:
+    # One for the tool call and one for the last completion
+    # after the maximum_auto_invoke_attempts is reached
+    assert mock_google_ai_model_generate_content_async.call_count == 2
+    assert len(responses) == 1
+    assert responses[0].role == "assistant"
+    # Google doesn't return STOP as the finish reason for real tool calls,
+    # but the mocked response above explicitly sets STOP
+    assert responses[0].finish_reason == FinishReason.STOP
+
+
+@pytest.mark.asyncio
+@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock)
+async def test_google_ai_chat_completion_with_function_choice_behavior_no_tool_call(
+    mock_google_ai_model_generate_content_async,
+    google_ai_unit_test_env,
+    kernel,
+    chat_history: ChatHistory,
+    mock_google_ai_chat_completion_response,
+) -> None:
+    """Test completion of GoogleAIChatCompletion with function choice behavior but no tool call returned"""
+    mock_google_ai_model_generate_content_async.return_value = mock_google_ai_chat_completion_response
+
+    settings = GoogleAIChatPromptExecutionSettings(
+        function_choice_behavior=FunctionChoiceBehavior.Auto(),
+    )
+    settings.function_choice_behavior.maximum_auto_invoke_attempts = 1
+
+    google_ai_chat_completion = GoogleAIChatCompletion()
+
+    responses = await google_ai_chat_completion.get_chat_message_contents(
+        chat_history=chat_history,
+        settings=settings,
+        kernel=kernel,
+    )
+
+    # Remove the latest message since the response from the model will be added to the chat history
+    # even when the model doesn't return a tool call
+    chat_history.remove_message(chat_history[-1])
+
+    mock_google_ai_model_generate_content_async.assert_awaited_once_with(
+        contents=google_ai_chat_completion._prepare_chat_history_for_request(chat_history),
+        generation_config=GenerationConfig(**settings.prepare_settings_dict()),
+        tools=None,
+        tool_config=None,
+    )
+    assert len(responses) == 1
+    assert responses[0].role == "assistant"
+    assert responses[0].content == mock_google_ai_chat_completion_response.candidates[0].content.parts[0].text
+
+
 # endregion chat completion
 
 
@@ -109,15 +211,15 @@
 @pytest.mark.asyncio
 @patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock)
 async def test_google_ai_streaming_chat_completion(
-    mock_google_model_generate_content_async,
+    mock_google_ai_model_generate_content_async,
     google_ai_unit_test_env,
     chat_history: ChatHistory,
     mock_google_ai_streaming_chat_completion_response,
 ) -> None:
-    """Test chat completion with GoogleAIChatCompletion"""
+    """Test streaming chat completion with GoogleAIChatCompletion"""
     settings = GoogleAIChatPromptExecutionSettings()
 
-    mock_google_model_generate_content_async.return_value = mock_google_ai_streaming_chat_completion_response
+    mock_google_ai_model_generate_content_async.return_value = mock_google_ai_streaming_chat_completion_response
 
     google_ai_chat_completion = GoogleAIChatCompletion()
     async for messages in google_ai_chat_completion.get_streaming_chat_message_contents(chat_history, settings):
@@ -127,9 +229,110 @@
         assert "usage" in messages[0].metadata
         assert "prompt_feedback" in messages[0].metadata
 
-    mock_google_model_generate_content_async.assert_called_once_with(
+    mock_google_ai_model_generate_content_async.assert_called_once_with(
+        contents=google_ai_chat_completion._prepare_chat_history_for_request(chat_history),
+        generation_config=GenerationConfig(**settings.prepare_settings_dict()),
+        tools=None,
+        tool_config=None,
+        stream=True,
+    )
+
+
+@pytest.mark.asyncio
+async def test_google_ai_streaming_chat_completion_with_function_choice_behavior_fail_verification(
+    chat_history: ChatHistory,
+    google_ai_unit_test_env,
+) -> None:
+    """Test streaming chat completion of GoogleAIChatCompletion with function choice
+    behavior expect verification failure"""
+
+    # Missing kernel
+    with pytest.raises(ServiceInvalidExecutionSettingsError):
+        settings = GoogleAIChatPromptExecutionSettings(
+            function_choice_behavior=FunctionChoiceBehavior.Auto(),
+        )
+
+        google_ai_chat_completion = GoogleAIChatCompletion()
+
+        async for _ in google_ai_chat_completion.get_streaming_chat_message_contents(
+            chat_history=chat_history,
+            settings=settings,
+        ):
+            pass
+
+
+@pytest.mark.asyncio
+@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock)
+async def test_google_ai_streaming_chat_completion_with_function_choice_behavior(
+    mock_google_ai_model_generate_content_async,
+    google_ai_unit_test_env,
+    kernel,
+    chat_history: ChatHistory,
+    mock_google_ai_streaming_chat_completion_response_with_tool_call,
+) -> None:
+    """Test streaming chat completion of GoogleAIChatCompletion with function choice behavior"""
+    mock_google_ai_model_generate_content_async.return_value = (
+        mock_google_ai_streaming_chat_completion_response_with_tool_call
+    )
+
+    settings = GoogleAIChatPromptExecutionSettings(
+        function_choice_behavior=FunctionChoiceBehavior.Auto(),
+    )
+    settings.function_choice_behavior.maximum_auto_invoke_attempts = 1
+
+    google_ai_chat_completion = GoogleAIChatCompletion()
+
+    async for messages in google_ai_chat_completion.get_streaming_chat_message_contents(
+        chat_history,
+        settings,
+        kernel=kernel,
+    ):
+        assert len(messages) == 1
+        assert messages[0].role == "assistant"
+        assert messages[0].content == ""
+        # Google doesn't return STOP as the finish reason for real tool calls,
+        # but the mocked response above explicitly sets STOP
+        assert messages[0].finish_reason == FinishReason.STOP
+
+    # Streaming completion with tool call does not invoke the model
+    # after maximum_auto_invoke_attempts is reached
+    assert mock_google_ai_model_generate_content_async.call_count == 1
+
+
+@pytest.mark.asyncio
+@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock)
+async def test_google_ai_streaming_chat_completion_with_function_choice_behavior_no_tool_call(
+    mock_google_ai_model_generate_content_async,
+    google_ai_unit_test_env,
+    kernel,
+    chat_history: ChatHistory,
+    mock_google_ai_streaming_chat_completion_response,
+) -> None:
+    """Test streaming chat completion of GoogleAIChatCompletion with function choice
+    behavior but no tool call returned"""
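+    # The mocked response contains plain text and no function call, so the
+    # auto-invoke loop is expected to make exactly one model call.
+    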
mock_google_ai_model_generate_content_async.return_value = mock_google_ai_streaming_chat_completion_response + + settings = GoogleAIChatPromptExecutionSettings( + function_choice_behavior=FunctionChoiceBehavior.Auto(), + ) + settings.function_choice_behavior.maximum_auto_invoke_attempts = 1 + + google_ai_chat_completion = GoogleAIChatCompletion() + + async for messages in google_ai_chat_completion.get_streaming_chat_message_contents( + chat_history=chat_history, + settings=settings, + kernel=kernel, + ): + assert len(messages) == 1 + assert messages[0].role == "assistant" + assert ( + messages[0].content == mock_google_ai_streaming_chat_completion_response.candidates[0].content.parts[0].text + ) + + mock_google_ai_model_generate_content_async.assert_awaited_once_with( contents=google_ai_chat_completion._prepare_chat_history_for_request(chat_history), generation_config=GenerationConfig(**settings.prepare_settings_dict()), + tools=None, + tool_config=None, stream=True, ) @@ -156,14 +359,3 @@ def test_google_ai_chat_completion_parse_chat_history_correctly(google_ai_unit_t assert parsed_chat_history[0].parts[0].text == "test_user_message" assert parsed_chat_history[1].role == "model" assert parsed_chat_history[1].parts[0].text == "test_assistant_message" - - -def test_google_ai_chat_completion_parse_chat_history_throw_unsupported_message(google_ai_unit_test_env) -> None: - """Test _prepare_chat_history_for_request method with unsupported message type""" - google_ai_chat_completion = GoogleAIChatCompletion() - - chat_history = ChatHistory() - chat_history.add_tool_message("test_tool_message") - - with pytest.raises(ValueError): - _ = google_ai_chat_completion._prepare_chat_history_for_request(chat_history) diff --git a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_completion.py b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_completion.py index 9f5f5936fb5c..10c9967e046d 100644 --- a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_completion.py +++ b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_completion.py @@ -39,10 +39,10 @@ def test_google_ai_text_completion_init_with_service_id(google_ai_unit_test_env, def test_google_ai_text_completion_init_with_model_id_in_argument(google_ai_unit_test_env) -> None: """Test initialization of GoogleAIChatCompletion with model_id in argument""" - google_ai_chat_completion = GoogleAITextCompletion(gemini_model_id="custom_model_id") + google_ai_text_completion = GoogleAITextCompletion(gemini_model_id="custom_model_id") - assert google_ai_chat_completion.ai_model_id == "custom_model_id" - assert google_ai_chat_completion.service_id == "custom_model_id" + assert google_ai_text_completion.ai_model_id == "custom_model_id" + assert google_ai_text_completion.service_id == "custom_model_id" @pytest.mark.parametrize("exclude_list", [["GOOGLE_AI_GEMINI_MODEL_ID"]], indirect=True) diff --git a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_embedding.py b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_embedding.py index e8fb4a36d349..6870b95d4b23 100644 --- a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_embedding.py +++ b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_text_embedding.py @@ -36,7 +36,7 @@ def test_google_ai_text_embedding_init_with_service_id(google_ai_unit_test_env, def 
test_google_ai_text_embedding_init_with_model_id_in_argument(google_ai_unit_test_env) -> None: - """Test initialization of GoogleAIChatCompletion with model_id in argument""" + """Test initialization of GoogleAITextEmbedding with model_id in argument""" google_ai_chat_completion = GoogleAITextEmbedding(embedding_model_id="custom_model_id") assert google_ai_chat_completion.ai_model_id == "custom_model_id" diff --git a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_utils.py b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_utils.py index 76a0f5b4a2e7..1d2a6355e70a 100644 --- a/python/tests/unit/connectors/google/google_ai/services/test_google_ai_utils.py +++ b/python/tests/unit/connectors/google/google_ai/services/test_google_ai_utils.py @@ -4,13 +4,13 @@ from google.generativeai.protos import Candidate, Part from semantic_kernel.connectors.ai.google.google_ai.services.utils import ( - filter_system_message, finish_reason_from_google_ai_to_semantic_kernel, + format_assistant_message, format_user_message, ) -from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent from semantic_kernel.contents.image_content import ImageContent from semantic_kernel.contents.text_content import TextContent from semantic_kernel.contents.utils.author_role import AuthorRole @@ -26,27 +26,6 @@ def test_finish_reason_from_google_ai_to_semantic_kernel(): assert finish_reason_from_google_ai_to_semantic_kernel(Candidate.FinishReason.OTHER) is None -def test_first_system_message(): - """Test filter_system_message.""" - # Test with a single system message - chat_history = ChatHistory() - chat_history.add_system_message("System message") - chat_history.add_user_message("User message") - assert filter_system_message(chat_history) == "System message" - - # Test with no system message - chat_history = ChatHistory() - chat_history.add_user_message("User message") - assert filter_system_message(chat_history) is None - - # Test with multiple system messages - chat_history = ChatHistory() - chat_history.add_system_message("System message 1") - chat_history.add_system_message("System message 2") - with pytest.raises(ServiceInvalidRequestError): - filter_system_message(chat_history) - - def test_format_user_message(): """Test format_user_message.""" user_message = ChatMessageContent(role=AuthorRole.USER, content="User message") @@ -78,13 +57,10 @@ def test_format_user_message(): def test_format_user_message_throws_with_unsupported_items() -> None: """Test format_user_message with unsupported items.""" # Test with unsupported items, any item other than TextContent and ImageContent should raise an error - # Note that method format_user_message will use the content of the message if no ImageContent is found, - # so we need to add an ImageContent to the message to trigger the error user_message = ChatMessageContent( role=AuthorRole.USER, items=[ FunctionCallContent(), - ImageContent(data="image data", mime_type="image/png"), ], ) with pytest.raises(ServiceInvalidRequestError): @@ -99,3 +75,37 @@ def test_format_user_message_throws_with_unsupported_items() -> None: ) with pytest.raises(ServiceInvalidRequestError): format_user_message(user_message) + + +def test_format_assistant_message() -> None: + assistant_message = ChatMessageContent( + 
role=AuthorRole.ASSISTANT, + items=[ + TextContent(text="test"), + FunctionCallContent(name="test_function", arguments={}), + ImageContent(data="image data", mime_type="image/png"), + ], + ) + + formatted_assistant_message = format_assistant_message(assistant_message) + assert isinstance(formatted_assistant_message, list) + assert len(formatted_assistant_message) == 3 + assert isinstance(formatted_assistant_message[0], Part) + assert formatted_assistant_message[0].text == "test" + assert isinstance(formatted_assistant_message[1], Part) + assert formatted_assistant_message[1].function_call.name == "test_function" + assert formatted_assistant_message[1].function_call.args == {} + assert isinstance(formatted_assistant_message[2], Part) + assert formatted_assistant_message[2].inline_data + + +def test_format_assistant_message_with_unsupported_items() -> None: + assistant_message = ChatMessageContent( + role=AuthorRole.ASSISTANT, + items=[ + FunctionResultContent(id="test_id", function_name="test_function"), + ], + ) + + with pytest.raises(ServiceInvalidRequestError): + format_assistant_message(assistant_message) diff --git a/python/tests/unit/connectors/google/test_shared_utils.py b/python/tests/unit/connectors/google/test_shared_utils.py new file mode 100644 index 000000000000..599c2f9a0364 --- /dev/null +++ b/python/tests/unit/connectors/google/test_shared_utils.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft. All rights reserved. + + +import pytest + +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceType +from semantic_kernel.connectors.ai.google.shared_utils import ( + FUNCTION_CHOICE_TYPE_TO_GOOGLE_FUNCTION_CALLING_MODE, + GEMINI_FUNCTION_NAME_SEPARATOR, + filter_system_message, + format_gemini_function_name_to_kernel_function_fully_qualified_name, +) +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError + + +def test_first_system_message(): + """Test filter_system_message.""" + # Test with a single system message + chat_history = ChatHistory() + chat_history.add_system_message("System message") + chat_history.add_user_message("User message") + assert filter_system_message(chat_history) == "System message" + + # Test with no system message + chat_history = ChatHistory() + chat_history.add_user_message("User message") + assert filter_system_message(chat_history) is None + + # Test with multiple system messages + chat_history = ChatHistory() + chat_history.add_system_message("System message 1") + chat_history.add_system_message("System message 2") + with pytest.raises(ServiceInvalidRequestError): + filter_system_message(chat_history) + + +def test_function_choice_type_to_google_function_calling_mode_contain_all_types() -> None: + assert FunctionChoiceType.AUTO in FUNCTION_CHOICE_TYPE_TO_GOOGLE_FUNCTION_CALLING_MODE + assert FunctionChoiceType.NONE in FUNCTION_CHOICE_TYPE_TO_GOOGLE_FUNCTION_CALLING_MODE + assert FunctionChoiceType.REQUIRED in FUNCTION_CHOICE_TYPE_TO_GOOGLE_FUNCTION_CALLING_MODE + + +def test_format_gemini_function_name_to_kernel_function_fully_qualified_name() -> None: + # Contains the separator + gemini_function_name = f"plugin{GEMINI_FUNCTION_NAME_SEPARATOR}function" + assert ( + format_gemini_function_name_to_kernel_function_fully_qualified_name(gemini_function_name) == "plugin-function" + ) + + # Doesn't contain the separator + gemini_function_name = "function" + assert 
format_gemini_function_name_to_kernel_function_fully_qualified_name(gemini_function_name) == "function" diff --git a/python/tests/unit/connectors/google/vertex_ai/conftest.py b/python/tests/unit/connectors/google/vertex_ai/conftest.py new file mode 100644 index 000000000000..d1efbd80b19a --- /dev/null +++ b/python/tests/unit/connectors/google/vertex_ai/conftest.py @@ -0,0 +1,189 @@ +# Copyright (c) Microsoft. All rights reserved. + +from collections.abc import AsyncGenerator, AsyncIterable +from unittest.mock import MagicMock + +import pytest +from google.cloud.aiplatform_v1beta1.types.content import Candidate, Content, Part +from google.cloud.aiplatform_v1beta1.types.prediction_service import GenerateContentResponse +from google.cloud.aiplatform_v1beta1.types.tool import FunctionCall +from vertexai.generative_models import GenerationResponse +from vertexai.language_models import TextEmbedding + + +@pytest.fixture() +def vertex_ai_unit_test_env(monkeypatch, exclude_list, override_env_param_dict): + """Fixture to set environment variables for Vertex AI Unit Tests.""" + if exclude_list is None: + exclude_list = [] + + if override_env_param_dict is None: + override_env_param_dict = {} + + env_vars = { + "VERTEX_AI_GEMINI_MODEL_ID": "test-gemini-model-id", + "VERTEX_AI_EMBEDDING_MODEL_ID": "test-embedding-model-id", + "VERTEX_AI_PROJECT_ID": "test-project-id", + } + + env_vars.update(override_env_param_dict) + + for key, value in env_vars.items(): + if key not in exclude_list: + monkeypatch.setenv(key, value) + else: + monkeypatch.delenv(key, raising=False) + + return env_vars + + +@pytest.fixture() +def mock_vertex_ai_chat_completion_response() -> GenerationResponse: + """Mock Vertex AI Chat Completion response.""" + candidate = Candidate() + candidate.index = 0 + candidate.content = Content(role="user", parts=[Part(text="Test content")]) + candidate.finish_reason = Candidate.FinishReason.STOP + + response = GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + return GenerationResponse._from_gapic(response) + + +@pytest.fixture() +def mock_vertex_ai_chat_completion_response_with_tool_call() -> GenerationResponse: + """Mock Vertex AI Chat Completion response.""" + candidate = Candidate() + candidate.index = 0 + candidate.content = Content( + role="user", + parts=[ + Part( + function_call=FunctionCall( + name="test_function", + args={"test_arg": "test_value"}, + ) + ) + ], + ) + candidate.finish_reason = Candidate.FinishReason.STOP + + response = GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + return GenerationResponse._from_gapic(response) + + +@pytest.fixture() +def mock_vertex_ai_streaming_chat_completion_response() -> AsyncIterable[GenerationResponse]: + """Mock Vertex AI streaming Chat Completion response.""" + candidate = Candidate() + candidate.index = 0 + candidate.content = Content(role="user", parts=[Part(text="Test content")]) + candidate.finish_reason = Candidate.FinishReason.STOP + + response = GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + iterable = MagicMock(spec=AsyncGenerator) + 
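# Returning a list from __aiter__ lets the MagicMock be consumed with
+    # "async for", emulating the streaming response iterator.
+    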
iterable.__aiter__.return_value = [GenerationResponse._from_gapic(response)] + + return iterable + + +@pytest.fixture() +def mock_vertex_ai_streaming_chat_completion_response_with_tool_call() -> AsyncIterable[GenerationResponse]: + """Mock Vertex AI streaming Chat Completion response.""" + candidate = Candidate() + candidate.index = 0 + candidate.content = Content( + role="user", + parts=[ + Part( + function_call=FunctionCall( + name="test_function", + args={"test_arg": "test_value"}, + ) + ) + ], + ) + candidate.finish_reason = Candidate.FinishReason.STOP + + response = GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + iterable = MagicMock(spec=AsyncGenerator) + iterable.__aiter__.return_value = [GenerationResponse._from_gapic(response)] + + return iterable + + +@pytest.fixture() +def mock_vertex_ai_text_completion_response() -> GenerationResponse: + """Mock Vertex AI Text Completion response.""" + candidate = Candidate() + candidate.index = 0 + candidate.content = Content(parts=[Part(text="Test content")]) + + response = GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + return GenerationResponse._from_gapic(response) + + +@pytest.fixture() +def mock_vertex_ai_streaming_text_completion_response() -> AsyncIterable[GenerationResponse]: + """Mock Vertex AI streaming Text Completion response.""" + candidate = Candidate() + candidate.index = 0 + candidate.content = Content(parts=[Part(text="Test content")]) + + response = GenerateContentResponse() + response.candidates.append(candidate) + response.usage_metadata = GenerateContentResponse.UsageMetadata( + prompt_token_count=0, + candidates_token_count=0, + total_token_count=0, + ) + + iterable = MagicMock(spec=AsyncGenerator) + iterable.__aiter__.return_value = [GenerationResponse._from_gapic(response)] + + return iterable + + +class MockTextEmbeddingModel: + async def get_embeddings_async( + self, + texts: list[str], + *, + auto_truncate: bool = True, + output_dimensionality: int | None = None, + ) -> list[TextEmbedding]: + pass diff --git a/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_chat_completion.py b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_chat_completion.py new file mode 100644 index 000000000000..7bed2ae9e776 --- /dev/null +++ b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_chat_completion.py @@ -0,0 +1,358 @@ +# Copyright (c) Microsoft. All rights reserved. 
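+#
+# These tests mirror the Google AI chat completion tests; the underlying
+# GenerativeModel.generate_content_async call is mocked throughout.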
+ + +from unittest.mock import AsyncMock, patch + +import pytest +from google.cloud.aiplatform_v1beta1.types.content import Content +from vertexai.generative_models import GenerativeModel + +from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior +from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_chat_completion import VertexAIChatCompletion +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import ( + VertexAIChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.finish_reason import FinishReason +from semantic_kernel.exceptions.service_exceptions import ( + ServiceInitializationError, + ServiceInvalidExecutionSettingsError, +) + + +# region init +def test_vertex_ai_chat_completion_init(vertex_ai_unit_test_env) -> None: + """Test initialization of VertexAIChatCompletion""" + model_id = vertex_ai_unit_test_env["VERTEX_AI_GEMINI_MODEL_ID"] + project_id = vertex_ai_unit_test_env["VERTEX_AI_PROJECT_ID"] + vertex_ai_chat_completion = VertexAIChatCompletion() + + assert vertex_ai_chat_completion.ai_model_id == model_id + assert vertex_ai_chat_completion.service_id == model_id + + assert isinstance(vertex_ai_chat_completion.service_settings, VertexAISettings) + assert vertex_ai_chat_completion.service_settings.gemini_model_id == model_id + assert vertex_ai_chat_completion.service_settings.project_id == project_id + + +def test_vertex_ai_chat_completion_init_with_service_id(vertex_ai_unit_test_env, service_id) -> None: + """Test initialization of VertexAIChatCompletion with a service id that is not the model id""" + vertex_ai_chat_completion = VertexAIChatCompletion(service_id=service_id) + + assert vertex_ai_chat_completion.service_id == service_id + + +def test_vertex_ai_chat_completion_init_with_model_id_in_argument(vertex_ai_unit_test_env) -> None: + """Test initialization of VertexAIChatCompletion with model id in argument""" + vertex_ai_chat_completion = VertexAIChatCompletion(gemini_model_id="custom_model_id") + + assert vertex_ai_chat_completion.ai_model_id == "custom_model_id" + assert vertex_ai_chat_completion.service_id == "custom_model_id" + + +@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_GEMINI_MODEL_ID"]], indirect=True) +def test_vertex_ai_chat_completion_init_with_empty_model_id(vertex_ai_unit_test_env) -> None: + """Test initialization of VertexAIChatCompletion with an empty model id""" + with pytest.raises(ServiceInitializationError): + VertexAIChatCompletion(env_file_path="fake_env_file_path.env") + + +@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_PROJECT_ID"]], indirect=True) +def test_vertex_ai_chat_completion_init_with_empty_project_id(vertex_ai_unit_test_env) -> None: + """Test initialization of VertexAIChatCompletion with an empty project id""" + with pytest.raises(ServiceInitializationError): + VertexAIChatCompletion(env_file_path="fake_env_file_path.env") + + +def test_prompt_execution_settings_class(vertex_ai_unit_test_env) -> None: + vertex_ai_chat_completion = VertexAIChatCompletion() + assert vertex_ai_chat_completion.get_prompt_execution_settings_class() == VertexAIChatPromptExecutionSettings + + +# endregion init + + +# region chat completion +@pytest.mark.asyncio +@patch.object(GenerativeModel, 
"generate_content_async", new_callable=AsyncMock) +async def test_vertex_ai_chat_completion( + mock_vertex_ai_model_generate_content_async, + vertex_ai_unit_test_env, + chat_history: ChatHistory, + mock_vertex_ai_chat_completion_response, +) -> None: + """Test chat completion with VertexAIChatCompletion""" + settings = VertexAIChatPromptExecutionSettings() + + mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_chat_completion_response + + vertex_ai_chat_completion = VertexAIChatCompletion() + responses: list[ChatMessageContent] = await vertex_ai_chat_completion.get_chat_message_contents( + chat_history, settings + ) + + mock_vertex_ai_model_generate_content_async.assert_called_once_with( + contents=vertex_ai_chat_completion._prepare_chat_history_for_request(chat_history), + generation_config=settings.prepare_settings_dict(), + tools=None, + tool_config=None, + ) + assert len(responses) == 1 + assert responses[0].role == "assistant" + assert responses[0].content == mock_vertex_ai_chat_completion_response.candidates[0].content.parts[0].text + assert responses[0].finish_reason == FinishReason.STOP + assert "usage" in responses[0].metadata + assert "prompt_feedback" in responses[0].metadata + assert responses[0].inner_content == mock_vertex_ai_chat_completion_response + + +@pytest.mark.asyncio +async def test_vertex_ai_chat_completion_with_function_choice_behavior_fail_verification( + chat_history: ChatHistory, + vertex_ai_unit_test_env, +) -> None: + """Test completion of VertexAIChatCompletion with function choice behavior expect verification failure""" + + # Missing kernel + with pytest.raises(ServiceInvalidExecutionSettingsError): + settings = VertexAIChatPromptExecutionSettings( + function_choice_behavior=FunctionChoiceBehavior.Auto(), + ) + + vertex_ai_chat_completion = VertexAIChatCompletion() + + await vertex_ai_chat_completion.get_chat_message_contents( + chat_history=chat_history, + settings=settings, + ) + + +@pytest.mark.asyncio +@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock) +async def test_vertex_ai_chat_completion_with_function_choice_behavior( + mock_vertex_ai_model_generate_content_async, + vertex_ai_unit_test_env, + kernel, + chat_history: ChatHistory, + mock_vertex_ai_chat_completion_response_with_tool_call, +) -> None: + """Test completion of VertexAIChatCompletion with function choice behavior""" + mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_chat_completion_response_with_tool_call + + settings = VertexAIChatPromptExecutionSettings( + function_choice_behavior=FunctionChoiceBehavior.Auto(), + ) + settings.function_choice_behavior.maximum_auto_invoke_attempts = 1 + + vertex_ai_chat_completion = VertexAIChatCompletion() + + responses = await vertex_ai_chat_completion.get_chat_message_contents( + chat_history=chat_history, + settings=settings, + kernel=kernel, + ) + + # The function should be called twice: + # One for the tool call and one for the last completion + # after the maximum_auto_invoke_attempts is reached + assert mock_vertex_ai_model_generate_content_async.call_count == 2 + assert len(responses) == 1 + assert responses[0].role == "assistant" + # Google doesn't return STOP as the finish reason for tool calls + assert responses[0].finish_reason == FinishReason.STOP + + +@pytest.mark.asyncio +@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock) +async def test_vertex_ai_chat_completion_with_function_choice_behavior_no_tool_call( + 
+    mock_vertex_ai_model_generate_content_async,
+    vertex_ai_unit_test_env,
+    kernel,
+    chat_history: ChatHistory,
+    mock_vertex_ai_chat_completion_response,
+) -> None:
+    """Test completion of VertexAIChatCompletion with function choice behavior but no tool call returned"""
+    mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_chat_completion_response
+
+    settings = VertexAIChatPromptExecutionSettings(
+        function_choice_behavior=FunctionChoiceBehavior.Auto(),
+    )
+    settings.function_choice_behavior.maximum_auto_invoke_attempts = 1
+
+    vertex_ai_chat_completion = VertexAIChatCompletion()
+
+    responses = await vertex_ai_chat_completion.get_chat_message_contents(
+        chat_history=chat_history,
+        settings=settings,
+        kernel=kernel,
+    )
+
+    # Remove the latest message since the response from the model will be added to the chat history
+    # even when the model doesn't return a tool call
+    chat_history.remove_message(chat_history[-1])
+
+    mock_vertex_ai_model_generate_content_async.assert_awaited_once_with(
+        contents=vertex_ai_chat_completion._prepare_chat_history_for_request(chat_history),
+        generation_config=settings.prepare_settings_dict(),
+        tools=None,
+        tool_config=None,
+    )
+    assert len(responses) == 1
+    assert responses[0].role == "assistant"
+    assert responses[0].content == mock_vertex_ai_chat_completion_response.candidates[0].content.parts[0].text
+
+
+# endregion chat completion
+
+
+# region streaming chat completion
+@pytest.mark.asyncio
+@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock)
+async def test_vertex_ai_streaming_chat_completion(
+    mock_vertex_ai_model_generate_content_async,
+    vertex_ai_unit_test_env,
+    chat_history: ChatHistory,
+    mock_vertex_ai_streaming_chat_completion_response,
+) -> None:
+    """Test streaming chat completion with VertexAIChatCompletion"""
+    settings = VertexAIChatPromptExecutionSettings()
+
+    mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_streaming_chat_completion_response
+
+    vertex_ai_chat_completion = VertexAIChatCompletion()
+    async for messages in vertex_ai_chat_completion.get_streaming_chat_message_contents(chat_history, settings):
+        assert len(messages) == 1
+        assert messages[0].role == "assistant"
+        assert messages[0].finish_reason == FinishReason.STOP
+        assert "usage" in messages[0].metadata
+        assert "prompt_feedback" in messages[0].metadata
+
+    mock_vertex_ai_model_generate_content_async.assert_called_once_with(
+        contents=vertex_ai_chat_completion._prepare_chat_history_for_request(chat_history),
+        generation_config=settings.prepare_settings_dict(),
+        tools=None,
+        tool_config=None,
+        stream=True,
+    )
+
+
+@pytest.mark.asyncio
+async def test_vertex_ai_streaming_chat_completion_with_function_choice_behavior_fail_verification(
+    chat_history: ChatHistory,
+    vertex_ai_unit_test_env,
+) -> None:
+    """Test streaming chat completion of VertexAIChatCompletion with function choice
+    behavior expect verification failure"""
+
+    # Missing kernel
+    with pytest.raises(ServiceInvalidExecutionSettingsError):
+        settings = VertexAIChatPromptExecutionSettings(
+            function_choice_behavior=FunctionChoiceBehavior.Auto(),
+        )
+
+        vertex_ai_chat_completion = VertexAIChatCompletion()
+
+        async for _ in vertex_ai_chat_completion.get_streaming_chat_message_contents(
+            chat_history=chat_history,
+            settings=settings,
+        ):
+            pass
+
+
+@pytest.mark.asyncio
+@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock)
+async def test_vertex_ai_streaming_chat_completion_with_function_choice_behavior(
+    mock_vertex_ai_model_generate_content_async,
+    vertex_ai_unit_test_env,
+    kernel,
+    chat_history: ChatHistory,
+    mock_vertex_ai_streaming_chat_completion_response_with_tool_call,
+) -> None:
+    """Test streaming chat completion of VertexAIChatCompletion with function choice behavior"""
+    mock_vertex_ai_model_generate_content_async.return_value = (
+        mock_vertex_ai_streaming_chat_completion_response_with_tool_call
+    )
+
+    settings = VertexAIChatPromptExecutionSettings(
+        function_choice_behavior=FunctionChoiceBehavior.Auto(),
+    )
+    settings.function_choice_behavior.maximum_auto_invoke_attempts = 1
+
+    vertex_ai_chat_completion = VertexAIChatCompletion()
+
+    async for messages in vertex_ai_chat_completion.get_streaming_chat_message_contents(
+        chat_history,
+        settings,
+        kernel=kernel,
+    ):
+        assert len(messages) == 1
+        assert messages[0].role == "assistant"
+        assert messages[0].content == ""
+        # Google doesn't return STOP as the finish reason for real tool calls,
+        # but the mocked response above explicitly sets STOP
+        assert messages[0].finish_reason == FinishReason.STOP
+
+    # Streaming completion with tool call does not invoke the model
+    # after maximum_auto_invoke_attempts is reached
+    assert mock_vertex_ai_model_generate_content_async.call_count == 1
+
+
+@pytest.mark.asyncio
+@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock)
+async def test_vertex_ai_streaming_chat_completion_with_function_choice_behavior_no_tool_call(
+    mock_vertex_ai_model_generate_content_async,
+    vertex_ai_unit_test_env,
+    kernel,
+    chat_history: ChatHistory,
+    mock_vertex_ai_streaming_chat_completion_response,
+) -> None:
+    """Test streaming chat completion of VertexAIChatCompletion with function choice
+    behavior but no tool call returned"""
+    mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_streaming_chat_completion_response
+
+    settings = VertexAIChatPromptExecutionSettings(
+        function_choice_behavior=FunctionChoiceBehavior.Auto(),
+    )
+    settings.function_choice_behavior.maximum_auto_invoke_attempts = 1
+
+    vertex_ai_chat_completion = VertexAIChatCompletion()
+
+    async for messages in vertex_ai_chat_completion.get_streaming_chat_message_contents(
+        chat_history=chat_history,
+        settings=settings,
+        kernel=kernel,
+    ):
+        assert len(messages) == 1
+        assert messages[0].role == "assistant"
+
+    mock_vertex_ai_model_generate_content_async.assert_awaited_once_with(
+        contents=vertex_ai_chat_completion._prepare_chat_history_for_request(chat_history),
+        generation_config=settings.prepare_settings_dict(),
+        tools=None,
+        tool_config=None,
+        stream=True,
+    )
+
+
+# endregion streaming chat completion
+
+
+def test_vertex_ai_chat_completion_parse_chat_history_correctly(vertex_ai_unit_test_env) -> None:
+    """Test _prepare_chat_history_for_request method"""
+    vertex_ai_chat_completion = VertexAIChatCompletion()
+
+    chat_history = ChatHistory()
+    chat_history.add_system_message("test_system_message")
+    chat_history.add_user_message("test_user_message")
+    chat_history.add_assistant_message("test_assistant_message")
+
+    parsed_chat_history = vertex_ai_chat_completion._prepare_chat_history_for_request(chat_history)
+
+    assert isinstance(parsed_chat_history, list)
+    # System message should be ignored
+    assert len(parsed_chat_history) == 2
+    assert all(isinstance(message, Content) for message in parsed_chat_history)
+    assert parsed_chat_history[0].role == "user"
+    assert parsed_chat_history[0].parts[0].text == "test_user_message"
+    assert parsed_chat_history[1].role == "model"
+    assert parsed_chat_history[1].parts[0].text == "test_assistant_message"
diff --git a/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_completion.py b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_completion.py
new file mode 100644
index 000000000000..0ad9c61f3444
--- /dev/null
+++ b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_completion.py
@@ -0,0 +1,128 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+
+from unittest.mock import AsyncMock, patch
+
+import pytest
+from vertexai.generative_models import GenerativeModel
+
+from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_completion import VertexAITextCompletion
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import (
+    VertexAITextPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings
+from semantic_kernel.contents.text_content import TextContent
+from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError
+
+
+# region init
+def test_vertex_ai_text_completion_init(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextCompletion"""
+    model_id = vertex_ai_unit_test_env["VERTEX_AI_GEMINI_MODEL_ID"]
+    project_id = vertex_ai_unit_test_env["VERTEX_AI_PROJECT_ID"]
+    vertex_ai_text_completion = VertexAITextCompletion()
+
+    assert vertex_ai_text_completion.ai_model_id == model_id
+    assert vertex_ai_text_completion.service_id == model_id
+
+    assert isinstance(vertex_ai_text_completion.service_settings, VertexAISettings)
+    assert vertex_ai_text_completion.service_settings.gemini_model_id == model_id
+    assert vertex_ai_text_completion.service_settings.project_id == project_id
+
+
+def test_vertex_ai_text_completion_init_with_service_id(vertex_ai_unit_test_env, service_id) -> None:
+    """Test initialization of VertexAITextCompletion with a service id that is not the model id"""
+    vertex_ai_text_completion = VertexAITextCompletion(service_id=service_id)
+
+    assert vertex_ai_text_completion.service_id == service_id
+
+
+def test_vertex_ai_text_completion_init_with_model_id_in_argument(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextCompletion with model id in argument"""
+    vertex_ai_text_completion = VertexAITextCompletion(gemini_model_id="custom_model_id")
+
+    assert vertex_ai_text_completion.ai_model_id == "custom_model_id"
+    assert vertex_ai_text_completion.service_id == "custom_model_id"
+
+
+@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_GEMINI_MODEL_ID"]], indirect=True)
+def test_vertex_ai_text_completion_init_with_empty_model_id(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextCompletion with an empty model id"""
+    with pytest.raises(ServiceInitializationError):
+        VertexAITextCompletion(env_file_path="fake_env_file_path.env")
+
+
+@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_PROJECT_ID"]], indirect=True)
+def test_vertex_ai_text_completion_init_with_empty_project_id(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextCompletion with an empty project id"""
+    with pytest.raises(ServiceInitializationError):
+        VertexAITextCompletion(env_file_path="fake_env_file_path.env")
+
+
+def test_prompt_execution_settings_class(vertex_ai_unit_test_env) -> None:
+    vertex_ai_text_completion = VertexAITextCompletion()
+    assert vertex_ai_text_completion.get_prompt_execution_settings_class() ==
VertexAITextPromptExecutionSettings + + +# endregion init + + +# region text completion +@pytest.mark.asyncio +@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock) +async def test_vertex_ai_text_completion( + mock_vertex_ai_model_generate_content_async, + vertex_ai_unit_test_env, + prompt: str, + mock_vertex_ai_text_completion_response, +) -> None: + """Test text completion with VertexAITextCompletion""" + settings = VertexAITextPromptExecutionSettings() + + mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_text_completion_response + + vertex_ai_text_completion = VertexAITextCompletion() + responses: list[TextContent] = await vertex_ai_text_completion.get_text_contents(prompt, settings) + + mock_vertex_ai_model_generate_content_async.assert_called_once_with( + contents=prompt, + generation_config=settings.prepare_settings_dict(), + ) + assert len(responses) == 1 + assert responses[0].text == mock_vertex_ai_text_completion_response.candidates[0].content.parts[0].text + assert "usage" in responses[0].metadata + assert "prompt_feedback" in responses[0].metadata + assert responses[0].inner_content == mock_vertex_ai_text_completion_response + + +# endregion text completion + + +# region streaming text completion +@pytest.mark.asyncio +@patch.object(GenerativeModel, "generate_content_async", new_callable=AsyncMock) +async def test_vertex_ai_streaming_text_completion( + mock_vertex_ai_model_generate_content_async, + vertex_ai_unit_test_env, + prompt: str, + mock_vertex_ai_streaming_text_completion_response, +) -> None: + """Test streaming text completion with VertexAITextCompletion""" + settings = VertexAITextPromptExecutionSettings() + + mock_vertex_ai_model_generate_content_async.return_value = mock_vertex_ai_streaming_text_completion_response + + vertex_ai_text_completion = VertexAITextCompletion() + async for chunks in vertex_ai_text_completion.get_streaming_text_contents(prompt, settings): + assert len(chunks) == 1 + assert "usage" in chunks[0].metadata + assert "prompt_feedback" in chunks[0].metadata + + mock_vertex_ai_model_generate_content_async.assert_called_once_with( + contents=prompt, + generation_config=settings.prepare_settings_dict(), + stream=True, + ) + + +# endregion streaming text completion diff --git a/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_embedding.py b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_embedding.py new file mode 100644 index 000000000000..a11ad1c5bb59 --- /dev/null +++ b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_text_embedding.py @@ -0,0 +1,165 @@ +# Copyright (c) Microsoft. All rights reserved. 
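
The embedding tests below import a MockTextEmbeddingModel from the package's conftest.py, which is not part of this diff. A minimal stand-in that would satisfy these tests might look as follows; this is a hypothetical sketch, and the real fixture may differ in detail:

from vertexai.language_models import TextEmbedding, TextEmbeddingModel

class MockTextEmbeddingModel(TextEmbeddingModel):
    def __init__(self) -> None:
        # Skip TextEmbeddingModel.__init__, which would try to reach Vertex AI.
        pass

    async def get_embeddings_async(self, texts, **kwargs) -> list[TextEmbedding]:
        # The tests patch this method with an AsyncMock, so a stub default is enough.
        return []

The tests then patch TextEmbeddingModel.from_pretrained to return an instance of this class, so no network call is ever made.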
+
+
+from unittest.mock import AsyncMock, patch
+
+import pytest
+from numpy import array, ndarray
+from vertexai.language_models import TextEmbedding, TextEmbeddingModel
+
+from semantic_kernel.connectors.ai.google.vertex_ai.services.vertex_ai_text_embedding import VertexAITextEmbedding
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_prompt_execution_settings import (
+    VertexAIEmbeddingPromptExecutionSettings,
+)
+from semantic_kernel.connectors.ai.google.vertex_ai.vertex_ai_settings import VertexAISettings
+from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError
+from tests.unit.connectors.google.vertex_ai.conftest import MockTextEmbeddingModel
+
+
+# region init
+def test_vertex_ai_text_embedding_init(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextEmbedding"""
+    model_id = vertex_ai_unit_test_env["VERTEX_AI_EMBEDDING_MODEL_ID"]
+    project_id = vertex_ai_unit_test_env["VERTEX_AI_PROJECT_ID"]
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+
+    assert vertex_ai_text_embedding.ai_model_id == model_id
+    assert vertex_ai_text_embedding.service_id == model_id
+
+    assert isinstance(vertex_ai_text_embedding.service_settings, VertexAISettings)
+    assert vertex_ai_text_embedding.service_settings.embedding_model_id == model_id
+    assert vertex_ai_text_embedding.service_settings.project_id == project_id
+
+
+def test_vertex_ai_text_embedding_init_with_service_id(vertex_ai_unit_test_env, service_id) -> None:
+    """Test initialization of VertexAITextEmbedding with a service id that is not the model id"""
+    vertex_ai_text_embedding = VertexAITextEmbedding(service_id=service_id)
+
+    assert vertex_ai_text_embedding.service_id == service_id
+
+
+def test_vertex_ai_text_embedding_init_with_model_id_in_argument(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextEmbedding with model id in argument"""
+    vertex_ai_text_embedding = VertexAITextEmbedding(embedding_model_id="custom_model_id")
+
+    assert vertex_ai_text_embedding.ai_model_id == "custom_model_id"
+    assert vertex_ai_text_embedding.service_id == "custom_model_id"
+
+
+@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_EMBEDDING_MODEL_ID"]], indirect=True)
+def test_vertex_ai_text_embedding_init_with_empty_model_id(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextEmbedding with an empty model id"""
+    with pytest.raises(ServiceInitializationError):
+        VertexAITextEmbedding(env_file_path="fake_env_file_path.env")
+
+
+@pytest.mark.parametrize("exclude_list", [["VERTEX_AI_PROJECT_ID"]], indirect=True)
+def test_vertex_ai_text_embedding_init_with_empty_project_id(vertex_ai_unit_test_env) -> None:
+    """Test initialization of VertexAITextEmbedding with an empty project id"""
+    with pytest.raises(ServiceInitializationError):
+        VertexAITextEmbedding(env_file_path="fake_env_file_path.env")
+
+
+def test_prompt_execution_settings_class(vertex_ai_unit_test_env) -> None:
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+    assert vertex_ai_text_embedding.get_prompt_execution_settings_class() == VertexAIEmbeddingPromptExecutionSettings
+
+
+# endregion init
+
+
+@pytest.mark.asyncio
+@patch.object(TextEmbeddingModel, "from_pretrained")
+@patch.object(MockTextEmbeddingModel, "get_embeddings_async", new_callable=AsyncMock)
+async def test_embedding(mock_embedding_client, mock_from_pretrained, vertex_ai_unit_test_env, prompt):
+    """Test that the service initializes and generates embeddings correctly."""
+    mock_from_pretrained.return_value =
MockTextEmbeddingModel()
+    mock_embedding_client.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3])]
+
+    settings = VertexAIEmbeddingPromptExecutionSettings()
+
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+    response: ndarray = await vertex_ai_text_embedding.generate_embeddings(
+        [prompt],
+        settings=settings,
+    )
+
+    assert len(response) == 1
+    assert (response == array([0.1, 0.2, 0.3])).all()
+    mock_embedding_client.assert_called_once_with([prompt])
+
+
+@pytest.mark.asyncio
+@patch.object(TextEmbeddingModel, "from_pretrained")
+@patch.object(MockTextEmbeddingModel, "get_embeddings_async", new_callable=AsyncMock)
+async def test_embedding_with_settings(mock_embedding_client, mock_from_pretrained, vertex_ai_unit_test_env, prompt):
+    """Test that the service generates embeddings correctly with custom settings."""
+    mock_from_pretrained.return_value = MockTextEmbeddingModel()
+    mock_embedding_client.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3])]
+
+    settings = VertexAIEmbeddingPromptExecutionSettings()
+    settings.output_dimensionality = 3
+    settings.auto_truncate = True
+
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+    response: ndarray = await vertex_ai_text_embedding.generate_embeddings(
+        [prompt],
+        settings=settings,
+    )
+
+    assert len(response) == 1
+    assert (response == array([0.1, 0.2, 0.3])).all()
+    mock_embedding_client.assert_called_once_with(
+        [prompt],
+        **settings.prepare_settings_dict(),
+    )
+
+
+@pytest.mark.asyncio
+@patch.object(TextEmbeddingModel, "from_pretrained")
+@patch.object(MockTextEmbeddingModel, "get_embeddings_async", new_callable=AsyncMock)
+async def test_embedding_without_settings(mock_embedding_client, mock_from_pretrained, vertex_ai_unit_test_env, prompt):
+    """Test that the service initializes and generates embeddings correctly without settings."""
+    mock_from_pretrained.return_value = MockTextEmbeddingModel()
+    mock_embedding_client.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3])]
+
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+    response: ndarray = await vertex_ai_text_embedding.generate_embeddings([prompt])
+
+    assert len(response) == 1
+    assert (response == array([0.1, 0.2, 0.3])).all()
+    mock_embedding_client.assert_called_once_with([prompt])
+
+
+@pytest.mark.asyncio
+@patch.object(TextEmbeddingModel, "from_pretrained")
+@patch.object(MockTextEmbeddingModel, "get_embeddings_async", new_callable=AsyncMock)
+async def test_embedding_list_input(mock_embedding_client, mock_from_pretrained, vertex_ai_unit_test_env, prompt):
+    """Test that the service initializes and generates embeddings correctly with a list of prompts."""
+    mock_from_pretrained.return_value = MockTextEmbeddingModel()
+    mock_embedding_client.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3]), TextEmbedding(values=[0.1, 0.2, 0.3])]
+
+    vertex_ai_text_embedding = VertexAITextEmbedding()
+    response: ndarray = await vertex_ai_text_embedding.generate_embeddings([prompt, prompt])
+
+    assert len(response) == 2
+    assert (response == array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])).all()
+    mock_embedding_client.assert_called_once_with([prompt, prompt])
+
+
+@pytest.mark.asyncio
+@patch.object(TextEmbeddingModel, "from_pretrained")
+@patch.object(MockTextEmbeddingModel, "get_embeddings_async", new_callable=AsyncMock)
+async def test_raw_embedding(mock_embedding_client, mock_from_pretrained, vertex_ai_unit_test_env, prompt):
+    """Test that the service generates raw embeddings correctly."""
+    mock_from_pretrained.return_value =
MockTextEmbeddingModel() + mock_embedding_client.return_value = [TextEmbedding(values=[0.1, 0.2, 0.3])] + + settings = VertexAIEmbeddingPromptExecutionSettings() + + vertex_ai_text_embedding = VertexAITextEmbedding() + response: ndarray = await vertex_ai_text_embedding.generate_raw_embeddings([prompt], settings) + + assert len(response) == 1 + assert response[0] == [0.1, 0.2, 0.3] + mock_embedding_client.assert_called_once_with([prompt]) diff --git a/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_utils.py b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_utils.py new file mode 100644 index 000000000000..e874262e69ef --- /dev/null +++ b/python/tests/unit/connectors/google/vertex_ai/services/test_vertex_ai_utils.py @@ -0,0 +1,111 @@ +# Copyright (c) Microsoft. All rights reserved. + +import pytest +from google.cloud.aiplatform_v1beta1.types.content import Candidate, Part + +from semantic_kernel.connectors.ai.google.vertex_ai.services.utils import ( + finish_reason_from_vertex_ai_to_semantic_kernel, + format_assistant_message, + format_user_message, +) +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents.utils.finish_reason import FinishReason +from semantic_kernel.exceptions.service_exceptions import ServiceInvalidRequestError + + +def test_finish_reason_from_vertex_ai_to_semantic_kernel(): + """Test finish_reason_from_vertex_ai_to_semantic_kernel.""" + assert finish_reason_from_vertex_ai_to_semantic_kernel(Candidate.FinishReason.STOP) == FinishReason.STOP + assert finish_reason_from_vertex_ai_to_semantic_kernel(Candidate.FinishReason.MAX_TOKENS) == FinishReason.LENGTH + assert finish_reason_from_vertex_ai_to_semantic_kernel(Candidate.FinishReason.SAFETY) == FinishReason.CONTENT_FILTER + assert finish_reason_from_vertex_ai_to_semantic_kernel(Candidate.FinishReason.OTHER) is None + + +def test_format_user_message(): + """Test format_user_message.""" + user_message = ChatMessageContent(role=AuthorRole.USER, content="User message") + formatted_user_message = format_user_message(user_message) + + assert len(formatted_user_message) == 1 + assert isinstance(formatted_user_message[0], Part) + assert formatted_user_message[0].text == "User message" + + # Test with an image content + image_content = ImageContent(data="image data", mime_type="image/png") + user_message = ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="Text content"), + image_content, + ], + ) + formatted_user_message = format_user_message(user_message) + + assert len(formatted_user_message) == 2 + assert isinstance(formatted_user_message[0], Part) + assert formatted_user_message[0].text == "Text content" + assert isinstance(formatted_user_message[1], Part) + assert formatted_user_message[1].inline_data.mime_type == "image/png" + assert formatted_user_message[1].inline_data.data == image_content.data + + +def test_format_user_message_throws_with_unsupported_items() -> None: + """Test format_user_message with unsupported items.""" + # Test with unsupported items, any item other than TextContent and ImageContent should raise an error + user_message = 
ChatMessageContent( + role=AuthorRole.USER, + items=[ + FunctionCallContent(), + ], + ) + with pytest.raises(ServiceInvalidRequestError): + format_user_message(user_message) + + # Test with an ImageContent that has no data_uri + user_message = ChatMessageContent( + role=AuthorRole.USER, + items=[ + ImageContent(data_uri=""), + ], + ) + with pytest.raises(ServiceInvalidRequestError): + format_user_message(user_message) + + +def test_format_assistant_message() -> None: + assistant_message = ChatMessageContent( + role=AuthorRole.ASSISTANT, + items=[ + TextContent(text="test"), + FunctionCallContent(name="test_function", arguments={}), + ImageContent(data="image data", mime_type="image/png"), + ], + ) + + formatted_assistant_message = format_assistant_message(assistant_message) + assert isinstance(formatted_assistant_message, list) + assert len(formatted_assistant_message) == 3 + assert isinstance(formatted_assistant_message[0], Part) + assert formatted_assistant_message[0].text == "test" + assert isinstance(formatted_assistant_message[1], Part) + assert formatted_assistant_message[1].function_call.name == "test_function" + assert formatted_assistant_message[1].function_call.args == {} + assert isinstance(formatted_assistant_message[2], Part) + assert formatted_assistant_message[2].inline_data + + +def test_format_assistant_message_with_unsupported_items() -> None: + assistant_message = ChatMessageContent( + role=AuthorRole.ASSISTANT, + items=[ + FunctionResultContent(id="test_id", function_name="test_function"), + ], + ) + + with pytest.raises(ServiceInvalidRequestError): + format_assistant_message(assistant_message) diff --git a/python/tests/unit/connectors/memory/conftest.py b/python/tests/unit/connectors/memory/conftest.py new file mode 100644 index 000000000000..2a2f9265659e --- /dev/null +++ b/python/tests/unit/connectors/memory/conftest.py @@ -0,0 +1,68 @@ +# Copyright (c) Microsoft. All rights reserved. 
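
The mapping exercised by test_finish_reason_from_vertex_ai_to_semantic_kernel above is small enough to reconstruct from its assertions alone; the sketch below is illustrative and is not the connector's actual source:

from google.cloud.aiplatform_v1beta1.types.content import Candidate

from semantic_kernel.contents.utils.finish_reason import FinishReason

def finish_reason_from_vertex_ai_to_semantic_kernel(
    finish_reason: Candidate.FinishReason,
) -> FinishReason | None:
    # STOP, MAX_TOKENS and SAFETY have Semantic Kernel equivalents; anything
    # else (OTHER, RECITATION, ...) maps to None, as the test expects.
    if finish_reason == Candidate.FinishReason.STOP:
        return FinishReason.STOP
    if finish_reason == Candidate.FinishReason.MAX_TOKENS:
        return FinishReason.LENGTH
    if finish_reason == Candidate.FinishReason.SAFETY:
        return FinishReason.CONTENT_FILTER
    return None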
+ +from dataclasses import dataclass, field +from typing import Annotated +from uuid import uuid4 + +from pydantic import BaseModel +from pytest import fixture + +from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import ( + OpenAIEmbeddingPromptExecutionSettings, +) +from semantic_kernel.data.vector_store_model_decorator import vectorstoremodel +from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition +from semantic_kernel.data.vector_store_record_fields import ( + VectorStoreRecordDataField, + VectorStoreRecordKeyField, + VectorStoreRecordVectorField, +) + + +@fixture +def dataclass_vector_data_model() -> object: + @vectorstoremodel + @dataclass + class MyDataModel: + vector: Annotated[ + list[float] | None, + VectorStoreRecordVectorField( + embedding_settings={"default": OpenAIEmbeddingPromptExecutionSettings(dimensions=1536)}, + index_kind="hnsw", + dimensions=1536, + distance_function="cosine", + property_type="float", + ), + ] = None + other: str | None = None + id: Annotated[str, VectorStoreRecordKeyField()] = field(default_factory=lambda: str(uuid4())) + content: Annotated[ + str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector", property_type="str") + ] = "content1" + + return MyDataModel + + +@fixture +def data_model_definition() -> object: + return VectorStoreRecordDefinition( + fields={ + "id": VectorStoreRecordKeyField(), + "content": VectorStoreRecordDataField( + has_embedding=True, + embedding_property_name="vector", + ), + "vector": VectorStoreRecordVectorField(dimensions=3), + } + ) + + +@fixture +def data_model_type(): + @vectorstoremodel + class DataModelClass(BaseModel): + content: Annotated[str, VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector")] + vector: Annotated[list[float], VectorStoreRecordVectorField()] + id: Annotated[str, VectorStoreRecordKeyField()] + + return DataModelClass diff --git a/python/tests/unit/connectors/memory/test_azure_ai_search.py b/python/tests/unit/connectors/memory/test_azure_ai_search.py new file mode 100644 index 000000000000..8e475d1faa58 --- /dev/null +++ b/python/tests/unit/connectors/memory/test_azure_ai_search.py @@ -0,0 +1,312 @@ +# Copyright (c) Microsoft. All rights reserved. 
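
All of the memory-connector tests that follow feed records shaped by the data_model_definition fixture above. Spelled out, a conforming record is just the dict below (values made up for illustration, matching what the tests themselves use):

# A record that satisfies the data_model_definition fixture: a string key,
# a data field whose embedding is stored in "vector", and a 3-dimensional vector.
record = {
    "id": "id1",
    "content": "content",
    "vector": [1.0, 2.0, 3.0],
}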
+
+
+from unittest.mock import MagicMock, Mock, patch
+
+from pytest import fixture, mark, raises
+
+from semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_collection import AzureAISearchCollection
+from semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_settings import AzureAISearchSettings
+from semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_store import AzureAISearchStore
+from semantic_kernel.connectors.memory.azure_ai_search.utils import (
+    SearchClientWrapper,
+    SearchIndexClientWrapper,
+    data_model_definition_to_azure_ai_search_index,
+    get_search_index_client,
+)
+from semantic_kernel.exceptions.memory_connector_exceptions import (
+    MemoryConnectorException,
+    MemoryConnectorInitializationError,
+)
+from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError
+
+BASE_PATH_SEARCH_CLIENT = "azure.search.documents.aio.SearchClient"
+BASE_PATH_INDEX_CLIENT = "azure.search.documents.indexes.aio.SearchIndexClient"
+
+
+class AsyncIter:
+    def __init__(self, items):
+        self.items = items
+
+    async def __aiter__(self):
+        for item in self.items:
+            yield item
+
+
+@fixture
+def vector_store(azure_ai_search_unit_test_env):
+    """Fixture to instantiate AzureAISearchStore with basic configuration."""
+    return AzureAISearchStore()
+
+
+@fixture
+def mock_create_collection():
+    """Fixture to patch 'SearchIndexClient' and its 'create_index' method."""
+    with patch(f"{BASE_PATH_INDEX_CLIENT}.create_index") as mock_create_index:
+        yield mock_create_index
+
+
+@fixture
+def mock_delete_collection():
+    """Fixture to patch 'SearchIndexClient' and its 'delete_index' method."""
+    with patch(f"{BASE_PATH_INDEX_CLIENT}.delete_index") as mock_delete_index:
+        yield mock_delete_index
+
+
+@fixture
+def mock_list_collection_names():
+    """Fixture to patch 'SearchIndexClient' and its 'list_index_names' method."""
+    with patch(f"{BASE_PATH_INDEX_CLIENT}.list_index_names") as mock_list_index_names:
+        # Set up the mock to return an async iterator of index names when called
+        mock_list_index_names.return_value = AsyncIter(["test"])
+        yield mock_list_index_names
+
+
+@fixture
+def mock_upsert():
+    with patch(f"{BASE_PATH_SEARCH_CLIENT}.merge_or_upload_documents") as mock_merge_or_upload_documents:
+        from azure.search.documents.models import IndexingResult
+
+        result = MagicMock(spec=IndexingResult)
+        result.key = "id1"
+        mock_merge_or_upload_documents.return_value = [result]
+        yield mock_merge_or_upload_documents
+
+
+@fixture
+def mock_get():
+    with patch(f"{BASE_PATH_SEARCH_CLIENT}.get_document") as mock_get_document:
+        mock_get_document.return_value = {"id": "id1", "content": "content", "vector": [1.0, 2.0, 3.0]}
+        yield mock_get_document
+
+
+@fixture
+def mock_delete():
+    with patch(f"{BASE_PATH_SEARCH_CLIENT}.delete_documents") as mock_delete_documents:
+        yield mock_delete_documents
+
+
+@fixture
+def collection(azure_ai_search_unit_test_env, data_model_definition):
+    return AzureAISearchCollection(data_model_type=dict, data_model_definition=data_model_definition)
+
+
+def test_init(azure_ai_search_unit_test_env, data_model_definition):
+    collection = AzureAISearchCollection(data_model_type=dict, data_model_definition=data_model_definition)
+    assert collection is not None
+    assert collection.data_model_type is dict
+    assert collection.data_model_definition == data_model_definition
+    assert collection.collection_name == "test-index-name"
+    assert collection.search_index_client is not None
+    assert collection.search_client is not
None + + +def test_init_with_type(azure_ai_search_unit_test_env, data_model_type): + collection = AzureAISearchCollection(data_model_type=data_model_type) + assert collection is not None + assert collection.data_model_type is data_model_type + assert collection.collection_name == "test-index-name" + assert collection.search_index_client is not None + assert collection.search_client is not None + + +@mark.parametrize("exclude_list", [["AZURE_AI_SEARCH_ENDPOINT"]], indirect=True) +def test_init_endpoint_fail(azure_ai_search_unit_test_env, data_model_definition): + with raises(MemoryConnectorInitializationError): + AzureAISearchCollection( + data_model_type=dict, data_model_definition=data_model_definition, env_file_path="test.env" + ) + + +@mark.parametrize("exclude_list", [["AZURE_AI_SEARCH_INDEX_NAME"]], indirect=True) +def test_init_index_fail(azure_ai_search_unit_test_env, data_model_definition): + with raises(MemoryConnectorInitializationError): + AzureAISearchCollection( + data_model_type=dict, data_model_definition=data_model_definition, env_file_path="test.env" + ) + + +def test_init_with_clients(azure_ai_search_unit_test_env, data_model_definition): + search_index_client = MagicMock(spec=SearchIndexClientWrapper) + search_client = MagicMock(spec=SearchClientWrapper) + search_client._index_name = "test-index-name" + + collection = AzureAISearchCollection( + data_model_type=dict, + data_model_definition=data_model_definition, + search_index_client=search_index_client, + search_client=search_client, + ) + assert collection is not None + assert collection.data_model_type is dict + assert collection.data_model_definition == data_model_definition + assert collection.collection_name == "test-index-name" + assert collection.search_index_client == search_index_client + assert collection.search_client == search_client + + +def test_init_with_search_index_client(azure_ai_search_unit_test_env, data_model_definition): + search_index_client = MagicMock(spec=SearchIndexClientWrapper) + with patch( + "semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_collection.get_search_client" + ) as get_search_client: + search_client = MagicMock(spec=SearchClientWrapper) + get_search_client.return_value = search_client + + collection = AzureAISearchCollection( + data_model_type=dict, + data_model_definition=data_model_definition, + collection_name="test", + search_index_client=search_index_client, + ) + assert collection is not None + assert collection.data_model_type is dict + assert collection.data_model_definition == data_model_definition + assert collection.collection_name == "test" + assert collection.search_index_client == search_index_client + assert collection.search_client == search_client + + +def test_init_with_search_index_client_fail(azure_ai_search_unit_test_env, data_model_definition): + search_index_client = MagicMock(spec=SearchIndexClientWrapper) + with raises(MemoryConnectorInitializationError, match="Collection name is required."): + AzureAISearchCollection( + data_model_type=dict, + data_model_definition=data_model_definition, + search_index_client=search_index_client, + ) + + +def test_init_with_clients_fail(azure_ai_search_unit_test_env, data_model_definition): + search_index_client = MagicMock(spec=SearchIndexClientWrapper) + search_client = MagicMock(spec=SearchClientWrapper) + search_client._index_name = "test-index-name" + + with raises( + MemoryConnectorInitializationError, match="Search client and search index client have different index names." 
+ ): + AzureAISearchCollection( + data_model_type=dict, + data_model_definition=data_model_definition, + collection_name="test", + search_index_client=search_index_client, + search_client=search_client, + ) + + +@mark.asyncio +async def test_upsert(collection, mock_upsert): + ids = await collection._inner_upsert({"id": "id1", "name": "test"}) + assert ids[0] == "id1" + + ids = await collection.upsert(record={"id": "id1", "content": "content", "vector": [1.0, 2.0, 3.0]}) + assert ids == "id1" + + +@mark.asyncio +async def test_get(collection, mock_get): + records = await collection._inner_get(["id1"]) + assert records is not None + + records = await collection.get("id1") + assert records is not None + + +@mark.asyncio +async def test_delete(collection, mock_delete): + await collection._inner_delete(["id1"]) + + +@mark.asyncio +async def test_does_collection_exist(collection, mock_list_collection_names): + await collection.does_collection_exist() + + +@mark.asyncio +async def test_delete_collection(collection, mock_delete_collection): + await collection.delete_collection() + + +@mark.asyncio +async def test_create_index_from_index(collection, mock_create_collection): + from azure.search.documents.indexes.models import SearchIndex + + index = MagicMock(spec=SearchIndex) + await collection.create_collection(index=index) + + +@mark.asyncio +async def test_create_index_from_definition(collection, mock_create_collection): + from azure.search.documents.indexes.models import SearchIndex + + with patch( + "semantic_kernel.connectors.memory.azure_ai_search.azure_ai_search_collection.data_model_definition_to_azure_ai_search_index", + return_value=MagicMock(spec=SearchIndex), + ): + await collection.create_collection() + + +@mark.asyncio +async def test_create_index_from_index_fail(collection, mock_create_collection): + index = Mock() + with raises(MemoryConnectorException): + await collection.create_collection(index=index) + + +def test_data_model_definition_to_azure_ai_search_index(data_model_definition): + index = data_model_definition_to_azure_ai_search_index("test", data_model_definition) + assert index is not None + assert index.name == "test" + assert len(index.fields) == 3 + + +@mark.asyncio +@mark.parametrize("exclude_list", [["AZURE_AI_SEARCH_ENDPOINT"]], indirect=True) +async def test_vector_store_fail(azure_ai_search_unit_test_env): + with raises(MemoryConnectorInitializationError): + AzureAISearchStore(env_file_path="test.env") + + +@mark.asyncio +async def test_vector_store_list_collection_names(vector_store, mock_list_collection_names): + assert vector_store.search_index_client is not None + collection_names = await vector_store.list_collection_names() + assert collection_names == ["test"] + mock_list_collection_names.assert_called_once() + + +def test_get_collection(vector_store, data_model_definition): + collection = vector_store.get_collection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + assert collection is not None + assert collection.collection_name == "test" + assert collection.search_index_client == vector_store.search_index_client + assert collection.search_client is not None + assert collection.search_client._endpoint == vector_store.search_index_client._endpoint + assert vector_store.vector_record_collections["test"] == collection + + +@mark.parametrize("exclude_list", [["AZURE_AI_SEARCH_API_KEY"]], indirect=True) +def test_get_search_index_client(azure_ai_search_unit_test_env): + from azure.core.credentials import 
AzureKeyCredential, TokenCredential + + settings = AzureAISearchSettings.create(**azure_ai_search_unit_test_env, env_file_path="test.env") + + azure_credential = MagicMock(spec=AzureKeyCredential) + client = get_search_index_client(settings, azure_credential=azure_credential) + assert client is not None + assert client._credential == azure_credential + + token_credential = MagicMock(spec=TokenCredential) + client2 = get_search_index_client( + settings, + token_credential=token_credential, + ) + assert client2 is not None + assert client2._credential == token_credential + + with raises(ServiceInitializationError): + get_search_index_client(settings) diff --git a/python/tests/unit/connectors/memory/test_qdrant.py b/python/tests/unit/connectors/memory/test_qdrant.py new file mode 100644 index 000000000000..294fe729cdd4 --- /dev/null +++ b/python/tests/unit/connectors/memory/test_qdrant.py @@ -0,0 +1,271 @@ +# Copyright (c) Microsoft. All rights reserved. + +from unittest.mock import MagicMock, patch + +from pytest import fixture, mark, raises +from qdrant_client.async_qdrant_client import AsyncQdrantClient +from qdrant_client.models import Datatype, Distance, VectorParams + +from semantic_kernel.connectors.memory.qdrant.qdrant_collection import QdrantCollection +from semantic_kernel.connectors.memory.qdrant.qdrant_store import QdrantStore +from semantic_kernel.data.vector_store_record_fields import VectorStoreRecordVectorField +from semantic_kernel.exceptions.memory_connector_exceptions import ( + MemoryConnectorException, + MemoryConnectorInitializationError, + VectorStoreModelValidationError, +) + +BASE_PATH = "qdrant_client.async_qdrant_client.AsyncQdrantClient" + + +@fixture +def vector_store(qdrant_unit_test_env): + return QdrantStore(env_file_path="test.env") + + +@fixture +def collection(qdrant_unit_test_env, data_model_definition): + return QdrantCollection( + data_model_type=dict, + collection_name="test", + data_model_definition=data_model_definition, + env_file_path="test.env", + ) + + +@fixture +def collection_without_named_vectors(qdrant_unit_test_env, data_model_definition): + return QdrantCollection( + data_model_type=dict, + collection_name="test", + data_model_definition=data_model_definition, + named_vectors=False, + env_file_path="test.env", + ) + + +@fixture(autouse=True) +def mock_list_collection_names(): + with patch(f"{BASE_PATH}.get_collections") as mock_get_collections: + from qdrant_client.conversions.common_types import CollectionsResponse + from qdrant_client.http.models import CollectionDescription + + response = MagicMock(spec=CollectionsResponse) + response.collections = [CollectionDescription(name="test")] + mock_get_collections.return_value = response + yield mock_get_collections + + +@fixture(autouse=True) +def mock_does_collection_exist(): + with patch(f"{BASE_PATH}.collection_exists") as mock_collection_exists: + mock_collection_exists.return_value = True + yield mock_collection_exists + + +@fixture(autouse=True) +def mock_create_collection(): + with patch(f"{BASE_PATH}.create_collection") as mock_recreate_collection: + yield mock_recreate_collection + + +@fixture(autouse=True) +def mock_delete_collection(): + with patch(f"{BASE_PATH}.delete_collection") as mock_delete_collection: + mock_delete_collection.return_value = True + yield mock_delete_collection + + +@fixture(autouse=True) +def mock_upsert(): + with patch(f"{BASE_PATH}.upsert") as mock_upsert: + from qdrant_client.conversions.common_types import UpdateResult + + result = 
MagicMock(spec=UpdateResult) + result.status = "completed" + mock_upsert.return_value = result + yield mock_upsert + + +@fixture(autouse=True) +def mock_get(collection): + with patch(f"{BASE_PATH}.retrieve") as mock_retrieve: + from qdrant_client.http.models import Record + + if collection.named_vectors: + mock_retrieve.return_value = [ + Record(id="id1", payload={"content": "content"}, vector={"vector": [1.0, 2.0, 3.0]}) + ] + else: + mock_retrieve.return_value = [Record(id="id1", payload={"content": "content"}, vector=[1.0, 2.0, 3.0])] + yield mock_retrieve + + +@fixture(autouse=True) +def mock_delete(): + with patch(f"{BASE_PATH}.delete") as mock_delete: + yield mock_delete + + +def test_vector_store_defaults(vector_store): + assert vector_store.qdrant_client is not None + assert vector_store.qdrant_client._client.rest_uri == "http://localhost:6333" + + +def test_vector_store_with_client(): + qdrant_store = QdrantStore(client=AsyncQdrantClient()) + assert qdrant_store.qdrant_client is not None + assert qdrant_store.qdrant_client._client.rest_uri == "http://localhost:6333" + + +@mark.parametrize("exclude_list", [["QDRANT_LOCATION"]], indirect=True) +def test_vector_store_in_memory(qdrant_unit_test_env): + from qdrant_client.local.async_qdrant_local import AsyncQdrantLocal + + qdrant_store = QdrantStore(api_key="supersecretkey", env_file_path="test.env") + assert qdrant_store.qdrant_client is not None + assert isinstance(qdrant_store.qdrant_client._client, AsyncQdrantLocal) + assert qdrant_store.qdrant_client._client.location == ":memory:" + + +def test_vector_store_fail(): + with raises(MemoryConnectorInitializationError, match="Failed to create Qdrant settings."): + QdrantStore(location="localhost", url="localhost", env_file_path="test.env") + + with raises(MemoryConnectorInitializationError, match="Failed to create Qdrant client."): + QdrantStore(location="localhost", url="http://localhost", env_file_path="test.env") + + +@mark.asyncio +async def test_store_list_collection_names(vector_store): + collections = await vector_store.list_collection_names() + assert collections == ["test"] + + +def test_get_collection(vector_store, data_model_definition, qdrant_unit_test_env): + collection = vector_store.get_collection("test", data_model_type=dict, data_model_definition=data_model_definition) + assert collection.collection_name == "test" + assert collection.qdrant_client == vector_store.qdrant_client + assert collection.data_model_type is dict + assert collection.data_model_definition == data_model_definition + assert vector_store.vector_record_collections["test"] == collection + + +def test_collection_init(data_model_definition, qdrant_unit_test_env): + collection = QdrantCollection( + data_model_type=dict, + collection_name="test", + data_model_definition=data_model_definition, + env_file_path="test.env", + ) + assert collection.collection_name == "test" + assert collection.qdrant_client is not None + assert collection.data_model_type is dict + assert collection.data_model_definition == data_model_definition + assert collection.named_vectors + + +def test_collection_init_fail(data_model_definition): + with raises(MemoryConnectorInitializationError, match="Failed to create Qdrant settings."): + QdrantCollection( + data_model_type=dict, + collection_name="test", + data_model_definition=data_model_definition, + url="localhost", + env_file_path="test.env", + ) + with raises(MemoryConnectorInitializationError, match="Failed to create Qdrant client."): + QdrantCollection( + data_model_type=dict, 
+ collection_name="test", + data_model_definition=data_model_definition, + location="localhost", + url="http://localhost", + env_file_path="test.env", + ) + with raises( + VectorStoreModelValidationError, match="Only one vector field is allowed when not using named vectors." + ): + data_model_definition.fields["vector2"] = VectorStoreRecordVectorField(name="vector2", dimensions=3) + QdrantCollection( + data_model_type=dict, + collection_name="test", + data_model_definition=data_model_definition, + named_vectors=False, + env_file_path="test.env", + ) + + +@mark.asyncio +@mark.parametrize("collection_to_use", ["collection", "collection_without_named_vectors"]) +async def test_upsert(collection_to_use, request): + from qdrant_client.models import PointStruct + + collection = request.getfixturevalue(collection_to_use) + if collection.named_vectors: + record = PointStruct(id="id1", payload={"content": "content"}, vector={"vector": [1.0, 2.0, 3.0]}) + else: + record = PointStruct(id="id1", payload={"content": "content"}, vector=[1.0, 2.0, 3.0]) + ids = await collection._inner_upsert([record]) + assert ids[0] == "id1" + + ids = await collection.upsert(record={"id": "id1", "content": "content", "vector": [1.0, 2.0, 3.0]}) + assert ids == "id1" + + +@mark.asyncio +async def test_get(collection): + records = await collection._inner_get(["id1"]) + assert records is not None + + records = await collection.get("id1") + assert records is not None + + +@mark.asyncio +async def test_delete(collection): + await collection._inner_delete(["id1"]) + + +@mark.asyncio +async def test_does_collection_exist(collection): + await collection.does_collection_exist() + + +@mark.asyncio +async def test_delete_collection(collection): + await collection.delete_collection() + + +@mark.asyncio +@mark.parametrize( + "collection_to_use, results", + [ + ( + "collection", + { + "collection_name": "test", + "vectors_config": {"vector": VectorParams(size=3, distance=Distance.COSINE, datatype=Datatype.FLOAT32)}, + }, + ), + ( + "collection_without_named_vectors", + { + "collection_name": "test", + "vectors_config": VectorParams(size=3, distance=Distance.COSINE, datatype=Datatype.FLOAT32), + }, + ), + ], +) +async def test_create_index_with_named_vectors(collection_to_use, results, mock_create_collection, request): + await request.getfixturevalue(collection_to_use).create_collection() + mock_create_collection.assert_called_once_with(**results) + + +@mark.asyncio +@mark.parametrize("collection_to_use", ["collection", "collection_without_named_vectors"]) +async def test_create_index_fail(collection_to_use, request): + collection = request.getfixturevalue(collection_to_use) + collection.data_model_definition.fields["vector"].dimensions = None + with raises(MemoryConnectorException, match="Vector field must have dimensions."): + await collection.create_collection() diff --git a/python/tests/unit/connectors/memory/test_redis_store.py b/python/tests/unit/connectors/memory/test_redis_store.py new file mode 100644 index 000000000000..f233bbc73e9d --- /dev/null +++ b/python/tests/unit/connectors/memory/test_redis_store.py @@ -0,0 +1,341 @@ +# Copyright (c) Microsoft. All rights reserved. 
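
The named-vs-unnamed distinction the Qdrant tests above keep branching on comes down to the shape of the vector payload on a point. A minimal illustration, with the same made-up values the tests use:

from qdrant_client.models import PointStruct

# With named_vectors=True (the default in these tests), each vector is keyed by
# its field name, so several vector fields can coexist on one point.
named = PointStruct(id="id1", payload={"content": "content"}, vector={"vector": [1.0, 2.0, 3.0]})

# With named_vectors=False, the point carries a single anonymous vector, which
# is why the collection rejects data models with more than one vector field.
unnamed = PointStruct(id="id1", payload={"content": "content"}, vector=[1.0, 2.0, 3.0])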
+
+from unittest.mock import AsyncMock, patch
+
+import numpy as np
+from pytest import fixture, mark, raises
+from redis.asyncio.client import Redis
+
+from semantic_kernel.connectors.memory.redis.const import RedisCollectionTypes
+from semantic_kernel.connectors.memory.redis.redis_collection import RedisHashsetCollection, RedisJsonCollection
+from semantic_kernel.connectors.memory.redis.redis_store import RedisStore
+from semantic_kernel.exceptions.memory_connector_exceptions import (
+    MemoryConnectorException,
+    MemoryConnectorInitializationError,
+)
+
+BASE_PATH = "redis.asyncio.client.Redis"
+BASE_PATH_FT = "redis.commands.search.AsyncSearch"
+BASE_PATH_JSON = "redis.commands.json.commands.JSONCommands"
+
+
+@fixture
+def vector_store(redis_unit_test_env):
+    return RedisStore(env_file_path="test.env")
+
+
+@fixture
+def collection_hash(redis_unit_test_env, data_model_definition):
+    return RedisHashsetCollection(
+        data_model_type=dict,
+        collection_name="test",
+        data_model_definition=data_model_definition,
+        env_file_path="test.env",
+    )
+
+
+@fixture
+def collection_json(redis_unit_test_env, data_model_definition):
+    return RedisJsonCollection(
+        data_model_type=dict,
+        collection_name="test",
+        data_model_definition=data_model_definition,
+        env_file_path="test.env",
+    )
+
+
+@fixture
+def collection_with_prefix_hash(redis_unit_test_env, data_model_definition):
+    return RedisHashsetCollection(
+        data_model_type=dict,
+        collection_name="test",
+        data_model_definition=data_model_definition,
+        prefix_collection_name_to_key_names=True,
+        env_file_path="test.env",
+    )
+
+
+@fixture
+def collection_with_prefix_json(redis_unit_test_env, data_model_definition):
+    return RedisJsonCollection(
+        data_model_type=dict,
+        collection_name="test",
+        data_model_definition=data_model_definition,
+        prefix_collection_name_to_key_names=True,
+        env_file_path="test.env",
+    )
+
+
+@fixture(autouse=True)
+def mock_list_collection_names():
+    with patch(f"{BASE_PATH}.execute_command") as mock_get_collections:
+        mock_get_collections.return_value = [b"test"]
+        yield mock_get_collections
+
+
+@fixture(autouse=True)
+def mock_does_collection_exist():
+    with patch(f"{BASE_PATH_FT}.info", new=AsyncMock()) as mock_collection_exists:
+        mock_collection_exists.return_value = True
+        yield mock_collection_exists
+
+
+@fixture(autouse=True)
+def mock_create_collection():
+    with patch(f"{BASE_PATH_FT}.create_index", new=AsyncMock()) as mock_create_index:
+        yield mock_create_index
+
+
+@fixture(autouse=True)
+def mock_delete_collection():
+    with patch(f"{BASE_PATH_FT}.dropindex", new=AsyncMock()) as mock_delete_collection:
+        yield mock_delete_collection
+
+
+@fixture(autouse=True)
+def mock_upsert_hash():
+    with patch(f"{BASE_PATH}.hset", new=AsyncMock()) as mock_upsert:
+        yield mock_upsert
+
+
+@fixture(autouse=True)
+def mock_upsert_json():
+    with patch(f"{BASE_PATH_JSON}.set", new=AsyncMock()) as mock_upsert:
+        yield mock_upsert
+
+
+@fixture(autouse=True)
+def mock_get_hash():
+    with patch(f"{BASE_PATH}.hgetall", new=AsyncMock()) as mock_get:
+        mock_get.return_value = {
+            b"metadata": b'{"content": "content"}',
+            b"vector": np.array([1.0, 2.0, 3.0]).tobytes(),
+        }
+        yield mock_get
+
+
+@fixture(autouse=True)
+def mock_get_json():
+    with patch(f"{BASE_PATH_JSON}.mget", new=AsyncMock()) as mock_get:
+        mock_get.return_value = [
+            [
+                {
+                    "content": "content",
+                    "vector": [1.0, 2.0, 3.0],
+                }
+            ]
+        ]
+        yield mock_get
+
+
+@fixture(autouse=True)
+def mock_delete_hash():
+    with patch(f"{BASE_PATH}.delete", new=AsyncMock()) as mock_delete:
+        yield mock_delete
+
+
+@fixture(autouse=True)
+def mock_delete_json():
+    with patch(f"{BASE_PATH_JSON}.delete", new=AsyncMock()) as mock_delete:
+        yield mock_delete
+
+
+def test_vector_store_defaults(vector_store):
+    assert vector_store.redis_database is not None
+    assert vector_store.redis_database.connection_pool.connection_kwargs["host"] == "localhost"
+
+
+def test_vector_store_with_client(redis_unit_test_env):
+    vector_store = RedisStore(redis_database=Redis.from_url(redis_unit_test_env["REDIS_CONNECTION_STRING"]))
+    assert vector_store.redis_database is not None
+    assert vector_store.redis_database.connection_pool.connection_kwargs["host"] == "localhost"
+
+
+@mark.parametrize("exclude_list", [["REDIS_CONNECTION_STRING"]], indirect=True)
+def test_vector_store_fail(redis_unit_test_env):
+    with raises(MemoryConnectorInitializationError, match="Failed to create Redis settings."):
+        RedisStore(env_file_path="test.env")
+
+
+@mark.asyncio
+async def test_store_list_collection_names(vector_store, mock_list_collection_names):
+    collections = await vector_store.list_collection_names()
+    assert collections == ["test"]
+
+
+@mark.parametrize("type_", ["hashset", "json"])
+def test_get_collection(vector_store, data_model_definition, type_):
+    if type_ == "hashset":
+        collection = vector_store.get_collection(
+            "test",
+            data_model_type=dict,
+            data_model_definition=data_model_definition,
+            collection_type=RedisCollectionTypes.HASHSET,
+        )
+        assert isinstance(collection, RedisHashsetCollection)
+    else:
+        collection = vector_store.get_collection(
+            "test",
+            data_model_type=dict,
+            data_model_definition=data_model_definition,
+            collection_type=RedisCollectionTypes.JSON,
+        )
+        assert isinstance(collection, RedisJsonCollection)
+    assert collection.collection_name == "test"
+    assert collection.redis_database == vector_store.redis_database
+    assert collection.data_model_type is dict
+    assert collection.data_model_definition == data_model_definition
+    assert vector_store.vector_record_collections["test"] == collection
+
+
+@mark.parametrize("type_", ["hashset", "json"])
+def test_collection_init(redis_unit_test_env, data_model_definition, type_):
+    if type_ == "hashset":
+        collection = RedisHashsetCollection(
+            data_model_type=dict,
+            collection_name="test",
+            data_model_definition=data_model_definition,
+            env_file_path="test.env",
+        )
+    else:
+        collection = RedisJsonCollection(
+            data_model_type=dict,
+            collection_name="test",
+            data_model_definition=data_model_definition,
+            env_file_path="test.env",
+        )
+    assert collection.collection_name == "test"
+    assert collection.redis_database is not None
+    assert collection.data_model_type is dict
+    assert collection.data_model_definition == data_model_definition
+    assert collection.prefix_collection_name_to_key_names is False
+
+
+@mark.parametrize("type_", ["hashset", "json"])
+def test_init_with_type(redis_unit_test_env, data_model_type, type_):
+    if type_ == "hashset":
+        collection = RedisHashsetCollection(data_model_type=data_model_type, collection_name="test")
+    else:
+        collection = RedisJsonCollection(data_model_type=data_model_type, collection_name="test")
+    assert collection is not None
+    assert collection.data_model_type is data_model_type
+    assert collection.collection_name == "test"
+
+
+@mark.parametrize("exclude_list", [["REDIS_CONNECTION_STRING"]], indirect=True)
+def test_collection_fail(redis_unit_test_env, data_model_definition):
+    with raises(MemoryConnectorInitializationError, match="Failed to create
Redis settings."): + RedisHashsetCollection( + data_model_type=dict, + collection_name="test", + data_model_definition=data_model_definition, + env_file_path="test.env", + ) + with raises(MemoryConnectorInitializationError, match="Failed to create Redis settings."): + RedisJsonCollection( + data_model_type=dict, + collection_name="test", + data_model_definition=data_model_definition, + env_file_path="test.env", + ) + + +@mark.asyncio +@mark.parametrize("type_", ["hashset", "json"]) +async def test_upsert(collection_hash, collection_json, type_): + if type_ == "hashset": + record = { + "name": "id1", + "mapping": { + "metadata": {"content": "content"}, + "vector": [1.0, 2.0, 3.0], + }, + } + else: + record = { + "name": "id1", + "value": { + "content": "content", + "vector": [1.0, 2.0, 3.0], + }, + } + collection = collection_hash if type_ == "hashset" else collection_json + ids = await collection._inner_upsert([record]) + assert ids[0] == "id1" + + ids = await collection.upsert(record={"id": "id1", "content": "content", "vector": [1.0, 2.0, 3.0]}) + assert ids == "id1" + + +@mark.asyncio +async def test_upsert_with_prefix(collection_with_prefix_hash, collection_with_prefix_json): + ids = await collection_with_prefix_hash.upsert( + record={"id": "id1", "content": "content", "vector": [1.0, 2.0, 3.0]} + ) + assert ids == "id1" + ids = await collection_with_prefix_json.upsert( + record={"id": "id1", "content": "content", "vector": [1.0, 2.0, 3.0]} + ) + assert ids == "id1" + + +@mark.asyncio +@mark.parametrize("prefix", [True, False]) +@mark.parametrize("type_", ["hashset", "json"]) +async def test_get( + collection_hash, collection_json, collection_with_prefix_hash, collection_with_prefix_json, type_, prefix +): + if prefix: + collection = collection_with_prefix_hash if type_ == "hashset" else collection_with_prefix_json + else: + collection = collection_hash if type_ == "hashset" else collection_json + records = await collection._inner_get(["id1"]) + assert records is not None + + records = await collection.get("id1") + assert records is not None + + +@mark.asyncio +@mark.parametrize("type_", ["hashset", "json"]) +async def test_delete(collection_hash, collection_json, type_): + collection = collection_hash if type_ == "hashset" else collection_json + await collection._inner_delete(["id1"]) + + +@mark.asyncio +async def test_does_collection_exist(collection_hash, mock_does_collection_exist): + await collection_hash.does_collection_exist() + + +@mark.asyncio +async def test_does_collection_exist_false(collection_hash, mock_does_collection_exist): + mock_does_collection_exist.side_effect = Exception + exists = await collection_hash.does_collection_exist() + assert not exists + + +@mark.asyncio +async def test_delete_collection(collection_hash, mock_delete_collection): + await collection_hash.delete_collection() + await collection_hash.delete_collection() + + +@mark.asyncio +async def test_create_index(collection_hash, mock_create_collection): + await collection_hash.create_collection() + + +@mark.asyncio +async def test_create_index_manual(collection_hash, mock_create_collection): + from redis.commands.search.indexDefinition import IndexDefinition, IndexType + + fields = ["fields"] + index_definition = IndexDefinition(prefix="test:", index_type=IndexType.HASH) + await collection_hash.create_collection(index_definition=index_definition, fields=fields) + + +@mark.asyncio +async def test_create_index_fail(collection_hash, mock_create_collection): + with raises(MemoryConnectorException, 
match="Invalid index type supplied."): + await collection_hash.create_collection(index_definition="index_definition", fields="fields") diff --git a/python/tests/unit/connectors/memory/test_volatile.py b/python/tests/unit/connectors/memory/test_volatile.py new file mode 100644 index 000000000000..84ed8e86087c --- /dev/null +++ b/python/tests/unit/connectors/memory/test_volatile.py @@ -0,0 +1,76 @@ +# Copyright (c) Microsoft. All rights reserved. + +from pytest import fixture, mark + +from semantic_kernel.connectors.memory.volatile.volatile_collection import VolatileCollection +from semantic_kernel.connectors.memory.volatile.volatile_store import VolatileStore + + +@fixture +def collection(data_model_definition): + return VolatileCollection("test", dict, data_model_definition) + + +def test_store_init(): + store = VolatileStore() + assert store.vector_record_collections == {} + + +@mark.asyncio +async def test_store_get_collection(data_model_definition): + store = VolatileStore() + collection = store.get_collection("test", dict, data_model_definition) + assert collection.collection_name == "test" + assert collection.data_model_type is dict + assert collection.data_model_definition == data_model_definition + assert collection.inner_storage == {} + assert (await store.list_collection_names()) == ["test"] + + +@mark.asyncio +async def test_upsert(collection): + record = {"id": "testid", "content": "test content", "vector": [0.1, 0.2, 0.3, 0.4, 0.5]} + key = await collection.upsert(record) + assert key == "testid" + assert collection.inner_storage == {"testid": record} + + +@mark.asyncio +async def test_get(collection): + record = {"id": "testid", "content": "test content", "vector": [0.1, 0.2, 0.3, 0.4, 0.5]} + await collection.upsert(record) + result = await collection.get("testid") + assert result == record + + +@mark.asyncio +async def test_get_missing(collection): + result = await collection.get("testid") + assert result is None + + +@mark.asyncio +async def test_delete(collection): + record = {"id": "testid", "content": "test content", "vector": [0.1, 0.2, 0.3, 0.4, 0.5]} + await collection.upsert(record) + await collection.delete("testid") + assert collection.inner_storage == {} + + +@mark.asyncio +async def test_does_collection_exist(collection): + assert await collection.does_collection_exist() is True + + +@mark.asyncio +async def test_delete_collection(collection): + record = {"id": "testid", "content": "test content", "vector": [0.1, 0.2, 0.3, 0.4, 0.5]} + await collection.upsert(record) + assert collection.inner_storage == {"testid": record} + await collection.delete_collection() + assert collection.inner_storage == {} + + +@mark.asyncio +async def test_create_collection(collection): + await collection.create_collection() diff --git a/python/tests/unit/connectors/mistral_ai/services/test_mistralai_chat_completion.py b/python/tests/unit/connectors/mistral_ai/services/test_mistralai_chat_completion.py index 1fe0a868a9ff..5a4e18521c5d 100644 --- a/python/tests/unit/connectors/mistral_ai/services/test_mistralai_chat_completion.py +++ b/python/tests/unit/connectors/mistral_ai/services/test_mistralai_chat_completion.py @@ -130,20 +130,53 @@ def test_mistral_ai_chat_completion_init(mistralai_unit_test_env) -> None: mistral_ai_chat_completion = MistralAIChatCompletion() assert mistral_ai_chat_completion.ai_model_id == mistralai_unit_test_env["MISTRALAI_CHAT_MODEL_ID"] + assert mistral_ai_chat_completion.async_client._api_key == mistralai_unit_test_env["MISTRALAI_API_KEY"] assert 
isinstance(mistral_ai_chat_completion, ChatCompletionClientBase)
 
 
-@pytest.mark.parametrize("exclude_list", [["MISTRALAI_API_KEY"]], indirect=True)
-def test_mistral_ai_chat_completion_init_with_empty_api_key(mistralai_unit_test_env) -> None:
-    ai_model_id = "test_model_id"
+@pytest.mark.parametrize("exclude_list", [["MISTRALAI_API_KEY", "MISTRALAI_CHAT_MODEL_ID"]], indirect=True)
+def test_mistral_ai_chat_completion_init_constructor(mistralai_unit_test_env) -> None:
+    # Explicit constructor arguments should initialize the service successfully
+    mistral_ai_chat_completion = MistralAIChatCompletion(
+        api_key="overwrite_api_key",
+        ai_model_id="overwrite_model_id",
+        env_file_path="test.env",
+    )
+
+    assert mistral_ai_chat_completion.ai_model_id == "overwrite_model_id"
+    assert mistral_ai_chat_completion.async_client._api_key == "overwrite_api_key"
+    assert isinstance(mistral_ai_chat_completion, ChatCompletionClientBase)
+
+
+@pytest.mark.parametrize("exclude_list", [["MISTRALAI_API_KEY", "MISTRALAI_CHAT_MODEL_ID"]], indirect=True)
+def test_mistral_ai_chat_completion_init_constructor_missing_model(mistralai_unit_test_env) -> None:
+    # A missing chat model id should fail initialization
     with pytest.raises(ServiceInitializationError):
         MistralAIChatCompletion(
-            ai_model_id=ai_model_id,
-            env_file_path="test.env",
+            api_key="overwrite_api_key",
+            env_file_path="test.env",
         )
+
+
+@pytest.mark.parametrize("exclude_list", [["MISTRALAI_API_KEY", "MISTRALAI_CHAT_MODEL_ID"]], indirect=True)
+def test_mistral_ai_chat_completion_init_constructor_missing_api_key(mistralai_unit_test_env) -> None:
+    # A missing API key should fail initialization
+    with pytest.raises(ServiceInitializationError):
+        MistralAIChatCompletion(
+            ai_model_id="overwrite_model_id",
+            env_file_path="test.env",
+        )
+
+
+def test_mistral_ai_chat_completion_init_hybrid(mistralai_unit_test_env) -> None:
+    mistral_ai_chat_completion = MistralAIChatCompletion(
+        ai_model_id="overwrite_model_id",
+        env_file_path="test.env",
+    )
+    assert mistral_ai_chat_completion.ai_model_id == "overwrite_model_id"
+    assert mistral_ai_chat_completion.async_client._api_key == "test_api_key"
+
+
 @pytest.mark.parametrize("exclude_list", [["MISTRALAI_CHAT_MODEL_ID"]], indirect=True)
 def test_mistral_ai_chat_completion_init_with_empty_model_id(mistralai_unit_test_env) -> None:
     with pytest.raises(ServiceInitializationError):
diff --git a/python/tests/unit/connectors/mistral_ai/services/test_mistralai_text_embeddings.py b/python/tests/unit/connectors/mistral_ai/services/test_mistralai_text_embeddings.py
new file mode 100644
index 000000000000..98550ca6f1ad
--- /dev/null
+++ b/python/tests/unit/connectors/mistral_ai/services/test_mistralai_text_embeddings.py
@@ -0,0 +1,114 @@
+# Copyright (c) Microsoft. All rights reserved.
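
The init tests above all probe one precedence rule: explicit constructor arguments win, the environment fills in whatever is left, and a still-missing model id or API key fails initialization. Condensed into a sketch (assuming the package-level export and the same test env fixture, which sets MISTRALAI_API_KEY=test_api_key):

from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatCompletion

service = MistralAIChatCompletion(ai_model_id="overwrite_model_id")
assert service.ai_model_id == "overwrite_model_id"      # constructor wins
assert service.async_client._api_key == "test_api_key"  # env supplies the rest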
+ +from unittest.mock import AsyncMock, MagicMock + +import pytest +from mistralai.async_client import MistralAsyncClient +from mistralai.models.embeddings import EmbeddingResponse + +from semantic_kernel.connectors.ai.mistral_ai.services.mistral_ai_text_embedding import MistralAITextEmbedding +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError, ServiceResponseException + + +def test_embedding_with_env_variables(mistralai_unit_test_env): + text_embedding = MistralAITextEmbedding() + assert text_embedding.ai_model_id == "test_embedding_model_id" + assert text_embedding.client._api_key == "test_api_key" + + +@pytest.mark.parametrize("exclude_list", [["MISTRALAI_API_KEY", "MISTRALAI_EMBEDDING_MODEL_ID"]], indirect=True) +def test_embedding_with_constructor(mistralai_unit_test_env): + text_embedding = MistralAITextEmbedding( + api_key="overwrite-api-key", + ai_model_id="overwrite-model", + ) + assert text_embedding.ai_model_id == "overwrite-model" + assert text_embedding.client._api_key == "overwrite-api-key" + + +def test_embedding_with_client(mistralai_unit_test_env): + client = MagicMock(spec=MistralAsyncClient) + text_embedding = MistralAITextEmbedding(client=client) + assert text_embedding.client == client + assert text_embedding.ai_model_id == "test_embedding_model_id" + + +def test_embedding_with_api_key(mistralai_unit_test_env): + text_embedding = MistralAITextEmbedding(api_key="overwrite-api-key") + assert text_embedding.client._api_key == "overwrite-api-key" + assert text_embedding.ai_model_id == "test_embedding_model_id" + + +def test_embedding_with_model(mistralai_unit_test_env): + text_embedding = MistralAITextEmbedding(ai_model_id="overwrite-model") + assert text_embedding.ai_model_id == "overwrite-model" + assert text_embedding.client._api_key == "test_api_key" + + +@pytest.mark.parametrize("exclude_list", [["MISTRALAI_EMBEDDING_MODEL_ID"]], indirect=True) +def test_embedding_with_model_without_env(mistralai_unit_test_env): + text_embedding = MistralAITextEmbedding(ai_model_id="overwrite-model") + assert text_embedding.ai_model_id == "overwrite-model" + assert text_embedding.client._api_key == "test_api_key" + + +@pytest.mark.parametrize("exclude_list", [["MISTRALAI_EMBEDDING_MODEL_ID"]], indirect=True) +def test_embedding_missing_model(mistralai_unit_test_env): + with pytest.raises(ServiceInitializationError): + MistralAITextEmbedding( + env_file_path="test.env", + ) + + +@pytest.mark.parametrize("exclude_list", [["MISTRALAI_API_KEY"]], indirect=True) +def test_embedding_missing_api_key(mistralai_unit_test_env): + with pytest.raises(ServiceInitializationError): + MistralAITextEmbedding( + env_file_path="test.env", + ) + + +@pytest.mark.parametrize("exclude_list", [["MISTRALAI_API_KEY", "MISTRALAI_EMBEDDING_MODEL_ID"]], indirect=True) +def test_embedding_missing_api_key_constructor(mistralai_unit_test_env): + with pytest.raises(ServiceInitializationError): + MistralAITextEmbedding( + env_file_path="test.env", + ) + + +@pytest.mark.parametrize("exclude_list", [["MISTRALAI_API_KEY", "MISTRALAI_EMBEDDING_MODEL_ID"]], indirect=True) +def test_embedding_missing_model_constructor(mistralai_unit_test_env): + with pytest.raises(ServiceInitializationError): + MistralAITextEmbedding( + api_key="test_api_key", + env_file_path="test.env", + ) + + +@pytest.mark.asyncio +async def test_embedding_generate_raw_embedding(mistralai_unit_test_env): + mock_client = AsyncMock(spec=MistralAsyncClient) + mock_embedding_response = MagicMock(spec=EmbeddingResponse, 
data=[MagicMock(embedding=[1, 2, 3, 4, 5])]) + mock_client.embeddings.return_value = mock_embedding_response + text_embedding = MistralAITextEmbedding(client=mock_client) + embedding = await text_embedding.generate_raw_embeddings(["test"]) + assert embedding == [[1, 2, 3, 4, 5]] + + +@pytest.mark.asyncio +async def test_embedding_generate_embedding(mistralai_unit_test_env): + mock_client = AsyncMock(spec=MistralAsyncClient) + mock_embedding_response = MagicMock(spec=EmbeddingResponse, data=[MagicMock(embedding=[1, 2, 3, 4, 5])]) + mock_client.embeddings.return_value = mock_embedding_response + text_embedding = MistralAITextEmbedding(client=mock_client) + embedding = await text_embedding.generate_embeddings(["test"]) + assert embedding.tolist() == [[1, 2, 3, 4, 5]] + + +@pytest.mark.asyncio +async def test_embedding_generate_embedding_exception(mistralai_unit_test_env): + mock_client = AsyncMock(spec=MistralAsyncClient) + mock_client.embeddings.side_effect = Exception("Test Exception") + text_embedding = MistralAITextEmbedding(client=mock_client) + with pytest.raises(ServiceResponseException): + await text_embedding.generate_embeddings(["test"]) diff --git a/python/tests/unit/connectors/open_ai/services/test_azure_text_to_image.py b/python/tests/unit/connectors/open_ai/services/test_azure_text_to_image.py new file mode 100644 index 000000000000..756412739efe --- /dev/null +++ b/python/tests/unit/connectors/open_ai/services/test_azure_text_to_image.py @@ -0,0 +1,94 @@ +# Copyright (c) Microsoft. All rights reserved. + +from unittest.mock import AsyncMock, patch + +import pytest +from openai import AsyncAzureOpenAI +from openai.resources.images import AsyncImages + +from semantic_kernel.connectors.ai.open_ai.services.azure_text_to_image import AzureTextToImage +from semantic_kernel.connectors.ai.text_to_image_client_base import TextToImageClientBase +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError + + +def test_azure_text_to_image_init(azure_openai_unit_test_env) -> None: + # Test successful initialization + azure_text_to_image = AzureTextToImage() + + assert azure_text_to_image.client is not None + assert isinstance(azure_text_to_image.client, AsyncAzureOpenAI) + assert azure_text_to_image.ai_model_id == azure_openai_unit_test_env["AZURE_OPENAI_TEXT_TO_IMAGE_DEPLOYMENT_NAME"] + assert isinstance(azure_text_to_image, TextToImageClientBase) + + +@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_TEXT_TO_IMAGE_DEPLOYMENT_NAME"]], indirect=True) +def test_azure_text_to_image_init_with_empty_deployment_name(azure_openai_unit_test_env) -> None: + with pytest.raises(ServiceInitializationError): + AzureTextToImage(env_file_path="test.env") + + +@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_KEY"]], indirect=True) +def test_azure_text_to_image_init_with_empty_api_key(azure_openai_unit_test_env) -> None: + with pytest.raises(ServiceInitializationError): + AzureTextToImage(env_file_path="test.env") + + +@pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_BASE_URL"]], indirect=True) +def test_azure_text_to_image_init_with_empty_endpoint_and_base_url(azure_openai_unit_test_env) -> None: + with pytest.raises(ServiceInitializationError): + AzureTextToImage(env_file_path="test.env") + + +@pytest.mark.parametrize("override_env_param_dict", [{"AZURE_OPENAI_ENDPOINT": "http://test.com"}], indirect=True) +def test_azure_text_to_image_init_with_invalid_endpoint(azure_openai_unit_test_env) -> None: + with 
pytest.raises(ServiceInitializationError): + AzureTextToImage() + + +@pytest.mark.parametrize( + "override_env_param_dict", + [{"AZURE_OPENAI_BASE_URL": "https://test_text_to_image_deployment.test-base-url.com"}], + indirect=True, +) +def test_azure_text_to_image_init_with_from_dict(azure_openai_unit_test_env) -> None: + default_headers = {"test_header": "test_value"} + + settings = { + "deployment_name": azure_openai_unit_test_env["AZURE_OPENAI_TEXT_TO_IMAGE_DEPLOYMENT_NAME"], + "endpoint": azure_openai_unit_test_env["AZURE_OPENAI_ENDPOINT"], + "api_key": azure_openai_unit_test_env["AZURE_OPENAI_API_KEY"], + "api_version": azure_openai_unit_test_env["AZURE_OPENAI_API_VERSION"], + "default_headers": default_headers, + } + + azure_text_to_image = AzureTextToImage.from_dict(settings=settings) + + assert azure_text_to_image.client is not None + assert isinstance(azure_text_to_image.client, AsyncAzureOpenAI) + assert azure_text_to_image.ai_model_id == azure_openai_unit_test_env["AZURE_OPENAI_TEXT_TO_IMAGE_DEPLOYMENT_NAME"] + assert isinstance(azure_text_to_image, TextToImageClientBase) + assert settings["deployment_name"] in str(azure_text_to_image.client.base_url) + assert azure_text_to_image.client.api_key == azure_openai_unit_test_env["AZURE_OPENAI_API_KEY"] + + # Assert that the default header we added is present in the client's default headers + for key, value in default_headers.items(): + assert key in azure_text_to_image.client.default_headers + assert azure_text_to_image.client.default_headers[key] == value + + +@pytest.mark.asyncio +@patch.object(AsyncImages, "generate", new_callable=AsyncMock) +async def test_azure_text_to_image_calls_with_parameters(mock_generate, azure_openai_unit_test_env) -> None: + prompt = "A painting of a vase with flowers" + width = 512 + + azure_text_to_image = AzureTextToImage() + + await azure_text_to_image.generate_image(prompt, width, width) + + mock_generate.assert_awaited_once_with( + prompt=prompt, + model=azure_openai_unit_test_env["AZURE_OPENAI_TEXT_TO_IMAGE_DEPLOYMENT_NAME"], + size=f"{width}x{width}", + response_format="url", + ) diff --git a/python/tests/unit/connectors/open_ai/services/test_openai_text_to_image.py b/python/tests/unit/connectors/open_ai/services/test_openai_text_to_image.py new file mode 100644 index 000000000000..53ef87a244c9 --- /dev/null +++ b/python/tests/unit/connectors/open_ai/services/test_openai_text_to_image.py @@ -0,0 +1,96 @@ +# Copyright (c) Microsoft. All rights reserved. 
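+# Tests for OpenAITextToImage: initialization, the to_dict/from_dict round-trip, and generate_image behavior with AsyncImages.generate patched out.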
+ +from unittest.mock import AsyncMock, patch + +import pytest +from openai import AsyncClient +from openai.resources.images import AsyncImages +from openai.types.images_response import ImagesResponse + +from semantic_kernel.connectors.ai.open_ai.services.open_ai_text_to_image import OpenAITextToImage +from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError, ServiceResponseException + + +def test_init(openai_unit_test_env): + openai_text_to_image = OpenAITextToImage() + + assert openai_text_to_image.client is not None + assert isinstance(openai_text_to_image.client, AsyncClient) + assert openai_text_to_image.ai_model_id == openai_unit_test_env["OPENAI_TEXT_TO_IMAGE_MODEL_ID"] + + +def test_init_validation_fail() -> None: + with pytest.raises(ServiceInitializationError): + OpenAITextToImage(api_key="34523", ai_model_id={"test": "dict"}) + + +def test_init_to_from_dict(openai_unit_test_env): + default_headers = {"X-Unit-Test": "test-guid"} + + settings = { + "ai_model_id": openai_unit_test_env["OPENAI_TEXT_TO_IMAGE_MODEL_ID"], + "api_key": openai_unit_test_env["OPENAI_API_KEY"], + "default_headers": default_headers, + } + text_to_image = OpenAITextToImage.from_dict(settings) + dumped_settings = text_to_image.to_dict() + assert dumped_settings["ai_model_id"] == settings["ai_model_id"] + assert dumped_settings["api_key"] == settings["api_key"] + + +@pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) +def test_init_with_empty_api_key(openai_unit_test_env) -> None: + with pytest.raises(ServiceInitializationError): + OpenAITextToImage( + env_file_path="test.env", + ) + + +@pytest.mark.parametrize("exclude_list", [["OPENAI_TEXT_TO_IMAGE_MODEL_ID"]], indirect=True) +def test_init_with_no_model_id(openai_unit_test_env) -> None: + with pytest.raises(ServiceInitializationError): + OpenAITextToImage( + env_file_path="test.env", + ) + + +@pytest.mark.asyncio +@patch.object(AsyncImages, "generate", new_callable=AsyncMock) +async def test_generate_calls_with_parameters(mock_generate, openai_unit_test_env) -> None: + ai_model_id = "test_model_id" + prompt = "painting of flowers in vase" + width = 512 + + openai_text_to_image = OpenAITextToImage(ai_model_id=ai_model_id) + + await openai_text_to_image.generate_image(description=prompt, width=width, height=width) + + mock_generate.assert_awaited_once_with( + prompt=prompt, + model=ai_model_id, + size=f"{width}x{width}", + response_format="url", + ) + + +@pytest.mark.asyncio +@patch.object(AsyncImages, "generate", new_callable=AsyncMock, side_effect=Exception) +async def test_generate_fail(mock_generate, openai_unit_test_env) -> None: + ai_model_id = "test_model_id" + width = 512 + + openai_text_to_image = OpenAITextToImage(ai_model_id=ai_model_id) + with pytest.raises(ServiceResponseException): + await openai_text_to_image.generate_image(description="painting of flowers in vase", width=width, height=width) + + +@pytest.mark.asyncio +@patch.object(AsyncImages, "generate", new_callable=AsyncMock) +async def test_generate_no_result(mock_generate, openai_unit_test_env) -> None: + mock_generate.return_value = ImagesResponse(created=0, data=[]) + ai_model_id = "test_model_id" + width = 512 + + openai_text_to_image = OpenAITextToImage(ai_model_id=ai_model_id) + with pytest.raises(ServiceResponseException): + await openai_text_to_image.generate_image(description="painting of flowers in vase", width=width, height=width) diff --git a/python/tests/unit/connectors/test_function_choice_behavior.py
b/python/tests/unit/connectors/test_function_choice_behavior.py index 5d8c6bd2301a..89e211881c08 100644 --- a/python/tests/unit/connectors/test_function_choice_behavior.py +++ b/python/tests/unit/connectors/test_function_choice_behavior.py @@ -32,20 +32,20 @@ def update_settings_callback(): def test_function_choice_behavior_auto(): behavior = FunctionChoiceBehavior.Auto(auto_invoke=True) - assert behavior.type == FunctionChoiceType.AUTO + assert behavior.type_ == FunctionChoiceType.AUTO assert behavior.maximum_auto_invoke_attempts == DEFAULT_MAX_AUTO_INVOKE_ATTEMPTS def test_function_choice_behavior_none_invoke(): behavior = FunctionChoiceBehavior.NoneInvoke() - assert behavior.type == FunctionChoiceType.NONE + assert behavior.type_ == FunctionChoiceType.NONE assert behavior.maximum_auto_invoke_attempts == 0 def test_function_choice_behavior_required(): expected_filters = {"included_functions": ["plugin1-func1"]} behavior = FunctionChoiceBehavior.Required(auto_invoke=True, filters=expected_filters) - assert behavior.type == FunctionChoiceType.REQUIRED + assert behavior.type_ == FunctionChoiceType.REQUIRED assert behavior.maximum_auto_invoke_attempts == 1 assert behavior.filters == expected_filters @@ -53,14 +53,14 @@ def test_function_choice_behavior_required(): def test_from_function_call_behavior_kernel_functions(): behavior = FunctionCallBehavior.AutoInvokeKernelFunctions() new_behavior = FunctionChoiceBehavior.from_function_call_behavior(behavior) - assert new_behavior.type == FunctionChoiceType.AUTO + assert new_behavior.type_ == FunctionChoiceType.AUTO assert new_behavior.auto_invoke_kernel_functions is True def test_from_function_call_behavior_required(): behavior = FunctionCallBehavior.RequiredFunction(auto_invoke=True, function_fully_qualified_name="plugin1-func1") new_behavior = FunctionChoiceBehavior.from_function_call_behavior(behavior) - assert new_behavior.type == FunctionChoiceType.REQUIRED + assert new_behavior.type_ == FunctionChoiceType.REQUIRED assert new_behavior.auto_invoke_kernel_functions is True assert new_behavior.filters == {"included_functions": ["plugin1-func1"]} @@ -69,7 +69,7 @@ def test_from_function_call_behavior_enabled_functions(): expected_filters = {"included_functions": ["plugin1-func1"]} behavior = FunctionCallBehavior.EnableFunctions(auto_invoke=True, filters=expected_filters) new_behavior = FunctionChoiceBehavior.from_function_call_behavior(behavior) - assert new_behavior.type == FunctionChoiceType.AUTO + assert new_behavior.type_ == FunctionChoiceType.AUTO assert new_behavior.auto_invoke_kernel_functions is True assert new_behavior.filters == expected_filters @@ -90,7 +90,7 @@ def test_auto_function_choice_behavior_from_dict(type: str, max_auto_invoke_atte "maximum_auto_invoke_attempts": max_auto_invoke_attempts, } behavior = FunctionChoiceBehavior.from_dict(data) - assert behavior.type == FunctionChoiceType(type) + assert behavior.type_ == FunctionChoiceType(type) assert behavior.filters == {"included_functions": ["plugin1-func1", "plugin2-func2"]} assert behavior.maximum_auto_invoke_attempts == max_auto_invoke_attempts @@ -106,7 +106,7 @@ def test_auto_function_choice_behavior_from_dict_with_same_filters_and_functions "maximum_auto_invoke_attempts": max_auto_invoke_attempts, } behavior = FunctionChoiceBehavior.from_dict(data) - assert behavior.type == FunctionChoiceType(type) + assert behavior.type_ == FunctionChoiceType(type) assert behavior.filters == {"included_functions": ["plugin1-func1", "plugin2-func2"]} assert 
behavior.maximum_auto_invoke_attempts == max_auto_invoke_attempts @@ -122,7 +122,7 @@ def test_auto_function_choice_behavior_from_dict_with_different_filters_and_func "maximum_auto_invoke_attempts": max_auto_invoke_attempts, } behavior = FunctionChoiceBehavior.from_dict(data) - assert behavior.type == FunctionChoiceType(type) + assert behavior.type_ == FunctionChoiceType(type) assert behavior.filters == {"included_functions": ["plugin1-func1", "plugin2-func2", "plugin3-func3"]} assert behavior.maximum_auto_invoke_attempts == max_auto_invoke_attempts diff --git a/python/tests/unit/contents/test_annotation_content.py b/python/tests/unit/contents/test_annotation_content.py new file mode 100644 index 000000000000..3f40e0c0fb61 --- /dev/null +++ b/python/tests/unit/contents/test_annotation_content.py @@ -0,0 +1,120 @@ +# Copyright (c) Microsoft. All rights reserved. + +from xml.etree.ElementTree import Element + +import pytest + +from semantic_kernel.contents.annotation_content import AnnotationContent + +test_cases = [ + pytest.param(AnnotationContent(file_id="12345"), id="file_id"), + pytest.param(AnnotationContent(quote="This is a quote."), id="quote"), + pytest.param(AnnotationContent(start_index=5, end_index=20), id="indices"), + pytest.param( + AnnotationContent(file_id="12345", quote="This is a quote.", start_index=5, end_index=20), id="all_fields" + ), +] + + +def test_create_empty(): + annotation = AnnotationContent() + assert annotation.file_id is None + assert annotation.quote is None + assert annotation.start_index is None + assert annotation.end_index is None + + +def test_create_file_id(): + annotation = AnnotationContent(file_id="12345") + assert annotation.file_id == "12345" + + +def test_create_quote(): + annotation = AnnotationContent(quote="This is a quote.") + assert annotation.quote == "This is a quote." + + +def test_create_indices(): + annotation = AnnotationContent(start_index=5, end_index=20) + assert annotation.start_index == 5 + assert annotation.end_index == 20 + + +def test_create_all_fields(): + annotation = AnnotationContent(file_id="12345", quote="This is a quote.", start_index=5, end_index=20) + assert annotation.file_id == "12345" + assert annotation.quote == "This is a quote." + assert annotation.start_index == 5 + assert annotation.end_index == 20 + + +def test_update_file_id(): + annotation = AnnotationContent() + annotation.file_id = "12345" + assert annotation.file_id == "12345" + + +def test_update_quote(): + annotation = AnnotationContent() + annotation.quote = "This is a quote." + assert annotation.quote == "This is a quote." + + +def test_update_indices(): + annotation = AnnotationContent() + annotation.start_index = 5 + annotation.end_index = 20 + assert annotation.start_index == 5 + assert annotation.end_index == 20 + + +def test_to_str(): + annotation = AnnotationContent(file_id="12345", quote="This is a quote.", start_index=5, end_index=20) + assert str(annotation) == "AnnotationContent(file_id=12345, quote=This is a quote., start_index=5, end_index=20)" + + +def test_to_element(): + annotation = AnnotationContent(file_id="12345", quote="This is a quote.", start_index=5, end_index=20) + element = annotation.to_element() + assert element.tag == "annotation" + assert element.get("file_id") == "12345" + assert element.get("quote") == "This is a quote." 
+ assert element.get("start_index") == "5" + assert element.get("end_index") == "20" + + +def test_from_element(): + element = Element("AnnotationContent") + element.set("file_id", "12345") + element.set("quote", "This is a quote.") + element.set("start_index", "5") + element.set("end_index", "20") + annotation = AnnotationContent.from_element(element) + assert annotation.file_id == "12345" + assert annotation.quote == "This is a quote." + assert annotation.start_index == 5 + assert annotation.end_index == 20 + + +def test_to_dict(): + annotation = AnnotationContent(file_id="12345", quote="This is a quote.", start_index=5, end_index=20) + assert annotation.to_dict() == { + "type": "text", + "text": f"{annotation.file_id} {annotation.quote} (Start Index={annotation.start_index}->End Index={annotation.end_index})", # noqa: E501 + } + + +@pytest.mark.parametrize("annotation", test_cases) +def test_element_roundtrip(annotation): + element = annotation.to_element() + new_annotation = AnnotationContent.from_element(element) + assert new_annotation == annotation + + +@pytest.mark.parametrize("annotation", test_cases) +def test_to_dict_call(annotation): + expected_dict = { + "type": "text", + "text": f"{annotation.file_id} {annotation.quote} (Start Index={annotation.start_index}->End Index={annotation.end_index})", # noqa: E501 + } + assert annotation.to_dict() == expected_dict diff --git a/python/tests/unit/contents/test_file_reference_content.py b/python/tests/unit/contents/test_file_reference_content.py new file mode 100644 index 000000000000..6f1a0cb18ad2 --- /dev/null +++ b/python/tests/unit/contents/test_file_reference_content.py @@ -0,0 +1,76 @@ +# Copyright (c) Microsoft. All rights reserved. + +from xml.etree.ElementTree import Element + +import pytest + +from semantic_kernel.contents.file_reference_content import FileReferenceContent + + +def test_create_empty(): + file_reference = FileReferenceContent() + assert file_reference.file_id is None + + +def test_create_file_id(): + file_reference = FileReferenceContent(file_id="12345") + assert file_reference.file_id == "12345" + + +def test_update_file_id(): + file_reference = FileReferenceContent() + file_reference.file_id = "12345" + assert file_reference.file_id == "12345" + + +def test_to_str(): + file_reference = FileReferenceContent(file_id="12345") + assert str(file_reference) == "FileReferenceContent(file_id=12345)" + + +def test_to_element(): + file_reference = FileReferenceContent(file_id="12345") + element = file_reference.to_element() + assert element.tag == "file_reference" + assert element.get("file_id") == "12345" + + +def test_from_element(): + element = Element("FileReferenceContent") + element.set("file_id", "12345") + file_reference = FileReferenceContent.from_element(element) + assert file_reference.file_id == "12345" + + +def test_to_dict_simple(): + file_reference = FileReferenceContent(file_id="12345") + assert file_reference.to_dict() == { + "file_id": "12345", + } + + +@pytest.mark.parametrize( + "file_reference", + [ + pytest.param(FileReferenceContent(file_id="12345"), id="file_id"), + pytest.param(FileReferenceContent(), id="empty"), + ], +) +def test_element_roundtrip(file_reference): + element = file_reference.to_element() + new_file_reference = FileReferenceContent.from_element(element) + assert new_file_reference == file_reference + + +@pytest.mark.parametrize( + "file_reference", + [ + pytest.param(FileReferenceContent(file_id="12345"), id="file_id"), + pytest.param(FileReferenceContent(), id="empty"), + ], 
+) +def test_to_dict(file_reference): + expected_dict = { + "file_id": file_reference.file_id, + } + assert file_reference.to_dict() == expected_dict diff --git a/python/tests/unit/contents/test_function_result_content.py b/python/tests/unit/contents/test_function_result_content.py index a745c6c255ea..4b013d8a83dd 100644 --- a/python/tests/unit/contents/test_function_result_content.py +++ b/python/tests/unit/contents/test_function_result_content.py @@ -74,15 +74,11 @@ def test_from_fcc_and_result(result: Any): assert frc.metadata == {"test": "test", "test2": "test2"} -@pytest.mark.parametrize("unwrap", [True, False], ids=["unwrap", "no-unwrap"]) -def test_to_cmc(unwrap: bool): +def test_to_cmc(): frc = FunctionResultContent(id="test", name="test-function", result="test-result") - cmc = frc.to_chat_message_content(unwrap=unwrap) + cmc = frc.to_chat_message_content() assert cmc.role.value == "tool" - if unwrap: - assert cmc.items[0].text == "test-result" - else: - assert cmc.items[0].result == "test-result" + assert cmc.items[0].result == "test-result" def test_serialize(): diff --git a/python/tests/unit/data/conftest.py b/python/tests/unit/data/conftest.py new file mode 100644 index 000000000000..fd8532dc1896 --- /dev/null +++ b/python/tests/unit/data/conftest.py @@ -0,0 +1,312 @@ +# Copyright (c) Microsoft. All rights reserved. + + +from collections.abc import Mapping, Sequence +from dataclasses import dataclass +from typing import Annotated, Any + +import numpy as np +from pydantic import BaseModel, Field +from pytest import fixture + +from semantic_kernel.data.vector_store_model_decorator import vectorstoremodel +from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition +from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection +from semantic_kernel.data.vector_store_record_fields import ( + VectorStoreRecordDataField, + VectorStoreRecordKeyField, + VectorStoreRecordVectorField, +) + + +@fixture +def DictVectorStoreRecordCollection(): + class DictVectorStoreRecordCollection(VectorStoreRecordCollection[str, Any]): + inner_storage: dict[str, Any] = Field(default_factory=dict) + + async def _inner_delete(self, keys: Sequence[str], **kwargs: Any) -> None: + for key in keys: + self.inner_storage.pop(key, None) + + async def _inner_get(self, keys: Sequence[str], **kwargs: Any) -> Any | Sequence[Any] | None: + return [self.inner_storage[key] for key in keys if key in self.inner_storage] + + async def _inner_upsert(self, records: Sequence[Any], **kwargs: Any) -> Sequence[str]: + updated_keys = [] + for record in records: + key = ( + record[self._key_field_name] + if isinstance(record, Mapping) + else getattr(record, self._key_field_name) + ) + self.inner_storage[key] = record + updated_keys.append(key) + return updated_keys + + def _deserialize_store_models_to_dicts(self, records: Sequence[Any], **kwargs: Any) -> Sequence[dict[str, Any]]: + return records + + def _serialize_dicts_to_store_models(self, records: Sequence[dict[str, Any]], **kwargs: Any) -> Sequence[Any]: + return records + + async def create_collection(self, **kwargs: Any) -> None: + pass + + async def delete_collection(self, **kwargs: Any) -> None: + self.inner_storage = {} + + async def does_collection_exist(self, **kwargs: Any) -> bool: + return True + + return DictVectorStoreRecordCollection + + +@fixture +def data_model_definition() -> object: + return VectorStoreRecordDefinition( + fields={ + "id": VectorStoreRecordKeyField(), + "content": 
VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector"), + "vector": VectorStoreRecordVectorField(), + } + ) + + +@fixture +def data_model_serialize_definition() -> object: + def serialize(record, **kwargs): + return record + + def deserialize(records, **kwargs): + return records + + return VectorStoreRecordDefinition( + fields={ + "id": VectorStoreRecordKeyField(), + "content": VectorStoreRecordDataField(), + "vector": VectorStoreRecordVectorField(), + }, + serialize=serialize, + deserialize=deserialize, + ) + + +@fixture +def data_model_to_from_dict_definition() -> object: + def to_dict(record, **kwargs): + return record + + def from_dict(records, **kwargs): + return records + + return VectorStoreRecordDefinition( + fields={ + "id": VectorStoreRecordKeyField(), + "content": VectorStoreRecordDataField(), + "vector": VectorStoreRecordVectorField(), + }, + to_dict=to_dict, + from_dict=from_dict, + ) + + +@fixture +def data_model_container_definition() -> object: + def to_dict(record: dict[str, dict[str, Any]], **kwargs) -> list[dict[str, Any]]: + return [{"id": key} | value for key, value in record.items()] + + def from_dict(records: list[dict[str, Any]], **kwargs) -> dict[str, dict[str, Any]]: + ret = {} + for record in records: + id = record.pop("id") + ret[id] = record + return ret + + return VectorStoreRecordDefinition( + fields={ + "id": VectorStoreRecordKeyField(), + "content": VectorStoreRecordDataField(), + "vector": VectorStoreRecordVectorField(), + }, + container_mode=True, + to_dict=to_dict, + from_dict=from_dict, + ) + + +@fixture +def data_model_container_serialize_definition() -> object: + def serialize(record: dict[str, dict[str, Any]], **kwargs) -> list[dict[str, Any]]: + return [{"id": key} | value for key, value in record.items()] + + def deserialize(records: list[dict[str, Any]], **kwargs) -> dict[str, dict[str, Any]]: + ret = {} + for record in records: + id = record.pop("id") + ret[id] = record + return ret + + return VectorStoreRecordDefinition( + fields={ + "id": VectorStoreRecordKeyField(), + "content": VectorStoreRecordDataField(), + "vector": VectorStoreRecordVectorField(), + }, + container_mode=True, + serialize=serialize, + deserialize=deserialize, + ) + + +@fixture +def data_model_pandas_definition() -> object: + from pandas import DataFrame + + return VectorStoreRecordDefinition( + fields={ + "vector": VectorStoreRecordVectorField( + name="vector", + index_kind="hnsw", + dimensions=5, + distance_function="cosine", + property_type="float", + ), + "id": VectorStoreRecordKeyField(name="id"), + "content": VectorStoreRecordDataField( + name="content", + has_embedding=True, + embedding_property_name="vector", + property_type="str", + ), + }, + container_mode=True, + to_dict=lambda x: x.to_dict(orient="records"), + from_dict=lambda x, **_: DataFrame(x), + ) + + +@fixture +def data_model_type_vanilla(): + @vectorstoremodel + class DataModelClass: + def __init__( + self, + content: Annotated[str, VectorStoreRecordDataField()], + vector: Annotated[list[float], VectorStoreRecordVectorField()], + id: Annotated[str, VectorStoreRecordKeyField()], + ): + self.content = content + self.vector = vector + self.id = id + + def __eq__(self, other) -> bool: + return self.content == other.content and self.id == other.id and self.vector == other.vector + + return DataModelClass + + +@fixture +def data_model_type_vector_array(): + @vectorstoremodel + class DataModelClass: + def __init__( + self, + content: Annotated[str, VectorStoreRecordDataField()], + vector: 
Annotated[ + np.ndarray, + VectorStoreRecordVectorField( + serialize_function=np.ndarray.tolist, + deserialize_function=np.array, + ), + ], + id: Annotated[str, VectorStoreRecordKeyField()], + ): + self.content = content + self.vector = vector + self.id = id + + def __eq__(self, other) -> bool: + return self.content == other.content and self.id == other.id and self.vector == other.vector + + return DataModelClass + + +@fixture +def data_model_type_vanilla_serialize(): + @vectorstoremodel + class DataModelClass: + def __init__( + self, + content: Annotated[str, VectorStoreRecordDataField()], + vector: Annotated[list[float], VectorStoreRecordVectorField()], + id: Annotated[str, VectorStoreRecordKeyField()], + ): + self.content = content + self.vector = vector + self.id = id + + def serialize(self, **kwargs: Any) -> Any: + """Serialize the object to the format required by the data store.""" + return {"id": self.id, "content": self.content, "vector": self.vector} + + @classmethod + def deserialize(cls, obj: Any, **kwargs: Any): + """Deserialize the output of the data store to an object.""" + return cls(**obj) + + def __eq__(self, other) -> bool: + return self.content == other.content and self.id == other.id and self.vector == other.vector + + return DataModelClass + + +@fixture +def data_model_type_vanilla_to_from_dict(): + @vectorstoremodel + class DataModelClass: + def __init__( + self, + content: Annotated[str, VectorStoreRecordDataField()], + vector: Annotated[list[float], VectorStoreRecordVectorField()], + id: Annotated[str, VectorStoreRecordKeyField()], + ): + self.content = content + self.vector = vector + self.id = id + + def to_dict(self, **kwargs: Any) -> Any: + """Serialize the object to the format required by the data store.""" + return {"id": self.id, "content": self.content, "vector": self.vector} + + @classmethod + def from_dict(cls, *args: Any, **kwargs: Any): + """Deserialize the output of the data store to an object.""" + return cls(**args[0]) + + def __eq__(self, other) -> bool: + return self.content == other.content and self.id == other.id and self.vector == other.vector + + return DataModelClass + + +@fixture +def data_model_type_pydantic(): + @vectorstoremodel + class DataModelClass(BaseModel): + content: Annotated[str, VectorStoreRecordDataField()] + vector: Annotated[list[float], VectorStoreRecordVectorField()] + id: Annotated[str, VectorStoreRecordKeyField()] + + return DataModelClass + + +@fixture +def data_model_type_dataclass(): + @vectorstoremodel + @dataclass + class DataModelClass: + content: Annotated[str, VectorStoreRecordDataField()] + vector: Annotated[list[float], VectorStoreRecordVectorField()] + id: Annotated[str, VectorStoreRecordKeyField()] + + return DataModelClass diff --git a/python/tests/unit/data/test_vector_store_model_decorator.py b/python/tests/unit/data/test_vector_store_model_decorator.py new file mode 100644 index 000000000000..b690e18dbb78 --- /dev/null +++ b/python/tests/unit/data/test_vector_store_model_decorator.py @@ -0,0 +1,224 @@ +# Copyright (c) Microsoft. All rights reserved.
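+# Tests for the @vectorstoremodel decorator: it should derive a VectorStoreRecordDefinition from Annotated fields on plain classes, dataclasses, and pydantic models, and raise VectorStoreModelException for classes it cannot introspect.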
+ + +from dataclasses import dataclass +from typing import Annotated + +from pydantic import BaseModel +from pydantic.dataclasses import dataclass as pydantic_dataclass +from pytest import raises + +from semantic_kernel.data.vector_store_model_decorator import vectorstoremodel +from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition +from semantic_kernel.data.vector_store_record_fields import ( + VectorStoreRecordDataField, + VectorStoreRecordKeyField, + VectorStoreRecordVectorField, +) +from semantic_kernel.exceptions.memory_connector_exceptions import VectorStoreModelException + + +def test_vanilla(): + @vectorstoremodel + class DataModelClass: + def __init__( + self, + content: Annotated[str, VectorStoreRecordDataField()], + content2: Annotated[str, VectorStoreRecordDataField], + vector: Annotated[list[float], VectorStoreRecordVectorField()], + id: Annotated[str, VectorStoreRecordKeyField()], + non_vector_store_content: str | None = None, + optional_content: Annotated[str | None, VectorStoreRecordDataField()] = None, + annotated_content: Annotated[str | None, "description"] = None, + ): + self.content = content + self.content2 = content2 + self.vector = vector + self.id = id + self.optional_content = optional_content + self.non_vector_store_content = non_vector_store_content + self.annotated_content = annotated_content + + assert hasattr(DataModelClass, "__kernel_vectorstoremodel__") + assert hasattr(DataModelClass, "__kernel_vectorstoremodel_definition__") + data_model_definition: VectorStoreRecordDefinition = DataModelClass.__kernel_vectorstoremodel_definition__ + assert len(data_model_definition.fields) == 5 + assert data_model_definition.fields["content"].name == "content" + assert data_model_definition.fields["content"].property_type == "str" + assert data_model_definition.fields["content2"].name == "content2" + assert data_model_definition.fields["content2"].property_type == "str" + assert data_model_definition.fields["vector"].name == "vector" + assert data_model_definition.fields["id"].name == "id" + assert data_model_definition.fields["optional_content"].name == "optional_content" + assert data_model_definition.fields["optional_content"].property_type == "str" + assert data_model_definition.key_field_name == "id" + assert data_model_definition.container_mode is False + assert data_model_definition.vector_field_names == ["vector"] + + +def test_vanilla_2(): + @vectorstoremodel() + class DataModelClass: + def __init__( + self, + content: Annotated[str, VectorStoreRecordDataField()], + id: Annotated[str, VectorStoreRecordKeyField()], + ): + self.content = content + self.id = id + + assert hasattr(DataModelClass, "__kernel_vectorstoremodel__") + assert hasattr(DataModelClass, "__kernel_vectorstoremodel_definition__") + data_model_definition: VectorStoreRecordDefinition = DataModelClass.__kernel_vectorstoremodel_definition__ + assert len(data_model_definition.fields) == 2 + + +def test_dataclass(): + @vectorstoremodel + @dataclass + class DataModelClass: + content: Annotated[str, VectorStoreRecordDataField()] + content2: Annotated[str, VectorStoreRecordDataField] + vector: Annotated[list[float], VectorStoreRecordVectorField()] + id: Annotated[str, VectorStoreRecordKeyField()] + non_vector_store_content: str | None = None + optional_content: Annotated[str | None, VectorStoreRecordDataField()] = None + annotated_content: Annotated[str | None, "description"] = None + + assert hasattr(DataModelClass, "__kernel_vectorstoremodel__") + assert 
hasattr(DataModelClass, "__kernel_vectorstoremodel_definition__") + data_model_definition: VectorStoreRecordDefinition = DataModelClass.__kernel_vectorstoremodel_definition__ + assert len(data_model_definition.fields) == 5 + assert data_model_definition.fields["content"].name == "content" + assert data_model_definition.fields["content"].property_type == "str" + assert data_model_definition.fields["content2"].name == "content2" + assert data_model_definition.fields["content2"].property_type == "str" + assert data_model_definition.fields["vector"].name == "vector" + assert data_model_definition.fields["id"].name == "id" + assert data_model_definition.fields["optional_content"].name == "optional_content" + assert data_model_definition.fields["optional_content"].property_type == "str" + assert data_model_definition.key_field_name == "id" + assert data_model_definition.container_mode is False + assert data_model_definition.vector_field_names == ["vector"] + + +def test_dataclass_inverse_fail(): + with raises(VectorStoreModelException): + + @dataclass + @vectorstoremodel + class DataModelClass: + id: Annotated[str, VectorStoreRecordKeyField()] + content: Annotated[str, VectorStoreRecordDataField()] + + +def test_pydantic_base_model(): + @vectorstoremodel + class DataModelClass(BaseModel): + content: Annotated[str, VectorStoreRecordDataField()] + content2: Annotated[str, VectorStoreRecordDataField] + vector: Annotated[list[float], VectorStoreRecordVectorField()] + id: Annotated[str, VectorStoreRecordKeyField()] + non_vector_store_content: str | None = None + optional_content: Annotated[str | None, VectorStoreRecordDataField()] = None + annotated_content: Annotated[str | None, "description"] = None + + assert hasattr(DataModelClass, "__kernel_vectorstoremodel__") + assert hasattr(DataModelClass, "__kernel_vectorstoremodel_definition__") + data_model_definition: VectorStoreRecordDefinition = DataModelClass.__kernel_vectorstoremodel_definition__ + assert len(data_model_definition.fields) == 5 + assert data_model_definition.fields["content"].name == "content" + assert data_model_definition.fields["content"].property_type == "str" + assert data_model_definition.fields["content2"].name == "content2" + assert data_model_definition.fields["content2"].property_type == "str" + assert data_model_definition.fields["vector"].name == "vector" + assert data_model_definition.fields["id"].name == "id" + assert data_model_definition.fields["optional_content"].name == "optional_content" + assert data_model_definition.fields["optional_content"].property_type == "str" + assert data_model_definition.key_field_name == "id" + assert data_model_definition.container_mode is False + assert data_model_definition.vector_field_names == ["vector"] + + +def test_pydantic_dataclass(): + @vectorstoremodel + @pydantic_dataclass + class DataModelClass: + content: Annotated[str, VectorStoreRecordDataField()] + content2: Annotated[str, VectorStoreRecordDataField] + vector: Annotated[list[float], VectorStoreRecordVectorField()] + id: Annotated[str, VectorStoreRecordKeyField()] + non_vector_store_content: str | None = None + optional_content: Annotated[str | None, VectorStoreRecordDataField()] = None + annotated_content: Annotated[str | None, "description"] = None + + assert hasattr(DataModelClass, "__kernel_vectorstoremodel__") + assert hasattr(DataModelClass, "__kernel_vectorstoremodel_definition__") + data_model_definition: VectorStoreRecordDefinition = DataModelClass.__kernel_vectorstoremodel_definition__ + assert 
len(data_model_definition.fields) == 5 + assert data_model_definition.fields["content"].name == "content" + assert data_model_definition.fields["content"].property_type == "str" + assert data_model_definition.fields["content2"].name == "content2" + assert data_model_definition.fields["content2"].property_type == "str" + assert data_model_definition.fields["vector"].name == "vector" + assert data_model_definition.fields["id"].name == "id" + assert data_model_definition.fields["optional_content"].name == "optional_content" + assert data_model_definition.fields["optional_content"].property_type == "str" + assert data_model_definition.key_field_name == "id" + assert data_model_definition.container_mode is False + assert data_model_definition.vector_field_names == ["vector"] + + +def test_empty_model(): + with raises(VectorStoreModelException): + + @vectorstoremodel + class DataModelClass: + def __init__(self): + pass + + +def test_non_annotated_no_default(): + with raises(VectorStoreModelException): + + @vectorstoremodel + class DataModelClass: + def __init__(self, non_vector_store_content: str): + self.non_vector_store_content = non_vector_store_content + + +def test_annotated_no_vsr_field_no_default(): + with raises(VectorStoreModelException): + + @vectorstoremodel + class DataModelClass: + def __init__( + self, + annotated_content: Annotated[str, "description"], + ): + self.annotated_content = annotated_content + + +def test_non_vector_list_and_dict(): + @vectorstoremodel + @dataclass + class DataModelClass: + key: Annotated[str, VectorStoreRecordKeyField()] + list1: Annotated[list[int], VectorStoreRecordDataField()] + list2: Annotated[list[str], VectorStoreRecordDataField] + dict1: Annotated[dict[str, int], VectorStoreRecordDataField()] + dict2: Annotated[dict[str, str], VectorStoreRecordDataField] + + assert hasattr(DataModelClass, "__kernel_vectorstoremodel__") + assert hasattr(DataModelClass, "__kernel_vectorstoremodel_definition__") + data_model_definition: VectorStoreRecordDefinition = DataModelClass.__kernel_vectorstoremodel_definition__ + assert len(data_model_definition.fields) == 5 + assert data_model_definition.fields["list1"].name == "list1" + assert data_model_definition.fields["list1"].property_type == "list[int]" + assert data_model_definition.fields["list2"].name == "list2" + assert data_model_definition.fields["list2"].property_type == "list[str]" + assert data_model_definition.fields["dict1"].name == "dict1" + assert data_model_definition.fields["dict1"].property_type == "dict" + assert data_model_definition.fields["dict2"].name == "dict2" + assert data_model_definition.fields["dict2"].property_type == "dict" + assert data_model_definition.container_mode is False diff --git a/python/tests/unit/data/test_vector_store_record_collection.py b/python/tests/unit/data/test_vector_store_record_collection.py new file mode 100644 index 000000000000..104205f081e5 --- /dev/null +++ b/python/tests/unit/data/test_vector_store_record_collection.py @@ -0,0 +1,559 @@ +# Copyright (c) Microsoft. All rights reserved. 
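+# Tests for VectorStoreRecordCollection, driven through the in-memory DictVectorStoreRecordCollection fixture from conftest.py: CRUD in single, batch, container, and pandas modes, (de)serialization failure paths, and collection management. Records are shaped like {"id": "1", "content": "text", "vector": [1.0, 2.0, 3.0]}.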
+ +from copy import deepcopy +from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch + +import numpy as np +from pandas import DataFrame +from pytest import fixture, mark, raises + +from semantic_kernel.data.vector_store_record_collection import VectorStoreRecordCollection +from semantic_kernel.exceptions.memory_connector_exceptions import ( + MemoryConnectorException, + VectorStoreModelDeserializationException, + VectorStoreModelSerializationException, + VectorStoreModelValidationError, +) + + +@fixture(scope="function") +def vector_store_record_collection( + DictVectorStoreRecordCollection, + data_model_definition, + data_model_serialize_definition, + data_model_to_from_dict_definition, + data_model_container_definition, + data_model_container_serialize_definition, + data_model_pandas_definition, + data_model_type_vanilla, + data_model_type_vanilla_serialize, + data_model_type_vanilla_to_from_dict, + data_model_type_pydantic, + data_model_type_dataclass, + data_model_type_vector_array, + request, +) -> VectorStoreRecordCollection: + item = request.param if request and hasattr(request, "param") else "definition_basic" + defs = { + "definition_basic": data_model_definition, + "definition_with_serialize": data_model_serialize_definition, + "definition_with_to_from": data_model_to_from_dict_definition, + "definition_container": data_model_container_definition, + "definition_container_serialize": data_model_container_serialize_definition, + "definition_pandas": data_model_pandas_definition, + "type_vanilla": data_model_type_vanilla, + "type_vanilla_with_serialize": data_model_type_vanilla_serialize, + "type_vanilla_with_to_from_dict": data_model_type_vanilla_to_from_dict, + "type_pydantic": data_model_type_pydantic, + "type_dataclass": data_model_type_dataclass, + "type_vector_array": data_model_type_vector_array, + } + if item.endswith("pandas"): + return DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=DataFrame, + data_model_definition=defs[item], + ) + if item.startswith("definition_"): + return DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=defs[item], + ) + return DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=defs[item], + ) + + +def test_init(DictVectorStoreRecordCollection, data_model_definition): + vsrc = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + assert vsrc.collection_name == "test" + assert vsrc.data_model_type is dict + assert vsrc._container_mode is False + assert vsrc.data_model_definition == data_model_definition + assert vsrc._key_field_name == "id" + + +@mark.asyncio +async def test_context_manager(DictVectorStoreRecordCollection, data_model_definition): + DictVectorStoreRecordCollection.close = AsyncMock() + async with DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ): + pass + DictVectorStoreRecordCollection.close.assert_called() + + +@mark.asyncio +@mark.parametrize( + "vector_store_record_collection", + [ + "definition_basic", + "definition_with_serialize", + "definition_with_to_from", + "type_vanilla", + "type_vanilla_with_serialize", + "type_vanilla_with_to_from_dict", + "type_pydantic", + "type_dataclass", + "type_vector_array", + ], + indirect=True, +) +async def test_crud_operations(vector_store_record_collection): + id = "test_id" + record = {"id": id, 
"content": "test_content", "vector": [1.0, 2.0, 3.0]} + if vector_store_record_collection.data_model_definition.fields["vector"].deserialize_function is not None: + record["vector"] = vector_store_record_collection.data_model_definition.fields["vector"].deserialize_function( + record["vector"] + ) + if vector_store_record_collection.data_model_type is not dict: + model = vector_store_record_collection.data_model_type + record = model(**record) + no_records = await vector_store_record_collection.get(id) + assert no_records is None + await vector_store_record_collection.upsert(record) + assert len(vector_store_record_collection.inner_storage) == 1 + if vector_store_record_collection.data_model_type is dict: + assert vector_store_record_collection.inner_storage[id] == record + else: + assert vector_store_record_collection.inner_storage[id]["content"] == record.content + record_2 = await vector_store_record_collection.get(id) + if vector_store_record_collection.data_model_type is dict: + assert record_2 == record + else: + if isinstance(record.vector, list): + assert record_2 == record + else: + assert record_2.id == record.id + assert record_2.content == record.content + assert np.array_equal(record_2.vector, record.vector) + await vector_store_record_collection.delete(id) + assert len(vector_store_record_collection.inner_storage) == 0 + + +@mark.asyncio +@mark.parametrize( + "vector_store_record_collection", + [ + "definition_basic", + "definition_with_serialize", + "definition_with_to_from", + "type_vanilla", + "type_vanilla_with_serialize", + "type_vanilla_with_to_from_dict", + "type_pydantic", + "type_dataclass", + ], + indirect=True, +) +async def test_crud_batch_operations(vector_store_record_collection): + ids = ["test_id_1", "test_id_2"] + batch = [ + {"id": ids[0], "content": "test_content", "vector": [1.0, 2.0, 3.0]}, + {"id": ids[1], "content": "test_content", "vector": [1.0, 2.0, 3.0]}, + ] + if vector_store_record_collection.data_model_type is not dict: + model = vector_store_record_collection.data_model_type + batch = [model(**record) for record in batch] + no_records = await vector_store_record_collection.get_batch(ids) + assert no_records is None + await vector_store_record_collection.upsert_batch(batch) + assert len(vector_store_record_collection.inner_storage) == 2 + if vector_store_record_collection.data_model_type is dict: + assert vector_store_record_collection.inner_storage[ids[0]] == batch[0] + else: + assert vector_store_record_collection.inner_storage[ids[0]]["content"] == batch[0].content + records = await vector_store_record_collection.get_batch(ids) + assert records == batch + await vector_store_record_collection.delete_batch(ids) + assert len(vector_store_record_collection.inner_storage) == 0 + + +@mark.asyncio +@mark.parametrize( + "vector_store_record_collection", + ["definition_container", "definition_container_serialize"], + indirect=True, +) +async def test_crud_operations_container(vector_store_record_collection): + id = "test_id" + record = {id: {"content": "test_content", "vector": [1.0, 2.0, 3.0]}} + no_records = await vector_store_record_collection.get(id) + assert no_records is None + await vector_store_record_collection.upsert(record) + assert len(vector_store_record_collection.inner_storage) == 1 + assert vector_store_record_collection.inner_storage[id]["content"] == record[id]["content"] + assert vector_store_record_collection.inner_storage[id]["vector"] == record[id]["vector"] + record_2 = await vector_store_record_collection.get(id) + assert 
record_2 == record + await vector_store_record_collection.delete(id) + assert len(vector_store_record_collection.inner_storage) == 0 + + +@mark.asyncio +@mark.parametrize( + "vector_store_record_collection", + ["definition_container", "definition_container_serialize"], + indirect=True, +) +async def test_crud_batch_operations_container(vector_store_record_collection): + ids = ["test_id_1", "test_id_2"] + batch = { + ids[0]: {"content": "test_content", "vector": [1.0, 2.0, 3.0]}, + ids[1]: {"content": "test_content", "vector": [1.0, 2.0, 3.0]}, + } + no_records = await vector_store_record_collection.get_batch(ids) + assert no_records is None + await vector_store_record_collection.upsert_batch(batch) + assert len(vector_store_record_collection.inner_storage) == 2 + assert vector_store_record_collection.inner_storage[ids[0]]["content"] == batch[ids[0]]["content"] + assert vector_store_record_collection.inner_storage[ids[0]]["vector"] == batch[ids[0]]["vector"] + records = await vector_store_record_collection.get_batch(ids) + assert records == batch + await vector_store_record_collection.delete_batch(ids) + assert len(vector_store_record_collection.inner_storage) == 0 + + +@mark.asyncio +@mark.parametrize( + "vector_store_record_collection", + ["definition_pandas"], + indirect=True, +) +async def test_crud_operations_pandas(vector_store_record_collection): + id = "test_id" + record = DataFrame([{"id": id, "content": "test_content", "vector": [1.0, 2.0, 3.0]}]) + no_records = await vector_store_record_collection.get(id) + assert no_records is None + await vector_store_record_collection.upsert(record) + assert len(vector_store_record_collection.inner_storage) == 1 + + assert vector_store_record_collection.inner_storage[id]["content"] == record["content"].values[0] + assert vector_store_record_collection.inner_storage[id]["vector"] == record["vector"].values[0] + record_2 = await vector_store_record_collection.get(id) + assert record_2.equals(record) + await vector_store_record_collection.delete(id) + assert len(vector_store_record_collection.inner_storage) == 0 + + +@mark.asyncio +@mark.parametrize( + "vector_store_record_collection", + ["definition_pandas"], + indirect=True, +) +async def test_crud_batch_operations_pandas(vector_store_record_collection): + ids = ["test_id_1", "test_id_2"] + + batch = DataFrame([{"id": id, "content": "test_content", "vector": [1.0, 2.0, 3.0]} for id in ids]) + no_records = await vector_store_record_collection.get_batch(ids) + assert no_records is None + await vector_store_record_collection.upsert_batch(batch) + assert len(vector_store_record_collection.inner_storage) == 2 + assert vector_store_record_collection.inner_storage[ids[0]]["content"] == batch["content"].values[0] + assert vector_store_record_collection.inner_storage[ids[0]]["vector"] == batch["vector"].values[0] + records = await vector_store_record_collection.get_batch(ids) + assert records.equals(batch) + await vector_store_record_collection.delete_batch(ids) + assert len(vector_store_record_collection.inner_storage) == 0 + + +@mark.asyncio +async def test_upsert_fail(DictVectorStoreRecordCollection, data_model_definition): + DictVectorStoreRecordCollection._inner_upsert = MagicMock(side_effect=Exception) + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + record = {"id": "test_id", "content": "test_content", "vector": [1.0, 2.0, 3.0]} + with raises(MemoryConnectorException, 
match="Error upserting record:"): + await vector_store_record_collection.upsert(record) + with raises(MemoryConnectorException, match="Error upserting records:"): + await vector_store_record_collection.upsert_batch([record]) + assert len(vector_store_record_collection.inner_storage) == 0 + + +@mark.asyncio +async def test_get_fail(DictVectorStoreRecordCollection, data_model_definition): + DictVectorStoreRecordCollection._inner_get = MagicMock(side_effect=Exception) + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + record = {"id": "test_id", "content": "test_content", "vector": [1.0, 2.0, 3.0]} + await vector_store_record_collection.upsert(record) + assert len(vector_store_record_collection.inner_storage) == 1 + with raises(MemoryConnectorException, match="Error getting record:"): + await vector_store_record_collection.get("test_id") + with raises(MemoryConnectorException, match="Error getting records:"): + await vector_store_record_collection.get_batch(["test_id"]) + + +@mark.asyncio +async def test_get_fail_multiple(DictVectorStoreRecordCollection, data_model_definition): + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + record = {"id": "test_id", "content": "test_content", "vector": [1.0, 2.0, 3.0]} + await vector_store_record_collection.upsert(record) + assert len(vector_store_record_collection.inner_storage) == 1 + with ( + patch( + "semantic_kernel.data.vector_store_record_collection.VectorStoreRecordCollection.deserialize" + ) as deserialize_mock, + raises(MemoryConnectorException, match="Error deserializing record, multiple records returned:"), + ): + deserialize_mock.return_value = [ + {"id": "test_id", "content": "test_content", "vector": [1.0, 2.0, 3.0]}, + {"id": "test_id", "content": "test_content", "vector": [1.0, 2.0, 3.0]}, + ] + await vector_store_record_collection.get("test_id") + + +@mark.asyncio +async def test_serialize_fail(DictVectorStoreRecordCollection, data_model_definition): + DictVectorStoreRecordCollection.serialize = MagicMock(side_effect=Exception) + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + record = {"id": "test_id", "content": "test_content", "vector": [1.0, 2.0, 3.0]} + with raises(MemoryConnectorException, match="Error serializing record"): + await vector_store_record_collection.upsert(record) + with raises(MemoryConnectorException, match="Error serializing record"): + await vector_store_record_collection.upsert_batch([record]) + + +@mark.asyncio +async def test_deserialize_fail(DictVectorStoreRecordCollection, data_model_definition): + DictVectorStoreRecordCollection.deserialize = MagicMock(side_effect=Exception) + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + record = {"id": "test_id", "content": "test_content", "vector": [1.0, 2.0, 3.0]} + vector_store_record_collection.inner_storage["test_id"] = record + with raises(MemoryConnectorException, match="Error deserializing record"): + await vector_store_record_collection.get("test_id") + with raises(MemoryConnectorException, match="Error deserializing record"): + await vector_store_record_collection.get_batch(["test_id"]) + 
+ +def test_serialize_custom_fail(DictVectorStoreRecordCollection, data_model_type_vanilla_serialize): + data_model_type_vanilla_serialize.serialize = MagicMock(side_effect=Exception) + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=data_model_type_vanilla_serialize, + ) + record = data_model_type_vanilla_serialize( + content="test_content", + vector=[1.0, 2.0, 3.0], + id="test_id", + ) + with raises(VectorStoreModelSerializationException, match="Error serializing record:"): + vector_store_record_collection.serialize(record) + + +def test_deserialize_custom_fail(DictVectorStoreRecordCollection, data_model_type_vanilla_serialize): + data_model_type_vanilla_serialize.deserialize = MagicMock(side_effect=Exception) + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=data_model_type_vanilla_serialize, + ) + record = {"id": "test_id", "content": "test_content", "vector": [1.0, 2.0, 3.0]} + with raises(VectorStoreModelSerializationException, match="Error deserializing record:"): + vector_store_record_collection.deserialize(record) + + +def test_serialize_data_model_to_dict_fail_mapping(DictVectorStoreRecordCollection, data_model_definition): + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + record = {"content": "test_content", "vector": [1.0, 2.0, 3.0]} + with raises(VectorStoreModelSerializationException, match="Error serializing record"): + vector_store_record_collection._serialize_data_model_to_dict(record) + + +def test_serialize_data_model_to_dict_fail_object(DictVectorStoreRecordCollection, data_model_type_vanilla): + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=data_model_type_vanilla, + ) + record = Mock(spec=data_model_type_vanilla) + with raises(VectorStoreModelSerializationException, match="Error serializing record"): + vector_store_record_collection._serialize_data_model_to_dict(record) + + +def test_deserialize_dict_data_model_fail_sequence(DictVectorStoreRecordCollection, data_model_type_vanilla): + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=data_model_type_vanilla, + ) + with raises(VectorStoreModelDeserializationException, match="Cannot deserialize multiple records"): + vector_store_record_collection._deserialize_dict_to_data_model([{}, {}]) + + +def test_deserialize_dict_data_model_fail(DictVectorStoreRecordCollection, data_model_definition): + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + with raises(VectorStoreModelDeserializationException, match="Error deserializing record"): + vector_store_record_collection._deserialize_dict_to_data_model( + {"content": "test_content", "vector": [1.0, 2.0, 3.0]} + ) + + +def test_deserialize_dict_data_model_shortcut(DictVectorStoreRecordCollection, data_model_definition): + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + record = vector_store_record_collection._deserialize_dict_to_data_model( + [{"id": "test_id", "content": "test_content", "vector": [1.0, 2.0, 3.0]}] + ) + assert record == {"id": "test_id", "content": "test_content", 
"vector": [1.0, 2.0, 3.0]} + + +@mark.asyncio +@mark.parametrize("vector_store_record_collection", ["type_pydantic"], indirect=True) +async def test_pydantic_fail(vector_store_record_collection): + id = "test_id" + model = deepcopy(vector_store_record_collection.data_model_type) + dict_record = {"id": id, "content": "test_content", "vector": [1.0, 2.0, 3.0]} + record = model(**dict_record) + model.model_dump = MagicMock(side_effect=Exception) + with raises(VectorStoreModelSerializationException, match="Error serializing record:"): + vector_store_record_collection.serialize(record) + with raises(MemoryConnectorException, match="Error serializing record:"): + await vector_store_record_collection.upsert(record) + model.model_validate = MagicMock(side_effect=Exception) + with raises(VectorStoreModelDeserializationException, match="Error deserializing record:"): + vector_store_record_collection.deserialize(dict_record) + + +@mark.parametrize("vector_store_record_collection", ["type_vanilla_with_to_from_dict"], indirect=True) +def test_to_from_dict_fail(vector_store_record_collection): + id = "test_id" + model = deepcopy(vector_store_record_collection.data_model_type) + dict_record = {"id": id, "content": "test_content", "vector": [1.0, 2.0, 3.0]} + record = model(**dict_record) + model.to_dict = MagicMock(side_effect=Exception) + with raises(VectorStoreModelSerializationException, match="Error serializing record:"): + vector_store_record_collection.serialize(record) + model.from_dict = MagicMock(side_effect=Exception) + with raises(VectorStoreModelDeserializationException, match="Error deserializing record:"): + vector_store_record_collection.deserialize(dict_record) + + +@mark.asyncio +async def test_delete_fail(DictVectorStoreRecordCollection, data_model_definition): + DictVectorStoreRecordCollection._inner_delete = MagicMock(side_effect=Exception) + vector_store_record_collection = DictVectorStoreRecordCollection( + collection_name="test", + data_model_type=dict, + data_model_definition=data_model_definition, + ) + record = {"id": "test_id", "content": "test_content", "vector": [1.0, 2.0, 3.0]} + await vector_store_record_collection.upsert(record) + assert len(vector_store_record_collection.inner_storage) == 1 + with raises(MemoryConnectorException, match="Error deleting record:"): + await vector_store_record_collection.delete("test_id") + with raises(MemoryConnectorException, match="Error deleting records:"): + await vector_store_record_collection.delete_batch(["test_id"]) + assert len(vector_store_record_collection.inner_storage) == 1 + + +@mark.asyncio +async def test_collection_operations(vector_store_record_collection): + await vector_store_record_collection.create_collection() + assert await vector_store_record_collection.does_collection_exist() + record = {"id": "id", "content": "test_content", "vector": [1.0, 2.0, 3.0]} + await vector_store_record_collection.upsert(record) + assert len(vector_store_record_collection.inner_storage) == 1 + await vector_store_record_collection.delete_collection() + assert vector_store_record_collection.inner_storage == {} + await vector_store_record_collection.create_collection_if_not_exists() + + +@mark.asyncio +async def test_collection_create_if_not_exists(DictVectorStoreRecordCollection, data_model_definition): + DictVectorStoreRecordCollection.does_collection_exist = AsyncMock(return_value=False) + create_mock = AsyncMock() + DictVectorStoreRecordCollection.create_collection = create_mock + vector_store_record_collection = 
+@mark.asyncio
+async def test_collection_operations(vector_store_record_collection):
+    await vector_store_record_collection.create_collection()
+    assert await vector_store_record_collection.does_collection_exist()
+    record = {"id": "id", "content": "test_content", "vector": [1.0, 2.0, 3.0]}
+    await vector_store_record_collection.upsert(record)
+    assert len(vector_store_record_collection.inner_storage) == 1
+    await vector_store_record_collection.delete_collection()
+    assert vector_store_record_collection.inner_storage == {}
+    await vector_store_record_collection.create_collection_if_not_exists()
+
+
+@mark.asyncio
+async def test_collection_create_if_not_exists(DictVectorStoreRecordCollection, data_model_definition):
+    DictVectorStoreRecordCollection.does_collection_exist = AsyncMock(return_value=False)
+    create_mock = AsyncMock()
+    DictVectorStoreRecordCollection.create_collection = create_mock
+    vector_store_record_collection = DictVectorStoreRecordCollection(
+        collection_name="test",
+        data_model_type=dict,
+        data_model_definition=data_model_definition,
+    )
+    await vector_store_record_collection.create_collection_if_not_exists()
+    create_mock.assert_called_once()
+
+
+def test_data_model_validation(data_model_type_vanilla, DictVectorStoreRecordCollection):
+    DictVectorStoreRecordCollection.supported_key_types = PropertyMock(return_value=["str"])
+    DictVectorStoreRecordCollection.supported_vector_types = PropertyMock(return_value=["float"])
+    DictVectorStoreRecordCollection(
+        collection_name="test",
+        data_model_type=data_model_type_vanilla,
+    )
+
+
+def test_data_model_validation_key_fail(data_model_type_vanilla, DictVectorStoreRecordCollection):
+    DictVectorStoreRecordCollection.supported_key_types = PropertyMock(return_value=["int"])
+    with raises(VectorStoreModelValidationError, match="Key field must be one of"):
+        DictVectorStoreRecordCollection(
+            collection_name="test",
+            data_model_type=data_model_type_vanilla,
+        )
+
+
+def test_data_model_validation_vector_fail(data_model_type_vanilla, DictVectorStoreRecordCollection):
+    DictVectorStoreRecordCollection.supported_vector_types = PropertyMock(return_value=["list[int]"])
+    with raises(VectorStoreModelValidationError, match="Vector field "):
+        DictVectorStoreRecordCollection(
+            collection_name="test",
+            data_model_type=data_model_type_vanilla,
+        )
+
+
+@mark.asyncio
+async def test_upsert_with_vectorizing(vector_store_record_collection):
+    record = {"id": "test_id", "content": "test_content"}
+    record2 = {"id": "test_id", "content": "test_content"}
+
+    async def embedding_func(record, type, definition):
+        if isinstance(record, list):
+            for r in record:
+                r["vector"] = [1.0, 2.0, 3.0]
+            return record
+        record["vector"] = [1.0, 2.0, 3.0]
+        return record
+
+    await vector_store_record_collection.upsert(record, embedding_generation_function=embedding_func)
+    assert vector_store_record_collection.inner_storage["test_id"]["vector"] == [1.0, 2.0, 3.0]
+    await vector_store_record_collection.delete("test_id")
+    assert len(vector_store_record_collection.inner_storage) == 0
+    await vector_store_record_collection.upsert_batch([record2], embedding_generation_function=embedding_func)
+    assert vector_store_record_collection.inner_storage["test_id"]["vector"] == [1.0, 2.0, 3.0]
+
+
+# TODO (eavanvalkenburg): pandas container test
diff --git a/python/tests/unit/data/test_vector_store_record_definition.py b/python/tests/unit/data/test_vector_store_record_definition.py
new file mode 100644
index 000000000000..da70fe7bef99
--- /dev/null
+++ b/python/tests/unit/data/test_vector_store_record_definition.py
@@ -0,0 +1,54 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from pytest import raises
+
+from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
+from semantic_kernel.data.vector_store_record_fields import VectorStoreRecordDataField, VectorStoreRecordKeyField
+from semantic_kernel.exceptions.memory_connector_exceptions import VectorStoreModelException
+
+
+def test_vector_store_record_definition():
+    id_field = VectorStoreRecordKeyField()
+    vsrd = VectorStoreRecordDefinition(fields={"id": id_field})
+    assert vsrd.fields == {"id": VectorStoreRecordKeyField(name="id")}
+    assert vsrd.key_field_name == "id"
+    assert vsrd.key_field == id_field
+    assert vsrd.field_names == ["id"]
+    assert vsrd.vector_field_names == []
+    assert vsrd.container_mode is False
+    assert vsrd.to_dict is None
+    assert vsrd.from_dict is None
+    assert vsrd.serialize is None
+    assert vsrd.deserialize is None
+
+
+def test_no_fields_fail():
+    with raises(VectorStoreModelException):
+        VectorStoreRecordDefinition(fields={})
+
+
+def test_no_name_fields_fail():
+    with raises(VectorStoreModelException):
+        VectorStoreRecordDefinition(fields={None: VectorStoreRecordKeyField()})  # type: ignore
+    with raises(VectorStoreModelException):
+        VectorStoreRecordDefinition(fields={"": VectorStoreRecordKeyField()})
+
+
+def test_no_key_field_fail():
+    with raises(VectorStoreModelException):
+        VectorStoreRecordDefinition(fields={"content": VectorStoreRecordDataField()})
+
+
+def test_multiple_key_field_fail():
+    with raises(VectorStoreModelException):
+        VectorStoreRecordDefinition(fields={"key1": VectorStoreRecordKeyField(), "key2": VectorStoreRecordKeyField()})
+
+
+def test_no_matching_vector_field_fail():
+    with raises(VectorStoreModelException):
+        VectorStoreRecordDefinition(
+            fields={
+                "id": VectorStoreRecordKeyField(),
+                "content": VectorStoreRecordDataField(has_embedding=True, embedding_property_name="vector"),
+            }
+        )
diff --git a/python/tests/unit/data/test_vector_store_record_utils.py b/python/tests/unit/data/test_vector_store_record_utils.py
new file mode 100644
index 000000000000..01a1d832c0b3
--- /dev/null
+++ b/python/tests/unit/data/test_vector_store_record_utils.py
@@ -0,0 +1,44 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from unittest.mock import AsyncMock, MagicMock
+
+from pytest import mark, raises
+
+from semantic_kernel.data.vector_store_model_definition import VectorStoreRecordDefinition
+from semantic_kernel.data.vector_store_record_fields import (
+    VectorStoreRecordDataField,
+    VectorStoreRecordKeyField,
+    VectorStoreRecordVectorField,
+)
+from semantic_kernel.data.vector_store_record_utils import VectorStoreRecordUtils
+from semantic_kernel.exceptions.memory_connector_exceptions import VectorStoreModelException
+from semantic_kernel.kernel import Kernel
+
+
+@mark.asyncio
+async def test_add_vector_to_records(data_model_definition):
+    kernel = MagicMock(spec=Kernel)
+    kernel.add_embedding_to_object = AsyncMock()
+    utils = VectorStoreRecordUtils(kernel)
+    assert utils is not None
+    record = {"id": "test_id", "content": "content"}
+    await utils.add_vector_to_records(record, None, data_model_definition)
+    kernel.add_embedding_to_object.assert_called_once()
+
+
+@mark.asyncio
+async def test_add_vector_wrong_fields():
+    data_model = VectorStoreRecordDefinition(
+        fields={
+            "id": VectorStoreRecordKeyField(),
+            "content": VectorStoreRecordDataField(has_embedding=True, embedding_property_name="id"),
+            "vector": VectorStoreRecordVectorField(),
+        }
+    )
+    kernel = MagicMock(spec=Kernel)
+    kernel.add_embedding_to_object = AsyncMock()
+    utils = VectorStoreRecordUtils(kernel)
+    assert utils is not None
+    record = {"id": "test_id", "content": "content"}
+    with raises(VectorStoreModelException, match="Embedding field"):
+        await utils.add_vector_to_records(record, None, data_model)
diff --git a/python/tests/unit/kernel/test_kernel.py b/python/tests/unit/kernel/test_kernel.py
index 3f504035dd00..305ab8b7288d 100644
--- a/python/tests/unit/kernel/test_kernel.py
+++ b/python/tests/unit/kernel/test_kernel.py
@@ -166,6 +166,16 @@ async def test_invoke_function_fail(kernel: Kernel, create_mock_function):
         pass
 
 
+@pytest.mark.asyncio
+async def test_invoke_function_cancelled(kernel: Kernel, create_mock_function):
+    mock_function = create_mock_function(name="test_function")
+    mock_function._invoke_internal = AsyncMock(side_effect=OperationCancelledException("Operation cancelled"))
+    kernel.add_plugin(KernelPlugin(name="test", functions=[mock_function]))
+
+    result = await kernel.invoke(mock_function, arguments=KernelArguments())
+    assert result is None
+
+
 @pytest.mark.asyncio
 async def test_invoke_stream_function(kernel: Kernel, create_mock_function):
     mock_function = create_mock_function(name="test_function")