diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 374f1b3e5ac8..e2455f84be12 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,8 +1,9 @@ --- name: Bug report about: Create a report to help us improve -title: '' -labels: '' +title: 'Bug: ' +labels: ["bug"] +projects: ["semantic-kernel"] assignees: '' --- diff --git a/.github/ISSUE_TEMPLATE/feature_graduation.md b/.github/ISSUE_TEMPLATE/feature_graduation.md new file mode 100644 index 000000000000..37d207ea1888 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_graduation.md @@ -0,0 +1,23 @@ +--- +name: Feature graduation +about: Plan the graduation of an experimental feature +title: 'Graduate XXX feature' +labels: ["feature_graduation"] +projects: ["semantic-kernel"] +assignees: '' + +--- + +Checklist to be completed when graduating an experimental feature + +- [ ] Notify PMs and EMs that the feature is ready for graduation +- [ ] Contact PM for list of sample use cases +- [ ] Verify there are sample implementations for each of the use cases +- [ ] Verify telemetry and logging are complete +- [ ] Verify API docs are complete and arrange to have them published +- [ ] Make appropriate updates to Learn docs +- [ ] Make appropriate updates to Concept samples +- [ ] Make appropriate updates to blog posts +- [ ] Verify there are no serious open Issues +- [ ] Update table in EXPERIMENTS.md +- [ ] Remove SKEXP flag from the experimental code diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 2d490077748e..3289535f2120 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,8 +1,9 @@ --- name: Feature request about: Suggest an idea for this project -title: '' +title: 'New Feature: ' labels: '' +projects: ["semantic-kernel"] assignees: '' --- diff --git a/.github/_typos.toml b/.github/_typos.toml index a56c70770c47..917745e1ae83 100644 --- a/.github/_typos.toml +++ b/.github/_typos.toml @@ -15,6 +15,7 @@ extend-exclude = [ "CodeTokenizerTests.cs", "test_code_tokenizer.py", "*response.json", + "test_content.txt", ] [default.extend-words] @@ -28,6 +29,7 @@ ans = "ans" # Short for answers arange = "arange" # Method in Python numpy package prompty = "prompty" # prompty is a format name.
ist = "ist" # German language +dall = "dall" # OpenAI model name [default.extend-identifiers] ags = "ags" # Azure Graph Service diff --git a/.github/workflows/python-integration-tests.yml b/.github/workflows/python-integration-tests.yml index b02fc8eae1ed..20516a4164e3 100644 --- a/.github/workflows/python-integration-tests.yml +++ b/.github/workflows/python-integration-tests.yml @@ -92,6 +92,10 @@ jobs: AZURE_AI_SEARCH_API_KEY: ${{secrets.AZURE_AI_SEARCH_API_KEY}} AZURE_AI_SEARCH_ENDPOINT: ${{secrets.AZURE_AI_SEARCH_ENDPOINT}} MONGODB_ATLAS_CONNECTION_STRING: ${{secrets.MONGODB_ATLAS_CONNECTION_STRING}} + AZURE_KEY_VAULT_ENDPOINT: ${{secrets.AZURE_KEY_VAULT_ENDPOINT}} + AZURE_KEY_VAULT_CLIENT_ID: ${{secrets.AZURE_KEY_VAULT_CLIENT_ID}} + AZURE_KEY_VAULT_CLIENT_SECRET: ${{secrets.AZURE_KEY_VAULT_CLIENT_SECRET}} + ACA_POOL_MANAGEMENT_ENDPOINT: ${{secrets.ACA_POOL_MANAGEMENT_ENDPOINT}} run: | if ${{ matrix.os == 'ubuntu-latest' }}; then docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest @@ -99,6 +103,7 @@ jobs: cd python poetry run pytest ./tests/integration -v + poetry run pytest ./tests/samples -v python-integration-tests: needs: paths-filter @@ -154,6 +159,10 @@ jobs: AZURE_AI_SEARCH_API_KEY: ${{secrets.AZURE_AI_SEARCH_API_KEY}} AZURE_AI_SEARCH_ENDPOINT: ${{secrets.AZURE_AI_SEARCH_ENDPOINT}} MONGODB_ATLAS_CONNECTION_STRING: ${{secrets.MONGODB_ATLAS_CONNECTION_STRING}} + AZURE_KEY_VAULT_ENDPOINT: ${{secrets.AZURE_KEY_VAULT_ENDPOINT}} + AZURE_KEY_VAULT_CLIENT_ID: ${{secrets.AZURE_KEY_VAULT_CLIENT_ID}} + AZURE_KEY_VAULT_CLIENT_SECRET: ${{secrets.AZURE_KEY_VAULT_CLIENT_SECRET}} + ACA_POOL_MANAGEMENT_ENDPOINT: ${{secrets.ACA_POOL_MANAGEMENT_ENDPOINT}} run: | if ${{ matrix.os == 'ubuntu-latest' }}; then docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest @@ -161,6 +170,7 @@ jobs: cd python poetry run pytest ./tests/integration -v + poetry run pytest ./tests/samples -v # This final job is required to satisfy the merge queue. It must only run (or succeed) if no tests failed python-integration-tests-check: diff --git a/.github/workflows/python-lint.yml b/.github/workflows/python-lint.yml index 15f339747c96..3f20ae2f0d02 100644 --- a/.github/workflows/python-lint.yml +++ b/.github/workflows/python-lint.yml @@ -1,7 +1,7 @@ name: Python Code Quality Checks on: workflow_dispatch: - pull_request_target: + pull_request: branches: [ "main", "feature*" ] paths: - 'python/**' diff --git a/README.md b/README.md index c400ede21d35..e8518c0ef1cf 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,15 @@ # Semantic Kernel +## Status + + - Python
[![Python package](https://img.shields.io/pypi/v/semantic-kernel)](https://pypi.org/project/semantic-kernel/) -[![Nuget package](https://img.shields.io/nuget/vpre/Microsoft.SemanticKernel)](https://www.nuget.org/packages/Microsoft.SemanticKernel/) -[![dotnet Docker](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-docker.yml/badge.svg?branch=main)](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-docker.yml) -[![dotnet Windows](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-windows.yml/badge.svg?branch=main)](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-windows.yml) + - .NET
+[![Nuget package](https://img.shields.io/nuget/vpre/Microsoft.SemanticKernel)](https://www.nuget.org/packages/Microsoft.SemanticKernel/)[![dotnet Docker](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-docker.yml/badge.svg?branch=main)](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-docker.yml)[![dotnet Windows](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-windows.yml/badge.svg?branch=main)](https://github.com/microsoft/semantic-kernel/actions/workflows/dotnet-ci-windows.yml) + - Java
+[![Java CICD Builds](https://github.com/microsoft/semantic-kernel/actions/workflows/java-build.yml/badge.svg?branch=java-development)](https://github.com/microsoft/semantic-kernel/actions/workflows/java-build.yml)[![Maven Central](https://maven-badges.herokuapp.com/maven-central/com.microsoft.semantic-kernel/semantickernel-api/badge.svg)](https://maven-badges.herokuapp.com/maven-central/com.microsoft.semantic-kernel/semantickernel-api) + +## Overview [![License: MIT](https://img.shields.io/github/license/microsoft/semantic-kernel)](https://github.com/microsoft/semantic-kernel/blob/main/LICENSE) [![Discord](https://img.shields.io/discord/1063152441819942922?label=Discord&logo=discord&logoColor=white&color=d82679)](https://aka.ms/SKDiscord) @@ -107,6 +113,7 @@ Finally, refer to our API references for more details on the C# and Python APIs: - [C# API reference](https://learn.microsoft.com/en-us/dotnet/api/microsoft.semantickernel?view=semantic-kernel-dotnet) - Python API reference (coming soon) +- Java API reference (coming soon) ## Join the community diff --git a/docs/EUCLIDEAN_DISTANCE.md b/docs/EUCLIDEAN_DISTANCE.md index 58e93555b97b..e67cea62664d 100644 --- a/docs/EUCLIDEAN_DISTANCE.md +++ b/docs/EUCLIDEAN_DISTANCE.md @@ -1,15 +1,15 @@ -# Euclidean distance +# Euclidean Distance Euclidean distance is a mathematical concept that measures the straight-line distance between two points in a Euclidean space. It is named after the ancient Greek mathematician Euclid, who is often referred to as the "father of geometry". The formula for calculating -Euclidean distance is based on the Pythagorean theorem and can be expressed as: +Euclidean distance is based on the Pythagorean Theorem and can be expressed as: - d = √(x2 - x1)² + (y2 - y1)² +$$d = \sqrt{(x_2 - x_1)^2 + (y_2 - y_1)^2}$$ -In higher dimensions, this formula can be generalized to: +For higher dimensions, this formula can be generalized to: - d = √(x2 - x1)² + (y2 - y1)² + ... + (zn - zn-1)² +$$d(p, q) = \sqrt{\sum\limits_{i=1}^{n} (q_i - p_i)^2}$$ Euclidean distance has many applications in computer science and artificial intelligence, particularly when working with [embeddings](EMBEDDINGS.md). Embeddings are numerical diff --git a/docs/decisions/0045-breaking-changes-guidance.md b/docs/decisions/0045-breaking-changes-guidance.md new file mode 100644 index 000000000000..59e1f5a50f3d --- /dev/null +++ b/docs/decisions/0045-breaking-changes-guidance.md @@ -0,0 +1,40 @@ +--- +status: accepted +contact: markwallace +date: 2024-06-10 +deciders: sergeymenshykh, mbolan, rbarreto, dmytrostruk, westey +consulted: +informed: +--- + +# Guidance for Breaking Changes + +## Context and Problem Statement + +We must avoid breaking changes in .Net because of the well-known [diamond dependency issue](https://learn.microsoft.com/en-us/dotnet/standard/library-guidance/dependencies#diamond-dependencies) where breaking changes between different versions of the same package cause bugs and exceptions at run time. + +## Decision Drivers + +Breaking changes are only allowed under the following circumstances: + +- Updates to an experimental feature, i.e. we have learnt something new and need to modify the design of an experimental feature. +- When one of our dependencies introduces an unavoidable breaking change. + +All breaking changes must be clearly documented, at a minimum in the release notes and possibly also via a migration guide blog post.
+ +- Include a detailed description of the breaking change in the PR description so that it is included in the release notes. +- Update the Learn site migration guide documentation and have it published to coincide with the release that includes the breaking change. + +In all other cases we must avoid breaking changes. There will, however, be situations where we need to make changes to accommodate one of our dependencies or to introduce a new capability, e.g.: + +- When we find a security issue or a severe bug (e.g. data loss). +- When one of our dependencies introduces a major breaking change, e.g. the introduction of the new OpenAI SDK. +- When we find a severe limitation in our current implementation, e.g. when the AI services introduce a new capability. + +In these cases we will plan to obsolete the affected API(s) and provide a documented migration path to the new preferred pattern. +An example of this will be the switch to the new OpenAI .Net SDK. +During this transition there will be a period where both the new and old APIs are supported to allow customers to migrate. + +## Decision Outcome + +Chosen option: We must avoid breaking changes in .Net because of the well-known diamond dependency issue. diff --git a/docs/decisions/0046-azure-model-as-a-service.md b/docs/decisions/0046-azure-model-as-a-service.md new file mode 100644 index 000000000000..a91468e253b0 --- /dev/null +++ b/docs/decisions/0046-azure-model-as-a-service.md @@ -0,0 +1,43 @@ +--- +status: accepted +contact: rogerbarreto, taochen +date: 2024-06-20 +deciders: alliscode, moonbox3, eavanvalkenburg +consulted: +informed: +--- + +# Support for Azure Model-as-a-Service in SK + +## Context and Problem Statement + +There has been demand from customers for the implementation of Model-as-a-Service (MaaS) in SK. MaaS, which is also referred to as [serverless API](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/model-catalog-overview#model-deployment-managed-compute-and-serverless-api-pay-as-you-go), is available in [Azure AI Studio](https://learn.microsoft.com/en-us/azure/ai-studio/what-is-ai-studio). This mode of consumption operates on a pay-as-you-go basis, typically using tokens for billing purposes. Clients can access the service via the [Azure AI Model Inference API](https://learn.microsoft.com/en-us/azure/ai-studio/reference/reference-model-inference-api?tabs=azure-studio) or client SDKs. + +At present, there is no official support for MaaS in SK. The purpose of this ADR is to examine the constraints of the service and explore potential solutions to enable support for the service in SK via the development of a new AI connector. + +## Client SDK + +The Azure team will be providing a new client library, namely `Azure.AI.Inference` in .Net and `azure-ai-inference` in Python, for effectively interacting with the service. While the service API is OpenAI-compatible, the OpenAI and Azure OpenAI client libraries cannot be used to interact with it, as they are not independent of the models and their providers: Azure AI Studio features a diverse range of open-source models beyond OpenAI models. + +### Limitations + +The initial release of the client SDK will only support chat completion and text/image embedding generation, with image generation to be added later.
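For illustration, a chat completion request (the primary scenario supported by the initial SDK release) might look like the sketch below. This is a rough sketch only: the exact type and member names are assumptions based on the preview `Azure.AI.Inference` package described above, and the endpoint and key are placeholders.

```csharp
using Azure;
using Azure.AI.Inference;

// A minimal sketch, assuming the preview Azure.AI.Inference surface described above;
// exact type and member names may differ in the shipped SDK.
var client = new ChatCompletionsClient(
    new Uri("https://your-serverless-endpoint.inference.ai.azure.com"), // placeholder endpoint
    new AzureKeyCredential("your-api-key"));                            // placeholder key

var options = new ChatCompletionsOptions
{
    Messages =
    {
        new ChatRequestSystemMessage("You are a helpful assistant."),
        new ChatRequestUserMessage("What is Model-as-a-Service?"),
    },
};

// Send the request and print the first (and typically only) choice.
Response<ChatCompletions> response = await client.CompleteAsync(options);
Console.WriteLine(response.Value.Choices[0].Message.Content);
```

Because the service API is OpenAI-compatible, the request and response shapes closely mirror the OpenAI chat completion API, which is what allows SK to wrap the service in a connector alongside the existing OpenAI connectors.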
+ +Plans to support text completion are currently unclear, and it is highly unlikely that the SDK will ever include support for text completion. As a result, the new AI connector will **NOT** support text completions in the initial version until we get more customer signals or the client SDK adds support. + +## AI Connector + +### Naming options + +- Azure +- AzureAI +- AzureAIInference +- AzureAIModelInference + + Decision: `AzureAIInference` + +### Support for model-specific parameters + +Models can possess supplementary parameters that are not part of the default API. The service API and the client SDK enable the provision of model-specific parameters. Users can provide model-specific settings via a dedicated argument along with other settings, such as `temperature` and `top_p`, among others. + +In the context of SK, execution parameters are categorized under `PromptExecutionSettings`, which is inherited by all connector-specific setting classes. The settings of the new connector will contain a member of type `dictionary`, which will group together the model-specific parameters. diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props index d47e0ca791f4..d514e22cb5f4 100644 --- a/dotnet/Directory.Packages.props +++ b/dotnet/Directory.Packages.props @@ -8,7 +8,7 @@ - + @@ -27,8 +27,8 @@ - - + + @@ -71,8 +71,8 @@ - - + + @@ -80,7 +80,7 @@ - + @@ -110,7 +110,7 @@ all runtime; build; native; contentfiles; analyzers; buildtransitive - + all runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/dotnet/nuget/nuget-package.props b/dotnet/nuget/nuget-package.props index 5a07d43e119f..6a48e76f58fc 100644 --- a/dotnet/nuget/nuget-package.props +++ b/dotnet/nuget/nuget-package.props @@ -1,7 +1,7 @@ - 1.14.1 + 1.15.0 $(VersionPrefix)-$(VersionSuffix) $(VersionPrefix) diff --git a/dotnet/samples/Concepts/Agents/ComplexChat_NestedShopper.cs b/dotnet/samples/Concepts/Agents/ComplexChat_NestedShopper.cs index 58813da9032a..0802980422cd 100644 --- a/dotnet/samples/Concepts/Agents/ComplexChat_NestedShopper.cs +++ b/dotnet/samples/Concepts/Agents/ComplexChat_NestedShopper.cs @@ -94,7 +94,7 @@ Select which participant will take the next turn based on the conversation histo """; [Fact] - public async Task RunAsync() + public async Task NestedChatWithAggregatorAgentAsync() { Console.WriteLine($"! {Model}"); diff --git a/dotnet/samples/Concepts/Agents/MixedChat_Agents.cs b/dotnet/samples/Concepts/Agents/MixedChat_Agents.cs index 86e6a46cb8ec..68052ef99cf2 100644 --- a/dotnet/samples/Concepts/Agents/MixedChat_Agents.cs +++ b/dotnet/samples/Concepts/Agents/MixedChat_Agents.cs @@ -33,7 +33,7 @@ Consider suggestions when refining an idea. """; [Fact] - public async Task RunAsync() + public async Task ChatWithOpenAIAssistantAgentAndChatCompletionAgentAsync() { // Define the agents: one of each type ChatCompletionAgent agentReviewer = @@ -55,7 +55,7 @@ await OpenAIAssistantAgent.CreateAsync( ModelId = this.Model, }); - // Create a nexus for agent interaction. + // Create a chat for agent interaction.
var chat = new AgentGroupChat(agentWriter, agentReviewer) { diff --git a/dotnet/samples/Concepts/Agents/OpenAIAssistant_ChartMaker.cs b/dotnet/samples/Concepts/Agents/OpenAIAssistant_ChartMaker.cs index 3d6f714b7b26..5617784b780c 100644 --- a/dotnet/samples/Concepts/Agents/OpenAIAssistant_ChartMaker.cs +++ b/dotnet/samples/Concepts/Agents/OpenAIAssistant_ChartMaker.cs @@ -21,7 +21,7 @@ public class OpenAIAssistant_ChartMaker(ITestOutputHelper output) : BaseTest(out private const string AgentInstructions = "Create charts as requested without explanation."; [Fact] - public async Task RunAsync() + public async Task GenerateChartWithOpenAIAssistantAgentAsync() { // Define the agent OpenAIAssistantAgent agent = @@ -77,7 +77,7 @@ async Task InvokeAgentAsync(string input) foreach (var fileReference in message.Items.OfType()) { - Console.WriteLine($"# {message.Role} - {message.AuthorName ?? "*"}: #{fileReference.FileId}"); + Console.WriteLine($"# {message.Role} - {message.AuthorName ?? "*"}: @{fileReference.FileId}"); } } } diff --git a/dotnet/samples/Concepts/Agents/OpenAIAssistant_CodeInterpreter.cs b/dotnet/samples/Concepts/Agents/OpenAIAssistant_CodeInterpreter.cs index 46b4599c9a10..636f70636126 100644 --- a/dotnet/samples/Concepts/Agents/OpenAIAssistant_CodeInterpreter.cs +++ b/dotnet/samples/Concepts/Agents/OpenAIAssistant_CodeInterpreter.cs @@ -11,8 +11,10 @@ namespace Agents; /// public class OpenAIAssistant_CodeInterpreter(ITestOutputHelper output) : BaseTest(output) { + protected override bool ForceOpenAI => true; + [Fact] - public async Task RunAsync() + public async Task UseCodeInterpreterToolWithOpenAIAssistantAgentAsync() { // Define the agent OpenAIAssistantAgent agent = @@ -31,8 +33,7 @@ await OpenAIAssistantAgent.CreateAsync( // Respond to user input try { - await InvokeAgentAsync("What is the solution to `3x + 2 = 14`?"); - await InvokeAgentAsync("What is the fibinacci sequence until 101?"); + await InvokeAgentAsync("Use code to determine the values in the Fibonacci sequence that are less than the value of 101."); } finally { diff --git a/dotnet/samples/Concepts/Agents/OpenAIAssistant_FileManipulation.cs b/dotnet/samples/Concepts/Agents/OpenAIAssistant_FileManipulation.cs new file mode 100644 index 000000000000..dbe9d17ba90a --- /dev/null +++ b/dotnet/samples/Concepts/Agents/OpenAIAssistant_FileManipulation.cs @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft. All rights reserved. +using System.Text; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Agents; +using Microsoft.SemanticKernel.Agents.OpenAI; +using Microsoft.SemanticKernel.ChatCompletion; +using Microsoft.SemanticKernel.Connectors.OpenAI; +using Resources; + +namespace Agents; + +/// +/// Demonstrate using code-interpreter to manipulate and generate csv files with . +/// +public class OpenAIAssistant_FileManipulation(ITestOutputHelper output) : BaseTest(output) +{ + /// + /// Target OpenAI services.
+ /// + protected override bool ForceOpenAI => true; + + [Fact] + public async Task AnalyzeCSVFileUsingOpenAIAssistantAgentAsync() + { + OpenAIFileService fileService = new(TestConfiguration.OpenAI.ApiKey); + + OpenAIFileReference uploadFile = + await fileService.UploadContentAsync( + new BinaryContent(await EmbeddedResource.ReadAllAsync("sales.csv"), mimeType: "text/plain"), + new OpenAIFileUploadExecutionSettings("sales.csv", OpenAIFilePurpose.Assistants)); + + // Define the agent + OpenAIAssistantAgent agent = + await OpenAIAssistantAgent.CreateAsync( + kernel: new(), + config: new(this.ApiKey, this.Endpoint), + new() + { + EnableCodeInterpreter = true, // Enable code-interpreter + ModelId = this.Model, + FileIds = [uploadFile.Id] // Associate uploaded file + }); + + // Create a chat for agent interaction. + var chat = new AgentGroupChat(); + + // Respond to user input + try + { + await InvokeAgentAsync("Which segment had the most sales?"); + await InvokeAgentAsync("List the top 5 countries that generated the most profit."); + await InvokeAgentAsync("Create a tab delimited file report of profit by each country per month."); + } + finally + { + await agent.DeleteAsync(); + await fileService.DeleteFileAsync(uploadFile.Id); + } + + // Local function to invoke agent and display the conversation messages. + async Task InvokeAgentAsync(string input) + { + chat.AddChatMessage(new ChatMessageContent(AuthorRole.User, input)); + + Console.WriteLine($"# {AuthorRole.User}: '{input}'"); + + await foreach (var content in chat.InvokeAsync(agent)) + { + Console.WriteLine($"# {content.Role} - {content.AuthorName ?? "*"}: '{content.Content}'"); + + foreach (var annotation in content.Items.OfType()) + { + Console.WriteLine($"\n* '{annotation.Quote}' => {annotation.FileId}"); + BinaryContent fileContent = await fileService.GetFileContentAsync(annotation.FileId!); + byte[] byteContent = fileContent.Data?.ToArray() ?? []; + Console.WriteLine(Encoding.Default.GetString(byteContent)); + } + } + } + } +} diff --git a/dotnet/samples/Concepts/Agents/OpenAIAssistant_FileService.cs b/dotnet/samples/Concepts/Agents/OpenAIAssistant_FileService.cs new file mode 100644 index 000000000000..7537f53da726 --- /dev/null +++ b/dotnet/samples/Concepts/Agents/OpenAIAssistant_FileService.cs @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft. All rights reserved. +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.OpenAI; +using Resources; + +namespace Agents; + +/// +/// Demonstrate using . +/// +public class OpenAIAssistant_FileService(ITestOutputHelper output) : BaseTest(output) +{ + /// + /// Retrieval tool not supported on Azure OpenAI.
+ /// + protected override bool ForceOpenAI => true; + + [Fact] + public async Task UploadAndRetrieveFilesAsync() + { + OpenAIFileService fileService = new(TestConfiguration.OpenAI.ApiKey); + + BinaryContent[] files = [ + new AudioContent(await EmbeddedResource.ReadAllAsync("test_audio.wav")!, mimeType: "audio/wav") { InnerContent = "test_audio.wav" }, + new ImageContent(await EmbeddedResource.ReadAllAsync("sample_image.jpg")!, mimeType: "image/jpeg") { InnerContent = "sample_image.jpg" }, + new ImageContent(await EmbeddedResource.ReadAllAsync("test_image.jpg")!, mimeType: "image/jpeg") { InnerContent = "test_image.jpg" }, + new BinaryContent(data: await EmbeddedResource.ReadAllAsync("travelinfo.txt"), mimeType: "text/plain") { InnerContent = "travelinfo.txt" } + ]; + + var fileContents = new Dictionary(); + foreach (BinaryContent file in files) + { + OpenAIFileReference result = await fileService.UploadContentAsync(file, new(file.InnerContent!.ToString()!, OpenAIFilePurpose.FineTune)); + fileContents.Add(result.Id, file); + } + + foreach (OpenAIFileReference fileReference in await fileService.GetFilesAsync(OpenAIFilePurpose.FineTune)) + { + // Only interested in the files we uploaded + if (!fileContents.ContainsKey(fileReference.Id)) + { + continue; + } + + BinaryContent content = await fileService.GetFileContentAsync(fileReference.Id); + + string? mimeType = fileContents[fileReference.Id].MimeType; + string? fileName = fileContents[fileReference.Id].InnerContent!.ToString(); + ReadOnlyMemory data = content.Data ?? new(); + + var typedContent = mimeType switch + { + "image/jpeg" => new ImageContent(data, mimeType) { Uri = content.Uri, InnerContent = fileName, Metadata = content.Metadata }, + "audio/wav" => new AudioContent(data, mimeType) { Uri = content.Uri, InnerContent = fileName, Metadata = content.Metadata }, + _ => new BinaryContent(data, mimeType) { Uri = content.Uri, InnerContent = fileName, Metadata = content.Metadata } + }; + + Console.WriteLine($"\nFile: {fileName} - {mimeType}"); + Console.WriteLine($"Type: {typedContent}"); + Console.WriteLine($"Uri: {typedContent.Uri}"); + + // Delete the test file remotely + await fileService.DeleteFileAsync(fileReference.Id); + } + } +} diff --git a/dotnet/samples/Concepts/Agents/OpenAIAssistant_MultipleContents.cs b/dotnet/samples/Concepts/Agents/OpenAIAssistant_MultipleContents.cs deleted file mode 100644 index 49f36edce0f4..000000000000 --- a/dotnet/samples/Concepts/Agents/OpenAIAssistant_MultipleContents.cs +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. -using Azure.AI.OpenAI.Assistants; -using Microsoft.SemanticKernel; -using Microsoft.SemanticKernel.Agents; -using Microsoft.SemanticKernel.Agents.OpenAI; -using Microsoft.SemanticKernel.ChatCompletion; -using Microsoft.SemanticKernel.Connectors.OpenAI; -using Resources; - -namespace Agents; - -/// -/// Demonstrate using retrieval on . -/// -public class OpenAIAssistant_MultipleContents(ITestOutputHelper output) : BaseTest(output) -{ - /// - /// Retrieval tool not supported on Azure OpenAI. 
- /// - protected override bool ForceOpenAI => true; - - [Fact] - public async Task RunAsync() - { - OpenAIFileService fileService = new(TestConfiguration.OpenAI.ApiKey); - - BinaryContent[] files = [ - // Audio is not supported by Assistant API - // new AudioContent(await EmbeddedResource.ReadAllAsync("test_audio.wav")!, mimeType:"audio/wav", innerContent: "test_audio.wav"), - new ImageContent(await EmbeddedResource.ReadAllAsync("sample_image.jpg")!, mimeType: "image/jpeg") { InnerContent = "sample_image.jpg" }, - new ImageContent(await EmbeddedResource.ReadAllAsync("test_image.jpg")!, mimeType: "image/jpeg") { InnerContent = "test_image.jpg" }, - new BinaryContent(data: await EmbeddedResource.ReadAllAsync("travelinfo.txt"), mimeType: "text/plain") - { - InnerContent = "travelinfo.txt" - } - ]; - - var fileIds = new List(); - foreach (var file in files) - { - try - { - var uploadFile = await fileService.UploadContentAsync(file, - new OpenAIFileUploadExecutionSettings(file.InnerContent!.ToString()!, Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFilePurpose.Assistants)); - - fileIds.Add(uploadFile.Id); - } - catch (HttpOperationException hex) - { - Console.WriteLine(hex.ResponseContent); - Assert.Fail($"Failed to upload file: {hex.Message}"); - } - } - - // Define the agent - OpenAIAssistantAgent agent = - await OpenAIAssistantAgent.CreateAsync( - kernel: new(), - config: new(this.ApiKey, this.Endpoint), - new() - { - EnableRetrieval = true, // Enable retrieval - ModelId = this.Model, - // FileIds = fileIds Currently Assistant API only supports text files, no images or audio. - FileIds = [fileIds.Last()] - }); - - // Create a chat for agent interaction. - var chat = new AgentGroupChat(); - - // Respond to user input - try - { - await InvokeAgentAsync("Where did sam go?"); - await InvokeAgentAsync("When does the flight leave Seattle?"); - await InvokeAgentAsync("What is the hotel contact info at the destination?"); - } - finally - { - await agent.DeleteAsync(); - } - - // Local function to invoke agent and display the conversation messages. - async Task InvokeAgentAsync(string input) - { - chat.AddChatMessage(new ChatMessageContent(AuthorRole.User, input)); - - Console.WriteLine($"# {AuthorRole.User}: '{input}'"); - - await foreach (var content in chat.InvokeAsync(agent)) - { - Console.WriteLine($"# {content.Role} - {content.AuthorName ?? 
"*"}: '{content.Content}'"); - } - } - } - - [Fact] - public async Task SendingAndRetrievingFilesAsync() - { - var openAIClient = new AssistantsClient(TestConfiguration.OpenAI.ApiKey); - OpenAIFileService fileService = new(TestConfiguration.OpenAI.ApiKey); - - BinaryContent[] files = [ - new AudioContent(await EmbeddedResource.ReadAllAsync("test_audio.wav")!, mimeType: "audio/wav") { InnerContent = "test_audio.wav" }, - new ImageContent(await EmbeddedResource.ReadAllAsync("sample_image.jpg")!, mimeType: "image/jpeg") { InnerContent = "sample_image.jpg" }, - new ImageContent(await EmbeddedResource.ReadAllAsync("test_image.jpg")!, mimeType: "image/jpeg") { InnerContent = "test_image.jpg" }, - new BinaryContent(data: await EmbeddedResource.ReadAllAsync("travelinfo.txt"), mimeType: "text/plain") { InnerContent = "travelinfo.txt" } - ]; - - var fileIds = new Dictionary(); - foreach (var file in files) - { - var result = await openAIClient.UploadFileAsync(new BinaryData(file.Data), Azure.AI.OpenAI.Assistants.OpenAIFilePurpose.FineTune); - fileIds.Add(result.Value.Id, file); - } - - foreach (var file in (await openAIClient.GetFilesAsync(Azure.AI.OpenAI.Assistants.OpenAIFilePurpose.FineTune)).Value) - { - if (!fileIds.ContainsKey(file.Id)) - { - continue; - } - - var data = (await openAIClient.GetFileContentAsync(file.Id)).Value; - - var mimeType = fileIds[file.Id].MimeType; - var fileName = fileIds[file.Id].InnerContent!.ToString(); - var metadata = new Dictionary { ["id"] = file.Id }; - var uri = new Uri($"https://api.openai.com/v1/files/{file.Id}/content"); - var content = mimeType switch - { - "image/jpeg" => new ImageContent(data, mimeType) { Uri = uri, InnerContent = fileName, Metadata = metadata }, - "audio/wav" => new AudioContent(data, mimeType) { Uri = uri, InnerContent = fileName, Metadata = metadata }, - _ => new BinaryContent(data, mimeType) { Uri = uri, InnerContent = fileName, Metadata = metadata } - }; - - Console.WriteLine($"File: {fileName} - {mimeType}"); - - // Images tostring are different from the graduated contents for retrocompatibility - Console.WriteLine(content.ToString()); - - // Delete the test file remotely - await openAIClient.DeleteFileAsync(file.Id); - } - } -} diff --git a/dotnet/samples/Concepts/Agents/OpenAIAssistant_Retrieval.cs b/dotnet/samples/Concepts/Agents/OpenAIAssistant_Retrieval.cs index 2df655d07630..9c7c9bb46f43 100644 --- a/dotnet/samples/Concepts/Agents/OpenAIAssistant_Retrieval.cs +++ b/dotnet/samples/Concepts/Agents/OpenAIAssistant_Retrieval.cs @@ -19,7 +19,7 @@ public class OpenAIAssistant_Retrieval(ITestOutputHelper output) : BaseTest(outp protected override bool ForceOpenAI => true; [Fact] - public async Task RunAsync() + public async Task UseRetrievalToolWithOpenAIAssistantAgentAsync() { OpenAIFileService fileService = new(TestConfiguration.OpenAI.ApiKey); diff --git a/dotnet/samples/Concepts/ChatCompletion/Connectors_WithMultipleLLMs.cs b/dotnet/samples/Concepts/ChatCompletion/Connectors_WithMultipleLLMs.cs index 592146da6799..81fbc2492d4a 100644 --- a/dotnet/samples/Concepts/ChatCompletion/Connectors_WithMultipleLLMs.cs +++ b/dotnet/samples/Concepts/ChatCompletion/Connectors_WithMultipleLLMs.cs @@ -1,82 +1,185 @@ // Copyright (c) Microsoft. All rights reserved. using Microsoft.SemanticKernel; -using xRetry; namespace ChatCompletion; public class Connectors_WithMultipleLLMs(ITestOutputHelper output) : BaseTest(output) { - /// - /// Show how to run a prompt function and specify a specific service to use. 
- /// - [RetryFact(typeof(HttpOperationException))] - public async Task RunAsync() + private const string ChatPrompt = "Hello AI, what can you do for me?"; + + private static Kernel BuildKernel() { - Kernel kernel = Kernel.CreateBuilder() - .AddAzureOpenAIChatCompletion( - deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, - endpoint: TestConfiguration.AzureOpenAI.Endpoint, - apiKey: TestConfiguration.AzureOpenAI.ApiKey, - serviceId: "AzureOpenAIChat", - modelId: TestConfiguration.AzureOpenAI.ChatModelId) - .AddOpenAIChatCompletion( - modelId: TestConfiguration.OpenAI.ChatModelId, - apiKey: TestConfiguration.OpenAI.ApiKey, - serviceId: "OpenAIChat") - .Build(); - - await RunByServiceIdAsync(kernel, "AzureOpenAIChat"); - await RunByModelIdAsync(kernel, TestConfiguration.OpenAI.ChatModelId); - await RunByFirstModelIdAsync(kernel, "gpt-4-1106-preview", TestConfiguration.AzureOpenAI.ChatModelId, TestConfiguration.OpenAI.ChatModelId); + return Kernel.CreateBuilder() + .AddAzureOpenAIChatCompletion( + deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, + endpoint: TestConfiguration.AzureOpenAI.Endpoint, + apiKey: TestConfiguration.AzureOpenAI.ApiKey, + serviceId: "AzureOpenAIChat", + modelId: TestConfiguration.AzureOpenAI.ChatModelId) + .AddOpenAIChatCompletion( + modelId: TestConfiguration.OpenAI.ChatModelId, + apiKey: TestConfiguration.OpenAI.ApiKey, + serviceId: "OpenAIChat") + .Build(); } - private async Task RunByServiceIdAsync(Kernel kernel, string serviceId) + /// + /// Shows how to invoke a prompt and specify the service id of the preferred AI service. When the prompt is executed, the AI service with the matching service id will be selected. + /// + /// Service Id + [Theory] + [InlineData("AzureOpenAIChat")] + public async Task InvokePromptByServiceIdAsync(string serviceId) { + var kernel = BuildKernel(); Console.WriteLine($"======== Service Id: {serviceId} ========"); - var prompt = "Hello AI, what can you do for me?"; + var result = await kernel.InvokePromptAsync(ChatPrompt, new(new PromptExecutionSettings { ServiceId = serviceId })); - KernelArguments arguments = []; - arguments.ExecutionSettings = new Dictionary() - { - { serviceId, new PromptExecutionSettings() } - }; - var result = await kernel.InvokePromptAsync(prompt, arguments); Console.WriteLine(result.GetValue()); } - private async Task RunByModelIdAsync(Kernel kernel, string modelId) + /// + /// Shows how to invoke a prompt and specify the model id of the preferred AI service. When the prompt is executed, the AI service with the matching model id will be selected. + /// + [Fact] + public async Task InvokePromptByModelIdAsync() { + var modelId = TestConfiguration.OpenAI.ChatModelId; + var kernel = BuildKernel(); Console.WriteLine($"======== Model Id: {modelId} ========"); - var prompt = "Hello AI, what can you do for me?"; + var result = await kernel.InvokePromptAsync(ChatPrompt, new(new PromptExecutionSettings() { ModelId = modelId })); - var result = await kernel.InvokePromptAsync( - prompt, - new(new PromptExecutionSettings() - { - ModelId = modelId - })); Console.WriteLine(result.GetValue()); } - private async Task RunByFirstModelIdAsync(Kernel kernel, params string[] modelIds) + /// + /// Shows how to invoke a prompt and specify the service ids of the preferred AI services. + /// When the prompt is executed, the AI service will be selected based on the order of the provided service ids.
+ /// + [Fact] + public async Task InvokePromptFunctionWithFirstMatchingServiceIdAsync() + { + string[] serviceIds = ["NotFound", "AzureOpenAIChat", "OpenAIChat"]; + var kernel = BuildKernel(); + Console.WriteLine($"======== Service Ids: {string.Join(", ", serviceIds)} ========"); + + var result = await kernel.InvokePromptAsync(ChatPrompt, new(serviceIds.Select(serviceId => new PromptExecutionSettings { ServiceId = serviceId }))); + + Console.WriteLine(result.GetValue()); + } + + /// + /// Shows how to invoke a prompt and specify the model ids of the preferred AI services. + /// When the prompt is executed the AI Service will be selected based on the order of the provided model ids. + /// + [Fact] + public async Task InvokePromptFunctionWithFirstMatchingModelIdAsync() { + string[] modelIds = ["gpt-4-1106-preview", TestConfiguration.AzureOpenAI.ChatModelId, TestConfiguration.OpenAI.ChatModelId]; + var kernel = BuildKernel(); Console.WriteLine($"======== Model Ids: {string.Join(", ", modelIds)} ========"); - var prompt = "Hello AI, what can you do for me?"; + var result = await kernel.InvokePromptAsync(ChatPrompt, new(modelIds.Select((modelId, index) => new PromptExecutionSettings { ServiceId = $"service-{index}", ModelId = modelId }))); - var modelSettings = new Dictionary(); - foreach (var modelId in modelIds) - { - modelSettings.Add(modelId, new PromptExecutionSettings() { ModelId = modelId }); - } - var promptConfig = new PromptTemplateConfig(prompt) { Name = "HelloAI", ExecutionSettings = modelSettings }; + Console.WriteLine(result.GetValue()); + } + + /// + /// Shows how to create a KernelFunction from a prompt and specify the service ids of the preferred AI services. + /// When the function is invoked the AI Service will be selected based on the order of the provided service ids. + /// + [Fact] + public async Task InvokePreconfiguredFunctionWithFirstMatchingServiceIdAsync() + { + string[] serviceIds = ["NotFound", "AzureOpenAIChat", "OpenAIChat"]; + var kernel = BuildKernel(); + Console.WriteLine($"======== Service Ids: {string.Join(", ", serviceIds)} ========"); + + var function = kernel.CreateFunctionFromPrompt(ChatPrompt, serviceIds.Select(serviceId => new PromptExecutionSettings { ServiceId = serviceId })); + var result = await kernel.InvokeAsync(function); - var function = kernel.CreateFunctionFromPrompt(promptConfig); + Console.WriteLine(result.GetValue()); + } + + /// + /// Shows how to create a KernelFunction from a prompt and specify the model ids of the preferred AI services. + /// When the function is invoked the AI Service will be selected based on the order of the provided model ids. + /// + [Fact] + public async Task InvokePreconfiguredFunctionWithFirstMatchingModelIdAsync() + { + string[] modelIds = ["gpt-4-1106-preview", TestConfiguration.AzureOpenAI.ChatModelId, TestConfiguration.OpenAI.ChatModelId]; + var kernel = BuildKernel(); + + Console.WriteLine($"======== Model Ids: {string.Join(", ", modelIds)} ========"); + var function = kernel.CreateFunctionFromPrompt(ChatPrompt, modelIds.Select((modelId, index) => new PromptExecutionSettings { ServiceId = $"service-{index}", ModelId = modelId })); var result = await kernel.InvokeAsync(function); + Console.WriteLine(result.GetValue()); } + + /// + /// Shows how to invoke a KernelFunction and specify the model id of the AI Service the function will use. 
+ /// + [Fact] + public async Task InvokePreconfiguredFunctionByModelIdAsync() + { + var modelId = TestConfiguration.OpenAI.ChatModelId; + var kernel = BuildKernel(); + Console.WriteLine($"======== Model Id: {modelId} ========"); + + var function = kernel.CreateFunctionFromPrompt(ChatPrompt); + var result = await kernel.InvokeAsync(function, new(new PromptExecutionSettings { ModelId = modelId })); + + Console.WriteLine(result.GetValue()); + } + + /// + /// Shows how to invoke a KernelFunction and specify the service id of the AI Service the function will use. + /// + /// Service Id + [Theory] + [InlineData("AzureOpenAIChat")] + public async Task InvokePreconfiguredFunctionByServiceIdAsync(string serviceId) + { + var kernel = BuildKernel(); + Console.WriteLine($"======== Service Id: {serviceId} ========"); + + var function = kernel.CreateFunctionFromPrompt(ChatPrompt); + var result = await kernel.InvokeAsync(function, new(new PromptExecutionSettings { ServiceId = serviceId })); + + Console.WriteLine(result.GetValue()); + } + + /// + /// Shows that specifying a non-existent ServiceId causes the kernel to throw an exception. + /// + /// Service Id + [Theory] + [InlineData("NotFound")] + public async Task InvokePromptByNonExistingServiceIdThrowsExceptionAsync(string serviceId) + { + var kernel = BuildKernel(); + Console.WriteLine($"======== Service Id: {serviceId} ========"); + + await Assert.ThrowsAsync(async () => await kernel.InvokePromptAsync(ChatPrompt, new(new PromptExecutionSettings { ServiceId = serviceId }))); + } + + /// + /// Shows that when the model id specified in the execution settings is not found, the kernel falls back to the default service. + /// + /// Model Id + [Theory] + [InlineData("NotFound")] + public async Task InvokePromptByNonExistingModelIdUsesDefaultServiceAsync(string modelId) + { + var kernel = BuildKernel(); + Console.WriteLine($"======== Model Id: {modelId} ========"); + + await kernel.InvokePromptAsync(ChatPrompt, new(new PromptExecutionSettings { ModelId = modelId })); + } } diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionMultipleChoices.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionMultipleChoices.cs index a9ab68aa6281..9534cac09a63 100644 --- a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionMultipleChoices.cs +++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionMultipleChoices.cs @@ -1,60 +1,133 @@ // Copyright (c) Microsoft. All rights reserved. +using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.ChatCompletion; using Microsoft.SemanticKernel.Connectors.OpenAI; namespace ChatCompletion; -// The following example shows how to use Semantic Kernel with streaming Multiple Results Chat Completion. +/// +/// The following example shows how to use Semantic Kernel with multiple chat completion results. +/// public class OpenAI_ChatCompletionMultipleChoices(ITestOutputHelper output) : BaseTest(output) { + /// + /// Example with multiple chat completion results using .
+ /// [Fact] - public Task AzureOpenAIMultiChatCompletionAsync() + public async Task MultipleChatCompletionResultsUsingKernelAsync() { - Console.WriteLine("======== Azure OpenAI - Multiple Chat Completion ========"); + var kernel = Kernel + .CreateBuilder() + .AddOpenAIChatCompletion( + modelId: TestConfiguration.OpenAI.ChatModelId, + apiKey: TestConfiguration.OpenAI.ApiKey) + .Build(); - var chatCompletionService = new AzureOpenAIChatCompletionService( - deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, - endpoint: TestConfiguration.AzureOpenAI.Endpoint, - apiKey: TestConfiguration.AzureOpenAI.ApiKey, - modelId: TestConfiguration.AzureOpenAI.ChatModelId); + // Execution settings with configured ResultsPerPrompt property. + var executionSettings = new OpenAIPromptExecutionSettings { MaxTokens = 200, ResultsPerPrompt = 3 }; - return ChatCompletionAsync(chatCompletionService); + var contents = await kernel.InvokePromptAsync>("Write a paragraph about why AI is awesome", new(executionSettings)); + + foreach (var content in contents!) + { + Console.Write(content.ToString() ?? string.Empty); + Console.WriteLine("\n-------------\n"); + } } + /// + /// Example with multiple chat completion results using . + /// [Fact] - public Task OpenAIMultiChatCompletionAsync() + public async Task MultipleChatCompletionResultsUsingChatCompletionServiceAsync() { - Console.WriteLine("======== Open AI - Multiple Chat Completion ========"); + var kernel = Kernel + .CreateBuilder() + .AddOpenAIChatCompletion( + modelId: TestConfiguration.OpenAI.ChatModelId, + apiKey: TestConfiguration.OpenAI.ApiKey) + .Build(); + + // Execution settings with configured ResultsPerPrompt property. + var executionSettings = new OpenAIPromptExecutionSettings { MaxTokens = 200, ResultsPerPrompt = 3 }; + + var chatHistory = new ChatHistory(); + chatHistory.AddUserMessage("Write a paragraph about why AI is awesome"); - var chatCompletionService = new OpenAIChatCompletionService( - TestConfiguration.OpenAI.ChatModelId, - TestConfiguration.OpenAI.ApiKey); + var chatCompletionService = kernel.GetRequiredService(); - return ChatCompletionAsync(chatCompletionService); + foreach (var chatMessageContent in await chatCompletionService.GetChatMessageContentsAsync(chatHistory, executionSettings)) + { + Console.Write(chatMessageContent.Content ?? string.Empty); + Console.WriteLine("\n-------------\n"); + } } - private async Task ChatCompletionAsync(IChatCompletionService chatCompletionService) + /// + /// This example shows how to handle multiple results when the prompt template contains a call to another prompt function. + /// is used for result selection. + /// + [Fact] + public async Task MultipleChatCompletionResultsInPromptTemplateAsync() { - var executionSettings = new OpenAIPromptExecutionSettings() - { - MaxTokens = 200, - FrequencyPenalty = 0, - PresencePenalty = 0, - Temperature = 1, - TopP = 0.5, - ResultsPerPrompt = 2, - }; + var kernel = Kernel + .CreateBuilder() + .AddOpenAIChatCompletion( + modelId: TestConfiguration.OpenAI.ChatModelId, + apiKey: TestConfiguration.OpenAI.ApiKey) + .Build(); - var chatHistory = new ChatHistory(); - chatHistory.AddUserMessage("Write one paragraph about why AI is awesome"); + var executionSettings = new OpenAIPromptExecutionSettings { MaxTokens = 200, ResultsPerPrompt = 3 }; + + // Initializing a function with execution settings for multiple results. + // We ask AI to write one paragraph, but in execution settings we specified that we want 3 different results for this request.
+ var function = KernelFunctionFactory.CreateFromPrompt("Write a paragraph about why AI is awesome", executionSettings, "GetParagraph"); + var plugin = KernelPluginFactory.CreateFromFunctions("MyPlugin", [function]); - foreach (var chatMessageChoice in await chatCompletionService.GetChatMessageContentsAsync(chatHistory, executionSettings)) + kernel.Plugins.Add(plugin); + + // Add function result selection filter. + kernel.FunctionInvocationFilters.Add(new FunctionResultSelectionFilter(this.Output)); + + // Inside our main request, we call MyPlugin.GetParagraph function for text summarization. + // Taking into account that MyPlugin.GetParagraph function produces 3 results, for text summarization we need to choose only one of them. + // The registered filter will be invoked during execution; it will select and return only 1 result, and this result will be inserted in our main request for summarization. + var result = await kernel.InvokePromptAsync("Summarize this text: {{MyPlugin.GetParagraph}}"); + + // It's possible to check what prompt was rendered for our main request. + Console.WriteLine($"Rendered prompt: '{result.RenderedPrompt}'"); + + // Output: + // Rendered prompt: 'Summarize this text: AI is awesome because...' + } + + /// + /// Example of filter which is responsible for result selection when a function produces multiple results. + /// + private sealed class FunctionResultSelectionFilter(ITestOutputHelper output) : IFunctionInvocationFilter + { + public async Task OnFunctionInvocationAsync(FunctionInvocationContext context, Func next) { - Console.Write(chatMessageChoice.Content ?? string.Empty); - Console.WriteLine("\n-------------\n"); - } + await next(context); + + // Selection logic for function which is expected to produce multiple results. + if (context.Function.Name == "GetParagraph") + { + // Get multiple results from function invocation + var contents = context.Result.GetValue>()!; - Console.WriteLine(); + output.WriteLine("Multiple results:"); + + foreach (var content in contents) + { + output.WriteLine(content.ToString()); + } + + // Select first result for correct prompt rendering + var selectedContent = contents[0]; + context.Result = new FunctionResult(context.Function, selectedContent, context.Kernel.Culture, selectedContent.Metadata); + } + } } } diff --git a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionStreaming.cs b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionStreaming.cs index bb33ebb51cab..4836dcf03d9f 100644 --- a/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionStreaming.cs +++ b/dotnet/samples/Concepts/ChatCompletion/OpenAI_ChatCompletionStreaming.cs @@ -1,27 +1,36 @@ // Copyright (c) Microsoft. All rights reserved. +using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.ChatCompletion; using Microsoft.SemanticKernel.Connectors.OpenAI; namespace ChatCompletion; -// The following example shows how to use Semantic Kernel with streaming Chat Completion +/// +/// These examples demonstrate the ways different content types are streamed by the OpenAI LLM via the chat completion service. +/// public class OpenAI_ChatCompletionStreaming(ITestOutputHelper output) : BaseTest(output) { + /// + /// This example demonstrates chat completion streaming using OpenAI.
+ /// [Fact] - public Task OpenAIChatStreamSampleAsync() + public Task StreamOpenAIChatAsync() { - Console.WriteLine("======== Open AI - ChatGPT Streaming ========"); + Console.WriteLine("======== Open AI Chat Completion Streaming ========"); OpenAIChatCompletionService chatCompletionService = new(TestConfiguration.OpenAI.ChatModelId, TestConfiguration.OpenAI.ApiKey); return this.StartStreamingChatAsync(chatCompletionService); } + /// + /// This example demonstrates chat completion streaming using Azure OpenAI. + /// [Fact] - public Task AzureOpenAIChatStreamSampleAsync() + public Task StreamAzureOpenAIChatAsync() { - Console.WriteLine("======== Azure Open AI - ChatGPT Streaming ========"); + Console.WriteLine("======== Azure Open AI Chat Completion Streaming ========"); AzureOpenAIChatCompletionService chatCompletionService = new( deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, @@ -32,24 +41,98 @@ public Task AzureOpenAIChatStreamSampleAsync() return this.StartStreamingChatAsync(chatCompletionService); } + /// + /// This example demonstrates how the chat completion service streams text content. + /// It shows how to access the response update via StreamingChatMessageContent.Content property + /// and alternatively via the StreamingChatMessageContent.Items property. + /// + [Fact] + public async Task StreamTextContentAsync() + { + Console.WriteLine("======== Stream Text Content ========"); + + // Create chat completion service + AzureOpenAIChatCompletionService chatCompletionService = new( + deploymentName: TestConfiguration.AzureOpenAI.ChatDeploymentName, + endpoint: TestConfiguration.AzureOpenAI.Endpoint, + apiKey: TestConfiguration.AzureOpenAI.ApiKey, + modelId: TestConfiguration.AzureOpenAI.ChatModelId); + + // Create chat history with initial system and user messages + ChatHistory chatHistory = new("You are a librarian, an expert on books."); + chatHistory.AddUserMessage("Hi, I'm looking for book suggestions."); + chatHistory.AddUserMessage("I love history and philosophy. I'd like to learn something new about Greece, any suggestion?"); + + // Start streaming chat based on the chat history + await foreach (StreamingChatMessageContent chatUpdate in chatCompletionService.GetStreamingChatMessageContentsAsync(chatHistory)) + { + // Access the response update via StreamingChatMessageContent.Content property + Console.Write(chatUpdate.Content); + + // Alternatively, the response update can be accessed via the StreamingChatMessageContent.Items property + Console.Write(chatUpdate.Items.OfType().FirstOrDefault()); + } + } + + /// + /// This example demonstrates how the chat completion service streams raw function call content. + /// See for a sample demonstrating how to simplify + /// function call content building out of streamed function call updates using the . + /// + [Fact] + public async Task StreamFunctionCallContentAsync() + { + Console.WriteLine("======== Stream Function Call Content ========"); + + // Create chat completion service + OpenAIChatCompletionService chatCompletionService = new(TestConfiguration.OpenAI.ChatModelId, TestConfiguration.OpenAI.ApiKey); + + // Create kernel with helper plugin. 
+ Kernel kernel = new(); + kernel.ImportPluginFromFunctions("HelperFunctions", + [ + kernel.CreateFunctionFromMethod((string longTestString) => DateTime.UtcNow.ToString("R"), "GetCurrentUtcTime", "Retrieves the current time in UTC."), + ]); + + // Create execution settings with manual function calling + OpenAIPromptExecutionSettings settings = new() { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions }; + + // Create chat history with initial user question + ChatHistory chatHistory = new(); + chatHistory.AddUserMessage("Hi, what is the current time?"); + + // Start streaming chat based on the chat history + await foreach (StreamingChatMessageContent chatUpdate in chatCompletionService.GetStreamingChatMessageContentsAsync(chatHistory, settings, kernel)) + { + // Getting list of function call updates requested by LLM + var streamingFunctionCallUpdates = chatUpdate.Items.OfType(); + + // Iterating over function call updates. Please use the FunctionCallContentBuilder to simplify function call content building. + foreach (StreamingFunctionCallUpdateContent update in streamingFunctionCallUpdates) + { + Console.WriteLine($"Function call update: callId={update.CallId}, name={update.Name}, arguments={update.Arguments?.Replace("\n", "\\n")}, functionCallIndex={update.FunctionCallIndex}"); + } + } + } + private async Task StartStreamingChatAsync(IChatCompletionService chatCompletionService) { Console.WriteLine("Chat content:"); Console.WriteLine("------------------------"); var chatHistory = new ChatHistory("You are a librarian, expert about books"); - await MessageOutputAsync(chatHistory); + OutputLastMessage(chatHistory); // First user message chatHistory.AddUserMessage("Hi, I'm looking for book suggestions"); - await MessageOutputAsync(chatHistory); + OutputLastMessage(chatHistory); // First bot assistant message await StreamMessageOutputAsync(chatCompletionService, chatHistory, AuthorRole.Assistant); // Second user message chatHistory.AddUserMessage("I love history and philosophy, I'd like to learn something new about Greece, any suggestion?"); - await MessageOutputAsync(chatHistory); + OutputLastMessage(chatHistory); // Second bot assistant message await StreamMessageOutputAsync(chatCompletionService, chatHistory, AuthorRole.Assistant); @@ -82,13 +165,11 @@ private async Task StreamMessageOutputAsync(IChatCompletionService chatCompletio /// /// Outputs the last message of the chat history /// - private Task MessageOutputAsync(ChatHistory chatHistory) + private void OutputLastMessage(ChatHistory chatHistory) { var message = chatHistory.Last(); Console.WriteLine($"{message.Role}: {message.Content}"); Console.WriteLine("------------------------"); - - return Task.CompletedTask; } } diff --git a/dotnet/samples/Concepts/Filtering/AutoFunctionInvocationFiltering.cs b/dotnet/samples/Concepts/Filtering/AutoFunctionInvocationFiltering.cs index 7d149b038b4a..1e56b8f36878 100644 --- a/dotnet/samples/Concepts/Filtering/AutoFunctionInvocationFiltering.cs +++ b/dotnet/samples/Concepts/Filtering/AutoFunctionInvocationFiltering.cs @@ -8,6 +8,9 @@ namespace Filtering; public class AutoFunctionInvocationFiltering(ITestOutputHelper output) : BaseTest(output) { + /// + /// Shows how to use . + /// [Fact] public async Task AutoFunctionInvocationFilterAsync() { @@ -16,7 +19,7 @@ public async Task AutoFunctionInvocationFilterAsync() builder.AddOpenAIChatCompletion("gpt-4", TestConfiguration.OpenAI.ApiKey); // This filter outputs information about auto function invocation and returns overridden result.
- builder.Services.AddSingleton(new AutoFunctionInvocationFilterExample(this.Output)); + builder.Services.AddSingleton(new AutoFunctionInvocationFilter(this.Output)); var kernel = builder.Build(); @@ -40,11 +43,56 @@ public async Task AutoFunctionInvocationFilterAsync() // Result from auto function invocation filter. } - /// Shows syntax for auto function invocation filter. - private sealed class AutoFunctionInvocationFilterExample(ITestOutputHelper output) : IAutoFunctionInvocationFilter + /// + /// Shows how to get list of function calls by using . + /// + [Fact] + public async Task GetFunctionCallsWithFilterAsync() { - private readonly ITestOutputHelper _output = output; + var builder = Kernel.CreateBuilder(); + + builder.AddOpenAIChatCompletion("gpt-3.5-turbo-1106", TestConfiguration.OpenAI.ApiKey); + + builder.Services.AddSingleton(new FunctionCallsFilter(this.Output)); + + var kernel = builder.Build(); + + kernel.ImportPluginFromFunctions("HelperFunctions", + [ + kernel.CreateFunctionFromMethod(() => DateTime.UtcNow.ToString("R"), "GetCurrentUtcTime", "Retrieves the current time in UTC."), + kernel.CreateFunctionFromMethod((string cityName) => + cityName switch + { + "Boston" => "61 and rainy", + "London" => "55 and cloudy", + "Miami" => "80 and sunny", + "Paris" => "60 and rainy", + "Tokyo" => "50 and sunny", + "Sydney" => "75 and sunny", + "Tel Aviv" => "80 and sunny", + _ => "31 and snowing", + }, "GetWeatherForCity", "Gets the current weather for the specified city"), + ]); + + var executionSettings = new OpenAIPromptExecutionSettings + { + ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions + }; + + await foreach (var chunk in kernel.InvokePromptStreamingAsync("Check current UTC time and return current weather in Boston city.", new(executionSettings))) + { + Console.WriteLine(chunk.ToString()); + } + // Output: + // Request #0. Function call: HelperFunctions.GetCurrentUtcTime. + // Request #0. Function call: HelperFunctions.GetWeatherForCity. + // The current UTC time is {time of execution}, and the current weather in Boston is 61°F and rainy. + } + + /// Shows available syntax for auto function invocation filter. + private sealed class AutoFunctionInvocationFilter(ITestOutputHelper output) : IAutoFunctionInvocationFilter + { public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext context, Func next) { // Example: get function information @@ -56,14 +104,31 @@ public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext co // Example: get information about all functions which will be invoked var functionCalls = FunctionCallContent.GetFunctionCalls(context.ChatHistory.Last()); + // In function calling functionality there are two loops. + // The outer loop is the "request" loop - it performs multiple requests to the LLM until the user's request is satisfied. + // The inner loop is the "function" loop - it handles an LLM response with multiple function calls. + + // Workflow example: + // 1. Request to LLM #1 -> Response with 3 functions to call. + // 1.1. Function #1 called. + // 1.2. Function #2 called. + // 1.3. Function #3 called. + // 2. Request to LLM #2 -> Response with 2 functions to call. + // 2.1. Function #1 called. + // 2.2. Function #2 called. + + // context.RequestSequenceIndex - the sequence number of the outer/request loop operation. + // context.FunctionSequenceIndex - the sequence number of the inner/function loop operation.
+            // context.FunctionCount - the number of functions which will be called per request (based on the example above: 3 for the first request, 2 for the second request).
+
             // Example: get request sequence index
-            this._output.WriteLine($"Request sequence index: {context.RequestSequenceIndex}");
+            output.WriteLine($"Request sequence index: {context.RequestSequenceIndex}");

             // Example: get function sequence index
-            this._output.WriteLine($"Function sequence index: {context.FunctionSequenceIndex}");
+            output.WriteLine($"Function sequence index: {context.FunctionSequenceIndex}");

             // Example: get total number of functions which will be called
-            this._output.WriteLine($"Total number of functions: {context.FunctionCount}");
+            output.WriteLine($"Total number of functions: {context.FunctionCount}");

             // Calling the next filter in the pipeline or the function itself.
             // By skipping this call, the next filters and the function won't be invoked, and the function call loop will proceed to the next function.
@@ -79,4 +144,24 @@ public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext co
             context.Terminate = true;
         }
     }
+
+    /// <summary>Shows how to get the list of all function calls per request.</summary>
+    private sealed class FunctionCallsFilter(ITestOutputHelper output) : IAutoFunctionInvocationFilter
+    {
+        public async Task OnAutoFunctionInvocationAsync(AutoFunctionInvocationContext context, Func<AutoFunctionInvocationContext, Task> next)
+        {
+            var chatHistory = context.ChatHistory;
+            var functionCalls = FunctionCallContent.GetFunctionCalls(chatHistory.Last()).ToArray();
+
+            if (functionCalls is { Length: > 0 })
+            {
+                foreach (var functionCall in functionCalls)
+                {
+                    output.WriteLine($"Request #{context.RequestSequenceIndex}. Function call: {functionCall.PluginName}.{functionCall.FunctionName}.");
+                }
+            }
+
+            await next(context);
+        }
+    }
 }
diff --git a/dotnet/samples/Concepts/FunctionCalling/Gemini_FunctionCalling.cs b/dotnet/samples/Concepts/FunctionCalling/Gemini_FunctionCalling.cs
index 0a03b5daecfc..33784679a886 100644
--- a/dotnet/samples/Concepts/FunctionCalling/Gemini_FunctionCalling.cs
+++ b/dotnet/samples/Concepts/FunctionCalling/Gemini_FunctionCalling.cs
@@ -5,7 +5,7 @@
 using Microsoft.SemanticKernel.Connectors.Google;
 using xRetry;

-namespace AutoFunctionCalling;
+namespace FunctionCalling;

 /// <summary>
 /// These examples demonstrate two ways functions called by the Gemini LLM can be invoked using the SK streaming and non-streaming AI API:
@@ -18,10 +18,10 @@ namespace AutoFunctionCalling;
 ///
 /// 2. Manual Invocation by a Caller:
 /// Functions called by the LLM are returned to the AI API caller. The caller controls the invocation phase where
-/// they may decide which function to call, when to call them, how to handle exceptions, etc. The caller then
-/// adds the function results or exceptions to the chat history and returns it to the LLM, which reasons about it
+/// they may decide which function to call, when to call them, how to handle exceptions, call them in parallel or sequentially, etc.
+/// The caller then adds the function results or exceptions to the chat history and returns it to the LLM, which reasons about it
 /// and generates the final response.
-/// This approach is more manual and requires more manual intervention from the caller.
+/// This approach is manual and gives the caller more control over the function invocation phase.
 /// </summary>
 public sealed class Gemini_FunctionCalling(ITestOutputHelper output) : BaseTest(output)
 {
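Because the manual approach hands the invocation phase to the caller, the returned function calls can also be executed concurrently rather than one by one. A minimal sketch, assuming the same functionCalls, kernel and chatHistory as in the manual-invocation examples below (System.Linq is assumed for Select):

// Invoke all requested functions in parallel; Task.WhenAll preserves the order of results.
FunctionResultContent[] resultContents = await Task.WhenAll(functionCalls.Select(functionCall => functionCall.InvokeAsync(kernel)));

// Add each function result to the chat history before sending it back to the LLM.
foreach (FunctionResultContent resultContent in resultContents)
{
    chatHistory.Add(resultContent.ToChatMessage());
}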
diff --git a/dotnet/samples/Concepts/FunctionCalling/OpenAI_FunctionCalling.cs b/dotnet/samples/Concepts/FunctionCalling/OpenAI_FunctionCalling.cs
index 506239d55323..1b817fbc60fe 100644
--- a/dotnet/samples/Concepts/FunctionCalling/OpenAI_FunctionCalling.cs
+++ b/dotnet/samples/Concepts/FunctionCalling/OpenAI_FunctionCalling.cs
@@ -20,10 +20,10 @@ namespace FunctionCalling;
 ///
 /// 2. Manual Invocation by a Caller:
 /// Functions called by the LLM are returned to the AI API caller. The caller controls the invocation phase where
-/// they may decide which function to call, when to call them, how to handle exceptions, etc. The caller then
-/// adds the function results or exceptions to the chat history and returns it to the LLM, which reasons about it
+/// they may decide which function to call, when to call them, how to handle exceptions, call them in parallel or sequentially, etc.
+/// The caller then adds the function results or exceptions to the chat history and returns it to the LLM, which reasons about it
 /// and generates the final response.
-/// This approach is more manual and requires more manual intervention from the caller.
+/// This approach is manual and gives the caller more control over the function invocation phase.
 /// </summary>
 public class OpenAI_FunctionCalling(ITestOutputHelper output) : BaseTest(output)
 {
@@ -61,54 +61,127 @@ public async Task RunStreamingPromptAutoFunctionCallingAsync()
     }

     /// <summary>
-    /// This example demonstrates manual function calling with a non-streaming prompt.
+    /// This example demonstrates manual function calling with a non-streaming chat API.
     /// </summary>
     [Fact]
-    public async Task RunNonStreamingPromptWithManualFunctionCallingAsync()
+    public async Task RunNonStreamingChatAPIWithManualFunctionCallingAsync()
     {
         Console.WriteLine("Manual function calling with a non-streaming prompt.");

+        // Create kernel and chat service
         Kernel kernel = CreateKernel();

         IChatCompletionService chat = kernel.GetRequiredService<IChatCompletionService>();

+        // Configure the chat service to enable manual function calling
         OpenAIPromptExecutionSettings settings = new() { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions };

+        // Create chat history with the initial user message
         ChatHistory chatHistory = new();
         chatHistory.AddUserMessage("Given the current time of day and weather, what is the likely color of the sky in Boston?");

         while (true)
         {
+            // Start or continue the chat based on the chat history
             ChatMessageContent result = await chat.GetChatMessageContentAsync(chatHistory, settings, kernel);

             if (result.Content is not null)
             {
                 Console.Write(result.Content);
             }

+            // Get function calls from the chat message content and quit the chat loop if no function calls are found.
             IEnumerable<FunctionCallContent> functionCalls = FunctionCallContent.GetFunctionCalls(result);
             if (!functionCalls.Any())
             {
                 break;
             }

-            chatHistory.Add(result); // Adding LLM response containing function calls(requests) to chat history as it's required by LLMs.
+            // Preserving the original chat message content with function calls in the chat history.
+            chatHistory.Add(result);

+            // Iterating over the requested function calls and invoking them
             foreach (FunctionCallContent functionCall in functionCalls)
             {
                 try
                 {
-                    FunctionResultContent resultContent = await functionCall.InvokeAsync(kernel); // Executing each function.
+                    // Invoking the function
+                    FunctionResultContent resultContent = await functionCall.InvokeAsync(kernel);
+
+                    // Adding the function result to the chat history
                     chatHistory.Add(resultContent.ToChatMessage());
                 }
                 catch (Exception ex)
                 {
-                    chatHistory.Add(new FunctionResultContent(functionCall, ex).ToChatMessage()); // Adding function result to chat history.
-                    // Adding exception to chat history.
+                    // Adding the function exception to the chat history.
+                    chatHistory.Add(new FunctionResultContent(functionCall, ex).ToChatMessage());
                     // or
-                    //string message = "Error details that LLM can reason about.";
-                    //chatHistory.Add(new FunctionResultContent(functionCall, message).ToChatMessageContent()); // Adding function result to chat history.
+                    //chatHistory.Add(new FunctionResultContent(functionCall, "Error details that LLM can reason about.").ToChatMessage());
+                }
+            }
+
+            Console.WriteLine();
+        }
+    }
+
+    /// <summary>
+    /// This example demonstrates manual function calling with a streaming chat API.
+    /// </summary>
+    [Fact]
+    public async Task RunStreamingChatAPIWithManualFunctionCallingAsync()
+    {
+        Console.WriteLine("Manual function calling with a streaming prompt.");
+
+        // Create kernel and chat service
+        Kernel kernel = CreateKernel();
+
+        IChatCompletionService chat = kernel.GetRequiredService<IChatCompletionService>();
+
+        // Configure the chat service to enable manual function calling
+        OpenAIPromptExecutionSettings settings = new() { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions };
+
+        // Create chat history with the initial user message
+        ChatHistory chatHistory = new();
+        chatHistory.AddUserMessage("Given the current time of day and weather, what is the likely color of the sky in Boston?");
+
+        while (true)
+        {
+            AuthorRole? authorRole = null;
+            var fccBuilder = new FunctionCallContentBuilder();
+
+            // Start or continue the streaming chat based on the chat history
+            await foreach (var streamingContent in chat.GetStreamingChatMessageContentsAsync(chatHistory, settings, kernel))
+            {
+                if (streamingContent.Content is not null)
+                {
+                    Console.Write(streamingContent.Content);
+                }
+                authorRole ??= streamingContent.Role;
+                fccBuilder.Append(streamingContent);
+            }
+
+            // Build the function calls from the streaming content and quit the chat loop if no function calls are found
+            var functionCalls = fccBuilder.Build();
+            if (!functionCalls.Any())
+            {
+                break;
+            }
+
+            // Creating and adding chat message content to preserve the original function calls in the chat history.
+            // The function calls are added to the chat message a few lines below.
+            var fcContent = new ChatMessageContent(role: authorRole ?? default, content: null);
+            chatHistory.Add(fcContent);
+
+            // Iterating over the requested function calls and invoking them
+            foreach (var functionCall in functionCalls)
+            {
+                // Adding the original function call to the chat message content
+                fcContent.Items.Add(functionCall);
+
+                // Invoking the function
+                var functionResult = await functionCall.InvokeAsync(kernel);
+
+                // Adding the function result to the chat history
+                chatHistory.Add(functionResult.ToChatMessage());
+            }

             Console.WriteLine();
diff --git a/dotnet/samples/Concepts/Optimization/FrugalGPT.cs b/dotnet/samples/Concepts/Optimization/FrugalGPT.cs
new file mode 100644
index 000000000000..f5ede1764789
--- /dev/null
+++ b/dotnet/samples/Concepts/Optimization/FrugalGPT.cs
@@ -0,0 +1,308 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System.Runtime.CompilerServices;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.SemanticKernel;
+using Microsoft.SemanticKernel.ChatCompletion;
+using Microsoft.SemanticKernel.Embeddings;
+using Microsoft.SemanticKernel.Memory;
+using Microsoft.SemanticKernel.PromptTemplates.Handlebars;
+using Microsoft.SemanticKernel.Services;
+
+namespace Optimization;
+
+/// <summary>
+/// This example shows how to use FrugalGPT techniques to reduce cost and improve LLM-related task performance.
+/// More information here: https://arxiv.org/abs/2305.05176.
+/// </summary>
+public sealed class FrugalGPT(ITestOutputHelper output) : BaseTest(output)
+{
+    /// <summary>
+    /// One of the FrugalGPT techniques is to reduce prompt size when using few-shot prompts.
+    /// If a prompt contains a lot of examples to help the LLM produce the best result, it's possible to send only a few of them to reduce the number of tokens.
+    /// Vector similarity can be used to pick the best examples from the example set for a specific request (see the cosine-similarity sketch after the example set below).
+    /// The following example shows how to optimize an email classification request by reducing the prompt size with vector similarity search.
+    /// </summary>
+    [Fact]
+    public async Task ReducePromptSizeAsync()
+    {
+        // Define email classification examples with email body and labels.
+        var examples = new List<string>
+        {
+            "Hey, just checking in to see how you're doing! - Personal",
+            "Can you pick up some groceries on your way back home? We need milk and bread. - Personal, Tasks",
+            "Happy Birthday! Wishing you a fantastic day filled with love and joy. - Personal",
+            "Let's catch up over coffee this Saturday. It's been too long! - Personal, Events",
+            "Please review the attached document and provide your feedback by EOD. - Work",
+            "Our team meeting is scheduled for 10 AM tomorrow in the main conference room. - Work",
+            "The quarterly financial report is due next Monday. Ensure all data is updated. - Work, Tasks",
+            "Can you send me the latest version of the project plan? Thanks! - Work",
+            "You're invited to our annual summer picnic! RSVP by June 25th. - Events",
+            "Join us for a webinar on digital marketing trends this Thursday at 3 PM. - Events",
+            "Save the date for our charity gala on September 15th. We hope to see you there! - Events",
+            "Don't miss our customer appreciation event next week. Sign up now! - Events, Notifications",
+            "Your order has been shipped and will arrive by June 20th. - Notifications",
+            "We've updated our policies. Please review the changes. - Notifications",
+            "Your username was successfully changed. If this wasn't you, contact support immediately. - Notifications",
+            "The system upgrade will occur this weekend. - Notifications, Work",
+            "Don't forget to submit your timesheet by 5 PM today. - Tasks, Work",
+            "Pick up the dry cleaning before they close at 7 PM. - Tasks",
+            "Complete the online training module by the end of the week. - Tasks, Work",
+            "Send out the meeting invites for next week's project kickoff. - Tasks, Work"
+        };
+
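The nearest-match search used by this sample reduces to cosine similarity between embedding vectors. A minimal, self-contained sketch of that selection step; this is illustrative only, not the VolatileMemoryStore implementation, and CosineSimilarity/SelectTopExamples are hypothetical helper names (System.Linq assumed):

// Cosine similarity between two embedding vectors: dot(a, b) / (|a| * |b|).
static float CosineSimilarity(ReadOnlySpan<float> a, ReadOnlySpan<float> b)
{
    float dot = 0, normA = 0, normB = 0;
    for (var i = 0; i < a.Length; i++)
    {
        dot += a[i] * b[i];
        normA += a[i] * a[i];
        normB += b[i] * b[i];
    }
    return dot / (MathF.Sqrt(normA) * MathF.Sqrt(normB));
}

// Keep only the topN examples whose embeddings are closest to the request embedding.
static IEnumerable<string> SelectTopExamples(IReadOnlyList<string> examples, IReadOnlyList<float[]> exampleEmbeddings, float[] requestEmbedding, int topN) =>
    examples
        .Select((example, index) => (example, score: CosineSimilarity(exampleEmbeddings[index], requestEmbedding)))
        .OrderByDescending(pair => pair.score)
        .Take(topN)
        .Select(pair => pair.example);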
+        // Initialize kernel with chat completion and embedding generation services.
+        // It's possible to combine different models from different AI providers to achieve the lowest token usage.
+        var kernel = Kernel.CreateBuilder()
+            .AddOpenAIChatCompletion(
+                modelId: "gpt-4",
+                apiKey: TestConfiguration.OpenAI.ApiKey)
+            .AddOpenAITextEmbeddingGeneration(
+                modelId: "text-embedding-3-small",
+                apiKey: TestConfiguration.OpenAI.ApiKey)
+            .Build();
+
+        // Initialize the few-shot prompt.
+        var function = kernel.CreateFunctionFromPrompt(
+            new()
+            {
+                Template =
+                """
+                Available classification labels: Personal, Work, Events, Notifications, Tasks
+                Email classification examples:
+                {{#each Examples}}
+                    {{this}}
+                {{/each}}
+
+                Email body to classify:
+                {{Request}}
+                """,
+                TemplateFormat = "handlebars"
+            },
+            new HandlebarsPromptTemplateFactory()
+        );
+
+        // Define arguments with the few-shot examples and the actual email for classification.
+        var arguments = new KernelArguments
+        {
+            ["Examples"] = examples,
+            ["Request"] = "Your dentist appointment is tomorrow at 10 AM. Please remember to bring your insurance card."
+        };
+
+        // Invoke the defined function to see the initial result.
+        var result = await kernel.InvokeAsync(function, arguments);
+
+        Console.WriteLine(result); // Personal, Notifications
+        Console.WriteLine(result.Metadata?["Usage"]?.AsJson()); // Total tokens: ~430
+
+        // Add the few-shot prompt optimization filter.
+        // The filter uses an in-memory store for vector similarity search and a text embedding generation service to generate embeddings.
+        var memoryStore = new VolatileMemoryStore();
+        var textEmbeddingGenerationService = kernel.GetRequiredService<ITextEmbeddingGenerationService>();
+
+        // Register the optimization filter.
+        kernel.PromptRenderFilters.Add(new FewShotPromptOptimizationFilter(memoryStore, textEmbeddingGenerationService));
+
+        // Get the result again and compare the usage.
+        result = await kernel.InvokeAsync(function, arguments);
+
+        Console.WriteLine(result); // Personal, Notifications
+        Console.WriteLine(result.Metadata?["Usage"]?.AsJson()); // Total tokens: ~150
+    }
+
+    /// <summary>
+    /// The LLM cascade technique uses multiple LLMs sequentially, starting with a cheaper model;
+    /// the LLM result is evaluated and returned if it meets the quality criteria. Otherwise, the next LLM in the queue is used,
+    /// until the result is acceptable.
+    /// The following example uses mock result generation and evaluation for demonstration purposes.
+    /// Result evaluation examples including BERTScore, BLEU, METEOR and COMET metrics can be found here:
+    /// https://github.com/microsoft/semantic-kernel/tree/main/dotnet/samples/Demos/QualityCheck.
+    /// </summary>
+    [Fact]
+    public async Task LLMCascadeAsync()
+    {
+        // Create a kernel builder.
+        var builder = Kernel.CreateBuilder();
+
+        // Register chat completion services for demonstration purposes.
+        // This registration is similar to the AddAzureOpenAIChatCompletion and AddOpenAIChatCompletion methods.
+        builder.Services.AddSingleton<IChatCompletionService>(new MockChatCompletionService("model1", "Hi there! I'm doing well, thank you! How about yourself?"));
+        builder.Services.AddSingleton<IChatCompletionService>(new MockChatCompletionService("model2", "Hello! I'm great, thanks for asking. How are you doing today?"));
+        builder.Services.AddSingleton<IChatCompletionService>(new MockChatCompletionService("model3", "Hey! I'm fine, thanks. How's your day going so far?"));
+
+        // Register the LLM cascade filter with the model execution order, the acceptance criteria for the result and a service for output.
+        // In real use cases, the execution order should go from cheaper to more expensive models.
+        // If the first model produces an acceptable result, it will be returned immediately.
+        builder.Services.AddSingleton<IFunctionInvocationFilter>(new LLMCascadeFilter(
+            modelExecutionOrder: ["model1", "model2", "model3"],
+            acceptanceCriteria: result => result.Contains("Hey!"),
+            output: this.Output));
+
+        // Build the kernel.
+        var kernel = builder.Build();
+
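The acceptance criterion above is a plain Predicate<string>; the "Hey!" keyword check merely makes the mock cascade deterministic. A slightly more realistic, purely hypothetical criterion might look like:

// Hypothetical criterion: accept only answers that are reasonably long and don't hedge.
Predicate<string> acceptanceCriteria = result =>
    result.Length >= 20 &&
    !result.Contains("I don't know", StringComparison.OrdinalIgnoreCase);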
+        // Send a request.
+        var result = await kernel.InvokePromptAsync("Hi, how are you today?");
+
+        Console.WriteLine($"\nFinal result: {result}");
+
+        // Output:
+        // Executing request with model: model1
+        // Result from model1: Hi there! I'm doing well, thank you! How about yourself?
+        // Result does not meet the acceptance criteria, moving to the next model.
+
+        // Executing request with model: model2
+        // Result from model2: Hello! I'm great, thanks for asking. How are you doing today?
+        // Result does not meet the acceptance criteria, moving to the next model.
+
+        // Executing request with model: model3
+        // Result from model3: Hey! I'm fine, thanks. How's your day going so far?
+        // Returning result as it meets the acceptance criteria.
+
+        // Final result: Hey! I'm fine, thanks. How's your day going so far?
+    }
+
+    /// <summary>
+    /// Few-shot prompt optimization filter which takes all examples from the kernel arguments and selects the examples
+    /// most similar to the original request.
+    /// </summary>
+    private sealed class FewShotPromptOptimizationFilter(
+        IMemoryStore memoryStore,
+        ITextEmbeddingGenerationService textEmbeddingGenerationService) : IPromptRenderFilter
+    {
+        /// <summary>
+        /// Maximum number of examples to use which are similar to the original request.
+        /// </summary>
+        private const int TopN = 5;
+
+        /// <summary>
+        /// Collection name to use in the memory store.
+        /// </summary>
+        private const string CollectionName = "examples";
+
+        public async Task OnPromptRenderAsync(PromptRenderContext context, Func<PromptRenderContext, Task> next)
+        {
+            // Get the examples and the original request from the arguments.
+            var examples = context.Arguments["Examples"] as List<string>;
+            var request = context.Arguments["Request"] as string;
+
+            if (examples is { Count: > 0 } && !string.IsNullOrEmpty(request))
+            {
+                var memoryRecords = new List<MemoryRecord>();
+
+                // Generate an embedding for each example.
+                var embeddings = await textEmbeddingGenerationService.GenerateEmbeddingsAsync(examples);
+
+                // Create memory record instances with the example text and embedding.
+                for (var i = 0; i < examples.Count; i++)
+                {
+                    memoryRecords.Add(MemoryRecord.LocalRecord(Guid.NewGuid().ToString(), examples[i], "description", embeddings[i]));
+                }
+
+                // Create a collection and upsert all memory records for search.
+                // It's possible to do this only once and re-use the same examples for future requests.
+                await memoryStore.CreateCollectionAsync(CollectionName);
+                await memoryStore.UpsertBatchAsync(CollectionName, memoryRecords).ToListAsync();
+
+                // Generate an embedding for the original request.
+                var requestEmbedding = await textEmbeddingGenerationService.GenerateEmbeddingAsync(request);
+
+                // Find the top N examples which are most similar to the original request.
+                var topNExamples = await memoryStore.GetNearestMatchesAsync(CollectionName, requestEmbedding, TopN).ToListAsync();
+
+                // Override the arguments to use only the top N examples, which will be sent to the LLM.
+                context.Arguments["Examples"] = topNExamples.Select(l => l.Item1.Metadata.Text);
+            }
+
+            // Continue the prompt rendering operation.
+            await next(context);
+        }
+    }
+
+    /// <summary>
+    /// Example of an LLM cascade filter which invokes a function using multiple LLMs in a specific order,
+    /// until the result meets the specified acceptance criteria.
+    /// </summary>
+    private sealed class LLMCascadeFilter(
+        List<string> modelExecutionOrder,
+        Predicate<string> acceptanceCriteria,
+        ITestOutputHelper output) : IFunctionInvocationFilter
+    {
+        public async Task OnFunctionInvocationAsync(FunctionInvocationContext context, Func<FunctionInvocationContext, Task> next)
+        {
+            // Get the registered chat completion services from the kernel.
+            var registeredServices = context.Kernel
+                .GetAllServices<IChatCompletionService>()
+                .Select(service => (ModelId: service.GetModelId()!, Service: service));
+
+            // Define the order of execution.
+            var order = modelExecutionOrder
+                .Select((value, index) => new { Value = value, Index = index })
+                .ToDictionary(k => k.Value, v => v.Index);
+
+            // Sort the services by the specified order.
+            var orderedServices = registeredServices.OrderBy(service => order[service.ModelId]);
+
+            // Try to invoke the function with each service and check the result.
+            foreach (var service in orderedServices)
+            {
+                // Define execution settings with the model ID.
+                context.Arguments.ExecutionSettings = new Dictionary<string, PromptExecutionSettings>
+                {
+                    { PromptExecutionSettings.DefaultServiceId, new() { ModelId = service.ModelId } }
+                };
+
+                output.WriteLine($"Executing request with model: {service.ModelId}");
+
+                // Invoke the function.
+                await next(context);
+
+                // Get the result.
+                var result = context.Result.ToString()!;
+
+                output.WriteLine($"Result from {service.ModelId}: {result}");
+
+                // Check if the result meets the specified acceptance criteria.
+                // If yes, stop the execution loop, so the last result will be returned.
+                if (acceptanceCriteria(result))
+                {
+                    output.WriteLine("Returning result as it meets the acceptance criteria.");
+                    return;
+                }
+
+                // Otherwise, proceed with the next model.
+                output.WriteLine("Result does not meet the acceptance criteria, moving to the next model.\n");
+            }
+
+            // If no LLM returned an acceptable result, the last result will be returned.
+            // It's also possible to throw an exception in such cases if needed.
+            // throw new Exception("Models didn't return a result that meets the acceptance criteria").
+        }
+    }
+
+    /// <summary>
+    /// Mock chat completion service for demonstration purposes.
+    /// </summary>
+    private sealed class MockChatCompletionService(string modelId, string mockResult) : IChatCompletionService
+    {
+        public IReadOnlyDictionary<string, object?> Attributes => new Dictionary<string, object?> { { AIServiceExtensions.ModelIdKey, modelId } };
+
+        public Task<IReadOnlyList<ChatMessageContent>> GetChatMessageContentsAsync(
+            ChatHistory chatHistory,
+            PromptExecutionSettings? executionSettings = null,
+            Kernel? kernel = null,
+            CancellationToken cancellationToken = default)
+        {
+            return Task.FromResult<IReadOnlyList<ChatMessageContent>>([new ChatMessageContent(AuthorRole.Assistant, mockResult)]);
+        }
+
+        public async IAsyncEnumerable<StreamingChatMessageContent> GetStreamingChatMessageContentsAsync(
+            ChatHistory chatHistory,
+            PromptExecutionSettings? executionSettings = null,
+            Kernel? kernel = null,
+            [EnumeratorCancellation] CancellationToken cancellationToken = default)
+        {
+            yield return new StreamingChatMessageContent(AuthorRole.Assistant, mockResult);
+        }
+    }
+}
diff --git a/dotnet/samples/Concepts/Plugins/OpenAIPlugins.cs b/dotnet/samples/Concepts/Plugins/OpenAIPlugins.cs
index 7608bfd7b08f..77846b0d5290 100644
--- a/dotnet/samples/Concepts/Plugins/OpenAIPlugins.cs
+++ b/dotnet/samples/Concepts/Plugins/OpenAIPlugins.cs
@@ -8,10 +8,13 @@ namespace Plugins;
 public class OpenAIPlugins(ITestOutputHelper output) : BaseTest(output)
 {
     /// <summary>
-    /// Generic template on how to call OpenAI plugins
+    /// This sample shows how to invoke an OpenAI plugin.
     /// </summary>
+    /// <remarks>
+    /// You must provide the plugin name and a URI to the Open API manifest before running this sample.
+    /// </remarks>
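Filling in this sample's template amounts to supplying a plugin name and the location of its manifest. A minimal sketch with hypothetical placeholder values (the Klarna example below uses a real manifest URI; the execution-parameters argument shown there is optional here):

Kernel kernel = new();

// Both values are placeholders; substitute your plugin's name and its ai-plugin.json location.
var plugin = await kernel.ImportPluginFromOpenAIAsync(
    "MyPlugin",
    new Uri("https://example.com/.well-known/ai-plugin.json"));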
+    /// </remarks>
     [Fact(Skip = "Run it only after filling the template below")]
-    public async Task RunOpenAIPluginAsync()
+    public async Task InvokeOpenAIPluginAsync()
     {
         Kernel kernel = new();
@@ -32,8 +35,11 @@ public async Task RunOpenAIPluginAsync()
         Console.WriteLine($"Function execution result: {result?.Content}");
     }

+    /// <summary>
+    /// This sample shows how to invoke the Klarna Get Products function as an OpenAPI plugin.
+    /// </summary>
     [Fact]
-    public async Task CallKlarnaAsync()
+    public async Task InvokeKlarnaGetProductsAsOpenAPIPluginAsync()
     {
         Kernel kernel = new();
@@ -54,4 +60,63 @@ public async Task CallKlarnaAsync()
         Console.WriteLine($"Function execution result: {result?.Content}");
     }
+
+    /// <summary>
+    /// This sample shows how to use a delegating handler when invoking an OpenAPI function.
+    /// </summary>
+    /// <remarks>
+    /// An instance of <see cref="OpenApiKernelFunctionContext"/> will be set in the `HttpRequestMessage.Options` (for .NET 5.0 or higher) or
+    /// in the `HttpRequestMessage.Properties` dictionary (for .NET Standard) with the key `KernelFunctionContextKey`.
+    /// The <see cref="OpenApiKernelFunctionContext"/> contains the <see cref="Kernel"/>, <see cref="KernelFunction"/> and <see cref="KernelArguments"/>.
+    /// </remarks>
+    [Fact]
+    public async Task UseDelegatingHandlerWhenInvokingAnOpenAPIFunctionAsync()
+    {
+        using var httpHandler = new HttpClientHandler();
+        using var customHandler = new CustomHandler(httpHandler);
+        using HttpClient httpClient = new(customHandler);
+
+        Kernel kernel = new();
+
+        var plugin = await kernel.ImportPluginFromOpenAIAsync("Klarna", new Uri("https://www.klarna.com/.well-known/ai-plugin.json"), new OpenAIFunctionExecutionParameters(httpClient));
+
+        var arguments = new KernelArguments
+        {
+            ["q"] = "Laptop",      // Category or product that needs to be searched for.
+            ["size"] = "3",        // Number of products to return
+            ["budget"] = "200",    // Maximum price of the matching product in local currency
+            ["countryCode"] = "US" // ISO 3166 country code with 2 characters based on the user location.
+        };
+        // Currently, only US, GB, DE, SE and DK are supported.
+
+        var functionResult = await kernel.InvokeAsync(plugin["productsUsingGET"], arguments);
+
+        var result = functionResult.GetValue<RestApiOperationResponse>();
+
+        Console.WriteLine($"Function execution result: {result?.Content}");
+    }
+
+    /// <summary>
+    /// Custom delegating handler to modify the <see cref="HttpRequestMessage"/> before sending it.
+    /// </summary>
+    private sealed class CustomHandler(HttpMessageHandler innerHandler) : DelegatingHandler(innerHandler)
+    {
+        protected override async Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
+        {
+#if NET5_0_OR_GREATER
+            request.Options.TryGetValue(OpenApiKernelFunctionContext.KernelFunctionContextKey, out var functionContext);
+#else
+            request.Properties.TryGetValue(OpenApiKernelFunctionContext.KernelFunctionContextKey, out var functionContext);
+#endif
+            // The function context is only set when the plugin is invoked via the Kernel
+            if (functionContext is not null)
+            {
+                // Modify the HttpRequestMessage
+                request.Headers.Add("Kernel-Function-Name", functionContext?.Function?.Name);
+            }
+
+            // Call the next handler in the pipeline
+            return await base.SendAsync(request, cancellationToken);
+        }
+    }
 }
diff --git a/dotnet/samples/Concepts/README.md b/dotnet/samples/Concepts/README.md
index f0896534852c..7eaa2a8a7ae6 100644
--- a/dotnet/samples/Concepts/README.md
+++ b/dotnet/samples/Concepts/README.md
@@ -100,6 +100,10 @@ Down below you can find the code snippets that demonstrate the usage of many Sem
 - [TextMemoryPlugin_GeminiEmbeddingGeneration](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Memory/TextMemoryPlugin_GeminiEmbeddingGeneration.cs)
 - [TextMemoryPlugin_MultipleMemoryStore](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Memory/TextMemoryPlugin_MultipleMemoryStore.cs)

+## Optimization - Examples of different cost and performance optimization techniques
+
+- [FrugalGPT](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Optimization/FrugalGPT.cs)
+
 ## Planners - Examples on using `Planners`

 - [FunctionCallStepwisePlanning](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Concepts/Planners/FunctionCallStepwisePlanning.cs)
diff --git a/dotnet/samples/Concepts/Resources/sales.csv b/dotnet/samples/Concepts/Resources/sales.csv
new file mode 100644
index 000000000000..4a355d11bf83
--- /dev/null
+++ b/dotnet/samples/Concepts/Resources/sales.csv
@@ -0,0 +1,701 @@
+Segment,Country,Product,Units Sold,Sale Price,Gross Sales,Discounts,Sales,COGS,Profit,Date,Month Number,Month Name,Year
+Government,Canada,Carretera,1618.5,20.00,32370.00,0.00,32370.00,16185.00,16185.00,1/1/2014,1,January,2014
+Government,Germany,Carretera,1321,20.00,26420.00,0.00,26420.00,13210.00,13210.00,1/1/2014,1,January,2014
+Midmarket,France,Carretera,2178,15.00,32670.00,0.00,32670.00,21780.00,10890.00,6/1/2014,6,June,2014
+Midmarket,Germany,Carretera,888,15.00,13320.00,0.00,13320.00,8880.00,4440.00,6/1/2014,6,June,2014
+Midmarket,Mexico,Carretera,2470,15.00,37050.00,0.00,37050.00,24700.00,12350.00,6/1/2014,6,June,2014
+Government,Germany,Carretera,1513,350.00,529550.00,0.00,529550.00,393380.00,136170.00,12/1/2014,12,December,2014
+Midmarket,Germany,Montana,921,15.00,13815.00,0.00,13815.00,9210.00,4605.00,3/1/2014,3,March,2014
+Channel Partners,Canada,Montana,2518,12.00,30216.00,0.00,30216.00,7554.00,22662.00,6/1/2014,6,June,2014
+Government,France,Montana,1899,20.00,37980.00,0.00,37980.00,18990.00,18990.00,6/1/2014,6,June,2014
+Channel Partners,Germany,Montana,1545,12.00,18540.00,0.00,18540.00,4635.00,13905.00,6/1/2014,6,June,2014
+Midmarket,Mexico,Montana,2470,15.00,37050.00,0.00,37050.00,24700.00,12350.00,6/1/2014,6,June,2014
+Enterprise,Canada,Montana,2665.5,125.00,333187.50,0.00,333187.50,319860.00,13327.50,7/1/2014,7,July,2014
+Small
Business,Mexico,Montana,958,300.00,287400.00,0.00,287400.00,239500.00,47900.00,8/1/2014,8,August,2014 +Government,Germany,Montana,2146,7.00,15022.00,0.00,15022.00,10730.00,4292.00,9/1/2014,9,September,2014 +Enterprise,Canada,Montana,345,125.00,43125.00,0.00,43125.00,41400.00,1725.00,10/1/2013,10,October,2013 +Midmarket,United States of America,Montana,615,15.00,9225.00,0.00,9225.00,6150.00,3075.00,12/1/2014,12,December,2014 +Government,Canada,Paseo,292,20.00,5840.00,0.00,5840.00,2920.00,2920.00,2/1/2014,2,February,2014 +Midmarket,Mexico,Paseo,974,15.00,14610.00,0.00,14610.00,9740.00,4870.00,2/1/2014,2,February,2014 +Channel Partners,Canada,Paseo,2518,12.00,30216.00,0.00,30216.00,7554.00,22662.00,6/1/2014,6,June,2014 +Government,Germany,Paseo,1006,350.00,352100.00,0.00,352100.00,261560.00,90540.00,6/1/2014,6,June,2014 +Channel Partners,Germany,Paseo,367,12.00,4404.00,0.00,4404.00,1101.00,3303.00,7/1/2014,7,July,2014 +Government,Mexico,Paseo,883,7.00,6181.00,0.00,6181.00,4415.00,1766.00,8/1/2014,8,August,2014 +Midmarket,France,Paseo,549,15.00,8235.00,0.00,8235.00,5490.00,2745.00,9/1/2013,9,September,2013 +Small Business,Mexico,Paseo,788,300.00,236400.00,0.00,236400.00,197000.00,39400.00,9/1/2013,9,September,2013 +Midmarket,Mexico,Paseo,2472,15.00,37080.00,0.00,37080.00,24720.00,12360.00,9/1/2014,9,September,2014 +Government,United States of America,Paseo,1143,7.00,8001.00,0.00,8001.00,5715.00,2286.00,10/1/2014,10,October,2014 +Government,Canada,Paseo,1725,350.00,603750.00,0.00,603750.00,448500.00,155250.00,11/1/2013,11,November,2013 +Channel Partners,United States of America,Paseo,912,12.00,10944.00,0.00,10944.00,2736.00,8208.00,11/1/2013,11,November,2013 +Midmarket,Canada,Paseo,2152,15.00,32280.00,0.00,32280.00,21520.00,10760.00,12/1/2013,12,December,2013 +Government,Canada,Paseo,1817,20.00,36340.00,0.00,36340.00,18170.00,18170.00,12/1/2014,12,December,2014 +Government,Germany,Paseo,1513,350.00,529550.00,0.00,529550.00,393380.00,136170.00,12/1/2014,12,December,2014 +Government,Mexico,Velo,1493,7.00,10451.00,0.00,10451.00,7465.00,2986.00,1/1/2014,1,January,2014 +Enterprise,France,Velo,1804,125.00,225500.00,0.00,225500.00,216480.00,9020.00,2/1/2014,2,February,2014 +Channel Partners,Germany,Velo,2161,12.00,25932.00,0.00,25932.00,6483.00,19449.00,3/1/2014,3,March,2014 +Government,Germany,Velo,1006,350.00,352100.00,0.00,352100.00,261560.00,90540.00,6/1/2014,6,June,2014 +Channel Partners,Germany,Velo,1545,12.00,18540.00,0.00,18540.00,4635.00,13905.00,6/1/2014,6,June,2014 +Enterprise,United States of America,Velo,2821,125.00,352625.00,0.00,352625.00,338520.00,14105.00,8/1/2014,8,August,2014 +Enterprise,Canada,Velo,345,125.00,43125.00,0.00,43125.00,41400.00,1725.00,10/1/2013,10,October,2013 +Small Business,Canada,VTT,2001,300.00,600300.00,0.00,600300.00,500250.00,100050.00,2/1/2014,2,February,2014 +Channel Partners,Germany,VTT,2838,12.00,34056.00,0.00,34056.00,8514.00,25542.00,4/1/2014,4,April,2014 +Midmarket,France,VTT,2178,15.00,32670.00,0.00,32670.00,21780.00,10890.00,6/1/2014,6,June,2014 +Midmarket,Germany,VTT,888,15.00,13320.00,0.00,13320.00,8880.00,4440.00,6/1/2014,6,June,2014 +Government,France,VTT,1527,350.00,534450.00,0.00,534450.00,397020.00,137430.00,9/1/2013,9,September,2013 +Small Business,France,VTT,2151,300.00,645300.00,0.00,645300.00,537750.00,107550.00,9/1/2014,9,September,2014 +Government,Canada,VTT,1817,20.00,36340.00,0.00,36340.00,18170.00,18170.00,12/1/2014,12,December,2014 
+Government,France,Amarilla,2750,350.00,962500.00,0.00,962500.00,715000.00,247500.00,2/1/2014,2,February,2014 +Channel Partners,United States of America,Amarilla,1953,12.00,23436.00,0.00,23436.00,5859.00,17577.00,4/1/2014,4,April,2014 +Enterprise,Germany,Amarilla,4219.5,125.00,527437.50,0.00,527437.50,506340.00,21097.50,4/1/2014,4,April,2014 +Government,France,Amarilla,1899,20.00,37980.00,0.00,37980.00,18990.00,18990.00,6/1/2014,6,June,2014 +Government,Germany,Amarilla,1686,7.00,11802.00,0.00,11802.00,8430.00,3372.00,7/1/2014,7,July,2014 +Channel Partners,United States of America,Amarilla,2141,12.00,25692.00,0.00,25692.00,6423.00,19269.00,8/1/2014,8,August,2014 +Government,United States of America,Amarilla,1143,7.00,8001.00,0.00,8001.00,5715.00,2286.00,10/1/2014,10,October,2014 +Midmarket,United States of America,Amarilla,615,15.00,9225.00,0.00,9225.00,6150.00,3075.00,12/1/2014,12,December,2014 +Government,France,Paseo,3945,7.00,27615.00,276.15,27338.85,19725.00,7613.85,1/1/2014,1,January,2014 +Midmarket,France,Paseo,2296,15.00,34440.00,344.40,34095.60,22960.00,11135.60,2/1/2014,2,February,2014 +Government,France,Paseo,1030,7.00,7210.00,72.10,7137.90,5150.00,1987.90,5/1/2014,5,May,2014 +Government,France,Velo,639,7.00,4473.00,44.73,4428.27,3195.00,1233.27,11/1/2014,11,November,2014 +Government,Canada,VTT,1326,7.00,9282.00,92.82,9189.18,6630.00,2559.18,3/1/2014,3,March,2014 +Channel Partners,United States of America,Carretera,1858,12.00,22296.00,222.96,22073.04,5574.00,16499.04,2/1/2014,2,February,2014 +Government,Mexico,Carretera,1210,350.00,423500.00,4235.00,419265.00,314600.00,104665.00,3/1/2014,3,March,2014 +Government,United States of America,Carretera,2529,7.00,17703.00,177.03,17525.97,12645.00,4880.97,7/1/2014,7,July,2014 +Channel Partners,Canada,Carretera,1445,12.00,17340.00,173.40,17166.60,4335.00,12831.60,9/1/2014,9,September,2014 +Enterprise,United States of America,Carretera,330,125.00,41250.00,412.50,40837.50,39600.00,1237.50,9/1/2013,9,September,2013 +Channel Partners,France,Carretera,2671,12.00,32052.00,320.52,31731.48,8013.00,23718.48,9/1/2014,9,September,2014 +Channel Partners,Germany,Carretera,766,12.00,9192.00,91.92,9100.08,2298.00,6802.08,10/1/2013,10,October,2013 +Small Business,Mexico,Carretera,494,300.00,148200.00,1482.00,146718.00,123500.00,23218.00,10/1/2013,10,October,2013 +Government,Mexico,Carretera,1397,350.00,488950.00,4889.50,484060.50,363220.00,120840.50,10/1/2014,10,October,2014 +Government,France,Carretera,2155,350.00,754250.00,7542.50,746707.50,560300.00,186407.50,12/1/2014,12,December,2014 +Midmarket,Mexico,Montana,2214,15.00,33210.00,332.10,32877.90,22140.00,10737.90,3/1/2014,3,March,2014 +Small Business,United States of America,Montana,2301,300.00,690300.00,6903.00,683397.00,575250.00,108147.00,4/1/2014,4,April,2014 +Government,France,Montana,1375.5,20.00,27510.00,275.10,27234.90,13755.00,13479.90,7/1/2014,7,July,2014 +Government,Canada,Montana,1830,7.00,12810.00,128.10,12681.90,9150.00,3531.90,8/1/2014,8,August,2014 +Small Business,United States of America,Montana,2498,300.00,749400.00,7494.00,741906.00,624500.00,117406.00,9/1/2013,9,September,2013 +Enterprise,United States of America,Montana,663,125.00,82875.00,828.75,82046.25,79560.00,2486.25,10/1/2013,10,October,2013 +Midmarket,United States of America,Paseo,1514,15.00,22710.00,227.10,22482.90,15140.00,7342.90,2/1/2014,2,February,2014 +Government,United States of America,Paseo,4492.5,7.00,31447.50,314.48,31133.03,22462.50,8670.53,4/1/2014,4,April,2014 +Enterprise,United States of 
America,Paseo,727,125.00,90875.00,908.75,89966.25,87240.00,2726.25,6/1/2014,6,June,2014 +Enterprise,France,Paseo,787,125.00,98375.00,983.75,97391.25,94440.00,2951.25,6/1/2014,6,June,2014 +Enterprise,Mexico,Paseo,1823,125.00,227875.00,2278.75,225596.25,218760.00,6836.25,7/1/2014,7,July,2014 +Midmarket,Germany,Paseo,747,15.00,11205.00,112.05,11092.95,7470.00,3622.95,9/1/2014,9,September,2014 +Channel Partners,Germany,Paseo,766,12.00,9192.00,91.92,9100.08,2298.00,6802.08,10/1/2013,10,October,2013 +Small Business,United States of America,Paseo,2905,300.00,871500.00,8715.00,862785.00,726250.00,136535.00,11/1/2014,11,November,2014 +Government,France,Paseo,2155,350.00,754250.00,7542.50,746707.50,560300.00,186407.50,12/1/2014,12,December,2014 +Government,France,Velo,3864,20.00,77280.00,772.80,76507.20,38640.00,37867.20,4/1/2014,4,April,2014 +Government,Mexico,Velo,362,7.00,2534.00,25.34,2508.66,1810.00,698.66,5/1/2014,5,May,2014 +Enterprise,Canada,Velo,923,125.00,115375.00,1153.75,114221.25,110760.00,3461.25,8/1/2014,8,August,2014 +Enterprise,United States of America,Velo,663,125.00,82875.00,828.75,82046.25,79560.00,2486.25,10/1/2013,10,October,2013 +Government,Canada,Velo,2092,7.00,14644.00,146.44,14497.56,10460.00,4037.56,11/1/2013,11,November,2013 +Government,Germany,VTT,263,7.00,1841.00,18.41,1822.59,1315.00,507.59,3/1/2014,3,March,2014 +Government,Canada,VTT,943.5,350.00,330225.00,3302.25,326922.75,245310.00,81612.75,4/1/2014,4,April,2014 +Enterprise,United States of America,VTT,727,125.00,90875.00,908.75,89966.25,87240.00,2726.25,6/1/2014,6,June,2014 +Enterprise,France,VTT,787,125.00,98375.00,983.75,97391.25,94440.00,2951.25,6/1/2014,6,June,2014 +Small Business,Germany,VTT,986,300.00,295800.00,2958.00,292842.00,246500.00,46342.00,9/1/2014,9,September,2014 +Small Business,Mexico,VTT,494,300.00,148200.00,1482.00,146718.00,123500.00,23218.00,10/1/2013,10,October,2013 +Government,Mexico,VTT,1397,350.00,488950.00,4889.50,484060.50,363220.00,120840.50,10/1/2014,10,October,2014 +Enterprise,France,VTT,1744,125.00,218000.00,2180.00,215820.00,209280.00,6540.00,11/1/2014,11,November,2014 +Channel Partners,United States of America,Amarilla,1989,12.00,23868.00,238.68,23629.32,5967.00,17662.32,9/1/2013,9,September,2013 +Midmarket,France,Amarilla,321,15.00,4815.00,48.15,4766.85,3210.00,1556.85,11/1/2013,11,November,2013 +Enterprise,Canada,Carretera,742.5,125.00,92812.50,1856.25,90956.25,89100.00,1856.25,4/1/2014,4,April,2014 +Channel Partners,Canada,Carretera,1295,12.00,15540.00,310.80,15229.20,3885.00,11344.20,10/1/2014,10,October,2014 +Small Business,Germany,Carretera,214,300.00,64200.00,1284.00,62916.00,53500.00,9416.00,10/1/2013,10,October,2013 +Government,France,Carretera,2145,7.00,15015.00,300.30,14714.70,10725.00,3989.70,11/1/2013,11,November,2013 +Government,Canada,Carretera,2852,350.00,998200.00,19964.00,978236.00,741520.00,236716.00,12/1/2014,12,December,2014 +Channel Partners,United States of America,Montana,1142,12.00,13704.00,274.08,13429.92,3426.00,10003.92,6/1/2014,6,June,2014 +Government,United States of America,Montana,1566,20.00,31320.00,626.40,30693.60,15660.00,15033.60,10/1/2014,10,October,2014 +Channel Partners,Mexico,Montana,690,12.00,8280.00,165.60,8114.40,2070.00,6044.40,11/1/2014,11,November,2014 +Enterprise,Mexico,Montana,1660,125.00,207500.00,4150.00,203350.00,199200.00,4150.00,11/1/2013,11,November,2013 +Midmarket,Canada,Paseo,2363,15.00,35445.00,708.90,34736.10,23630.00,11106.10,2/1/2014,2,February,2014 +Small 
Business,France,Paseo,918,300.00,275400.00,5508.00,269892.00,229500.00,40392.00,5/1/2014,5,May,2014 +Small Business,Germany,Paseo,1728,300.00,518400.00,10368.00,508032.00,432000.00,76032.00,5/1/2014,5,May,2014 +Channel Partners,United States of America,Paseo,1142,12.00,13704.00,274.08,13429.92,3426.00,10003.92,6/1/2014,6,June,2014 +Enterprise,Mexico,Paseo,662,125.00,82750.00,1655.00,81095.00,79440.00,1655.00,6/1/2014,6,June,2014 +Channel Partners,Canada,Paseo,1295,12.00,15540.00,310.80,15229.20,3885.00,11344.20,10/1/2014,10,October,2014 +Enterprise,Germany,Paseo,809,125.00,101125.00,2022.50,99102.50,97080.00,2022.50,10/1/2013,10,October,2013 +Enterprise,Mexico,Paseo,2145,125.00,268125.00,5362.50,262762.50,257400.00,5362.50,10/1/2013,10,October,2013 +Channel Partners,France,Paseo,1785,12.00,21420.00,428.40,20991.60,5355.00,15636.60,11/1/2013,11,November,2013 +Small Business,Canada,Paseo,1916,300.00,574800.00,11496.00,563304.00,479000.00,84304.00,12/1/2014,12,December,2014 +Government,Canada,Paseo,2852,350.00,998200.00,19964.00,978236.00,741520.00,236716.00,12/1/2014,12,December,2014 +Enterprise,Canada,Paseo,2729,125.00,341125.00,6822.50,334302.50,327480.00,6822.50,12/1/2014,12,December,2014 +Midmarket,United States of America,Paseo,1925,15.00,28875.00,577.50,28297.50,19250.00,9047.50,12/1/2013,12,December,2013 +Government,United States of America,Paseo,2013,7.00,14091.00,281.82,13809.18,10065.00,3744.18,12/1/2013,12,December,2013 +Channel Partners,France,Paseo,1055,12.00,12660.00,253.20,12406.80,3165.00,9241.80,12/1/2014,12,December,2014 +Channel Partners,Mexico,Paseo,1084,12.00,13008.00,260.16,12747.84,3252.00,9495.84,12/1/2014,12,December,2014 +Government,United States of America,Velo,1566,20.00,31320.00,626.40,30693.60,15660.00,15033.60,10/1/2014,10,October,2014 +Government,Germany,Velo,2966,350.00,1038100.00,20762.00,1017338.00,771160.00,246178.00,10/1/2013,10,October,2013 +Government,Germany,Velo,2877,350.00,1006950.00,20139.00,986811.00,748020.00,238791.00,10/1/2014,10,October,2014 +Enterprise,Germany,Velo,809,125.00,101125.00,2022.50,99102.50,97080.00,2022.50,10/1/2013,10,October,2013 +Enterprise,Mexico,Velo,2145,125.00,268125.00,5362.50,262762.50,257400.00,5362.50,10/1/2013,10,October,2013 +Channel Partners,France,Velo,1055,12.00,12660.00,253.20,12406.80,3165.00,9241.80,12/1/2014,12,December,2014 +Government,Mexico,Velo,544,20.00,10880.00,217.60,10662.40,5440.00,5222.40,12/1/2013,12,December,2013 +Channel Partners,Mexico,Velo,1084,12.00,13008.00,260.16,12747.84,3252.00,9495.84,12/1/2014,12,December,2014 +Enterprise,Mexico,VTT,662,125.00,82750.00,1655.00,81095.00,79440.00,1655.00,6/1/2014,6,June,2014 +Small Business,Germany,VTT,214,300.00,64200.00,1284.00,62916.00,53500.00,9416.00,10/1/2013,10,October,2013 +Government,Germany,VTT,2877,350.00,1006950.00,20139.00,986811.00,748020.00,238791.00,10/1/2014,10,October,2014 +Enterprise,Canada,VTT,2729,125.00,341125.00,6822.50,334302.50,327480.00,6822.50,12/1/2014,12,December,2014 +Government,United States of America,VTT,266,350.00,93100.00,1862.00,91238.00,69160.00,22078.00,12/1/2013,12,December,2013 +Government,Mexico,VTT,1940,350.00,679000.00,13580.00,665420.00,504400.00,161020.00,12/1/2013,12,December,2013 +Small Business,Germany,Amarilla,259,300.00,77700.00,1554.00,76146.00,64750.00,11396.00,3/1/2014,3,March,2014 +Small Business,Mexico,Amarilla,1101,300.00,330300.00,6606.00,323694.00,275250.00,48444.00,3/1/2014,3,March,2014 +Enterprise,Germany,Amarilla,2276,125.00,284500.00,5690.00,278810.00,273120.00,5690.00,5/1/2014,5,May,2014 
+Government,Germany,Amarilla,2966,350.00,1038100.00,20762.00,1017338.00,771160.00,246178.00,10/1/2013,10,October,2013 +Government,United States of America,Amarilla,1236,20.00,24720.00,494.40,24225.60,12360.00,11865.60,11/1/2014,11,November,2014 +Government,France,Amarilla,941,20.00,18820.00,376.40,18443.60,9410.00,9033.60,11/1/2014,11,November,2014 +Small Business,Canada,Amarilla,1916,300.00,574800.00,11496.00,563304.00,479000.00,84304.00,12/1/2014,12,December,2014 +Enterprise,France,Carretera,4243.5,125.00,530437.50,15913.13,514524.38,509220.00,5304.38,4/1/2014,4,April,2014 +Government,Germany,Carretera,2580,20.00,51600.00,1548.00,50052.00,25800.00,24252.00,4/1/2014,4,April,2014 +Small Business,Germany,Carretera,689,300.00,206700.00,6201.00,200499.00,172250.00,28249.00,6/1/2014,6,June,2014 +Channel Partners,United States of America,Carretera,1947,12.00,23364.00,700.92,22663.08,5841.00,16822.08,9/1/2014,9,September,2014 +Channel Partners,Canada,Carretera,908,12.00,10896.00,326.88,10569.12,2724.00,7845.12,12/1/2013,12,December,2013 +Government,Germany,Montana,1958,7.00,13706.00,411.18,13294.82,9790.00,3504.82,2/1/2014,2,February,2014 +Channel Partners,France,Montana,1901,12.00,22812.00,684.36,22127.64,5703.00,16424.64,6/1/2014,6,June,2014 +Government,France,Montana,544,7.00,3808.00,114.24,3693.76,2720.00,973.76,9/1/2014,9,September,2014 +Government,Germany,Montana,1797,350.00,628950.00,18868.50,610081.50,467220.00,142861.50,9/1/2013,9,September,2013 +Enterprise,France,Montana,1287,125.00,160875.00,4826.25,156048.75,154440.00,1608.75,12/1/2014,12,December,2014 +Enterprise,Germany,Montana,1706,125.00,213250.00,6397.50,206852.50,204720.00,2132.50,12/1/2014,12,December,2014 +Small Business,France,Paseo,2434.5,300.00,730350.00,21910.50,708439.50,608625.00,99814.50,1/1/2014,1,January,2014 +Enterprise,Canada,Paseo,1774,125.00,221750.00,6652.50,215097.50,212880.00,2217.50,3/1/2014,3,March,2014 +Channel Partners,France,Paseo,1901,12.00,22812.00,684.36,22127.64,5703.00,16424.64,6/1/2014,6,June,2014 +Small Business,Germany,Paseo,689,300.00,206700.00,6201.00,200499.00,172250.00,28249.00,6/1/2014,6,June,2014 +Enterprise,Germany,Paseo,1570,125.00,196250.00,5887.50,190362.50,188400.00,1962.50,6/1/2014,6,June,2014 +Channel Partners,United States of America,Paseo,1369.5,12.00,16434.00,493.02,15940.98,4108.50,11832.48,7/1/2014,7,July,2014 +Enterprise,Canada,Paseo,2009,125.00,251125.00,7533.75,243591.25,241080.00,2511.25,10/1/2014,10,October,2014 +Midmarket,Germany,Paseo,1945,15.00,29175.00,875.25,28299.75,19450.00,8849.75,10/1/2013,10,October,2013 +Enterprise,France,Paseo,1287,125.00,160875.00,4826.25,156048.75,154440.00,1608.75,12/1/2014,12,December,2014 +Enterprise,Germany,Paseo,1706,125.00,213250.00,6397.50,206852.50,204720.00,2132.50,12/1/2014,12,December,2014 +Enterprise,Canada,Velo,2009,125.00,251125.00,7533.75,243591.25,241080.00,2511.25,10/1/2014,10,October,2014 +Small Business,United States of America,VTT,2844,300.00,853200.00,25596.00,827604.00,711000.00,116604.00,2/1/2014,2,February,2014 +Channel Partners,Mexico,VTT,1916,12.00,22992.00,689.76,22302.24,5748.00,16554.24,4/1/2014,4,April,2014 +Enterprise,Germany,VTT,1570,125.00,196250.00,5887.50,190362.50,188400.00,1962.50,6/1/2014,6,June,2014 +Small Business,Canada,VTT,1874,300.00,562200.00,16866.00,545334.00,468500.00,76834.00,8/1/2014,8,August,2014 +Government,Mexico,VTT,1642,350.00,574700.00,17241.00,557459.00,426920.00,130539.00,8/1/2014,8,August,2014 
+Midmarket,Germany,VTT,1945,15.00,29175.00,875.25,28299.75,19450.00,8849.75,10/1/2013,10,October,2013 +Government,Canada,Carretera,831,20.00,16620.00,498.60,16121.40,8310.00,7811.40,5/1/2014,5,May,2014 +Government,Mexico,Paseo,1760,7.00,12320.00,369.60,11950.40,8800.00,3150.40,9/1/2013,9,September,2013 +Government,Canada,Velo,3850.5,20.00,77010.00,2310.30,74699.70,38505.00,36194.70,4/1/2014,4,April,2014 +Channel Partners,Germany,VTT,2479,12.00,29748.00,892.44,28855.56,7437.00,21418.56,1/1/2014,1,January,2014 +Midmarket,Mexico,Montana,2031,15.00,30465.00,1218.60,29246.40,20310.00,8936.40,10/1/2014,10,October,2014 +Midmarket,Mexico,Paseo,2031,15.00,30465.00,1218.60,29246.40,20310.00,8936.40,10/1/2014,10,October,2014 +Midmarket,France,Paseo,2261,15.00,33915.00,1356.60,32558.40,22610.00,9948.40,12/1/2013,12,December,2013 +Government,United States of America,Velo,736,20.00,14720.00,588.80,14131.20,7360.00,6771.20,9/1/2013,9,September,2013 +Government,Canada,Carretera,2851,7.00,19957.00,798.28,19158.72,14255.00,4903.72,10/1/2013,10,October,2013 +Small Business,Germany,Carretera,2021,300.00,606300.00,24252.00,582048.00,505250.00,76798.00,10/1/2014,10,October,2014 +Government,United States of America,Carretera,274,350.00,95900.00,3836.00,92064.00,71240.00,20824.00,12/1/2014,12,December,2014 +Midmarket,Canada,Montana,1967,15.00,29505.00,1180.20,28324.80,19670.00,8654.80,3/1/2014,3,March,2014 +Small Business,Germany,Montana,1859,300.00,557700.00,22308.00,535392.00,464750.00,70642.00,8/1/2014,8,August,2014 +Government,Canada,Montana,2851,7.00,19957.00,798.28,19158.72,14255.00,4903.72,10/1/2013,10,October,2013 +Small Business,Germany,Montana,2021,300.00,606300.00,24252.00,582048.00,505250.00,76798.00,10/1/2014,10,October,2014 +Enterprise,Mexico,Montana,1138,125.00,142250.00,5690.00,136560.00,136560.00,0.00,12/1/2014,12,December,2014 +Government,Canada,Paseo,4251,7.00,29757.00,1190.28,28566.72,21255.00,7311.72,1/1/2014,1,January,2014 +Enterprise,Germany,Paseo,795,125.00,99375.00,3975.00,95400.00,95400.00,0.00,3/1/2014,3,March,2014 +Small Business,Germany,Paseo,1414.5,300.00,424350.00,16974.00,407376.00,353625.00,53751.00,4/1/2014,4,April,2014 +Small Business,United States of America,Paseo,2918,300.00,875400.00,35016.00,840384.00,729500.00,110884.00,5/1/2014,5,May,2014 +Government,United States of America,Paseo,3450,350.00,1207500.00,48300.00,1159200.00,897000.00,262200.00,7/1/2014,7,July,2014 +Enterprise,France,Paseo,2988,125.00,373500.00,14940.00,358560.00,358560.00,0.00,7/1/2014,7,July,2014 +Midmarket,Canada,Paseo,218,15.00,3270.00,130.80,3139.20,2180.00,959.20,9/1/2014,9,September,2014 +Government,Canada,Paseo,2074,20.00,41480.00,1659.20,39820.80,20740.00,19080.80,9/1/2014,9,September,2014 +Government,United States of America,Paseo,1056,20.00,21120.00,844.80,20275.20,10560.00,9715.20,9/1/2014,9,September,2014 +Midmarket,United States of America,Paseo,671,15.00,10065.00,402.60,9662.40,6710.00,2952.40,10/1/2013,10,October,2013 +Midmarket,Mexico,Paseo,1514,15.00,22710.00,908.40,21801.60,15140.00,6661.60,10/1/2013,10,October,2013 +Government,United States of America,Paseo,274,350.00,95900.00,3836.00,92064.00,71240.00,20824.00,12/1/2014,12,December,2014 +Enterprise,Mexico,Paseo,1138,125.00,142250.00,5690.00,136560.00,136560.00,0.00,12/1/2014,12,December,2014 +Channel Partners,United States of America,Velo,1465,12.00,17580.00,703.20,16876.80,4395.00,12481.80,3/1/2014,3,March,2014 +Government,Canada,Velo,2646,20.00,52920.00,2116.80,50803.20,26460.00,24343.20,9/1/2013,9,September,2013 
+Government,France,Velo,2177,350.00,761950.00,30478.00,731472.00,566020.00,165452.00,10/1/2014,10,October,2014 +Channel Partners,France,VTT,866,12.00,10392.00,415.68,9976.32,2598.00,7378.32,5/1/2014,5,May,2014 +Government,United States of America,VTT,349,350.00,122150.00,4886.00,117264.00,90740.00,26524.00,9/1/2013,9,September,2013 +Government,France,VTT,2177,350.00,761950.00,30478.00,731472.00,566020.00,165452.00,10/1/2014,10,October,2014 +Midmarket,Mexico,VTT,1514,15.00,22710.00,908.40,21801.60,15140.00,6661.60,10/1/2013,10,October,2013 +Government,Mexico,Amarilla,1865,350.00,652750.00,26110.00,626640.00,484900.00,141740.00,2/1/2014,2,February,2014 +Enterprise,Mexico,Amarilla,1074,125.00,134250.00,5370.00,128880.00,128880.00,0.00,4/1/2014,4,April,2014 +Government,Germany,Amarilla,1907,350.00,667450.00,26698.00,640752.00,495820.00,144932.00,9/1/2014,9,September,2014 +Midmarket,United States of America,Amarilla,671,15.00,10065.00,402.60,9662.40,6710.00,2952.40,10/1/2013,10,October,2013 +Government,Canada,Amarilla,1778,350.00,622300.00,24892.00,597408.00,462280.00,135128.00,12/1/2013,12,December,2013 +Government,Germany,Montana,1159,7.00,8113.00,405.65,7707.35,5795.00,1912.35,10/1/2013,10,October,2013 +Government,Germany,Paseo,1372,7.00,9604.00,480.20,9123.80,6860.00,2263.80,1/1/2014,1,January,2014 +Government,Canada,Paseo,2349,7.00,16443.00,822.15,15620.85,11745.00,3875.85,9/1/2013,9,September,2013 +Government,Mexico,Paseo,2689,7.00,18823.00,941.15,17881.85,13445.00,4436.85,10/1/2014,10,October,2014 +Channel Partners,Canada,Paseo,2431,12.00,29172.00,1458.60,27713.40,7293.00,20420.40,12/1/2014,12,December,2014 +Channel Partners,Canada,Velo,2431,12.00,29172.00,1458.60,27713.40,7293.00,20420.40,12/1/2014,12,December,2014 +Government,Mexico,VTT,2689,7.00,18823.00,941.15,17881.85,13445.00,4436.85,10/1/2014,10,October,2014 +Government,Mexico,Amarilla,1683,7.00,11781.00,589.05,11191.95,8415.00,2776.95,7/1/2014,7,July,2014 +Channel Partners,Mexico,Amarilla,1123,12.00,13476.00,673.80,12802.20,3369.00,9433.20,8/1/2014,8,August,2014 +Government,Germany,Amarilla,1159,7.00,8113.00,405.65,7707.35,5795.00,1912.35,10/1/2013,10,October,2013 +Channel Partners,France,Carretera,1865,12.00,22380.00,1119.00,21261.00,5595.00,15666.00,2/1/2014,2,February,2014 +Channel Partners,Germany,Carretera,1116,12.00,13392.00,669.60,12722.40,3348.00,9374.40,2/1/2014,2,February,2014 +Government,France,Carretera,1563,20.00,31260.00,1563.00,29697.00,15630.00,14067.00,5/1/2014,5,May,2014 +Small Business,United States of America,Carretera,991,300.00,297300.00,14865.00,282435.00,247750.00,34685.00,6/1/2014,6,June,2014 +Government,Germany,Carretera,1016,7.00,7112.00,355.60,6756.40,5080.00,1676.40,11/1/2013,11,November,2013 +Midmarket,Mexico,Carretera,2791,15.00,41865.00,2093.25,39771.75,27910.00,11861.75,11/1/2014,11,November,2014 +Government,United States of America,Carretera,570,7.00,3990.00,199.50,3790.50,2850.00,940.50,12/1/2014,12,December,2014 +Government,France,Carretera,2487,7.00,17409.00,870.45,16538.55,12435.00,4103.55,12/1/2014,12,December,2014 +Government,France,Montana,1384.5,350.00,484575.00,24228.75,460346.25,359970.00,100376.25,1/1/2014,1,January,2014 +Enterprise,United States of America,Montana,3627,125.00,453375.00,22668.75,430706.25,435240.00,-4533.75,7/1/2014,7,July,2014 +Government,Mexico,Montana,720,350.00,252000.00,12600.00,239400.00,187200.00,52200.00,9/1/2013,9,September,2013 +Channel Partners,Germany,Montana,2342,12.00,28104.00,1405.20,26698.80,7026.00,19672.80,11/1/2014,11,November,2014 +Small 
Business,Mexico,Montana,1100,300.00,330000.00,16500.00,313500.00,275000.00,38500.00,12/1/2013,12,December,2013 +Government,France,Paseo,1303,20.00,26060.00,1303.00,24757.00,13030.00,11727.00,2/1/2014,2,February,2014 +Enterprise,United States of America,Paseo,2992,125.00,374000.00,18700.00,355300.00,359040.00,-3740.00,3/1/2014,3,March,2014 +Enterprise,France,Paseo,2385,125.00,298125.00,14906.25,283218.75,286200.00,-2981.25,3/1/2014,3,March,2014 +Small Business,Mexico,Paseo,1607,300.00,482100.00,24105.00,457995.00,401750.00,56245.00,4/1/2014,4,April,2014 +Government,United States of America,Paseo,2327,7.00,16289.00,814.45,15474.55,11635.00,3839.55,5/1/2014,5,May,2014 +Small Business,United States of America,Paseo,991,300.00,297300.00,14865.00,282435.00,247750.00,34685.00,6/1/2014,6,June,2014 +Government,United States of America,Paseo,602,350.00,210700.00,10535.00,200165.00,156520.00,43645.00,6/1/2014,6,June,2014 +Midmarket,France,Paseo,2620,15.00,39300.00,1965.00,37335.00,26200.00,11135.00,9/1/2014,9,September,2014 +Government,Canada,Paseo,1228,350.00,429800.00,21490.00,408310.00,319280.00,89030.00,10/1/2013,10,October,2013 +Government,Canada,Paseo,1389,20.00,27780.00,1389.00,26391.00,13890.00,12501.00,10/1/2013,10,October,2013 +Enterprise,United States of America,Paseo,861,125.00,107625.00,5381.25,102243.75,103320.00,-1076.25,10/1/2014,10,October,2014 +Enterprise,France,Paseo,704,125.00,88000.00,4400.00,83600.00,84480.00,-880.00,10/1/2013,10,October,2013 +Government,Canada,Paseo,1802,20.00,36040.00,1802.00,34238.00,18020.00,16218.00,12/1/2013,12,December,2013 +Government,United States of America,Paseo,2663,20.00,53260.00,2663.00,50597.00,26630.00,23967.00,12/1/2014,12,December,2014 +Government,France,Paseo,2136,7.00,14952.00,747.60,14204.40,10680.00,3524.40,12/1/2013,12,December,2013 +Midmarket,Germany,Paseo,2116,15.00,31740.00,1587.00,30153.00,21160.00,8993.00,12/1/2013,12,December,2013 +Midmarket,United States of America,Velo,555,15.00,8325.00,416.25,7908.75,5550.00,2358.75,1/1/2014,1,January,2014 +Midmarket,Mexico,Velo,2861,15.00,42915.00,2145.75,40769.25,28610.00,12159.25,1/1/2014,1,January,2014 +Enterprise,Germany,Velo,807,125.00,100875.00,5043.75,95831.25,96840.00,-1008.75,2/1/2014,2,February,2014 +Government,United States of America,Velo,602,350.00,210700.00,10535.00,200165.00,156520.00,43645.00,6/1/2014,6,June,2014 +Government,United States of America,Velo,2832,20.00,56640.00,2832.00,53808.00,28320.00,25488.00,8/1/2014,8,August,2014 +Government,France,Velo,1579,20.00,31580.00,1579.00,30001.00,15790.00,14211.00,8/1/2014,8,August,2014 +Enterprise,United States of America,Velo,861,125.00,107625.00,5381.25,102243.75,103320.00,-1076.25,10/1/2014,10,October,2014 +Enterprise,France,Velo,704,125.00,88000.00,4400.00,83600.00,84480.00,-880.00,10/1/2013,10,October,2013 +Government,France,Velo,1033,20.00,20660.00,1033.00,19627.00,10330.00,9297.00,12/1/2013,12,December,2013 +Small Business,Germany,Velo,1250,300.00,375000.00,18750.00,356250.00,312500.00,43750.00,12/1/2014,12,December,2014 +Government,Canada,VTT,1389,20.00,27780.00,1389.00,26391.00,13890.00,12501.00,10/1/2013,10,October,2013 +Government,United States of America,VTT,1265,20.00,25300.00,1265.00,24035.00,12650.00,11385.00,11/1/2013,11,November,2013 +Government,Germany,VTT,2297,20.00,45940.00,2297.00,43643.00,22970.00,20673.00,11/1/2013,11,November,2013 +Government,United States of America,VTT,2663,20.00,53260.00,2663.00,50597.00,26630.00,23967.00,12/1/2014,12,December,2014 +Government,United States of 
America,VTT,570,7.00,3990.00,199.50,3790.50,2850.00,940.50,12/1/2014,12,December,2014 +Government,France,VTT,2487,7.00,17409.00,870.45,16538.55,12435.00,4103.55,12/1/2014,12,December,2014 +Government,Germany,Amarilla,1350,350.00,472500.00,23625.00,448875.00,351000.00,97875.00,2/1/2014,2,February,2014 +Government,Canada,Amarilla,552,350.00,193200.00,9660.00,183540.00,143520.00,40020.00,8/1/2014,8,August,2014 +Government,Canada,Amarilla,1228,350.00,429800.00,21490.00,408310.00,319280.00,89030.00,10/1/2013,10,October,2013 +Small Business,Germany,Amarilla,1250,300.00,375000.00,18750.00,356250.00,312500.00,43750.00,12/1/2014,12,December,2014 +Midmarket,France,Paseo,3801,15.00,57015.00,3420.90,53594.10,38010.00,15584.10,4/1/2014,4,April,2014 +Government,United States of America,Carretera,1117.5,20.00,22350.00,1341.00,21009.00,11175.00,9834.00,1/1/2014,1,January,2014 +Midmarket,Canada,Carretera,2844,15.00,42660.00,2559.60,40100.40,28440.00,11660.40,6/1/2014,6,June,2014 +Channel Partners,Mexico,Carretera,562,12.00,6744.00,404.64,6339.36,1686.00,4653.36,9/1/2014,9,September,2014 +Channel Partners,Canada,Carretera,2299,12.00,27588.00,1655.28,25932.72,6897.00,19035.72,10/1/2013,10,October,2013 +Midmarket,United States of America,Carretera,2030,15.00,30450.00,1827.00,28623.00,20300.00,8323.00,11/1/2014,11,November,2014 +Government,United States of America,Carretera,263,7.00,1841.00,110.46,1730.54,1315.00,415.54,11/1/2013,11,November,2013 +Enterprise,Germany,Carretera,887,125.00,110875.00,6652.50,104222.50,106440.00,-2217.50,12/1/2013,12,December,2013 +Government,Mexico,Montana,980,350.00,343000.00,20580.00,322420.00,254800.00,67620.00,4/1/2014,4,April,2014 +Government,Germany,Montana,1460,350.00,511000.00,30660.00,480340.00,379600.00,100740.00,5/1/2014,5,May,2014 +Government,France,Montana,1403,7.00,9821.00,589.26,9231.74,7015.00,2216.74,10/1/2013,10,October,2013 +Channel Partners,United States of America,Montana,2723,12.00,32676.00,1960.56,30715.44,8169.00,22546.44,11/1/2014,11,November,2014 +Government,France,Paseo,1496,350.00,523600.00,31416.00,492184.00,388960.00,103224.00,6/1/2014,6,June,2014 +Channel Partners,Canada,Paseo,2299,12.00,27588.00,1655.28,25932.72,6897.00,19035.72,10/1/2013,10,October,2013 +Government,United States of America,Paseo,727,350.00,254450.00,15267.00,239183.00,189020.00,50163.00,10/1/2013,10,October,2013 +Enterprise,Canada,Velo,952,125.00,119000.00,7140.00,111860.00,114240.00,-2380.00,2/1/2014,2,February,2014 +Enterprise,United States of America,Velo,2755,125.00,344375.00,20662.50,323712.50,330600.00,-6887.50,2/1/2014,2,February,2014 +Midmarket,Germany,Velo,1530,15.00,22950.00,1377.00,21573.00,15300.00,6273.00,5/1/2014,5,May,2014 +Government,France,Velo,1496,350.00,523600.00,31416.00,492184.00,388960.00,103224.00,6/1/2014,6,June,2014 +Government,Mexico,Velo,1498,7.00,10486.00,629.16,9856.84,7490.00,2366.84,6/1/2014,6,June,2014 +Small Business,France,Velo,1221,300.00,366300.00,21978.00,344322.00,305250.00,39072.00,10/1/2013,10,October,2013 +Government,France,Velo,2076,350.00,726600.00,43596.00,683004.00,539760.00,143244.00,10/1/2013,10,October,2013 +Midmarket,Canada,VTT,2844,15.00,42660.00,2559.60,40100.40,28440.00,11660.40,6/1/2014,6,June,2014 +Government,Mexico,VTT,1498,7.00,10486.00,629.16,9856.84,7490.00,2366.84,6/1/2014,6,June,2014 +Small Business,France,VTT,1221,300.00,366300.00,21978.00,344322.00,305250.00,39072.00,10/1/2013,10,October,2013 +Government,Mexico,VTT,1123,20.00,22460.00,1347.60,21112.40,11230.00,9882.40,11/1/2013,11,November,2013 +Small 
Business,Canada,VTT,2436,300.00,730800.00,43848.00,686952.00,609000.00,77952.00,12/1/2013,12,December,2013 +Enterprise,France,Amarilla,1987.5,125.00,248437.50,14906.25,233531.25,238500.00,-4968.75,1/1/2014,1,January,2014 +Government,Mexico,Amarilla,1679,350.00,587650.00,35259.00,552391.00,436540.00,115851.00,9/1/2014,9,September,2014 +Government,United States of America,Amarilla,727,350.00,254450.00,15267.00,239183.00,189020.00,50163.00,10/1/2013,10,October,2013 +Government,France,Amarilla,1403,7.00,9821.00,589.26,9231.74,7015.00,2216.74,10/1/2013,10,October,2013 +Government,France,Amarilla,2076,350.00,726600.00,43596.00,683004.00,539760.00,143244.00,10/1/2013,10,October,2013 +Government,France,Montana,1757,20.00,35140.00,2108.40,33031.60,17570.00,15461.60,10/1/2013,10,October,2013 +Midmarket,United States of America,Paseo,2198,15.00,32970.00,1978.20,30991.80,21980.00,9011.80,8/1/2014,8,August,2014 +Midmarket,Germany,Paseo,1743,15.00,26145.00,1568.70,24576.30,17430.00,7146.30,8/1/2014,8,August,2014 +Midmarket,United States of America,Paseo,1153,15.00,17295.00,1037.70,16257.30,11530.00,4727.30,10/1/2014,10,October,2014 +Government,France,Paseo,1757,20.00,35140.00,2108.40,33031.60,17570.00,15461.60,10/1/2013,10,October,2013 +Government,Germany,Velo,1001,20.00,20020.00,1201.20,18818.80,10010.00,8808.80,8/1/2014,8,August,2014 +Government,Mexico,Velo,1333,7.00,9331.00,559.86,8771.14,6665.00,2106.14,11/1/2014,11,November,2014 +Midmarket,United States of America,VTT,1153,15.00,17295.00,1037.70,16257.30,11530.00,4727.30,10/1/2014,10,October,2014 +Channel Partners,Mexico,Carretera,727,12.00,8724.00,610.68,8113.32,2181.00,5932.32,2/1/2014,2,February,2014 +Channel Partners,Canada,Carretera,1884,12.00,22608.00,1582.56,21025.44,5652.00,15373.44,8/1/2014,8,August,2014 +Government,Mexico,Carretera,1834,20.00,36680.00,2567.60,34112.40,18340.00,15772.40,9/1/2013,9,September,2013 +Channel Partners,Mexico,Montana,2340,12.00,28080.00,1965.60,26114.40,7020.00,19094.40,1/1/2014,1,January,2014 +Channel Partners,France,Montana,2342,12.00,28104.00,1967.28,26136.72,7026.00,19110.72,11/1/2014,11,November,2014 +Government,France,Paseo,1031,7.00,7217.00,505.19,6711.81,5155.00,1556.81,9/1/2013,9,September,2013 +Midmarket,Canada,Velo,1262,15.00,18930.00,1325.10,17604.90,12620.00,4984.90,5/1/2014,5,May,2014 +Government,Canada,Velo,1135,7.00,7945.00,556.15,7388.85,5675.00,1713.85,6/1/2014,6,June,2014 +Government,United States of America,Velo,547,7.00,3829.00,268.03,3560.97,2735.00,825.97,11/1/2014,11,November,2014 +Government,Canada,Velo,1582,7.00,11074.00,775.18,10298.82,7910.00,2388.82,12/1/2014,12,December,2014 +Channel Partners,France,VTT,1738.5,12.00,20862.00,1460.34,19401.66,5215.50,14186.16,4/1/2014,4,April,2014 +Channel Partners,Germany,VTT,2215,12.00,26580.00,1860.60,24719.40,6645.00,18074.40,9/1/2013,9,September,2013 +Government,Canada,VTT,1582,7.00,11074.00,775.18,10298.82,7910.00,2388.82,12/1/2014,12,December,2014 +Government,Canada,Amarilla,1135,7.00,7945.00,556.15,7388.85,5675.00,1713.85,6/1/2014,6,June,2014 +Government,United States of America,Carretera,1761,350.00,616350.00,43144.50,573205.50,457860.00,115345.50,3/1/2014,3,March,2014 +Small Business,France,Carretera,448,300.00,134400.00,9408.00,124992.00,112000.00,12992.00,6/1/2014,6,June,2014 +Small Business,France,Carretera,2181,300.00,654300.00,45801.00,608499.00,545250.00,63249.00,10/1/2014,10,October,2014 +Government,France,Montana,1976,20.00,39520.00,2766.40,36753.60,19760.00,16993.60,10/1/2014,10,October,2014 +Small 
Business,France,Montana,2181,300.00,654300.00,45801.00,608499.00,545250.00,63249.00,10/1/2014,10,October,2014 +Enterprise,Germany,Montana,2500,125.00,312500.00,21875.00,290625.00,300000.00,-9375.00,11/1/2013,11,November,2013 +Small Business,Canada,Paseo,1702,300.00,510600.00,35742.00,474858.00,425500.00,49358.00,5/1/2014,5,May,2014 +Small Business,France,Paseo,448,300.00,134400.00,9408.00,124992.00,112000.00,12992.00,6/1/2014,6,June,2014 +Enterprise,Germany,Paseo,3513,125.00,439125.00,30738.75,408386.25,421560.00,-13173.75,7/1/2014,7,July,2014 +Midmarket,France,Paseo,2101,15.00,31515.00,2206.05,29308.95,21010.00,8298.95,8/1/2014,8,August,2014 +Midmarket,United States of America,Paseo,2931,15.00,43965.00,3077.55,40887.45,29310.00,11577.45,9/1/2013,9,September,2013 +Government,France,Paseo,1535,20.00,30700.00,2149.00,28551.00,15350.00,13201.00,9/1/2014,9,September,2014 +Small Business,Germany,Paseo,1123,300.00,336900.00,23583.00,313317.00,280750.00,32567.00,9/1/2013,9,September,2013 +Small Business,Canada,Paseo,1404,300.00,421200.00,29484.00,391716.00,351000.00,40716.00,11/1/2013,11,November,2013 +Channel Partners,Mexico,Paseo,2763,12.00,33156.00,2320.92,30835.08,8289.00,22546.08,11/1/2013,11,November,2013 +Government,Germany,Paseo,2125,7.00,14875.00,1041.25,13833.75,10625.00,3208.75,12/1/2013,12,December,2013 +Small Business,France,Velo,1659,300.00,497700.00,34839.00,462861.00,414750.00,48111.00,7/1/2014,7,July,2014 +Government,Mexico,Velo,609,20.00,12180.00,852.60,11327.40,6090.00,5237.40,8/1/2014,8,August,2014 +Enterprise,Germany,Velo,2087,125.00,260875.00,18261.25,242613.75,250440.00,-7826.25,9/1/2014,9,September,2014 +Government,France,Velo,1976,20.00,39520.00,2766.40,36753.60,19760.00,16993.60,10/1/2014,10,October,2014 +Government,United States of America,Velo,1421,20.00,28420.00,1989.40,26430.60,14210.00,12220.60,12/1/2013,12,December,2013 +Small Business,United States of America,Velo,1372,300.00,411600.00,28812.00,382788.00,343000.00,39788.00,12/1/2014,12,December,2014 +Government,Germany,Velo,588,20.00,11760.00,823.20,10936.80,5880.00,5056.80,12/1/2013,12,December,2013 +Channel Partners,Canada,VTT,3244.5,12.00,38934.00,2725.38,36208.62,9733.50,26475.12,1/1/2014,1,January,2014 +Small Business,France,VTT,959,300.00,287700.00,20139.00,267561.00,239750.00,27811.00,2/1/2014,2,February,2014 +Small Business,Mexico,VTT,2747,300.00,824100.00,57687.00,766413.00,686750.00,79663.00,2/1/2014,2,February,2014 +Enterprise,Canada,Amarilla,1645,125.00,205625.00,14393.75,191231.25,197400.00,-6168.75,5/1/2014,5,May,2014 +Government,France,Amarilla,2876,350.00,1006600.00,70462.00,936138.00,747760.00,188378.00,9/1/2014,9,September,2014 +Enterprise,Germany,Amarilla,994,125.00,124250.00,8697.50,115552.50,119280.00,-3727.50,9/1/2013,9,September,2013 +Government,Canada,Amarilla,1118,20.00,22360.00,1565.20,20794.80,11180.00,9614.80,11/1/2014,11,November,2014 +Small Business,United States of America,Amarilla,1372,300.00,411600.00,28812.00,382788.00,343000.00,39788.00,12/1/2014,12,December,2014 +Government,Canada,Montana,488,7.00,3416.00,273.28,3142.72,2440.00,702.72,2/1/2014,2,February,2014 +Government,United States of America,Montana,1282,20.00,25640.00,2051.20,23588.80,12820.00,10768.80,6/1/2014,6,June,2014 +Government,Canada,Paseo,257,7.00,1799.00,143.92,1655.08,1285.00,370.08,5/1/2014,5,May,2014 +Government,United States of America,Amarilla,1282,20.00,25640.00,2051.20,23588.80,12820.00,10768.80,6/1/2014,6,June,2014 
+Enterprise,Mexico,Carretera,1540,125.00,192500.00,15400.00,177100.00,184800.00,-7700.00,8/1/2014,8,August,2014 +Midmarket,France,Carretera,490,15.00,7350.00,588.00,6762.00,4900.00,1862.00,11/1/2014,11,November,2014 +Government,Mexico,Carretera,1362,350.00,476700.00,38136.00,438564.00,354120.00,84444.00,12/1/2014,12,December,2014 +Midmarket,France,Montana,2501,15.00,37515.00,3001.20,34513.80,25010.00,9503.80,3/1/2014,3,March,2014 +Government,Canada,Montana,708,20.00,14160.00,1132.80,13027.20,7080.00,5947.20,6/1/2014,6,June,2014 +Government,Germany,Montana,645,20.00,12900.00,1032.00,11868.00,6450.00,5418.00,7/1/2014,7,July,2014 +Small Business,France,Montana,1562,300.00,468600.00,37488.00,431112.00,390500.00,40612.00,8/1/2014,8,August,2014 +Small Business,Canada,Montana,1283,300.00,384900.00,30792.00,354108.00,320750.00,33358.00,9/1/2013,9,September,2013 +Midmarket,Germany,Montana,711,15.00,10665.00,853.20,9811.80,7110.00,2701.80,12/1/2014,12,December,2014 +Enterprise,Mexico,Paseo,1114,125.00,139250.00,11140.00,128110.00,133680.00,-5570.00,3/1/2014,3,March,2014 +Government,Germany,Paseo,1259,7.00,8813.00,705.04,8107.96,6295.00,1812.96,4/1/2014,4,April,2014 +Government,Germany,Paseo,1095,7.00,7665.00,613.20,7051.80,5475.00,1576.80,5/1/2014,5,May,2014 +Government,Germany,Paseo,1366,20.00,27320.00,2185.60,25134.40,13660.00,11474.40,6/1/2014,6,June,2014 +Small Business,Mexico,Paseo,2460,300.00,738000.00,59040.00,678960.00,615000.00,63960.00,6/1/2014,6,June,2014 +Government,United States of America,Paseo,678,7.00,4746.00,379.68,4366.32,3390.00,976.32,8/1/2014,8,August,2014 +Government,Germany,Paseo,1598,7.00,11186.00,894.88,10291.12,7990.00,2301.12,8/1/2014,8,August,2014 +Government,Germany,Paseo,2409,7.00,16863.00,1349.04,15513.96,12045.00,3468.96,9/1/2013,9,September,2013 +Government,Germany,Paseo,1934,20.00,38680.00,3094.40,35585.60,19340.00,16245.60,9/1/2014,9,September,2014 +Government,Mexico,Paseo,2993,20.00,59860.00,4788.80,55071.20,29930.00,25141.20,9/1/2014,9,September,2014 +Government,Germany,Paseo,2146,350.00,751100.00,60088.00,691012.00,557960.00,133052.00,11/1/2013,11,November,2013 +Government,Mexico,Paseo,1946,7.00,13622.00,1089.76,12532.24,9730.00,2802.24,12/1/2013,12,December,2013 +Government,Mexico,Paseo,1362,350.00,476700.00,38136.00,438564.00,354120.00,84444.00,12/1/2014,12,December,2014 +Channel Partners,Canada,Velo,598,12.00,7176.00,574.08,6601.92,1794.00,4807.92,3/1/2014,3,March,2014 +Government,United States of America,Velo,2907,7.00,20349.00,1627.92,18721.08,14535.00,4186.08,6/1/2014,6,June,2014 +Government,Germany,Velo,2338,7.00,16366.00,1309.28,15056.72,11690.00,3366.72,6/1/2014,6,June,2014 +Small Business,France,Velo,386,300.00,115800.00,9264.00,106536.00,96500.00,10036.00,11/1/2013,11,November,2013 +Small Business,Mexico,Velo,635,300.00,190500.00,15240.00,175260.00,158750.00,16510.00,12/1/2014,12,December,2014 +Government,France,VTT,574.5,350.00,201075.00,16086.00,184989.00,149370.00,35619.00,4/1/2014,4,April,2014 +Government,Germany,VTT,2338,7.00,16366.00,1309.28,15056.72,11690.00,3366.72,6/1/2014,6,June,2014 +Government,France,VTT,381,350.00,133350.00,10668.00,122682.00,99060.00,23622.00,8/1/2014,8,August,2014 +Government,Germany,VTT,422,350.00,147700.00,11816.00,135884.00,109720.00,26164.00,8/1/2014,8,August,2014 +Small Business,Canada,VTT,2134,300.00,640200.00,51216.00,588984.00,533500.00,55484.00,9/1/2014,9,September,2014 +Small Business,United States of America,VTT,808,300.00,242400.00,19392.00,223008.00,202000.00,21008.00,12/1/2013,12,December,2013 
+Government,Canada,Amarilla,708,20.00,14160.00,1132.80,13027.20,7080.00,5947.20,6/1/2014,6,June,2014 +Government,United States of America,Amarilla,2907,7.00,20349.00,1627.92,18721.08,14535.00,4186.08,6/1/2014,6,June,2014 +Government,Germany,Amarilla,1366,20.00,27320.00,2185.60,25134.40,13660.00,11474.40,6/1/2014,6,June,2014 +Small Business,Mexico,Amarilla,2460,300.00,738000.00,59040.00,678960.00,615000.00,63960.00,6/1/2014,6,June,2014 +Government,Germany,Amarilla,1520,20.00,30400.00,2432.00,27968.00,15200.00,12768.00,11/1/2014,11,November,2014 +Midmarket,Germany,Amarilla,711,15.00,10665.00,853.20,9811.80,7110.00,2701.80,12/1/2014,12,December,2014 +Channel Partners,Mexico,Amarilla,1375,12.00,16500.00,1320.00,15180.00,4125.00,11055.00,12/1/2013,12,December,2013 +Small Business,Mexico,Amarilla,635,300.00,190500.00,15240.00,175260.00,158750.00,16510.00,12/1/2014,12,December,2014 +Government,United States of America,VTT,436.5,20.00,8730.00,698.40,8031.60,4365.00,3666.60,7/1/2014,7,July,2014 +Small Business,Canada,Carretera,1094,300.00,328200.00,29538.00,298662.00,273500.00,25162.00,6/1/2014,6,June,2014 +Channel Partners,Mexico,Carretera,367,12.00,4404.00,396.36,4007.64,1101.00,2906.64,10/1/2013,10,October,2013 +Small Business,Canada,Montana,3802.5,300.00,1140750.00,102667.50,1038082.50,950625.00,87457.50,4/1/2014,4,April,2014 +Government,France,Montana,1666,350.00,583100.00,52479.00,530621.00,433160.00,97461.00,5/1/2014,5,May,2014 +Small Business,France,Montana,322,300.00,96600.00,8694.00,87906.00,80500.00,7406.00,9/1/2013,9,September,2013 +Channel Partners,Canada,Montana,2321,12.00,27852.00,2506.68,25345.32,6963.00,18382.32,11/1/2014,11,November,2014 +Enterprise,France,Montana,1857,125.00,232125.00,20891.25,211233.75,222840.00,-11606.25,11/1/2013,11,November,2013 +Government,Canada,Montana,1611,7.00,11277.00,1014.93,10262.07,8055.00,2207.07,12/1/2013,12,December,2013 +Enterprise,United States of America,Montana,2797,125.00,349625.00,31466.25,318158.75,335640.00,-17481.25,12/1/2014,12,December,2014 +Small Business,Germany,Montana,334,300.00,100200.00,9018.00,91182.00,83500.00,7682.00,12/1/2013,12,December,2013 +Small Business,Mexico,Paseo,2565,300.00,769500.00,69255.00,700245.00,641250.00,58995.00,1/1/2014,1,January,2014 +Government,Mexico,Paseo,2417,350.00,845950.00,76135.50,769814.50,628420.00,141394.50,1/1/2014,1,January,2014 +Midmarket,United States of America,Paseo,3675,15.00,55125.00,4961.25,50163.75,36750.00,13413.75,4/1/2014,4,April,2014 +Small Business,Canada,Paseo,1094,300.00,328200.00,29538.00,298662.00,273500.00,25162.00,6/1/2014,6,June,2014 +Midmarket,France,Paseo,1227,15.00,18405.00,1656.45,16748.55,12270.00,4478.55,10/1/2014,10,October,2014 +Channel Partners,Mexico,Paseo,367,12.00,4404.00,396.36,4007.64,1101.00,2906.64,10/1/2013,10,October,2013 +Small Business,France,Paseo,1324,300.00,397200.00,35748.00,361452.00,331000.00,30452.00,11/1/2014,11,November,2014 +Channel Partners,Germany,Paseo,1775,12.00,21300.00,1917.00,19383.00,5325.00,14058.00,11/1/2013,11,November,2013 +Enterprise,United States of America,Paseo,2797,125.00,349625.00,31466.25,318158.75,335640.00,-17481.25,12/1/2014,12,December,2014 +Midmarket,Mexico,Velo,245,15.00,3675.00,330.75,3344.25,2450.00,894.25,5/1/2014,5,May,2014 +Small Business,Canada,Velo,3793.5,300.00,1138050.00,102424.50,1035625.50,948375.00,87250.50,7/1/2014,7,July,2014 +Government,Germany,Velo,1307,350.00,457450.00,41170.50,416279.50,339820.00,76459.50,7/1/2014,7,July,2014 
+Enterprise,Canada,Velo,567,125.00,70875.00,6378.75,64496.25,68040.00,-3543.75,9/1/2014,9,September,2014 +Enterprise,Mexico,Velo,2110,125.00,263750.00,23737.50,240012.50,253200.00,-13187.50,9/1/2014,9,September,2014 +Government,Canada,Velo,1269,350.00,444150.00,39973.50,404176.50,329940.00,74236.50,10/1/2014,10,October,2014 +Channel Partners,United States of America,VTT,1956,12.00,23472.00,2112.48,21359.52,5868.00,15491.52,1/1/2014,1,January,2014 +Small Business,Germany,VTT,2659,300.00,797700.00,71793.00,725907.00,664750.00,61157.00,2/1/2014,2,February,2014 +Government,United States of America,VTT,1351.5,350.00,473025.00,42572.25,430452.75,351390.00,79062.75,4/1/2014,4,April,2014 +Channel Partners,Germany,VTT,880,12.00,10560.00,950.40,9609.60,2640.00,6969.60,5/1/2014,5,May,2014 +Small Business,United States of America,VTT,1867,300.00,560100.00,50409.00,509691.00,466750.00,42941.00,9/1/2014,9,September,2014 +Channel Partners,France,VTT,2234,12.00,26808.00,2412.72,24395.28,6702.00,17693.28,9/1/2013,9,September,2013 +Midmarket,France,VTT,1227,15.00,18405.00,1656.45,16748.55,12270.00,4478.55,10/1/2014,10,October,2014 +Enterprise,Mexico,VTT,877,125.00,109625.00,9866.25,99758.75,105240.00,-5481.25,11/1/2014,11,November,2014 +Government,United States of America,Amarilla,2071,350.00,724850.00,65236.50,659613.50,538460.00,121153.50,9/1/2014,9,September,2014 +Government,Canada,Amarilla,1269,350.00,444150.00,39973.50,404176.50,329940.00,74236.50,10/1/2014,10,October,2014 +Midmarket,Germany,Amarilla,970,15.00,14550.00,1309.50,13240.50,9700.00,3540.50,11/1/2013,11,November,2013 +Government,Mexico,Amarilla,1694,20.00,33880.00,3049.20,30830.80,16940.00,13890.80,11/1/2014,11,November,2014 +Government,Germany,Carretera,663,20.00,13260.00,1193.40,12066.60,6630.00,5436.60,5/1/2014,5,May,2014 +Government,Canada,Carretera,819,7.00,5733.00,515.97,5217.03,4095.00,1122.03,7/1/2014,7,July,2014 +Channel Partners,Germany,Carretera,1580,12.00,18960.00,1706.40,17253.60,4740.00,12513.60,9/1/2014,9,September,2014 +Government,Mexico,Carretera,521,7.00,3647.00,328.23,3318.77,2605.00,713.77,12/1/2014,12,December,2014 +Government,United States of America,Paseo,973,20.00,19460.00,1751.40,17708.60,9730.00,7978.60,3/1/2014,3,March,2014 +Government,Mexico,Paseo,1038,20.00,20760.00,1868.40,18891.60,10380.00,8511.60,6/1/2014,6,June,2014 +Government,Germany,Paseo,360,7.00,2520.00,226.80,2293.20,1800.00,493.20,10/1/2014,10,October,2014 +Channel Partners,France,Velo,1967,12.00,23604.00,2124.36,21479.64,5901.00,15578.64,3/1/2014,3,March,2014 +Midmarket,Mexico,Velo,2628,15.00,39420.00,3547.80,35872.20,26280.00,9592.20,4/1/2014,4,April,2014 +Government,Germany,VTT,360,7.00,2520.00,226.80,2293.20,1800.00,493.20,10/1/2014,10,October,2014 +Government,France,VTT,2682,20.00,53640.00,4827.60,48812.40,26820.00,21992.40,11/1/2013,11,November,2013 +Government,Mexico,VTT,521,7.00,3647.00,328.23,3318.77,2605.00,713.77,12/1/2014,12,December,2014 +Government,Mexico,Amarilla,1038,20.00,20760.00,1868.40,18891.60,10380.00,8511.60,6/1/2014,6,June,2014 +Midmarket,Canada,Amarilla,1630.5,15.00,24457.50,2201.18,22256.33,16305.00,5951.33,7/1/2014,7,July,2014 +Channel Partners,France,Amarilla,306,12.00,3672.00,330.48,3341.52,918.00,2423.52,12/1/2013,12,December,2013 +Channel Partners,United States of America,Carretera,386,12.00,4632.00,463.20,4168.80,1158.00,3010.80,10/1/2013,10,October,2013 +Government,United States of America,Montana,2328,7.00,16296.00,1629.60,14666.40,11640.00,3026.40,9/1/2014,9,September,2014 +Channel Partners,United States of 
America,Paseo,386,12.00,4632.00,463.20,4168.80,1158.00,3010.80,10/1/2013,10,October,2013 +Enterprise,United States of America,Carretera,3445.5,125.00,430687.50,43068.75,387618.75,413460.00,-25841.25,4/1/2014,4,April,2014 +Enterprise,France,Carretera,1482,125.00,185250.00,18525.00,166725.00,177840.00,-11115.00,12/1/2013,12,December,2013 +Government,United States of America,Montana,2313,350.00,809550.00,80955.00,728595.00,601380.00,127215.00,5/1/2014,5,May,2014 +Enterprise,United States of America,Montana,1804,125.00,225500.00,22550.00,202950.00,216480.00,-13530.00,11/1/2013,11,November,2013 +Midmarket,France,Montana,2072,15.00,31080.00,3108.00,27972.00,20720.00,7252.00,12/1/2014,12,December,2014 +Government,France,Paseo,1954,20.00,39080.00,3908.00,35172.00,19540.00,15632.00,3/1/2014,3,March,2014 +Small Business,Mexico,Paseo,591,300.00,177300.00,17730.00,159570.00,147750.00,11820.00,5/1/2014,5,May,2014 +Midmarket,France,Paseo,2167,15.00,32505.00,3250.50,29254.50,21670.00,7584.50,10/1/2013,10,October,2013 +Government,Germany,Paseo,241,20.00,4820.00,482.00,4338.00,2410.00,1928.00,10/1/2014,10,October,2014 +Midmarket,Germany,Velo,681,15.00,10215.00,1021.50,9193.50,6810.00,2383.50,1/1/2014,1,January,2014 +Midmarket,Germany,Velo,510,15.00,7650.00,765.00,6885.00,5100.00,1785.00,4/1/2014,4,April,2014 +Midmarket,United States of America,Velo,790,15.00,11850.00,1185.00,10665.00,7900.00,2765.00,5/1/2014,5,May,2014 +Government,France,Velo,639,350.00,223650.00,22365.00,201285.00,166140.00,35145.00,7/1/2014,7,July,2014 +Enterprise,United States of America,Velo,1596,125.00,199500.00,19950.00,179550.00,191520.00,-11970.00,9/1/2014,9,September,2014 +Small Business,United States of America,Velo,2294,300.00,688200.00,68820.00,619380.00,573500.00,45880.00,10/1/2013,10,October,2013 +Government,Germany,Velo,241,20.00,4820.00,482.00,4338.00,2410.00,1928.00,10/1/2014,10,October,2014 +Government,Germany,Velo,2665,7.00,18655.00,1865.50,16789.50,13325.00,3464.50,11/1/2014,11,November,2014 +Enterprise,Canada,Velo,1916,125.00,239500.00,23950.00,215550.00,229920.00,-14370.00,12/1/2013,12,December,2013 +Small Business,France,Velo,853,300.00,255900.00,25590.00,230310.00,213250.00,17060.00,12/1/2014,12,December,2014 +Enterprise,Mexico,VTT,341,125.00,42625.00,4262.50,38362.50,40920.00,-2557.50,5/1/2014,5,May,2014 +Midmarket,Mexico,VTT,641,15.00,9615.00,961.50,8653.50,6410.00,2243.50,7/1/2014,7,July,2014 +Government,United States of America,VTT,2807,350.00,982450.00,98245.00,884205.00,729820.00,154385.00,8/1/2014,8,August,2014 +Small Business,Mexico,VTT,432,300.00,129600.00,12960.00,116640.00,108000.00,8640.00,9/1/2014,9,September,2014 +Small Business,United States of America,VTT,2294,300.00,688200.00,68820.00,619380.00,573500.00,45880.00,10/1/2013,10,October,2013 +Midmarket,France,VTT,2167,15.00,32505.00,3250.50,29254.50,21670.00,7584.50,10/1/2013,10,October,2013 +Enterprise,Canada,VTT,2529,125.00,316125.00,31612.50,284512.50,303480.00,-18967.50,11/1/2014,11,November,2014 +Government,Germany,VTT,1870,350.00,654500.00,65450.00,589050.00,486200.00,102850.00,12/1/2013,12,December,2013 +Enterprise,United States of America,Amarilla,579,125.00,72375.00,7237.50,65137.50,69480.00,-4342.50,1/1/2014,1,January,2014 +Government,Canada,Amarilla,2240,350.00,784000.00,78400.00,705600.00,582400.00,123200.00,2/1/2014,2,February,2014 +Small Business,United States of America,Amarilla,2993,300.00,897900.00,89790.00,808110.00,748250.00,59860.00,3/1/2014,3,March,2014 +Channel 
Partners,Canada,Amarilla,3520.5,12.00,42246.00,4224.60,38021.40,10561.50,27459.90,4/1/2014,4,April,2014 +Government,Mexico,Amarilla,2039,20.00,40780.00,4078.00,36702.00,20390.00,16312.00,5/1/2014,5,May,2014 +Channel Partners,Germany,Amarilla,2574,12.00,30888.00,3088.80,27799.20,7722.00,20077.20,8/1/2014,8,August,2014 +Government,Canada,Amarilla,707,350.00,247450.00,24745.00,222705.00,183820.00,38885.00,9/1/2014,9,September,2014 +Midmarket,France,Amarilla,2072,15.00,31080.00,3108.00,27972.00,20720.00,7252.00,12/1/2014,12,December,2014 +Small Business,France,Amarilla,853,300.00,255900.00,25590.00,230310.00,213250.00,17060.00,12/1/2014,12,December,2014 +Channel Partners,France,Carretera,1198,12.00,14376.00,1581.36,12794.64,3594.00,9200.64,10/1/2013,10,October,2013 +Government,France,Paseo,2532,7.00,17724.00,1949.64,15774.36,12660.00,3114.36,4/1/2014,4,April,2014 +Channel Partners,France,Paseo,1198,12.00,14376.00,1581.36,12794.64,3594.00,9200.64,10/1/2013,10,October,2013 +Midmarket,Canada,Velo,384,15.00,5760.00,633.60,5126.40,3840.00,1286.40,1/1/2014,1,January,2014 +Channel Partners,Germany,Velo,472,12.00,5664.00,623.04,5040.96,1416.00,3624.96,10/1/2014,10,October,2014 +Government,United States of America,VTT,1579,7.00,11053.00,1215.83,9837.17,7895.00,1942.17,3/1/2014,3,March,2014 +Channel Partners,Mexico,VTT,1005,12.00,12060.00,1326.60,10733.40,3015.00,7718.40,9/1/2013,9,September,2013 +Midmarket,United States of America,Amarilla,3199.5,15.00,47992.50,5279.18,42713.33,31995.00,10718.33,7/1/2014,7,July,2014 +Channel Partners,Germany,Amarilla,472,12.00,5664.00,623.04,5040.96,1416.00,3624.96,10/1/2014,10,October,2014 +Channel Partners,Canada,Carretera,1937,12.00,23244.00,2556.84,20687.16,5811.00,14876.16,2/1/2014,2,February,2014 +Government,Germany,Carretera,792,350.00,277200.00,30492.00,246708.00,205920.00,40788.00,3/1/2014,3,March,2014 +Small Business,Germany,Carretera,2811,300.00,843300.00,92763.00,750537.00,702750.00,47787.00,7/1/2014,7,July,2014 +Enterprise,France,Carretera,2441,125.00,305125.00,33563.75,271561.25,292920.00,-21358.75,10/1/2014,10,October,2014 +Midmarket,Canada,Carretera,1560,15.00,23400.00,2574.00,20826.00,15600.00,5226.00,11/1/2013,11,November,2013 +Government,Mexico,Carretera,2706,7.00,18942.00,2083.62,16858.38,13530.00,3328.38,11/1/2013,11,November,2013 +Government,Germany,Montana,766,350.00,268100.00,29491.00,238609.00,199160.00,39449.00,1/1/2014,1,January,2014 +Government,Germany,Montana,2992,20.00,59840.00,6582.40,53257.60,29920.00,23337.60,10/1/2013,10,October,2013 +Midmarket,Mexico,Montana,2157,15.00,32355.00,3559.05,28795.95,21570.00,7225.95,12/1/2014,12,December,2014 +Small Business,Canada,Paseo,873,300.00,261900.00,28809.00,233091.00,218250.00,14841.00,1/1/2014,1,January,2014 +Government,Mexico,Paseo,1122,20.00,22440.00,2468.40,19971.60,11220.00,8751.60,3/1/2014,3,March,2014 +Government,Canada,Paseo,2104.5,350.00,736575.00,81023.25,655551.75,547170.00,108381.75,7/1/2014,7,July,2014 +Channel Partners,Canada,Paseo,4026,12.00,48312.00,5314.32,42997.68,12078.00,30919.68,7/1/2014,7,July,2014 +Channel Partners,France,Paseo,2425.5,12.00,29106.00,3201.66,25904.34,7276.50,18627.84,7/1/2014,7,July,2014 +Government,Canada,Paseo,2394,20.00,47880.00,5266.80,42613.20,23940.00,18673.20,8/1/2014,8,August,2014 +Midmarket,Mexico,Paseo,1984,15.00,29760.00,3273.60,26486.40,19840.00,6646.40,8/1/2014,8,August,2014 +Enterprise,France,Paseo,2441,125.00,305125.00,33563.75,271561.25,292920.00,-21358.75,10/1/2014,10,October,2014 
+Government,Germany,Paseo,2992,20.00,59840.00,6582.40,53257.60,29920.00,23337.60,10/1/2013,10,October,2013 +Small Business,Canada,Paseo,1366,300.00,409800.00,45078.00,364722.00,341500.00,23222.00,11/1/2014,11,November,2014 +Government,France,Velo,2805,20.00,56100.00,6171.00,49929.00,28050.00,21879.00,9/1/2013,9,September,2013 +Midmarket,Mexico,Velo,655,15.00,9825.00,1080.75,8744.25,6550.00,2194.25,9/1/2013,9,September,2013 +Government,Mexico,Velo,344,350.00,120400.00,13244.00,107156.00,89440.00,17716.00,10/1/2013,10,October,2013 +Government,Canada,Velo,1808,7.00,12656.00,1392.16,11263.84,9040.00,2223.84,11/1/2014,11,November,2014 +Channel Partners,France,VTT,1734,12.00,20808.00,2288.88,18519.12,5202.00,13317.12,1/1/2014,1,January,2014 +Enterprise,Mexico,VTT,554,125.00,69250.00,7617.50,61632.50,66480.00,-4847.50,1/1/2014,1,January,2014 +Government,Canada,VTT,2935,20.00,58700.00,6457.00,52243.00,29350.00,22893.00,11/1/2013,11,November,2013 +Enterprise,Germany,Amarilla,3165,125.00,395625.00,43518.75,352106.25,379800.00,-27693.75,1/1/2014,1,January,2014 +Government,Mexico,Amarilla,2629,20.00,52580.00,5783.80,46796.20,26290.00,20506.20,1/1/2014,1,January,2014 +Enterprise,France,Amarilla,1433,125.00,179125.00,19703.75,159421.25,171960.00,-12538.75,5/1/2014,5,May,2014 +Enterprise,Mexico,Amarilla,947,125.00,118375.00,13021.25,105353.75,113640.00,-8286.25,9/1/2013,9,September,2013 +Government,Mexico,Amarilla,344,350.00,120400.00,13244.00,107156.00,89440.00,17716.00,10/1/2013,10,October,2013 +Midmarket,Mexico,Amarilla,2157,15.00,32355.00,3559.05,28795.95,21570.00,7225.95,12/1/2014,12,December,2014 +Government,United States of America,Paseo,380,7.00,2660.00,292.60,2367.40,1900.00,467.40,9/1/2013,9,September,2013 +Government,Mexico,Carretera,886,350.00,310100.00,37212.00,272888.00,230360.00,42528.00,6/1/2014,6,June,2014 +Enterprise,Canada,Carretera,2416,125.00,302000.00,36240.00,265760.00,289920.00,-24160.00,9/1/2013,9,September,2013 +Enterprise,Mexico,Carretera,2156,125.00,269500.00,32340.00,237160.00,258720.00,-21560.00,10/1/2014,10,October,2014 +Midmarket,Canada,Carretera,2689,15.00,40335.00,4840.20,35494.80,26890.00,8604.80,11/1/2014,11,November,2014 +Midmarket,United States of America,Montana,677,15.00,10155.00,1218.60,8936.40,6770.00,2166.40,3/1/2014,3,March,2014 +Small Business,France,Montana,1773,300.00,531900.00,63828.00,468072.00,443250.00,24822.00,4/1/2014,4,April,2014 +Government,Mexico,Montana,2420,7.00,16940.00,2032.80,14907.20,12100.00,2807.20,9/1/2014,9,September,2014 +Government,Canada,Montana,2734,7.00,19138.00,2296.56,16841.44,13670.00,3171.44,10/1/2014,10,October,2014 +Government,Mexico,Montana,1715,20.00,34300.00,4116.00,30184.00,17150.00,13034.00,10/1/2013,10,October,2013 +Small Business,France,Montana,1186,300.00,355800.00,42696.00,313104.00,296500.00,16604.00,12/1/2013,12,December,2013 +Small Business,United States of America,Paseo,3495,300.00,1048500.00,125820.00,922680.00,873750.00,48930.00,1/1/2014,1,January,2014 +Government,Mexico,Paseo,886,350.00,310100.00,37212.00,272888.00,230360.00,42528.00,6/1/2014,6,June,2014 +Enterprise,Mexico,Paseo,2156,125.00,269500.00,32340.00,237160.00,258720.00,-21560.00,10/1/2014,10,October,2014 +Government,Mexico,Paseo,905,20.00,18100.00,2172.00,15928.00,9050.00,6878.00,10/1/2014,10,October,2014 +Government,Mexico,Paseo,1715,20.00,34300.00,4116.00,30184.00,17150.00,13034.00,10/1/2013,10,October,2013 +Government,France,Paseo,1594,350.00,557900.00,66948.00,490952.00,414440.00,76512.00,11/1/2014,11,November,2014 +Small 
Business,Germany,Paseo,1359,300.00,407700.00,48924.00,358776.00,339750.00,19026.00,11/1/2014,11,November,2014 +Small Business,Mexico,Paseo,2150,300.00,645000.00,77400.00,567600.00,537500.00,30100.00,11/1/2014,11,November,2014 +Government,Mexico,Paseo,1197,350.00,418950.00,50274.00,368676.00,311220.00,57456.00,11/1/2014,11,November,2014 +Midmarket,Mexico,Paseo,380,15.00,5700.00,684.00,5016.00,3800.00,1216.00,12/1/2013,12,December,2013 +Government,Mexico,Paseo,1233,20.00,24660.00,2959.20,21700.80,12330.00,9370.80,12/1/2014,12,December,2014 +Government,Mexico,Velo,1395,350.00,488250.00,58590.00,429660.00,362700.00,66960.00,7/1/2014,7,July,2014 +Government,United States of America,Velo,986,350.00,345100.00,41412.00,303688.00,256360.00,47328.00,10/1/2014,10,October,2014 +Government,Mexico,Velo,905,20.00,18100.00,2172.00,15928.00,9050.00,6878.00,10/1/2014,10,October,2014 +Channel Partners,Canada,VTT,2109,12.00,25308.00,3036.96,22271.04,6327.00,15944.04,5/1/2014,5,May,2014 +Midmarket,France,VTT,3874.5,15.00,58117.50,6974.10,51143.40,38745.00,12398.40,7/1/2014,7,July,2014 +Government,Canada,VTT,623,350.00,218050.00,26166.00,191884.00,161980.00,29904.00,9/1/2013,9,September,2013 +Government,United States of America,VTT,986,350.00,345100.00,41412.00,303688.00,256360.00,47328.00,10/1/2014,10,October,2014 +Enterprise,United States of America,VTT,2387,125.00,298375.00,35805.00,262570.00,286440.00,-23870.00,11/1/2014,11,November,2014 +Government,Mexico,VTT,1233,20.00,24660.00,2959.20,21700.80,12330.00,9370.80,12/1/2014,12,December,2014 +Government,United States of America,Amarilla,270,350.00,94500.00,11340.00,83160.00,70200.00,12960.00,2/1/2014,2,February,2014 +Government,France,Amarilla,3421.5,7.00,23950.50,2874.06,21076.44,17107.50,3968.94,7/1/2014,7,July,2014 +Government,Canada,Amarilla,2734,7.00,19138.00,2296.56,16841.44,13670.00,3171.44,10/1/2014,10,October,2014 +Midmarket,United States of America,Amarilla,2548,15.00,38220.00,4586.40,33633.60,25480.00,8153.60,11/1/2013,11,November,2013 +Government,France,Carretera,2521.5,20.00,50430.00,6051.60,44378.40,25215.00,19163.40,1/1/2014,1,January,2014 +Channel Partners,Mexico,Montana,2661,12.00,31932.00,3831.84,28100.16,7983.00,20117.16,5/1/2014,5,May,2014 +Government,Germany,Paseo,1531,20.00,30620.00,3674.40,26945.60,15310.00,11635.60,12/1/2014,12,December,2014 +Government,France,VTT,1491,7.00,10437.00,1252.44,9184.56,7455.00,1729.56,3/1/2014,3,March,2014 +Government,Germany,VTT,1531,20.00,30620.00,3674.40,26945.60,15310.00,11635.60,12/1/2014,12,December,2014 +Channel Partners,Canada,Amarilla,2761,12.00,33132.00,3975.84,29156.16,8283.00,20873.16,9/1/2013,9,September,2013 +Midmarket,United States of America,Carretera,2567,15.00,38505.00,5005.65,33499.35,25670.00,7829.35,6/1/2014,6,June,2014 +Midmarket,United States of America,VTT,2567,15.00,38505.00,5005.65,33499.35,25670.00,7829.35,6/1/2014,6,June,2014 +Government,Canada,Carretera,923,350.00,323050.00,41996.50,281053.50,239980.00,41073.50,3/1/2014,3,March,2014 +Government,France,Carretera,1790,350.00,626500.00,81445.00,545055.00,465400.00,79655.00,3/1/2014,3,March,2014 +Government,Germany,Carretera,442,20.00,8840.00,1149.20,7690.80,4420.00,3270.80,9/1/2013,9,September,2013 +Government,United States of America,Montana,982.5,350.00,343875.00,44703.75,299171.25,255450.00,43721.25,1/1/2014,1,January,2014 +Government,United States of America,Montana,1298,7.00,9086.00,1181.18,7904.82,6490.00,1414.82,2/1/2014,2,February,2014 +Channel 
Partners,Mexico,Montana,604,12.00,7248.00,942.24,6305.76,1812.00,4493.76,6/1/2014,6,June,2014 +Government,Mexico,Montana,2255,20.00,45100.00,5863.00,39237.00,22550.00,16687.00,7/1/2014,7,July,2014 +Government,Canada,Montana,1249,20.00,24980.00,3247.40,21732.60,12490.00,9242.60,10/1/2014,10,October,2014 +Government,United States of America,Paseo,1438.5,7.00,10069.50,1309.04,8760.47,7192.50,1567.97,1/1/2014,1,January,2014 +Small Business,Germany,Paseo,807,300.00,242100.00,31473.00,210627.00,201750.00,8877.00,1/1/2014,1,January,2014 +Government,United States of America,Paseo,2641,20.00,52820.00,6866.60,45953.40,26410.00,19543.40,2/1/2014,2,February,2014 +Government,Germany,Paseo,2708,20.00,54160.00,7040.80,47119.20,27080.00,20039.20,2/1/2014,2,February,2014 +Government,Canada,Paseo,2632,350.00,921200.00,119756.00,801444.00,684320.00,117124.00,6/1/2014,6,June,2014 +Enterprise,Canada,Paseo,1583,125.00,197875.00,25723.75,172151.25,189960.00,-17808.75,6/1/2014,6,June,2014 +Channel Partners,Mexico,Paseo,571,12.00,6852.00,890.76,5961.24,1713.00,4248.24,7/1/2014,7,July,2014 +Government,France,Paseo,2696,7.00,18872.00,2453.36,16418.64,13480.00,2938.64,8/1/2014,8,August,2014 +Midmarket,Canada,Paseo,1565,15.00,23475.00,3051.75,20423.25,15650.00,4773.25,10/1/2014,10,October,2014 +Government,Canada,Paseo,1249,20.00,24980.00,3247.40,21732.60,12490.00,9242.60,10/1/2014,10,October,2014 +Government,Germany,Paseo,357,350.00,124950.00,16243.50,108706.50,92820.00,15886.50,11/1/2014,11,November,2014 +Channel Partners,Germany,Paseo,1013,12.00,12156.00,1580.28,10575.72,3039.00,7536.72,12/1/2014,12,December,2014 +Midmarket,France,Velo,3997.5,15.00,59962.50,7795.13,52167.38,39975.00,12192.38,1/1/2014,1,January,2014 +Government,Canada,Velo,2632,350.00,921200.00,119756.00,801444.00,684320.00,117124.00,6/1/2014,6,June,2014 +Government,France,Velo,1190,7.00,8330.00,1082.90,7247.10,5950.00,1297.10,6/1/2014,6,June,2014 +Channel Partners,Mexico,Velo,604,12.00,7248.00,942.24,6305.76,1812.00,4493.76,6/1/2014,6,June,2014 +Midmarket,Germany,Velo,660,15.00,9900.00,1287.00,8613.00,6600.00,2013.00,9/1/2013,9,September,2013 +Channel Partners,Mexico,Velo,410,12.00,4920.00,639.60,4280.40,1230.00,3050.40,10/1/2014,10,October,2014 +Small Business,Mexico,Velo,2605,300.00,781500.00,101595.00,679905.00,651250.00,28655.00,11/1/2013,11,November,2013 +Channel Partners,Germany,Velo,1013,12.00,12156.00,1580.28,10575.72,3039.00,7536.72,12/1/2014,12,December,2014 +Enterprise,Canada,VTT,1583,125.00,197875.00,25723.75,172151.25,189960.00,-17808.75,6/1/2014,6,June,2014 +Midmarket,Canada,VTT,1565,15.00,23475.00,3051.75,20423.25,15650.00,4773.25,10/1/2014,10,October,2014 +Enterprise,Canada,Amarilla,1659,125.00,207375.00,26958.75,180416.25,199080.00,-18663.75,1/1/2014,1,January,2014 +Government,France,Amarilla,1190,7.00,8330.00,1082.90,7247.10,5950.00,1297.10,6/1/2014,6,June,2014 +Channel Partners,Mexico,Amarilla,410,12.00,4920.00,639.60,4280.40,1230.00,3050.40,10/1/2014,10,October,2014 +Channel Partners,Germany,Amarilla,1770,12.00,21240.00,2761.20,18478.80,5310.00,13168.80,12/1/2013,12,December,2013 +Government,Mexico,Carretera,2579,20.00,51580.00,7221.20,44358.80,25790.00,18568.80,4/1/2014,4,April,2014 +Government,United States of America,Carretera,1743,20.00,34860.00,4880.40,29979.60,17430.00,12549.60,5/1/2014,5,May,2014 +Government,United States of America,Carretera,2996,7.00,20972.00,2936.08,18035.92,14980.00,3055.92,10/1/2013,10,October,2013 +Government,Germany,Carretera,280,7.00,1960.00,274.40,1685.60,1400.00,285.60,12/1/2014,12,December,2014 
+Government,France,Montana,293,7.00,2051.00,287.14,1763.86,1465.00,298.86,2/1/2014,2,February,2014 +Government,United States of America,Montana,2996,7.00,20972.00,2936.08,18035.92,14980.00,3055.92,10/1/2013,10,October,2013 +Midmarket,Germany,Paseo,278,15.00,4170.00,583.80,3586.20,2780.00,806.20,2/1/2014,2,February,2014 +Government,Canada,Paseo,2428,20.00,48560.00,6798.40,41761.60,24280.00,17481.60,3/1/2014,3,March,2014 +Midmarket,United States of America,Paseo,1767,15.00,26505.00,3710.70,22794.30,17670.00,5124.30,9/1/2014,9,September,2014 +Channel Partners,France,Paseo,1393,12.00,16716.00,2340.24,14375.76,4179.00,10196.76,10/1/2014,10,October,2014 +Government,Germany,VTT,280,7.00,1960.00,274.40,1685.60,1400.00,285.60,12/1/2014,12,December,2014 +Channel Partners,France,Amarilla,1393,12.00,16716.00,2340.24,14375.76,4179.00,10196.76,10/1/2014,10,October,2014 +Channel Partners,United States of America,Amarilla,2015,12.00,24180.00,3385.20,20794.80,6045.00,14749.80,12/1/2013,12,December,2013 +Small Business,Mexico,Carretera,801,300.00,240300.00,33642.00,206658.00,200250.00,6408.00,7/1/2014,7,July,2014 +Enterprise,France,Carretera,1023,125.00,127875.00,17902.50,109972.50,122760.00,-12787.50,9/1/2013,9,September,2013 +Small Business,Canada,Carretera,1496,300.00,448800.00,62832.00,385968.00,374000.00,11968.00,10/1/2014,10,October,2014 +Small Business,United States of America,Carretera,1010,300.00,303000.00,42420.00,260580.00,252500.00,8080.00,10/1/2014,10,October,2014 +Midmarket,Germany,Carretera,1513,15.00,22695.00,3177.30,19517.70,15130.00,4387.70,11/1/2014,11,November,2014 +Midmarket,Canada,Carretera,2300,15.00,34500.00,4830.00,29670.00,23000.00,6670.00,12/1/2014,12,December,2014 +Enterprise,Mexico,Carretera,2821,125.00,352625.00,49367.50,303257.50,338520.00,-35262.50,12/1/2013,12,December,2013 +Government,Canada,Montana,2227.5,350.00,779625.00,109147.50,670477.50,579150.00,91327.50,1/1/2014,1,January,2014 +Government,Germany,Montana,1199,350.00,419650.00,58751.00,360899.00,311740.00,49159.00,4/1/2014,4,April,2014 +Government,Canada,Montana,200,350.00,70000.00,9800.00,60200.00,52000.00,8200.00,5/1/2014,5,May,2014 +Government,Canada,Montana,388,7.00,2716.00,380.24,2335.76,1940.00,395.76,9/1/2014,9,September,2014 +Government,Mexico,Montana,1727,7.00,12089.00,1692.46,10396.54,8635.00,1761.54,10/1/2013,10,October,2013 +Midmarket,Canada,Montana,2300,15.00,34500.00,4830.00,29670.00,23000.00,6670.00,12/1/2014,12,December,2014 +Government,Mexico,Paseo,260,20.00,5200.00,728.00,4472.00,2600.00,1872.00,2/1/2014,2,February,2014 +Midmarket,Canada,Paseo,2470,15.00,37050.00,5187.00,31863.00,24700.00,7163.00,9/1/2013,9,September,2013 +Midmarket,Canada,Paseo,1743,15.00,26145.00,3660.30,22484.70,17430.00,5054.70,10/1/2013,10,October,2013 +Channel Partners,United States of America,Paseo,2914,12.00,34968.00,4895.52,30072.48,8742.00,21330.48,10/1/2014,10,October,2014 +Government,France,Paseo,1731,7.00,12117.00,1696.38,10420.62,8655.00,1765.62,10/1/2014,10,October,2014 +Government,Canada,Paseo,700,350.00,245000.00,34300.00,210700.00,182000.00,28700.00,11/1/2014,11,November,2014 +Channel Partners,Canada,Paseo,2222,12.00,26664.00,3732.96,22931.04,6666.00,16265.04,11/1/2013,11,November,2013 +Government,United States of America,Paseo,1177,350.00,411950.00,57673.00,354277.00,306020.00,48257.00,11/1/2014,11,November,2014 +Government,France,Paseo,1922,350.00,672700.00,94178.00,578522.00,499720.00,78802.00,11/1/2013,11,November,2013 
+Enterprise,Mexico,Velo,1575,125.00,196875.00,27562.50,169312.50,189000.00,-19687.50,2/1/2014,2,February,2014 +Government,United States of America,Velo,606,20.00,12120.00,1696.80,10423.20,6060.00,4363.20,4/1/2014,4,April,2014 +Small Business,United States of America,Velo,2460,300.00,738000.00,103320.00,634680.00,615000.00,19680.00,7/1/2014,7,July,2014 +Small Business,Canada,Velo,269,300.00,80700.00,11298.00,69402.00,67250.00,2152.00,10/1/2013,10,October,2013 +Small Business,Germany,Velo,2536,300.00,760800.00,106512.00,654288.00,634000.00,20288.00,11/1/2013,11,November,2013 +Government,Mexico,VTT,2903,7.00,20321.00,2844.94,17476.06,14515.00,2961.06,3/1/2014,3,March,2014 +Small Business,United States of America,VTT,2541,300.00,762300.00,106722.00,655578.00,635250.00,20328.00,8/1/2014,8,August,2014 +Small Business,Canada,VTT,269,300.00,80700.00,11298.00,69402.00,67250.00,2152.00,10/1/2013,10,October,2013 +Small Business,Canada,VTT,1496,300.00,448800.00,62832.00,385968.00,374000.00,11968.00,10/1/2014,10,October,2014 +Small Business,United States of America,VTT,1010,300.00,303000.00,42420.00,260580.00,252500.00,8080.00,10/1/2014,10,October,2014 +Government,France,VTT,1281,350.00,448350.00,62769.00,385581.00,333060.00,52521.00,12/1/2013,12,December,2013 +Small Business,Canada,Amarilla,888,300.00,266400.00,37296.00,229104.00,222000.00,7104.00,3/1/2014,3,March,2014 +Enterprise,United States of America,Amarilla,2844,125.00,355500.00,49770.00,305730.00,341280.00,-35550.00,5/1/2014,5,May,2014 +Channel Partners,France,Amarilla,2475,12.00,29700.00,4158.00,25542.00,7425.00,18117.00,8/1/2014,8,August,2014 +Midmarket,Canada,Amarilla,1743,15.00,26145.00,3660.30,22484.70,17430.00,5054.70,10/1/2013,10,October,2013 +Channel Partners,United States of America,Amarilla,2914,12.00,34968.00,4895.52,30072.48,8742.00,21330.48,10/1/2014,10,October,2014 +Government,France,Amarilla,1731,7.00,12117.00,1696.38,10420.62,8655.00,1765.62,10/1/2014,10,October,2014 +Government,Mexico,Amarilla,1727,7.00,12089.00,1692.46,10396.54,8635.00,1761.54,10/1/2013,10,October,2013 +Midmarket,Mexico,Amarilla,1870,15.00,28050.00,3927.00,24123.00,18700.00,5423.00,11/1/2013,11,November,2013 +Enterprise,France,Carretera,1174,125.00,146750.00,22012.50,124737.50,140880.00,-16142.50,8/1/2014,8,August,2014 +Enterprise,Germany,Carretera,2767,125.00,345875.00,51881.25,293993.75,332040.00,-38046.25,8/1/2014,8,August,2014 +Enterprise,Germany,Carretera,1085,125.00,135625.00,20343.75,115281.25,130200.00,-14918.75,10/1/2014,10,October,2014 +Small Business,Mexico,Montana,546,300.00,163800.00,24570.00,139230.00,136500.00,2730.00,10/1/2014,10,October,2014 +Government,Germany,Paseo,1158,20.00,23160.00,3474.00,19686.00,11580.00,8106.00,3/1/2014,3,March,2014 +Midmarket,Canada,Paseo,1614,15.00,24210.00,3631.50,20578.50,16140.00,4438.50,4/1/2014,4,April,2014 +Government,Mexico,Paseo,2535,7.00,17745.00,2661.75,15083.25,12675.00,2408.25,4/1/2014,4,April,2014 +Government,Mexico,Paseo,2851,350.00,997850.00,149677.50,848172.50,741260.00,106912.50,5/1/2014,5,May,2014 +Midmarket,Canada,Paseo,2559,15.00,38385.00,5757.75,32627.25,25590.00,7037.25,8/1/2014,8,August,2014 +Government,United States of America,Paseo,267,20.00,5340.00,801.00,4539.00,2670.00,1869.00,10/1/2013,10,October,2013 +Enterprise,Germany,Paseo,1085,125.00,135625.00,20343.75,115281.25,130200.00,-14918.75,10/1/2014,10,October,2014 +Midmarket,Germany,Paseo,1175,15.00,17625.00,2643.75,14981.25,11750.00,3231.25,10/1/2014,10,October,2014 +Government,United States of 
America,Paseo,2007,350.00,702450.00,105367.50,597082.50,521820.00,75262.50,11/1/2013,11,November,2013 +Government,Mexico,Paseo,2151,350.00,752850.00,112927.50,639922.50,559260.00,80662.50,11/1/2013,11,November,2013 +Channel Partners,United States of America,Paseo,914,12.00,10968.00,1645.20,9322.80,2742.00,6580.80,12/1/2014,12,December,2014 +Government,France,Paseo,293,20.00,5860.00,879.00,4981.00,2930.00,2051.00,12/1/2014,12,December,2014 +Channel Partners,Mexico,Velo,500,12.00,6000.00,900.00,5100.00,1500.00,3600.00,3/1/2014,3,March,2014 +Midmarket,France,Velo,2826,15.00,42390.00,6358.50,36031.50,28260.00,7771.50,5/1/2014,5,May,2014 +Enterprise,France,Velo,663,125.00,82875.00,12431.25,70443.75,79560.00,-9116.25,9/1/2014,9,September,2014 +Small Business,United States of America,Velo,2574,300.00,772200.00,115830.00,656370.00,643500.00,12870.00,11/1/2013,11,November,2013 +Enterprise,United States of America,Velo,2438,125.00,304750.00,45712.50,259037.50,292560.00,-33522.50,12/1/2013,12,December,2013 +Channel Partners,United States of America,Velo,914,12.00,10968.00,1645.20,9322.80,2742.00,6580.80,12/1/2014,12,December,2014 +Government,Canada,VTT,865.5,20.00,17310.00,2596.50,14713.50,8655.00,6058.50,7/1/2014,7,July,2014 +Midmarket,Germany,VTT,492,15.00,7380.00,1107.00,6273.00,4920.00,1353.00,7/1/2014,7,July,2014 +Government,United States of America,VTT,267,20.00,5340.00,801.00,4539.00,2670.00,1869.00,10/1/2013,10,October,2013 +Midmarket,Germany,VTT,1175,15.00,17625.00,2643.75,14981.25,11750.00,3231.25,10/1/2014,10,October,2014 +Enterprise,Canada,VTT,2954,125.00,369250.00,55387.50,313862.50,354480.00,-40617.50,11/1/2013,11,November,2013 +Enterprise,Germany,VTT,552,125.00,69000.00,10350.00,58650.00,66240.00,-7590.00,11/1/2014,11,November,2014 +Government,France,VTT,293,20.00,5860.00,879.00,4981.00,2930.00,2051.00,12/1/2014,12,December,2014 +Small Business,France,Amarilla,2475,300.00,742500.00,111375.00,631125.00,618750.00,12375.00,3/1/2014,3,March,2014 +Small Business,Mexico,Amarilla,546,300.00,163800.00,24570.00,139230.00,136500.00,2730.00,10/1/2014,10,October,2014 +Government,Mexico,Montana,1368,7.00,9576.00,1436.40,8139.60,6840.00,1299.60,2/1/2014,2,February,2014 +Government,Canada,Paseo,723,7.00,5061.00,759.15,4301.85,3615.00,686.85,4/1/2014,4,April,2014 +Channel Partners,United States of America,VTT,1806,12.00,21672.00,3250.80,18421.20,5418.00,13003.20,5/1/2014,5,May,2014
diff --git a/dotnet/samples/Demos/CodeInterpreterPlugin/README.md b/dotnet/samples/Demos/CodeInterpreterPlugin/README.md
index a1e6a007f728..084fed1b41e4 100644
--- a/dotnet/samples/Demos/CodeInterpreterPlugin/README.md
+++ b/dotnet/samples/Demos/CodeInterpreterPlugin/README.md
@@ -31,3 +31,21 @@ OpenAI__ChatModelId
 # Azure Container Apps
 AzureContainerApps__Endpoint
 ```
+
+### Usage Example
+
+User: Upload the file c:\temp\code-interpreter\test-file.txt
+
+Assistant: The file test-file.txt has been successfully uploaded.
+
+User: How many files have I uploaded?
+
+Assistant: You have uploaded 1 file.
+
+User: Show me the contents of this file
+
+Assistant: The contents of the file "test-file.txt" are as follows:
+
+```text
+the contents of the file
+```
\ No newline at end of file
diff --git a/dotnet/samples/Demos/FunctionInvocationApproval/README.md b/dotnet/samples/Demos/FunctionInvocationApproval/README.md
new file mode 100644
index 000000000000..99ff202e45fd
--- /dev/null
+++ b/dotnet/samples/Demos/FunctionInvocationApproval/README.md
@@ -0,0 +1,44 @@
+# Function Invocation Approval
+
+This console application shows how to use a function invocation filter (`IFunctionInvocationFilter`) to invoke a Kernel Function only if that operation is approved.
+If a function invocation is rejected, the result will contain the reason, so the LLM can respond appropriately.
+
+The application uses a sample plugin which builds software by following these development stages: requirements collection, design, implementation, testing, and deployment.
+
+Each step can be approved or rejected. Based on that, the LLM will decide how to proceed.
+
+## Configuring Secrets
+
+The example requires credentials to access OpenAI or Azure OpenAI.
+
+If you have set up those credentials as secrets within Secret Manager or through environment variables for other samples from the solution in which this project is found, they will be re-used.
+
+### To set your secrets with Secret Manager:
+
+```
+cd dotnet/samples/Demos/FunctionInvocationApproval
+
+dotnet user-secrets init
+
+dotnet user-secrets set "OpenAI:ChatModelId" "..."
+dotnet user-secrets set "OpenAI:ApiKey" "..."
+
+dotnet user-secrets set "AzureOpenAI:ChatDeploymentName" "..."
+dotnet user-secrets set "AzureOpenAI:Endpoint" "https://... .openai.azure.com/"
+dotnet user-secrets set "AzureOpenAI:ApiKey" "..."
+```
+
+### To set your secrets with environment variables
+
+Use these names:
+
+```
+# OpenAI
+OpenAI__ChatModelId
+OpenAI__ApiKey
+
+# Azure OpenAI
+AzureOpenAI__ChatDeploymentName
+AzureOpenAI__Endpoint
+AzureOpenAI__ApiKey
+```
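The FunctionInvocationApproval README above describes the approval gate only in prose. As a rough sketch (not the sample's actual code), a console-based filter built on the `IFunctionInvocationFilter` contract it names could look like the following; the class name and prompt text are invented:

```csharp
using System;
using System.Threading.Tasks;
using Microsoft.SemanticKernel;

// Hypothetical approval filter: asks on the console before each function runs.
public sealed class ConsoleApprovalFilter : IFunctionInvocationFilter
{
    public async Task OnFunctionInvocationAsync(
        FunctionInvocationContext context,
        Func<FunctionInvocationContext, Task> next)
    {
        Console.Write($"Allow {context.Function.PluginName}-{context.Function.Name}? (y/n) ");

        if (string.Equals(Console.ReadLine()?.Trim(), "y", StringComparison.OrdinalIgnoreCase))
        {
            await next(context); // Approved: let the function execute.
        }
        else
        {
            // Rejected: surface the reason as the function result so the LLM can react.
            context.Result = new FunctionResult(context.Result, "Operation was rejected by the user.");
        }
    }
}
```

A filter like this would typically be registered with `kernel.FunctionInvocationFilters.Add(new ConsoleApprovalFilter());`.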
diff --git a/dotnet/samples/Demos/HomeAutomation/README.md b/dotnet/samples/Demos/HomeAutomation/README.md
index 09907e5363e5..aa5c33cec248 100644
--- a/dotnet/samples/Demos/HomeAutomation/README.md
+++ b/dotnet/samples/Demos/HomeAutomation/README.md
@@ -12,7 +12,7 @@ If you have set up those credentials as secrets within Secret Manager or through
 
 ### To set your secrets with Secret Manager:
 
 ```
-cd dotnet/samples/HouseAutomation
+cd dotnet/samples/Demos/HomeAutomation
 
 dotnet user-secrets init
diff --git a/dotnet/samples/GettingStartedWithAgents/README.md b/dotnet/samples/GettingStartedWithAgents/README.md
index 4cbca4f8e5d5..39952506548c 100644
--- a/dotnet/samples/GettingStartedWithAgents/README.md
+++ b/dotnet/samples/GettingStartedWithAgents/README.md
@@ -22,7 +22,7 @@ Example|Description
 [Step1_Agent](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/GettingStartedWithAgents/Step1_Agent.cs)|How to create and use an agent.
 [Step2_Plugins](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/GettingStartedWithAgents/Step2_Plugins.cs)|How to associate plug-ins with an agent.
 [Step3_Chat](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/GettingStartedWithAgents/Step3_Chat.cs)|How to create a conversation between agents.
-[Step4_KernelFunctionStrategies](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/Step4_KernelFunctionStrategies/Step1_Agent.cs)|How to utilize a `KernelFunction` as a _chat strategy_.
+[Step4_KernelFunctionStrategies](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/GettingStartedWithAgents/Step4_KernelFunctionStrategies.cs)|How to utilize a `KernelFunction` as a _chat strategy_.
 [Step5_JsonResult](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/GettingStartedWithAgents/Step5_JsonResult.cs)|How to have an agent produce JSON.
 [Step6_DependencyInjection](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/GettingStartedWithAgents/Step6_DependencyInjection.cs)|How to define dependency injection patterns for agents.
 [Step7_OpenAIAssistant](https://github.com/microsoft/semantic-kernel/blob/main/dotnet/samples/GettingStartedWithAgents/Step7_OpenAIAssistant.cs)|How to create an OpenAI Assistant agent.
diff --git a/dotnet/src/Agents/Abstractions/AgentChat.cs b/dotnet/src/Agents/Abstractions/AgentChat.cs
index 2ab5e75a276c..26b51928c362 100644
--- a/dotnet/src/Agents/Abstractions/AgentChat.cs
+++ b/dotnet/src/Agents/Abstractions/AgentChat.cs
@@ -223,8 +223,8 @@ protected async IAsyncEnumerable<ChatMessageContent> InvokeAgentAsync(
             this.History.Add(message);
             messages.Add(message);
 
-            // Don't expose internal messages to caller.
-            if (message.Role == AuthorRole.Tool || message.Items.All(i => i is FunctionCallContent))
+            // Don't expose function-call and function-result messages to caller.
+            if (message.Items.All(i => i is FunctionCallContent || i is FunctionResultContent))
             {
                 continue;
             }
@@ -239,7 +239,7 @@ protected async IAsyncEnumerable<ChatMessageContent> InvokeAgentAsync(
             this._agentChannels
                 .Where(kvp => kvp.Value != channel)
                 .Select(kvp => new ChannelReference(kvp.Value, kvp.Key));
-        this._broadcastQueue.Enqueue(channelRefs, messages);
+        this._broadcastQueue.Enqueue(channelRefs, messages.Where(m => m.Role != AuthorRole.Tool).ToArray());
 
         this.Logger.LogInformation("[{MethodName}] Invoked agent {AgentType}: {AgentId}", nameof(InvokeAgentAsync), agent.GetType(), agent.Id);
     }
diff --git a/dotnet/src/Agents/Core/Chat/TerminationStrategy.cs b/dotnet/src/Agents/Core/Chat/TerminationStrategy.cs
index 4b1752f88462..843327d77f6a 100644
--- a/dotnet/src/Agents/Core/Chat/TerminationStrategy.cs
+++ b/dotnet/src/Agents/Core/Chat/TerminationStrategy.cs
@@ -49,7 +49,7 @@ public abstract class TerminationStrategy
     /// <summary>
    /// Evaluate the input message and determine if the chat has met its completion criteria.
    /// </summary>
-    /// <param name="agent">The agent actively interacting with the nexus.</param>
+    /// <param name="agent">The agent actively interacting with the chat.</param>
     /// <param name="message">The most recent message</param>
     /// <param name="cancellationToken">The <see cref="CancellationToken"/> to monitor for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
     /// <returns>True to terminate chat loop.</returns>
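The `TerminationStrategy` doc-comment fix above concerns the hook that decides when an agent chat is finished. A minimal subclass might look like this sketch, which assumes the `ShouldAgentTerminateAsync(Agent, IReadOnlyList<ChatMessageContent>, CancellationToken)` override shape used by the agent getting-started samples; the "approved" keyword check is invented:

```csharp
using System;
using System.Collections.Generic;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Agents;
using Microsoft.SemanticKernel.Agents.Chat;

// Hypothetical strategy: terminate once the most recent message contains "approved".
public sealed class ApprovalTerminationStrategy : TerminationStrategy
{
    protected override Task<bool> ShouldAgentTerminateAsync(
        Agent agent,
        IReadOnlyList<ChatMessageContent> history,
        CancellationToken cancellationToken)
        => Task.FromResult(
            history[history.Count - 1].Content?.Contains("approved", StringComparison.OrdinalIgnoreCase) ?? false);
}
```

An instance would typically be assigned to an `AgentGroupChat`'s execution settings, with `MaximumIterations` as a safety net against chats that never satisfy the criteria.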
diff --git a/dotnet/src/Agents/OpenAI/Agents.OpenAI.csproj b/dotnet/src/Agents/OpenAI/Agents.OpenAI.csproj
index 380bd5877e85..222ea5c5be88 100644
--- a/dotnet/src/Agents/OpenAI/Agents.OpenAI.csproj
+++ b/dotnet/src/Agents/OpenAI/Agents.OpenAI.csproj
@@ -24,6 +24,7 @@
+
diff --git a/dotnet/src/Agents/OpenAI/Extensions/KernelFunctionExtensions.cs b/dotnet/src/Agents/OpenAI/Extensions/KernelFunctionExtensions.cs
index 742aa874a301..9665fb680498 100644
--- a/dotnet/src/Agents/OpenAI/Extensions/KernelFunctionExtensions.cs
+++ b/dotnet/src/Agents/OpenAI/Extensions/KernelFunctionExtensions.cs
@@ -15,7 +15,7 @@ internal static class KernelFunctionExtensions
     /// <param name="pluginName">The plugin name</param>
     /// <param name="delimiter">The delimiter character</param>
     /// <returns>An OpenAI tool definition</returns>
-    public static FunctionToolDefinition ToToolDefinition(this KernelFunction function, string pluginName, char delimiter)
+    public static FunctionToolDefinition ToToolDefinition(this KernelFunction function, string pluginName, string delimiter)
     {
         var metadata = function.Metadata;
         if (metadata.Parameters.Count > 0)
@@ -47,10 +47,10 @@ public static FunctionToolDefinition ToToolDefinition(this KernelFunction functi
                 required,
             };
 
-            return new FunctionToolDefinition(function.GetQualifiedName(pluginName, delimiter), function.Description, BinaryData.FromObjectAsJson(spec));
+            return new FunctionToolDefinition(FunctionName.ToFullyQualifiedName(function.Name, pluginName, delimiter), function.Description, BinaryData.FromObjectAsJson(spec));
         }
 
-        return new FunctionToolDefinition(function.GetQualifiedName(pluginName, delimiter), function.Description);
+        return new FunctionToolDefinition(FunctionName.ToFullyQualifiedName(function.Name, pluginName, delimiter), function.Description);
     }
 
     private static string ConvertType(Type? type)
@@ -86,12 +86,4 @@ TypeCode.Int64 or TypeCode.UInt64 or
             _ => "object",
         };
     }
-
-    /// <summary>
-    /// Produce a fully qualified toolname.
-    /// </summary>
-    public static string GetQualifiedName(this KernelFunction function, string pluginName, char delimiter)
-    {
-        return $"{pluginName}{delimiter}{function.Name}";
-    }
 }
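The `KernelFunctionExtensions` change above replaces the local `GetQualifiedName` helper with `FunctionName.ToFullyQualifiedName` and widens the delimiter from `char` to `string`. A small round-trip sketch, with invented plugin and function names, assuming the `FunctionName` helper available in current Semantic Kernel packages:

```csharp
using System;
using Microsoft.SemanticKernel;

// "WeatherPlugin" and "GetWeather" are placeholder names for illustration.
string qualified = FunctionName.ToFullyQualifiedName("GetWeather", "WeatherPlugin", "-");
Console.WriteLine(qualified); // WeatherPlugin-GetWeather

FunctionName parsed = FunctionName.Parse(qualified, "-");
Console.WriteLine($"{parsed.PluginName} / {parsed.Name}"); // WeatherPlugin / GetWeather
```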
- var tasks = steps.Data.SelectMany(step => ExecuteStep(agent, step, cancellationToken)).ToArray(); - if (tasks.Length > 0) + FunctionCallContent[] activeFunctionSteps = steps.Data.SelectMany(step => ParseFunctionStep(agent, step)).ToArray(); + if (activeFunctionSteps.Length > 0) { - ToolOutput[]? results = await Task.WhenAll(tasks).ConfigureAwait(false); + // Emit function-call content + yield return GenerateFunctionCallContent(agent.GetName(), activeFunctionSteps); - await this._client.SubmitToolOutputsToRunAsync(run, results, cancellationToken).ConfigureAwait(false); + // Invoke functions for each tool-step + IEnumerable> functionResultTasks = ExecuteFunctionSteps(agent, activeFunctionSteps, cancellationToken); + + // Block for function results + FunctionResultContent[] functionResults = await Task.WhenAll(functionResultTasks).ConfigureAwait(false); + + // Process tool output + ToolOutput[] toolOutputs = GenerateToolOutputs(functionResults); + + await this._client.SubmitToolOutputsToRunAsync(run, toolOutputs, cancellationToken).ConfigureAwait(false); } if (this.Logger.IsEnabled(LogLevel.Information)) // Avoid boxing if not enabled { - this.Logger.LogInformation("[{MethodName}] Processed #{MessageCount} run steps: {RunId}", nameof(InvokeAsync), tasks.Length, run.Id); + this.Logger.LogInformation("[{MethodName}] Processed #{MessageCount} run steps: {RunId}", nameof(InvokeAsync), activeFunctionSteps.Length, run.Id); } } // Enumerate completed messages this.Logger.LogDebug("[{MethodName}] Processing run messages: {RunId}", nameof(InvokeAsync), run.Id); - IEnumerable messageDetails = + IEnumerable completedStepsToProcess = steps - .OrderBy(s => s.CompletedAt) - .Select(s => s.StepDetails) - .OfType() - .Where(d => !processedMessageIds.Contains(d.MessageCreation.MessageId)); + .Where(s => s.CompletedAt.HasValue && !processedStepIds.Contains(s.Id)) + .OrderBy(s => s.CreatedAt); int messageCount = 0; - foreach (RunStepMessageCreationDetails detail in messageDetails) + foreach (RunStep completedStep in completedStepsToProcess) { - ++messageCount; - - // Retrieve the message - ThreadMessage? message = await this.RetrieveMessageAsync(detail, cancellationToken).ConfigureAwait(false); - - if (message is not null) + if (completedStep.Type.Equals(RunStepType.ToolCalls)) { - AuthorRole role = new(message.Role.ToString()); + RunStepToolCallDetails toolCallDetails = (RunStepToolCallDetails)completedStep.StepDetails; - foreach (MessageContent itemContent in message.ContentItems) + foreach (RunStepToolCall toolCall in toolCallDetails.ToolCalls) { ChatMessageContent? 
content = null; - // Process text content - if (itemContent is MessageTextContent contentMessage) + // Process code-interpreter content + if (toolCall is RunStepCodeInterpreterToolCall toolCodeInterpreter) { - content = GenerateTextMessageContent(agent.GetName(), role, contentMessage); + content = GenerateCodeInterpreterContent(agent.GetName(), toolCodeInterpreter); } - // Process image content - else if (itemContent is MessageImageFileContent contentImage) + // Process function result content + else if (toolCall is RunStepFunctionToolCall toolFunction) { - content = GenerateImageFileContent(agent.GetName(), role, contentImage); + FunctionCallContent functionStep = functionSteps[toolFunction.Id]; // Function step always captured on invocation + content = GenerateFunctionResultContent(agent.GetName(), functionStep, toolFunction.Output); } if (content is not null) { + ++messageCount; + yield return content; } } } + else if (completedStep.Type.Equals(RunStepType.MessageCreation)) + { + RunStepMessageCreationDetails messageCreationDetails = (RunStepMessageCreationDetails)completedStep.StepDetails; - processedMessageIds.Add(detail.MessageCreation.MessageId); + // Retrieve the message + ThreadMessage? message = await this.RetrieveMessageAsync(messageCreationDetails, cancellationToken).ConfigureAwait(false); + + if (message is not null) + { + AuthorRole role = new(message.Role.ToString()); + + foreach (MessageContent itemContent in message.ContentItems) + { + ChatMessageContent? content = null; + + // Process text content + if (itemContent is MessageTextContent contentMessage) + { + content = GenerateTextMessageContent(agent.GetName(), role, contentMessage); + } + // Process image content + else if (itemContent is MessageImageFileContent contentImage) + { + content = GenerateImageFileContent(agent.GetName(), role, contentImage); + } + + if (content is not null) + { + ++messageCount; + + yield return content; + } + } + } + } + + processedStepIds.Add(completedStep.Id); } if (this.Logger.IsEnabled(LogLevel.Information)) // Avoid boxing if not enabled @@ -213,6 +255,34 @@ async Task> PollRunStatusAsync() return await this._client.GetRunStepsAsync(run, cancellationToken: cancellationToken).ConfigureAwait(false); } + + // Local function to capture kernel function state for further processing (participates in method closure). 
+ IEnumerable ParseFunctionStep(OpenAIAssistantAgent agent, RunStep step) + { + if (step.Status == RunStepStatus.InProgress && step.StepDetails is RunStepToolCallDetails callDetails) + { + foreach (RunStepFunctionToolCall toolCall in callDetails.ToolCalls.OfType()) + { + var nameParts = FunctionName.Parse(toolCall.Name, FunctionDelimiter); + + KernelArguments functionArguments = []; + if (!string.IsNullOrWhiteSpace(toolCall.Arguments)) + { + Dictionary arguments = JsonSerializer.Deserialize>(toolCall.Arguments)!; + foreach (var argumentKvp in arguments) + { + functionArguments[argumentKvp.Key] = argumentKvp.Value.ToString(); + } + } + + var content = new FunctionCallContent(nameParts.Name, nameParts.PluginName, toolCall.Id, functionArguments); + + functionSteps.Add(toolCall.Id, content); + + yield return content; + } + } + } } /// @@ -324,48 +394,79 @@ private static ChatMessageContent GenerateImageFileContent(string agentName, Aut return messageContent; } - private static IEnumerable> ExecuteStep(OpenAIAssistantAgent agent, RunStep step, CancellationToken cancellationToken) + private static ChatMessageContent GenerateCodeInterpreterContent(string agentName, RunStepCodeInterpreterToolCall contentCodeInterpreter) { - // Process all of the steps that require action - if (step.Status == RunStepStatus.InProgress && step.StepDetails is RunStepToolCallDetails callDetails) - { - foreach (RunStepFunctionToolCall toolCall in callDetails.ToolCalls.OfType()) + return + new ChatMessageContent( + AuthorRole.Tool, + [ + new TextContent(contentCodeInterpreter.Input) + ]) { - // Run function - yield return ProcessFunctionStepAsync(toolCall.Id, toolCall); - } - } + AuthorName = agentName, + }; + } - // Local function for processing the run-step (participates in method closure). - async Task ProcessFunctionStepAsync(string callId, RunStepFunctionToolCall functionDetails) + private static ChatMessageContent GenerateFunctionCallContent(string agentName, FunctionCallContent[] functionSteps) + { + ChatMessageContent functionCallContent = new(AuthorRole.Tool, content: null) { - object result = await InvokeFunctionCallAsync().ConfigureAwait(false); - if (result is not string toolResult) - { - toolResult = JsonSerializer.Serialize(result); - } + AuthorName = agentName + }; - return new ToolOutput(callId, toolResult!); + functionCallContent.Items.AddRange(functionSteps); - async Task InvokeFunctionCallAsync() - { - KernelFunction function = agent.Kernel.GetKernelFunction(functionDetails.Name, FunctionDelimiter); + return functionCallContent; + } - KernelArguments functionArguments = []; - if (!string.IsNullOrWhiteSpace(functionDetails.Arguments)) - { - Dictionary arguments = JsonSerializer.Deserialize>(functionDetails.Arguments)!; - foreach (var argumentKvp in arguments) - { - functionArguments[argumentKvp.Key] = argumentKvp.Value.ToString(); - } - } + private static ChatMessageContent GenerateFunctionResultContent(string agentName, FunctionCallContent functionStep, string result) + { + ChatMessageContent functionCallContent = new(AuthorRole.Tool, content: null) + { + AuthorName = agentName + }; - FunctionResult result = await function.InvokeAsync(agent.Kernel, functionArguments, cancellationToken).ConfigureAwait(false); + functionCallContent.Items.Add( + new FunctionResultContent( + functionStep.FunctionName, + functionStep.PluginName, + functionStep.Id, + result)); - return result.GetValue() ?? 
string.Empty; + return functionCallContent; + } + + private static Task[] ExecuteFunctionSteps(OpenAIAssistantAgent agent, FunctionCallContent[] functionSteps, CancellationToken cancellationToken) + { + Task[] functionTasks = new Task[functionSteps.Length]; + + for (int index = 0; index < functionSteps.Length; ++index) + { + functionTasks[index] = functionSteps[index].InvokeAsync(agent.Kernel, cancellationToken); + } + + return functionTasks; + } + + private static ToolOutput[] GenerateToolOutputs(FunctionResultContent[] functionResults) + { + ToolOutput[] toolOutputs = new ToolOutput[functionResults.Length]; + + for (int index = 0; index < functionResults.Length; ++index) + { + FunctionResultContent functionResult = functionResults[index]; + + object resultValue = (functionResult.Result as FunctionResult)?.GetValue() ?? string.Empty; + + if (resultValue is not string textResult) + { + textResult = JsonSerializer.Serialize(resultValue); } + + toolOutputs[index] = new ToolOutput(functionResult.CallId, textResult!); } + + return toolOutputs; } private async Task RetrieveMessageAsync(RunStepMessageCreationDetails detail, CancellationToken cancellationToken) diff --git a/dotnet/src/Agents/UnitTests/OpenAI/Extensions/KernelFunctionExtensionsTests.cs b/dotnet/src/Agents/UnitTests/OpenAI/Extensions/KernelFunctionExtensionsTests.cs index 34f81cc87977..eeb8a4d3b9d1 100644 --- a/dotnet/src/Agents/UnitTests/OpenAI/Extensions/KernelFunctionExtensionsTests.cs +++ b/dotnet/src/Agents/UnitTests/OpenAI/Extensions/KernelFunctionExtensionsTests.cs @@ -25,11 +25,11 @@ public void VerifyKernelFunctionToFunctionTool() KernelFunction f1 = plugin[nameof(TestPlugin.TestFunction1)]; KernelFunction f2 = plugin[nameof(TestPlugin.TestFunction2)]; - FunctionToolDefinition definition1 = f1.ToToolDefinition("testplugin", '-'); + FunctionToolDefinition definition1 = f1.ToToolDefinition("testplugin", "-"); Assert.StartsWith($"testplugin-{nameof(TestPlugin.TestFunction1)}", definition1.Name, StringComparison.Ordinal); Assert.Equal("test description", definition1.Description); - FunctionToolDefinition definition2 = f2.ToToolDefinition("testplugin", '-'); + FunctionToolDefinition definition2 = f2.ToToolDefinition("testplugin", "-"); Assert.StartsWith($"testplugin-{nameof(TestPlugin.TestFunction2)}", definition2.Name, StringComparison.Ordinal); Assert.Equal("test description", definition2.Description); } diff --git a/dotnet/src/Agents/UnitTests/OpenAI/OpenAIAssistantAgentTests.cs b/dotnet/src/Agents/UnitTests/OpenAI/OpenAIAssistantAgentTests.cs index 2a2d4c54bf93..1d9a9ec9dfcf 100644 --- a/dotnet/src/Agents/UnitTests/OpenAI/OpenAIAssistantAgentTests.cs +++ b/dotnet/src/Agents/UnitTests/OpenAI/OpenAIAssistantAgentTests.cs @@ -607,7 +607,7 @@ private static class ResponseContent "first_id": "step_abc123", "last_id": "step_abc456", "has_more": false - } + } """; public const string ToolSteps = @@ -616,13 +616,13 @@ private static class ResponseContent "object": "list", "data": [ { - "id": "step_abc123", + "id": "step_abc987", "object": "thread.run.step", "created_at": 1699063291, "run_id": "run_abc123", "assistant_id": "asst_abc123", "thread_id": "thread_abc123", - "type": "message_creation", + "type": "tool_calls", "status": "in_progress", "cancelled_at": null, "completed_at": 1699063291, @@ -638,7 +638,7 @@ private static class ResponseContent "function": { "name": "MyPlugin-MyFunction", "arguments": "{ \"index\": 3 }", - "output": null + "output": "test" } } ] @@ -653,7 +653,7 @@ private static class ResponseContent 
"first_id": "step_abc123", "last_id": "step_abc456", "has_more": false - } + } """; public const string ToolResponse = "{ }"; diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatGenerationTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatGenerationTests.cs index c8ede07ebb5d..6b5bda155483 100644 --- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatGenerationTests.cs +++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatGenerationTests.cs @@ -416,6 +416,57 @@ public async Task ItCreatesPostRequestWithSemanticKernelVersionHeaderAsync() Assert.Equal(expectedVersion, header); } + [Fact] + public async Task ItCanUseValueTasksSequentiallyForBearerTokenAsync() + { + // Arrange + var bearerTokenGenerator = new BearerTokenGenerator() + { + BearerKeys = new List { "key1", "key2", "key3" } + }; + + var responseContent = File.ReadAllText(ChatTestDataFilePath); + using var content1 = new HttpResponseMessage { Content = new StringContent(responseContent) }; + using var content2 = new HttpResponseMessage { Content = new StringContent(responseContent) }; + + using MultipleHttpMessageHandlerStub multipleMessageHandlerStub = new() + { + ResponsesToReturn = [content1, content2] + }; + using var httpClient = new HttpClient(multipleMessageHandlerStub, false); + + var client = new GeminiChatCompletionClient( + httpClient: httpClient, + modelId: "fake-model", + apiVersion: VertexAIVersion.V1, + bearerTokenProvider: () => bearerTokenGenerator.GetBearerToken(), + location: "fake-location", + projectId: "fake-project-id"); + + var chatHistory = CreateSampleChatHistory(); + + // Act + await client.GenerateChatMessageAsync(chatHistory); + await client.GenerateChatMessageAsync(chatHistory); + var firstRequestHeader = multipleMessageHandlerStub.RequestHeaders[0]?.GetValues("Authorization").SingleOrDefault(); + var secondRequestHeader = multipleMessageHandlerStub.RequestHeaders[1]?.GetValues("Authorization").SingleOrDefault(); + + // Assert + Assert.NotNull(firstRequestHeader); + Assert.NotNull(secondRequestHeader); + Assert.NotEqual(firstRequestHeader, secondRequestHeader); + Assert.Equal("Bearer key1", firstRequestHeader); + Assert.Equal("Bearer key2", secondRequestHeader); + } + + private sealed class BearerTokenGenerator() + { + private int _index = 0; + public required List BearerKeys { get; init; } + + public ValueTask GetBearerToken() => ValueTask.FromResult(this.BearerKeys[this._index++]); + } + private static ChatHistory CreateSampleChatHistory() { var chatHistory = new ChatHistory(); @@ -436,7 +487,7 @@ private GeminiChatCompletionClient CreateChatCompletionClient( httpClient: httpClient ?? 
this._httpClient, modelId: modelId, apiVersion: VertexAIVersion.V1, - bearerTokenProvider: () => Task.FromResult(bearerKey), + bearerTokenProvider: () => new ValueTask(bearerKey), location: "fake-location", projectId: "fake-project-id"); } diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatStreamingTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatStreamingTests.cs index c8802dd58c83..73b647429297 100644 --- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatStreamingTests.cs +++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiChatStreamingTests.cs @@ -368,7 +368,7 @@ private GeminiChatCompletionClient CreateChatCompletionClient( return new GeminiChatCompletionClient( httpClient: httpClient ?? this._httpClient, modelId: modelId, - bearerTokenProvider: () => Task.FromResult(bearerKey), + bearerTokenProvider: () => new ValueTask(bearerKey), apiVersion: VertexAIVersion.V1, location: "fake-location", projectId: "fake-project-id"); diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiCountingTokensTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiCountingTokensTests.cs index d25e28cd5f9b..447fec2c98df 100644 --- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiCountingTokensTests.cs +++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/Gemini/Clients/GeminiCountingTokensTests.cs @@ -124,7 +124,7 @@ private GeminiTokenCounterClient CreateTokenCounterClient( return new GeminiTokenCounterClient( httpClient: this._httpClient, modelId: modelId, - bearerTokenProvider: () => Task.FromResult(bearerKey), + bearerTokenProvider: () => ValueTask.FromResult(bearerKey), apiVersion: VertexAIVersion.V1, location: "fake-location", projectId: "fake-project-id"); diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/VertexAI/VertexAIClientEmbeddingsGenerationTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/VertexAI/VertexAIClientEmbeddingsGenerationTests.cs index b30e80bf2f05..cb3586e6cc34 100644 --- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/VertexAI/VertexAIClientEmbeddingsGenerationTests.cs +++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Core/VertexAI/VertexAIClientEmbeddingsGenerationTests.cs @@ -143,7 +143,7 @@ private VertexAIEmbeddingClient CreateEmbeddingsClient( var client = new VertexAIEmbeddingClient( httpClient: this._httpClient, modelId: modelId, - bearerTokenProvider: () => Task.FromResult(bearerKey ?? "fake-key"), + bearerTokenProvider: () => ValueTask.FromResult(bearerKey ?? 
"fake-key"), apiVersion: VertexAIVersion.V1, location: "us-central1", projectId: "fake-project-id"); diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/VertexAIMemoryBuilderExtensionsTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/VertexAIMemoryBuilderExtensionsTests.cs index 3292fc6d2044..14464c48977a 100644 --- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/VertexAIMemoryBuilderExtensionsTests.cs +++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/VertexAIMemoryBuilderExtensionsTests.cs @@ -39,7 +39,7 @@ public void ShouldBuildMemoryWithVertexAIEmbeddingGeneratorBearerAsFunc() // Act var memory = builder - .WithVertexAITextEmbeddingGeneration("fake-model", () => Task.FromResult("fake-bearer-key"), "fake-location", "fake-project") + .WithVertexAITextEmbeddingGeneration("fake-model", () => ValueTask.FromResult("fake-bearer-key"), "fake-location", "fake-project") .WithMemoryStore(this._mockMemoryStore.Object) .Build(); diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/VertexAIServiceCollectionExtensionsTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/VertexAIServiceCollectionExtensionsTests.cs index 006ff016c087..16ba1e00a9a3 100644 --- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/VertexAIServiceCollectionExtensionsTests.cs +++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Extensions/VertexAIServiceCollectionExtensionsTests.cs @@ -38,7 +38,7 @@ public void VertexAIGeminiChatCompletionServiceShouldBeRegisteredInKernelService var kernelBuilder = Kernel.CreateBuilder(); // Act - kernelBuilder.AddVertexAIGeminiChatCompletion("modelId", () => Task.FromResult("apiKey"), location: "test2", projectId: "projectId"); + kernelBuilder.AddVertexAIGeminiChatCompletion("modelId", () => ValueTask.FromResult("apiKey"), location: "test2", projectId: "projectId"); var kernel = kernelBuilder.Build(); // Assert @@ -70,7 +70,7 @@ public void VertexAIGeminiChatCompletionServiceShouldBeRegisteredInServiceCollec var services = new ServiceCollection(); // Act - services.AddVertexAIGeminiChatCompletion("modelId", () => Task.FromResult("apiKey"), location: "test2", projectId: "projectId"); + services.AddVertexAIGeminiChatCompletion("modelId", () => ValueTask.FromResult("apiKey"), location: "test2", projectId: "projectId"); var serviceProvider = services.BuildServiceProvider(); // Assert @@ -102,7 +102,7 @@ public void VertexAIEmbeddingGenerationServiceShouldBeRegisteredInKernelServices var kernelBuilder = Kernel.CreateBuilder(); // Act - kernelBuilder.AddVertexAIEmbeddingGeneration("modelId", () => Task.FromResult("apiKey"), location: "test2", projectId: "projectId"); + kernelBuilder.AddVertexAIEmbeddingGeneration("modelId", () => ValueTask.FromResult("apiKey"), location: "test2", projectId: "projectId"); var kernel = kernelBuilder.Build(); // Assert @@ -134,7 +134,7 @@ public void VertexAIEmbeddingGenerationServiceShouldBeRegisteredInServiceCollect var services = new ServiceCollection(); // Act - services.AddVertexAIEmbeddingGeneration("modelId", () => Task.FromResult("apiKey"), location: "test2", projectId: "projectId"); + services.AddVertexAIEmbeddingGeneration("modelId", () => ValueTask.FromResult("apiKey"), location: "test2", projectId: "projectId"); var serviceProvider = services.BuildServiceProvider(); // Assert diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Services/VertexAIGeminiChatCompletionServiceTests.cs 
b/dotnet/src/Connectors/Connectors.Google.UnitTests/Services/VertexAIGeminiChatCompletionServiceTests.cs index 98c6fda16458..89e65fbaa534 100644 --- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Services/VertexAIGeminiChatCompletionServiceTests.cs +++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Services/VertexAIGeminiChatCompletionServiceTests.cs @@ -25,7 +25,7 @@ public void AttributesShouldContainModelIdBearerAsFunc() { // Arrange & Act string model = "fake-model"; - var service = new VertexAIGeminiChatCompletionService(model, () => Task.FromResult("key"), "location", "project"); + var service = new VertexAIGeminiChatCompletionService(model, () => new ValueTask("key"), "location", "project"); // Assert Assert.Equal(model, service.Attributes[AIServiceExtensions.ModelIdKey]); diff --git a/dotnet/src/Connectors/Connectors.Google.UnitTests/Services/VertexAITextEmbeddingGenerationServiceTests.cs b/dotnet/src/Connectors/Connectors.Google.UnitTests/Services/VertexAITextEmbeddingGenerationServiceTests.cs index 801e97b9d52f..ffb931af6f59 100644 --- a/dotnet/src/Connectors/Connectors.Google.UnitTests/Services/VertexAITextEmbeddingGenerationServiceTests.cs +++ b/dotnet/src/Connectors/Connectors.Google.UnitTests/Services/VertexAITextEmbeddingGenerationServiceTests.cs @@ -25,7 +25,7 @@ public void AttributesShouldContainModelIdBearerAsFunc() { // Arrange & Act string model = "fake-model"; - var service = new VertexAITextEmbeddingGenerationService(model, () => Task.FromResult("key"), "location", "project"); + var service = new VertexAITextEmbeddingGenerationService(model, () => ValueTask.FromResult("key"), "location", "project"); // Assert Assert.Equal(model, service.Attributes[AIServiceExtensions.ModelIdKey]); diff --git a/dotnet/src/Connectors/Connectors.Google/Connectors.Google.csproj b/dotnet/src/Connectors/Connectors.Google/Connectors.Google.csproj index 0afb53269782..4d5a3deb9906 100644 --- a/dotnet/src/Connectors/Connectors.Google/Connectors.Google.csproj +++ b/dotnet/src/Connectors/Connectors.Google/Connectors.Google.csproj @@ -10,8 +10,8 @@ - - + + @@ -21,12 +21,12 @@ - + - - + + diff --git a/dotnet/src/Connectors/Connectors.Google/Core/ClientBase.cs b/dotnet/src/Connectors/Connectors.Google/Core/ClientBase.cs index 1a3d20ed187c..7482dc723518 100644 --- a/dotnet/src/Connectors/Connectors.Google/Core/ClientBase.cs +++ b/dotnet/src/Connectors/Connectors.Google/Core/ClientBase.cs @@ -14,7 +14,7 @@ namespace Microsoft.SemanticKernel.Connectors.Google.Core; internal abstract class ClientBase { - private readonly Func>? _bearerTokenProvider; + private readonly Func>? _bearerTokenProvider; protected ILogger Logger { get; } @@ -23,7 +23,7 @@ internal abstract class ClientBase protected ClientBase( HttpClient httpClient, ILogger? 
logger, - Func> bearerTokenProvider) + Func> bearerTokenProvider) : this(httpClient, logger) { Verify.NotNull(bearerTokenProvider); diff --git a/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiChatCompletionClient.cs b/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiChatCompletionClient.cs index 087a1c2bf2f8..e52b5f4e6bd6 100644 --- a/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiChatCompletionClient.cs +++ b/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiChatCompletionClient.cs @@ -125,7 +125,7 @@ public GeminiChatCompletionClient( public GeminiChatCompletionClient( HttpClient httpClient, string modelId, - Func> bearerTokenProvider, + Func> bearerTokenProvider, string location, string projectId, VertexAIVersion apiVersion, diff --git a/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiTokenCounterClient.cs b/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiTokenCounterClient.cs index f382ded93357..8616c8a88dd5 100644 --- a/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiTokenCounterClient.cs +++ b/dotnet/src/Connectors/Connectors.Google/Core/Gemini/Clients/GeminiTokenCounterClient.cs @@ -57,7 +57,7 @@ public GeminiTokenCounterClient( public GeminiTokenCounterClient( HttpClient httpClient, string modelId, - Func> bearerTokenProvider, + Func> bearerTokenProvider, string location, string projectId, VertexAIVersion apiVersion, diff --git a/dotnet/src/Connectors/Connectors.Google/Core/VertexAI/VertexAIEmbeddingClient.cs b/dotnet/src/Connectors/Connectors.Google/Core/VertexAI/VertexAIEmbeddingClient.cs index 6b00fd70b43b..62525f4ef67b 100644 --- a/dotnet/src/Connectors/Connectors.Google/Core/VertexAI/VertexAIEmbeddingClient.cs +++ b/dotnet/src/Connectors/Connectors.Google/Core/VertexAI/VertexAIEmbeddingClient.cs @@ -31,7 +31,7 @@ internal sealed class VertexAIEmbeddingClient : ClientBase public VertexAIEmbeddingClient( HttpClient httpClient, string modelId, - Func> bearerTokenProvider, + Func> bearerTokenProvider, string location, string projectId, VertexAIVersion apiVersion, diff --git a/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIKernelBuilderExtensions.cs b/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIKernelBuilderExtensions.cs index e8432e1c1c4c..f87da9cbc56e 100644 --- a/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIKernelBuilderExtensions.cs +++ b/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIKernelBuilderExtensions.cs @@ -37,7 +37,7 @@ public static class VertexAIKernelBuilderExtensions public static IKernelBuilder AddVertexAIGeminiChatCompletion( this IKernelBuilder builder, string modelId, - Func> bearerTokenProvider, + Func> bearerTokenProvider, string location, string projectId, VertexAIVersion apiVersion = VertexAIVersion.V1, @@ -122,7 +122,7 @@ public static IKernelBuilder AddVertexAIGeminiChatCompletion( public static IKernelBuilder AddVertexAIEmbeddingGeneration( this IKernelBuilder builder, string modelId, - Func> bearerTokenProvider, + Func> bearerTokenProvider, string location, string projectId, VertexAIVersion apiVersion = VertexAIVersion.V1, diff --git a/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIMemoryBuilderExtensions.cs b/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIMemoryBuilderExtensions.cs index bdb37008726e..10d7264dc26e 100644 --- a/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIMemoryBuilderExtensions.cs +++ 
b/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIMemoryBuilderExtensions.cs @@ -33,7 +33,7 @@ public static class VertexAIMemoryBuilderExtensions public static MemoryBuilder WithVertexAITextEmbeddingGeneration( this MemoryBuilder builder, string modelId, - Func> bearerTokenProvider, + Func> bearerTokenProvider, string location, string projectId, VertexAIVersion apiVersion = VertexAIVersion.V1, diff --git a/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIServiceCollectionExtensions.cs b/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIServiceCollectionExtensions.cs index 0ccfeb7deda9..c60aa979477f 100644 --- a/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIServiceCollectionExtensions.cs +++ b/dotnet/src/Connectors/Connectors.Google/Extensions/VertexAIServiceCollectionExtensions.cs @@ -35,7 +35,7 @@ public static class VertexAIServiceCollectionExtensions public static IServiceCollection AddVertexAIGeminiChatCompletion( this IServiceCollection services, string modelId, - Func> bearerTokenProvider, + Func> bearerTokenProvider, string location, string projectId, VertexAIVersion apiVersion = VertexAIVersion.V1, @@ -116,7 +116,7 @@ public static IServiceCollection AddVertexAIGeminiChatCompletion( public static IServiceCollection AddVertexAIEmbeddingGeneration( this IServiceCollection services, string modelId, - Func> bearerTokenProvider, + Func> bearerTokenProvider, string location, string projectId, VertexAIVersion apiVersion = VertexAIVersion.V1, diff --git a/dotnet/src/Connectors/Connectors.Google/Services/VertexAIGeminiChatCompletionService.cs b/dotnet/src/Connectors/Connectors.Google/Services/VertexAIGeminiChatCompletionService.cs index 4ca2ed9f1bd4..38db5f410314 100644 --- a/dotnet/src/Connectors/Connectors.Google/Services/VertexAIGeminiChatCompletionService.cs +++ b/dotnet/src/Connectors/Connectors.Google/Services/VertexAIGeminiChatCompletionService.cs @@ -39,7 +39,7 @@ public VertexAIGeminiChatCompletionService( VertexAIVersion apiVersion = VertexAIVersion.V1, HttpClient? httpClient = null, ILoggerFactory? loggerFactory = null) - : this(modelId, () => Task.FromResult(bearerKey), location, projectId, apiVersion, httpClient, loggerFactory) + : this(modelId, () => new ValueTask(bearerKey), location, projectId, apiVersion, httpClient, loggerFactory) { Verify.NotNullOrWhiteSpace(bearerKey); } @@ -61,7 +61,7 @@ public VertexAIGeminiChatCompletionService( /// public VertexAIGeminiChatCompletionService( string modelId, - Func> bearerTokenProvider, + Func> bearerTokenProvider, string location, string projectId, VertexAIVersion apiVersion = VertexAIVersion.V1, diff --git a/dotnet/src/Connectors/Connectors.Google/Services/VertexAITextEmbeddingGenerationService.cs b/dotnet/src/Connectors/Connectors.Google/Services/VertexAITextEmbeddingGenerationService.cs index 92389dc00cdb..a9f9b55e06a9 100644 --- a/dotnet/src/Connectors/Connectors.Google/Services/VertexAITextEmbeddingGenerationService.cs +++ b/dotnet/src/Connectors/Connectors.Google/Services/VertexAITextEmbeddingGenerationService.cs @@ -39,7 +39,7 @@ public VertexAITextEmbeddingGenerationService( VertexAIVersion apiVersion = VertexAIVersion.V1, HttpClient? httpClient = null, ILoggerFactory? 
loggerFactory = null) - : this(modelId, () => Task.FromResult(bearerKey), location, projectId, apiVersion, httpClient, loggerFactory) + : this(modelId, () => new ValueTask(bearerKey), location, projectId, apiVersion, httpClient, loggerFactory) { Verify.NotNullOrWhiteSpace(bearerKey); } @@ -61,7 +61,7 @@ public VertexAITextEmbeddingGenerationService( /// public VertexAITextEmbeddingGenerationService( string modelId, - Func> bearerTokenProvider, + Func> bearerTokenProvider, string location, string projectId, VertexAIVersion apiVersion = VertexAIVersion.V1, diff --git a/dotnet/src/Connectors/Connectors.HuggingFace/Core/HuggingFaceMessageApiClient.cs b/dotnet/src/Connectors/Connectors.HuggingFace/Core/HuggingFaceMessageApiClient.cs index 66bd8cdbf365..468f24490edb 100644 --- a/dotnet/src/Connectors/Connectors.HuggingFace/Core/HuggingFaceMessageApiClient.cs +++ b/dotnet/src/Connectors/Connectors.HuggingFace/Core/HuggingFaceMessageApiClient.cs @@ -85,9 +85,8 @@ internal async IAsyncEnumerable StreamCompleteChatM var endpoint = this.GetChatGenerationEndpoint(); var huggingFaceExecutionSettings = HuggingFacePromptExecutionSettings.FromExecutionSettings(executionSettings); - huggingFaceExecutionSettings.ModelId ??= this._clientCore.ModelId; - var request = this.CreateChatRequest(chatHistory, huggingFaceExecutionSettings); + var request = this.CreateChatRequest(chatHistory, huggingFaceExecutionSettings, modelId); request.Stream = true; using var activity = ModelDiagnostics.StartCompletionActivity(endpoint, modelId, this._clientCore.ModelProvider, chatHistory, huggingFaceExecutionSettings); @@ -149,8 +148,7 @@ internal async Task> CompleteChatMessageAsync( var endpoint = this.GetChatGenerationEndpoint(); var huggingFaceExecutionSettings = HuggingFacePromptExecutionSettings.FromExecutionSettings(executionSettings); - huggingFaceExecutionSettings.ModelId ??= this._clientCore.ModelId; - var request = this.CreateChatRequest(chatHistory, huggingFaceExecutionSettings); + var request = this.CreateChatRequest(chatHistory, huggingFaceExecutionSettings, modelId); using var activity = ModelDiagnostics.StartCompletionActivity(endpoint, modelId, this._clientCore.ModelProvider, chatHistory, huggingFaceExecutionSettings); using var httpRequestMessage = this._clientCore.CreatePost(request, endpoint, this._clientCore.ApiKey); @@ -276,7 +274,8 @@ private async IAsyncEnumerable ProcessChatResponseS private ChatCompletionRequest CreateChatRequest( ChatHistory chatHistory, - HuggingFacePromptExecutionSettings huggingFaceExecutionSettings) + HuggingFacePromptExecutionSettings huggingFaceExecutionSettings, + string modelId) { HuggingFaceClient.ValidateMaxTokens(huggingFaceExecutionSettings.MaxTokens); @@ -287,7 +286,7 @@ private ChatCompletionRequest CreateChatRequest( JsonSerializer.Serialize(huggingFaceExecutionSettings)); } - var request = ChatCompletionRequest.FromChatHistoryAndExecutionSettings(chatHistory, huggingFaceExecutionSettings); + var request = ChatCompletionRequest.FromChatHistoryAndExecutionSettings(chatHistory, huggingFaceExecutionSettings, modelId); return request; } diff --git a/dotnet/src/Connectors/Connectors.HuggingFace/Core/Models/ChatCompletionRequest.cs b/dotnet/src/Connectors/Connectors.HuggingFace/Core/Models/ChatCompletionRequest.cs index e3f930fecfb9..886e13f18bda 100644 --- a/dotnet/src/Connectors/Connectors.HuggingFace/Core/Models/ChatCompletionRequest.cs +++ b/dotnet/src/Connectors/Connectors.HuggingFace/Core/Models/ChatCompletionRequest.cs @@ -102,8 +102,9 @@ internal sealed class 
ChatCompletionRequest
     /// </summary>
     /// <param name="chatHistory">Chat history to be used for the request.</param>
     /// <param name="executionSettings">Execution settings to be used for the request.</param>
-    /// <returns>TexGenerationtRequest object.</returns>
-    internal static ChatCompletionRequest FromChatHistoryAndExecutionSettings(ChatHistory chatHistory, HuggingFacePromptExecutionSettings executionSettings)
+    /// <param name="modelId">Model id to use if the value in prompt execution settings is not set.</param>
+    /// <returns>TextGenerationRequest object.</returns>
+    internal static ChatCompletionRequest FromChatHistoryAndExecutionSettings(ChatHistory chatHistory, HuggingFacePromptExecutionSettings executionSettings, string modelId)
     {
         return new ChatCompletionRequest
         {
@@ -118,7 +119,7 @@ internal static ChatCompletionRequest FromChatHistoryAndExecutionSettings(ChatHi
             Temperature = executionSettings.Temperature,
             Stop = executionSettings.Stop,
             MaxTokens = executionSettings.MaxTokens,
-            Model = executionSettings.ModelId ?? TextGenerationInferenceDefaultModel,
+            Model = executionSettings.ModelId ?? modelId ?? TextGenerationInferenceDefaultModel,
             TopP = executionSettings.TopP,
             TopLogProbs = executionSettings.TopLogProbs
         };
diff --git a/dotnet/src/Connectors/Connectors.Memory.AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStore.cs b/dotnet/src/Connectors/Connectors.Memory.AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStore.cs
index 70d6210fc355..d9d5b67ee4af 100644
--- a/dotnet/src/Connectors/Connectors.Memory.AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStore.cs
+++ b/dotnet/src/Connectors/Connectors.Memory.AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStore.cs
@@ -2,6 +2,7 @@
 using System;
 using System.Collections.Generic;
+using System.Collections.ObjectModel;
 using System.Diagnostics;
 using System.Linq;
 using System.Runtime.CompilerServices;
@@ -22,11 +23,62 @@ namespace Microsoft.SemanticKernel.Connectors.AzureCosmosDBNoSQL;
 /// </summary>
 public class AzureCosmosDBNoSQLMemoryStore : IMemoryStore, IDisposable
 {
+    private const string EmbeddingPath = "/embedding";
+
     private readonly CosmosClient _cosmosClient;
     private readonly VectorEmbeddingPolicy _vectorEmbeddingPolicy;
     private readonly IndexingPolicy _indexingPolicy;
     private readonly string _databaseName;

+    /// <summary>
+    /// Initiates an AzureCosmosDBNoSQLMemoryStore instance using an Azure Cosmos DB connection string
+    /// and other properties required for vector search.
+    /// </summary>
+    /// <param name="connectionString">Connection string required to connect to Azure Cosmos DB.</param>
+    /// <param name="databaseName">The database name to connect to.</param>
+    /// <param name="dimensions">The number of dimensions of the embedding vectors to be stored.</param>
+    /// <param name="vectorDataType">The data type of the embedding vectors to be stored.</param>
+    /// <param name="vectorIndexType">The type of index to use for the embedding vectors to be stored.</param>
+    /// <param name="applicationName">The application name to use in requests.</param>
+    public AzureCosmosDBNoSQLMemoryStore(
+        string connectionString,
+        string databaseName,
+        ulong dimensions,
+        VectorDataType vectorDataType,
+        VectorIndexType vectorIndexType,
+        string? applicationName = null)
+        : this(
+            new CosmosClient(
+                connectionString,
+                new CosmosClientOptions
+                {
+                    ApplicationName = applicationName ??
HttpHeaderConstant.Values.UserAgent, + Serializer = new CosmosSystemTextJsonSerializer(JsonSerializerOptions.Default), + }), + databaseName, + new VectorEmbeddingPolicy( + [ + new Embedding + { + DataType = vectorDataType, + Dimensions = dimensions, + DistanceFunction = DistanceFunction.Cosine, + Path = EmbeddingPath, + } + ]), + new IndexingPolicy + { + VectorIndexes = new Collection { + new() + { + Path = EmbeddingPath, + Type = vectorIndexType, + }, + }, + }) + { + } + /// /// Initiates a AzureCosmosDBNoSQLMemoryStore instance using a Azure Cosmos DB connection string /// and other properties required for vector search. @@ -71,14 +123,29 @@ public AzureCosmosDBNoSQLMemoryStore( VectorEmbeddingPolicy vectorEmbeddingPolicy, IndexingPolicy indexingPolicy) { - if (!vectorEmbeddingPolicy.Embeddings.Any(e => e.Path == "/embedding")) + var embedding = vectorEmbeddingPolicy.Embeddings.FirstOrDefault(e => e.Path == EmbeddingPath); + if (embedding is null) { throw new InvalidOperationException($""" In order for {nameof(GetNearestMatchAsync)} to function, {nameof(vectorEmbeddingPolicy)} should - contain an embedding path at /embedding. It's also recommended to include a that path in the + contain an embedding path at {EmbeddingPath}. It's also recommended to include that path in the {nameof(indexingPolicy)} to improve performance and reduce cost for searches. """); } + else if (embedding.DistanceFunction != DistanceFunction.Cosine) + { + throw new InvalidOperationException($""" + In order for {nameof(GetNearestMatchAsync)} to reliably return relevance information, the {nameof(DistanceFunction)} should + be specified as {nameof(DistanceFunction)}.{nameof(DistanceFunction.Cosine)}. + """); + } + else if (embedding.DataType != VectorDataType.Float16 && embedding.DataType != VectorDataType.Float32) + { + throw new NotSupportedException($""" + Only {nameof(VectorDataType)}.{nameof(VectorDataType.Float16)} and {nameof(VectorDataType)}.{nameof(VectorDataType.Float32)} + are supported. + """); + } this._cosmosClient = cosmosClient; this._databaseName = databaseName; this._vectorEmbeddingPolicy = vectorEmbeddingPolicy; @@ -164,6 +231,12 @@ public async Task UpsertAsync( MemoryRecord record, CancellationToken cancellationToken = default) { + // In some cases we're expected to generate the key to use. Do so if one isn't provided. + if (string.IsNullOrEmpty(record.Key)) + { + record.Key = Guid.NewGuid().ToString(); + } + var result = await this._cosmosClient .GetDatabase(this._databaseName) .GetContainer(collectionName) @@ -193,6 +266,7 @@ public async IAsyncEnumerable UpsertBatchAsync( bool withEmbedding = false, CancellationToken cancellationToken = default) { + // TODO: Consider using a query when `withEmbedding` is false to avoid passing it over the wire. 
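// Aside (illustration, not part of the diff): the new convenience constructor above pins the
// embedding path to "/embedding" and a cosine distance function, which is exactly what the
// stricter validation now demands. A minimal usage sketch; the connection string, database
// name, and dimension count are placeholders, and the vector types come from the
// Microsoft.Azure.Cosmos preview SDK:
var store = new AzureCosmosDBNoSQLMemoryStore(
    connectionString: "AccountEndpoint=https://example.documents.azure.com:443/;AccountKey=...",
    databaseName: "memory",
    dimensions: 1536,
    vectorDataType: VectorDataType.Float32,  // only Float16/Float32 pass the new data-type check
    vectorIndexType: VectorIndexType.Flat);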
var result = await this._cosmosClient .GetDatabase(this._databaseName) .GetContainer(collectionName) @@ -330,9 +404,10 @@ ORDER BY VectorDistance(x.embedding, @embedding) { foreach (var memoryRecord in await feedIterator.ReadNextAsync(cancellationToken).ConfigureAwait(false)) { - if (memoryRecord.SimilarityScore >= minRelevanceScore) + var relevanceScore = (memoryRecord.SimilarityScore + 1) / 2; + if (relevanceScore >= minRelevanceScore) { - yield return (memoryRecord, memoryRecord.SimilarityScore); + yield return (memoryRecord, relevanceScore); } } } diff --git a/dotnet/src/Connectors/Connectors.Memory.Sqlite/Database.cs b/dotnet/src/Connectors/Connectors.Memory.Sqlite/Database.cs index 84e844800e84..aee0735507c5 100644 --- a/dotnet/src/Connectors/Connectors.Memory.Sqlite/Database.cs +++ b/dotnet/src/Connectors/Connectors.Memory.Sqlite/Database.cs @@ -56,30 +56,13 @@ public async Task CreateCollectionAsync(SqliteConnection conn, string collection await cmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false); } - public async Task UpdateAsync(SqliteConnection conn, + public async Task UpsertAsync(SqliteConnection conn, string collection, string key, string? metadata, string? embedding, string? timestamp, CancellationToken cancellationToken = default) { using SqliteCommand cmd = conn.CreateCommand(); cmd.CommandText = $@" - UPDATE {TableName} - SET metadata=@metadata, embedding=@embedding, timestamp=@timestamp - WHERE collection=@collection - AND key=@key "; - cmd.Parameters.AddWithValue("@collection", collection); - cmd.Parameters.AddWithValue("@key", key); - cmd.Parameters.AddWithValue("@metadata", metadata ?? string.Empty); - cmd.Parameters.AddWithValue("@embedding", embedding ?? string.Empty); - cmd.Parameters.AddWithValue("@timestamp", timestamp ?? string.Empty); - await cmd.ExecuteNonQueryAsync(cancellationToken).ConfigureAwait(false); - } - - public async Task InsertOrIgnoreAsync(SqliteConnection conn, - string collection, string key, string? metadata, string? embedding, string? timestamp, CancellationToken cancellationToken = default) - { - using SqliteCommand cmd = conn.CreateCommand(); - cmd.CommandText = $@" - INSERT OR IGNORE INTO {TableName}(collection, key, metadata, embedding, timestamp) - VALUES(@collection, @key, @metadata, @embedding, @timestamp); "; + INSERT OR REPLACE INTO {TableName}(collection, key, metadata, embedding, timestamp) + VALUES(@collection, @key, @metadata, @embedding, @timestamp);"; cmd.Parameters.AddWithValue("@collection", collection); cmd.Parameters.AddWithValue("@key", key); cmd.Parameters.AddWithValue("@metadata", metadata ?? 
string.Empty); diff --git a/dotnet/src/Connectors/Connectors.Memory.Sqlite/SqliteMemoryStore.cs b/dotnet/src/Connectors/Connectors.Memory.Sqlite/SqliteMemoryStore.cs index bdceb8884885..1dbe176146ce 100644 --- a/dotnet/src/Connectors/Connectors.Memory.Sqlite/SqliteMemoryStore.cs +++ b/dotnet/src/Connectors/Connectors.Memory.Sqlite/SqliteMemoryStore.cs @@ -246,18 +246,8 @@ private async Task InternalUpsertAsync(SqliteConnection connection, stri { record.Key = record.Metadata.Id; - // Update - await this._dbConnector.UpdateAsync( - conn: connection, - collection: collectionName, - key: record.Key, - metadata: record.GetSerializedMetadata(), - embedding: JsonSerializer.Serialize(record.Embedding, JsonOptionsCache.Default), - timestamp: ToTimestampString(record.Timestamp), - cancellationToken: cancellationToken).ConfigureAwait(false); - - // Insert if entry does not exists - await this._dbConnector.InsertOrIgnoreAsync( + // Insert or replace + await this._dbConnector.UpsertAsync( conn: connection, collection: collectionName, key: record.Key, diff --git a/dotnet/src/Connectors/Connectors.OpenAI/AzureSdk/ClientCore.cs b/dotnet/src/Connectors/Connectors.OpenAI/AzureSdk/ClientCore.cs index 2a0cf135adaa..8059077d8bf4 100644 --- a/dotnet/src/Connectors/Connectors.OpenAI/AzureSdk/ClientCore.cs +++ b/dotnet/src/Connectors/Connectors.OpenAI/AzureSdk/ClientCore.cs @@ -561,33 +561,6 @@ internal async Task> GetChatMessageContentsAsy return [chat.Last()]; } - - static void AddResponseMessage(ChatCompletionsOptions chatOptions, ChatHistory chat, string? result, string? errorMessage, ChatCompletionsToolCall toolCall, ILogger logger) - { - // Log any error - if (errorMessage is not null && logger.IsEnabled(LogLevel.Debug)) - { - Debug.Assert(result is null); - logger.LogDebug("Failed to handle tool request ({ToolId}). {Error}", toolCall.Id, errorMessage); - } - - // Add the tool response message to the chat options - result ??= errorMessage ?? string.Empty; - chatOptions.Messages.Add(new ChatRequestToolMessage(result, toolCall.Id)); - - // Add the tool response message to the chat history. - var message = new ChatMessageContent(role: AuthorRole.Tool, content: result, metadata: new Dictionary { { OpenAIChatMessageContent.ToolIdProperty, toolCall.Id } }); - - if (toolCall is ChatCompletionsFunctionToolCall functionCall) - { - // Add an item of type FunctionResultContent to the ChatMessageContent.Items collection in addition to the function result stored as a string in the ChatMessageContent.Content property. - // This will enable migration to the new function calling model and facilitate the deprecation of the current one in the future. - var functionName = FunctionName.Parse(functionCall.Name, OpenAIFunction.NameSeparator); - message.Items.Add(new FunctionResultContent(functionName.Name, functionName.PluginName, functionCall.Id, result)); - } - - chat.Add(message); - } } // Update tool use information for the next go-around based on having completed another iteration. @@ -721,6 +694,16 @@ internal async IAsyncEnumerable GetStreamingC } var openAIStreamingChatMessageContent = new OpenAIStreamingChatMessageContent(update, update.ChoiceIndex ?? 
0, this.DeploymentOrModelName, metadata) { AuthorName = streamedName }; + + if (update.ToolCallUpdate is StreamingFunctionToolCallUpdate functionCallUpdate) + { + openAIStreamingChatMessageContent.Items.Add(new StreamingFunctionCallUpdateContent( + callId: functionCallUpdate.Id, + name: functionCallUpdate.Name, + arguments: functionCallUpdate.ArgumentsUpdate, + functionCallIndex: functionCallUpdate.ToolCallIndex)); + } + streamedContents?.Add(openAIStreamingChatMessageContent); yield return openAIStreamingChatMessageContent; } @@ -728,12 +711,13 @@ internal async IAsyncEnumerable GetStreamingC // Translate all entries into ChatCompletionsFunctionToolCall instances. toolCalls = OpenAIFunctionToolCall.ConvertToolCallUpdatesToChatCompletionsFunctionToolCalls( ref toolCallIdsByIndex, ref functionNamesByIndex, ref functionArgumentBuildersByIndex); + // Translate all entries into FunctionCallContent instances for diagnostics purposes. - functionCallContents = ModelDiagnostics.IsSensitiveEventsEnabled() ? toolCalls.Select(this.GetFunctionCallContent).ToArray() : null; + functionCallContents = this.GetFunctionCallContents(toolCalls).ToArray(); } finally { - activity?.EndStreaming(streamedContents, functionCallContents); + activity?.EndStreaming(streamedContents, ModelDiagnostics.IsSensitiveEventsEnabled() ? functionCallContents : null); await responseEnumerator.DisposeAsync(); } } @@ -764,17 +748,7 @@ internal async IAsyncEnumerable GetStreamingC // Add the original assistant message to the chatOptions; this is required for the service // to understand the tool call responses. chatOptions.Messages.Add(GetRequestMessage(streamedRole ?? default, content, streamedName, toolCalls)); - // Add the result message to the caller's chat history - var newChatMessageContent = new OpenAIChatMessageContent(streamedRole ?? default, content, this.DeploymentOrModelName, toolCalls, metadata) - { - AuthorName = streamedName - }; - // Add the tool call messages to the new chat message content for diagnostics purposes. - foreach (var functionCall in functionCallContents ?? []) - { - newChatMessageContent.Items.Add(functionCall); - } - chat.Add(newChatMessageContent); + chat.Add(this.GetChatMessage(streamedRole ?? default, content, toolCalls, functionCallContents, metadata, streamedName)); // Respond to each tooling request. for (int toolCallIndex = 0; toolCallIndex < toolCalls.Length; toolCallIndex++) @@ -784,7 +758,7 @@ internal async IAsyncEnumerable GetStreamingC // We currently only know about function tool calls. If it's anything else, we'll respond with an error. 
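// Aside (illustration, not part of the diff): because each streamed chunk now carries
// StreamingFunctionCallUpdateContent items, callers can watch partial tool calls arrive.
// A minimal consumption sketch; chatService, history, settings, and kernel are assumed
// to exist in the caller's scope:
await foreach (var update in chatService.GetStreamingChatMessageContentsAsync(history, settings, kernel))
{
    foreach (var call in update.Items.OfType<StreamingFunctionCallUpdateContent>())
    {
        Console.WriteLine($"call {call.CallId}: {call.Name}({call.Arguments})");
    }
}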
if (string.IsNullOrEmpty(toolCall.Name)) { - AddResponseMessage(chatOptions, chat, streamedRole, toolCall, metadata, result: null, "Error: Tool call was not a function call.", this.Logger); + AddResponseMessage(chatOptions, chat, result: null, "Error: Tool call was not a function call.", toolCall, this.Logger); continue; } @@ -796,7 +770,7 @@ internal async IAsyncEnumerable GetStreamingC } catch (JsonException) { - AddResponseMessage(chatOptions, chat, streamedRole, toolCall, metadata, result: null, "Error: Function call arguments were invalid JSON.", this.Logger); + AddResponseMessage(chatOptions, chat, result: null, "Error: Function call arguments were invalid JSON.", toolCall, this.Logger); continue; } @@ -806,14 +780,14 @@ internal async IAsyncEnumerable GetStreamingC if (chatExecutionSettings.ToolCallBehavior?.AllowAnyRequestedKernelFunction is not true && !IsRequestableTool(chatOptions, openAIFunctionToolCall)) { - AddResponseMessage(chatOptions, chat, streamedRole, toolCall, metadata, result: null, "Error: Function call request for a function that wasn't defined.", this.Logger); + AddResponseMessage(chatOptions, chat, result: null, "Error: Function call request for a function that wasn't defined.", toolCall, this.Logger); continue; } // Find the function in the kernel and populate the arguments. if (!kernel!.Plugins.TryGetFunctionAndArguments(openAIFunctionToolCall, out KernelFunction? function, out KernelArguments? functionArgs)) { - AddResponseMessage(chatOptions, chat, streamedRole, toolCall, metadata, result: null, "Error: Requested function could not be found.", this.Logger); + AddResponseMessage(chatOptions, chat, result: null, "Error: Requested function could not be found.", toolCall, this.Logger); continue; } @@ -848,7 +822,7 @@ internal async IAsyncEnumerable GetStreamingC catch (Exception e) #pragma warning restore CA1031 // Do not catch general exception types { - AddResponseMessage(chatOptions, chat, streamedRole, toolCall, metadata, result: null, $"Error: Exception while invoking function. {e.Message}", this.Logger); + AddResponseMessage(chatOptions, chat, result: null, $"Error: Exception while invoking function. {e.Message}", toolCall, this.Logger); continue; } finally @@ -862,7 +836,7 @@ internal async IAsyncEnumerable GetStreamingC object functionResultValue = functionResult.GetValue() ?? string.Empty; var stringResult = ProcessFunctionResult(functionResultValue, chatExecutionSettings.ToolCallBehavior); - AddResponseMessage(chatOptions, chat, streamedRole, toolCall, metadata, stringResult, errorMessage: null, this.Logger); + AddResponseMessage(chatOptions, chat, stringResult, errorMessage: null, toolCall, this.Logger); // If filter requested termination, returning latest function result and breaking request iteration loop. if (invocationContext.Terminate) @@ -877,22 +851,6 @@ internal async IAsyncEnumerable GetStreamingC yield return new OpenAIStreamingChatMessageContent(lastChatMessage.Role, lastChatMessage.Content); yield break; } - - static void AddResponseMessage( - ChatCompletionsOptions chatOptions, ChatHistory chat, ChatRole? streamedRole, ChatCompletionsToolCall tool, IReadOnlyDictionary? metadata, - string? result, string? errorMessage, ILogger logger) - { - if (errorMessage is not null && logger.IsEnabled(LogLevel.Debug)) - { - Debug.Assert(result is null); - logger.LogDebug("Failed to handle tool request ({ToolId}). {Error}", tool.Id, errorMessage); - } - - // Add the tool response message to both the chat options and to the chat history. 
- result ??= errorMessage ?? string.Empty; - chatOptions.Messages.Add(new ChatRequestToolMessage(result, tool.Id)); - chat.AddMessage(AuthorRole.Tool, result, metadata: new Dictionary { { OpenAIChatMessageContent.ToolIdProperty, tool.Id } }); - } } // Update tool use information for the next go-around based on having completed another iteration. @@ -1391,58 +1349,106 @@ private OpenAIChatMessageContent GetChatMessage(ChatChoice chatChoice, ChatCompl { var message = new OpenAIChatMessageContent(chatChoice.Message, this.DeploymentOrModelName, GetChatChoiceMetadata(responseData, chatChoice)); - foreach (var toolCall in chatChoice.Message.ToolCalls) + message.Items.AddRange(this.GetFunctionCallContents(chatChoice.Message.ToolCalls)); + + return message; + } + + private OpenAIChatMessageContent GetChatMessage(ChatRole chatRole, string content, ChatCompletionsFunctionToolCall[] toolCalls, FunctionCallContent[]? functionCalls, IReadOnlyDictionary? metadata, string? authorName) + { + var message = new OpenAIChatMessageContent(chatRole, content, this.DeploymentOrModelName, toolCalls, metadata) { - // Adding items of 'FunctionCallContent' type to the 'Items' collection even though the function calls are available via the 'ToolCalls' property. - // This allows consumers to work with functions in an LLM-agnostic way. - if (toolCall is ChatCompletionsFunctionToolCall functionToolCall) - { - var functionCallContent = this.GetFunctionCallContent(functionToolCall); - message.Items.Add(functionCallContent); - } + AuthorName = authorName, + }; + + if (functionCalls is not null) + { + message.Items.AddRange(functionCalls); } return message; } - private FunctionCallContent GetFunctionCallContent(ChatCompletionsFunctionToolCall toolCall) + private IEnumerable GetFunctionCallContents(IEnumerable toolCalls) { - KernelArguments? arguments = null; - Exception? exception = null; - try + List? result = null; + + foreach (var toolCall in toolCalls) { - arguments = JsonSerializer.Deserialize(toolCall.Arguments); - if (arguments is not null) + // Adding items of 'FunctionCallContent' type to the 'Items' collection even though the function calls are available via the 'ToolCalls' property. + // This allows consumers to work with functions in an LLM-agnostic way. + if (toolCall is ChatCompletionsFunctionToolCall functionToolCall) { - // Iterate over copy of the names to avoid mutating the dictionary while enumerating it - var names = arguments.Names.ToArray(); - foreach (var name in names) + Exception? exception = null; + KernelArguments? 
arguments = null; + try { - arguments[name] = arguments[name]?.ToString(); + arguments = JsonSerializer.Deserialize(functionToolCall.Arguments); + if (arguments is not null) + { + // Iterate over copy of the names to avoid mutating the dictionary while enumerating it + var names = arguments.Names.ToArray(); + foreach (var name in names) + { + arguments[name] = arguments[name]?.ToString(); + } + } + } + catch (JsonException ex) + { + exception = new KernelException("Error: Function call arguments were invalid JSON.", ex); + + if (this.Logger.IsEnabled(LogLevel.Debug)) + { + this.Logger.LogDebug(ex, "Failed to deserialize function arguments ({FunctionName}/{FunctionId}).", functionToolCall.Name, functionToolCall.Id); + } } + + var functionName = FunctionName.Parse(functionToolCall.Name, OpenAIFunction.NameSeparator); + + var functionCallContent = new FunctionCallContent( + functionName: functionName.Name, + pluginName: functionName.PluginName, + id: functionToolCall.Id, + arguments: arguments) + { + InnerContent = functionToolCall, + Exception = exception + }; + + result ??= []; + result.Add(functionCallContent); } } - catch (JsonException ex) - { - exception = new KernelException("Error: Function call arguments were invalid JSON.", ex); - if (this.Logger.IsEnabled(LogLevel.Debug)) - { - this.Logger.LogDebug(ex, "Failed to deserialize function arguments ({FunctionName}/{FunctionId}).", toolCall.Name, toolCall.Id); - } + return result ?? Enumerable.Empty(); + } + + private static void AddResponseMessage(ChatCompletionsOptions chatOptions, ChatHistory chat, string? result, string? errorMessage, ChatCompletionsToolCall toolCall, ILogger logger) + { + // Log any error + if (errorMessage is not null && logger.IsEnabled(LogLevel.Debug)) + { + Debug.Assert(result is null); + logger.LogDebug("Failed to handle tool request ({ToolId}). {Error}", toolCall.Id, errorMessage); } - var functionName = FunctionName.Parse(toolCall.Name, OpenAIFunction.NameSeparator); + // Add the tool response message to the chat options + result ??= errorMessage ?? string.Empty; + chatOptions.Messages.Add(new ChatRequestToolMessage(result, toolCall.Id)); - return new FunctionCallContent( - functionName: functionName.Name, - pluginName: functionName.PluginName, - id: toolCall.Id, - arguments: arguments) + // Add the tool response message to the chat history. + var message = new ChatMessageContent(role: AuthorRole.Tool, content: result, metadata: new Dictionary { { OpenAIChatMessageContent.ToolIdProperty, toolCall.Id } }); + + if (toolCall is ChatCompletionsFunctionToolCall functionCall) { - InnerContent = toolCall, - Exception = exception - }; + // Add an item of type FunctionResultContent to the ChatMessageContent.Items collection in addition to the function result stored as a string in the ChatMessageContent.Content property. + // This will enable migration to the new function calling model and facilitate the deprecation of the current one in the future. + var functionName = FunctionName.Parse(functionCall.Name, OpenAIFunction.NameSeparator); + message.Items.Add(new FunctionResultContent(functionName.Name, functionName.PluginName, functionCall.Id, result)); + } + + chat.Add(message); } private static void ValidateMaxTokens(int? 
maxTokens) diff --git a/dotnet/src/Connectors/Connectors.OpenAI/CompatibilitySuppressions.xml b/dotnet/src/Connectors/Connectors.OpenAI/CompatibilitySuppressions.xml index 1dd99a9223a4..3477ed220ea0 100644 --- a/dotnet/src/Connectors/Connectors.OpenAI/CompatibilitySuppressions.xml +++ b/dotnet/src/Connectors/Connectors.OpenAI/CompatibilitySuppressions.xml @@ -1,6 +1,20 @@  + + CP0002 + F:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFilePurpose.Assistants + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0002 + F:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFilePurpose.FineTune + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + CP0002 M:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFileService.GetFileContent(System.String,System.Threading.CancellationToken) @@ -8,6 +22,41 @@ lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll true + + CP0002 + M:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAITextToImageService.#ctor(System.String,System.String,System.Net.Http.HttpClient,Microsoft.Extensions.Logging.ILoggerFactory) + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0002 + M:Microsoft.SemanticKernel.OpenAIServiceCollectionExtensions.AddOpenAITextToImage(Microsoft.Extensions.DependencyInjection.IServiceCollection,System.String,System.String,System.String) + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0002 + M:Microsoft.SemanticKernel.OpenAIServiceCollectionExtensions.AddOpenAITextToImage(Microsoft.SemanticKernel.IKernelBuilder,System.String,System.String,System.String,System.Net.Http.HttpClient) + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0002 + F:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFilePurpose.Assistants + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0002 + F:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFilePurpose.FineTune + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + CP0002 M:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFileService.GetFileContent(System.String,System.Threading.CancellationToken) @@ -15,4 +64,53 @@ lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll true + + CP0002 + M:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAITextToImageService.#ctor(System.String,System.String,System.Net.Http.HttpClient,Microsoft.Extensions.Logging.ILoggerFactory) + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0002 + M:Microsoft.SemanticKernel.OpenAIServiceCollectionExtensions.AddOpenAITextToImage(Microsoft.Extensions.DependencyInjection.IServiceCollection,System.String,System.String,System.String) + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0002 + 
M:Microsoft.SemanticKernel.OpenAIServiceCollectionExtensions.AddOpenAITextToImage(Microsoft.SemanticKernel.IKernelBuilder,System.String,System.String,System.String,System.Net.Http.HttpClient) + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0007 + T:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFilePurpose + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0007 + T:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFilePurpose + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0008 + T:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFilePurpose + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/net8.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + + + CP0008 + T:Microsoft.SemanticKernel.Connectors.OpenAI.OpenAIFilePurpose + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + lib/netstandard2.0/Microsoft.SemanticKernel.Connectors.OpenAI.dll + true + \ No newline at end of file diff --git a/dotnet/src/Connectors/Connectors.OpenAI/Files/OpenAIFilePurpose.cs b/dotnet/src/Connectors/Connectors.OpenAI/Files/OpenAIFilePurpose.cs index a01b2d08fa8d..8d87720fa89f 100644 --- a/dotnet/src/Connectors/Connectors.OpenAI/Files/OpenAIFilePurpose.cs +++ b/dotnet/src/Connectors/Connectors.OpenAI/Files/OpenAIFilePurpose.cs @@ -1,22 +1,99 @@ // Copyright (c) Microsoft. All rights reserved. +using System; using System.Diagnostics.CodeAnalysis; namespace Microsoft.SemanticKernel.Connectors.OpenAI; /// -/// Defines the purpose associated with the uploaded file. +/// Defines the purpose associated with the uploaded file: +/// https://platform.openai.com/docs/api-reference/files/object#files/object-purpose /// [Experimental("SKEXP0010")] -public enum OpenAIFilePurpose +public readonly struct OpenAIFilePurpose : IEquatable { /// - /// File to be used by assistants for model processing. + /// File to be used by assistants as input. /// - Assistants, + public static OpenAIFilePurpose Assistants { get; } = new("assistants"); /// - /// File to be used by fine-tuning jobs. + /// File produced as assistants output. /// - FineTune, + public static OpenAIFilePurpose AssistantsOutput { get; } = new("assistants_output"); + + /// + /// Files uploaded as a batch of API requests + /// + public static OpenAIFilePurpose Batch { get; } = new("batch"); + + /// + /// File produced as result of a file included as a batch request. + /// + public static OpenAIFilePurpose BatchOutput { get; } = new("batch_output"); + + /// + /// File to be used as input to fine-tune a model. + /// + public static OpenAIFilePurpose FineTune { get; } = new("fine-tune"); + + /// + /// File produced as result of fine-tuning a model. + /// + public static OpenAIFilePurpose FineTuneResults { get; } = new("fine-tune-results"); + + /// + /// File to be used for Assistants image file inputs. + /// + public static OpenAIFilePurpose Vision { get; } = new("vision"); + + /// + /// Gets the label associated with this . + /// + public string Label { get; } + + /// + /// Creates a new instance with the provided label. + /// + /// The label to associate with this . 
+ public OpenAIFilePurpose(string label) + { + Verify.NotNullOrWhiteSpace(label, nameof(label)); + this.Label = label!; + } + + /// + /// Returns a value indicating whether two instances are equivalent, as determined by a + /// case-insensitive comparison of their labels. + /// + /// the first instance to compare + /// the second instance to compare + /// true if left and right are both null or have equivalent labels; false otherwise + public static bool operator ==(OpenAIFilePurpose left, OpenAIFilePurpose right) + => left.Equals(right); + + /// + /// Returns a value indicating whether two instances are not equivalent, as determined by a + /// case-insensitive comparison of their labels. + /// + /// the first instance to compare + /// the second instance to compare + /// false if left and right are both null or have equivalent labels; true otherwise + public static bool operator !=(OpenAIFilePurpose left, OpenAIFilePurpose right) + => !(left == right); + + /// + public override bool Equals([NotNullWhen(true)] object? obj) + => obj is OpenAIFilePurpose otherPurpose && this == otherPurpose; + + /// + public bool Equals(OpenAIFilePurpose other) + => string.Equals(this.Label, other.Label, StringComparison.OrdinalIgnoreCase); + + /// + public override int GetHashCode() + => StringComparer.OrdinalIgnoreCase.GetHashCode(this.Label); + + /// + public override string ToString() => this.Label; } diff --git a/dotnet/src/Connectors/Connectors.OpenAI/Files/OpenAIFileService.cs b/dotnet/src/Connectors/Connectors.OpenAI/Files/OpenAIFileService.cs index cc61734f44c8..690954448eea 100644 --- a/dotnet/src/Connectors/Connectors.OpenAI/Files/OpenAIFileService.cs +++ b/dotnet/src/Connectors/Connectors.OpenAI/Files/OpenAIFileService.cs @@ -112,7 +112,8 @@ public async Task DeleteFileAsync(string id, CancellationToken cancellationToken public async Task GetFileContentAsync(string id, CancellationToken cancellationToken = default) { Verify.NotNull(id, nameof(id)); - var (stream, mimetype) = await this.StreamGetRequestAsync($"{this._serviceUri}/{id}/content", cancellationToken).ConfigureAwait(false); + var contentUri = $"{this._serviceUri}/{id}/content"; + var (stream, mimetype) = await this.StreamGetRequestAsync(contentUri, cancellationToken).ConfigureAwait(false); using (stream) { @@ -123,7 +124,12 @@ public async Task GetFileContentAsync(string id, CancellationToke #else await stream.CopyToAsync(memoryStream, cancellationToken).ConfigureAwait(false); #endif - return new BinaryContent(memoryStream.ToArray(), mimetype); + return + new(memoryStream.ToArray(), mimetype) + { + Metadata = new Dictionary() { { "id", id } }, + Uri = new Uri(contentUri), + }; } } @@ -147,9 +153,19 @@ public async Task GetFileAsync(string id, CancellationToken /// /// The to monitor for cancellation requests. The default is . /// The metadata of all uploaded files. - public async Task> GetFilesAsync(CancellationToken cancellationToken = default) + public Task> GetFilesAsync(CancellationToken cancellationToken = default) + => this.GetFilesAsync(null, cancellationToken); + + /// + /// Retrieve metadata for previously uploaded files + /// + /// The purpose of the files by which to filter. + /// The to monitor for cancellation requests. The default is . + /// The metadata of all uploaded files. + public async Task> GetFilesAsync(OpenAIFilePurpose? 
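
Replacing the closed enum with a string-backed struct means purpose labels the SDK does not know about round-trip instead of failing (the old `ConvertPurpose` helpers, removed further down, threw `KernelException` on anything unrecognized). A quick illustration of the equality semantics defined above:

```csharp
using System;
using Microsoft.SemanticKernel.Connectors.OpenAI;

var known = OpenAIFilePurpose.Assistants;           // built-in label "assistants"
var custom = new OpenAIFilePurpose("ASSISTANTS");   // any non-empty label is accepted

Console.WriteLine(known == custom);   // True: comparison is case-insensitive
Console.WriteLine(custom.ToString()); // ASSISTANTS: ToString() returns the label
```
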
filePurpose, CancellationToken cancellationToken = default) { - var result = await this.ExecuteGetRequestAsync(this._serviceUri.ToString(), cancellationToken).ConfigureAwait(false); + var serviceUri = filePurpose.HasValue && !string.IsNullOrEmpty(filePurpose.Value.Label) ? $"{this._serviceUri}?purpose={filePurpose}" : this._serviceUri.ToString(); + var result = await this.ExecuteGetRequestAsync(serviceUri, cancellationToken).ConfigureAwait(false); return result.Data.Select(this.ConvertFileReference).ToArray(); } @@ -167,7 +183,7 @@ public async Task UploadContentAsync(BinaryContent fileCont Verify.NotNull(fileContent.Data, nameof(fileContent.Data)); using var formData = new MultipartFormDataContent(); - using var contentPurpose = new StringContent(this.ConvertPurpose(settings.Purpose)); + using var contentPurpose = new StringContent(settings.Purpose.Label); using var contentFile = new ByteArrayContent(fileContent.Data.Value.ToArray()); formData.Add(contentPurpose, "purpose"); formData.Add(contentFile, "file", settings.FileName); @@ -281,26 +297,10 @@ private OpenAIFileReference ConvertFileReference(FileInfo result) FileName = result.FileName, CreatedTimestamp = DateTimeOffset.FromUnixTimeSeconds(result.CreatedAt).UtcDateTime, SizeInBytes = result.Bytes ?? 0, - Purpose = this.ConvertPurpose(result.Purpose), + Purpose = new(result.Purpose), }; } - private OpenAIFilePurpose ConvertPurpose(string purpose) => - purpose.ToUpperInvariant() switch - { - "ASSISTANTS" => OpenAIFilePurpose.Assistants, - "FINE-TUNE" => OpenAIFilePurpose.FineTune, - _ => throw new KernelException($"Unknown {nameof(OpenAIFilePurpose)}: {purpose}."), - }; - - private string ConvertPurpose(OpenAIFilePurpose purpose) => - purpose switch - { - OpenAIFilePurpose.Assistants => "assistants", - OpenAIFilePurpose.FineTune => "fine-tune", - _ => throw new KernelException($"Unknown {nameof(OpenAIFilePurpose)}: {purpose}."), - }; - private sealed class FileInfoList { [JsonPropertyName("data")] diff --git a/dotnet/src/Connectors/Connectors.OpenAI/OpenAIServiceCollectionExtensions.cs b/dotnet/src/Connectors/Connectors.OpenAI/OpenAIServiceCollectionExtensions.cs index b9d8b861dbc7..80cc60944965 100644 --- a/dotnet/src/Connectors/Connectors.OpenAI/OpenAIServiceCollectionExtensions.cs +++ b/dotnet/src/Connectors/Connectors.OpenAI/OpenAIServiceCollectionExtensions.cs @@ -1309,6 +1309,7 @@ public static IServiceCollection AddAzureOpenAITextToImage( /// The instance to augment. /// OpenAI API key, see https://platform.openai.com/account/api-keys /// OpenAI organization id. This is usually optional unless your account belongs to multiple organizations. + /// The model to use for image generation. /// A local identifier for the given AI service /// The HttpClient to use with this service. /// The same instance as . @@ -1317,6 +1318,7 @@ public static IKernelBuilder AddOpenAITextToImage( this IKernelBuilder builder, string apiKey, string? orgId = null, + string? modelId = null, string? serviceId = null, HttpClient? httpClient = null) { @@ -1327,6 +1329,7 @@ public static IKernelBuilder AddOpenAITextToImage( new OpenAITextToImageService( apiKey, orgId, + modelId, HttpClientProvider.GetHttpClient(httpClient, serviceProvider), serviceProvider.GetService())); @@ -1339,12 +1342,14 @@ public static IKernelBuilder AddOpenAITextToImage( /// The instance to augment. /// OpenAI API key, see https://platform.openai.com/account/api-keys /// OpenAI organization id. This is usually optional unless your account belongs to multiple organizations. 
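
The new overload pushes the filter to the service as a `?purpose=` query parameter rather than filtering client-side. A usage sketch; the `OpenAIFileService` constructor arguments shown are illustrative:

```csharp
using System;
using System.Linq;
using Microsoft.SemanticKernel.Connectors.OpenAI;

// Placeholder API key; see OpenAIFileService for the full constructor signature.
var fileService = new OpenAIFileService("sk-...");

// All files, as before:
var allFiles = await fileService.GetFilesAsync();

// Server-side filter, sent as "?purpose=fine-tune":
var fineTuneFiles = await fileService.GetFilesAsync(OpenAIFilePurpose.FineTune);

Console.WriteLine($"{fineTuneFiles.Count()} of {allFiles.Count()} files are fine-tuning inputs.");
```
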
+ /// The model to use for image generation. /// A local identifier for the given AI service /// The same instance as . [Experimental("SKEXP0010")] public static IServiceCollection AddOpenAITextToImage(this IServiceCollection services, string apiKey, string? orgId = null, + string? modelId = null, string? serviceId = null) { Verify.NotNull(services); @@ -1354,6 +1359,7 @@ public static IServiceCollection AddOpenAITextToImage(this IServiceCollection se new OpenAITextToImageService( apiKey, orgId, + modelId, HttpClientProvider.GetHttpClient(serviceProvider), serviceProvider.GetService())); } diff --git a/dotnet/src/Connectors/Connectors.OpenAI/TextToImage/OpenAITextToImageService.cs b/dotnet/src/Connectors/Connectors.OpenAI/TextToImage/OpenAITextToImageService.cs index 08dad90554c8..335fe8cad5ee 100644 --- a/dotnet/src/Connectors/Connectors.OpenAI/TextToImage/OpenAITextToImageService.cs +++ b/dotnet/src/Connectors/Connectors.OpenAI/TextToImage/OpenAITextToImageService.cs @@ -8,6 +8,7 @@ using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; +using Microsoft.SemanticKernel.Services; using Microsoft.SemanticKernel.TextToImage; namespace Microsoft.SemanticKernel.Connectors.OpenAI; @@ -35,25 +36,37 @@ public sealed class OpenAITextToImageService : ITextToImageService /// private readonly string _authorizationHeaderValue; + /// + /// The model to use for image generation. + /// + private readonly string? _modelId; + /// /// Initializes a new instance of the class. /// /// OpenAI API key, see https://platform.openai.com/account/api-keys /// OpenAI organization id. This is usually optional unless your account belongs to multiple organizations. + /// The model to use for image generation. /// Custom for HTTP requests. /// The to use for logging. If null, no logging will be performed. public OpenAITextToImageService( string apiKey, string? organization = null, + string? modelId = null, HttpClient? httpClient = null, ILoggerFactory? loggerFactory = null) { Verify.NotNullOrWhiteSpace(apiKey); this._authorizationHeaderValue = $"Bearer {apiKey}"; this._organizationHeaderValue = organization; + this._modelId = modelId; this._core = new(httpClient, loggerFactory?.CreateLogger(this.GetType())); this._core.AddAttribute(OpenAIClientCore.OrganizationKey, organization); + if (modelId is not null) + { + this._core.AddAttribute(AIServiceExtensions.ModelIdKey, modelId); + } this._core.RequestCreated += (_, request) => { @@ -77,10 +90,11 @@ public Task GenerateImageAsync(string description, int width, int height throw new ArgumentOutOfRangeException(nameof(width), width, "OpenAI can generate only square images of size 256x256, 512x512, or 1024x1024."); } - return this.GenerateImageAsync(description, width, height, "url", x => x.Url, cancellationToken); + return this.GenerateImageAsync(this._modelId, description, width, height, "url", x => x.Url, cancellationToken); } private async Task GenerateImageAsync( + string? 
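
With the plumbing above, a caller can pin the image model at registration time; when `modelId` is omitted the request body simply carries no `model` field (see the `JsonIgnore` condition on `TextToImageRequest.Model` below) and the API default applies. A minimal sketch with placeholder key and model values:

```csharp
using System;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.TextToImage;

var kernel = Kernel.CreateBuilder()
    .AddOpenAITextToImage(apiKey: "sk-...", modelId: "dall-e-3")
    .Build();

var imageService = kernel.GetRequiredService<ITextToImageService>();

// The connector still validates sizes; 1024x1024 is accepted.
string url = await imageService.GenerateImageAsync("A watercolor fox in a forest", 1024, 1024);
Console.WriteLine(url);
```
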
model, string description, int width, int height, string format, Func extractResponse, @@ -90,6 +104,7 @@ private async Task GenerateImageAsync( var requestBody = JsonSerializer.Serialize(new TextToImageRequest { + Model = model, Prompt = description, Size = $"{width}x{height}", Count = 1, diff --git a/dotnet/src/Connectors/Connectors.OpenAI/TextToImage/TextToImageRequest.cs b/dotnet/src/Connectors/Connectors.OpenAI/TextToImage/TextToImageRequest.cs index b5988a91cda4..70b5ac5418ee 100644 --- a/dotnet/src/Connectors/Connectors.OpenAI/TextToImage/TextToImageRequest.cs +++ b/dotnet/src/Connectors/Connectors.OpenAI/TextToImage/TextToImageRequest.cs @@ -9,31 +9,34 @@ namespace Microsoft.SemanticKernel.Connectors.OpenAI; /// internal sealed class TextToImageRequest { + /// + /// Model to use for image generation + /// + [JsonPropertyName("model")] + [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)] + public string? Model { get; set; } + /// /// Image prompt /// [JsonPropertyName("prompt")] - [JsonPropertyOrder(1)] public string Prompt { get; set; } = string.Empty; /// /// Image size /// [JsonPropertyName("size")] - [JsonPropertyOrder(2)] public string Size { get; set; } = "256x256"; /// /// How many images to generate /// [JsonPropertyName("n")] - [JsonPropertyOrder(3)] public int Count { get; set; } = 1; /// /// Image format, "url" or "b64_json" /// [JsonPropertyName("response_format")] - [JsonPropertyOrder(4)] public string Format { get; set; } = "url"; } diff --git a/dotnet/src/Connectors/Connectors.UnitTests/OpenAI/TextToImage/OpenAITextToImageServiceTests.cs b/dotnet/src/Connectors/Connectors.UnitTests/OpenAI/TextToImage/OpenAITextToImageServiceTests.cs index 46334a06fb48..1f31ec076edd 100644 --- a/dotnet/src/Connectors/Connectors.UnitTests/OpenAI/TextToImage/OpenAITextToImageServiceTests.cs +++ b/dotnet/src/Connectors/Connectors.UnitTests/OpenAI/TextToImage/OpenAITextToImageServiceTests.cs @@ -40,6 +40,7 @@ public void ConstructorWorksCorrectly(bool includeLoggerFactory) // Assert Assert.NotNull(service); Assert.Equal("organization", service.Attributes["Organization"]); + Assert.False(service.Attributes.ContainsKey("ModelId")); } [Theory] @@ -51,7 +52,8 @@ public void ConstructorWorksCorrectly(bool includeLoggerFactory) public async Task GenerateImageWorksCorrectlyAsync(int width, int height, bool expectedException) { // Arrange - var service = new OpenAITextToImageService("api-key", "organization", this._httpClient); + var service = new OpenAITextToImageService("api-key", "organization", "dall-e-3", this._httpClient); + Assert.Equal("dall-e-3", service.Attributes["ModelId"]); this._messageHandlerStub.ResponseToReturn = new HttpResponseMessage(System.Net.HttpStatusCode.OK) { Content = new StringContent(""" diff --git a/dotnet/src/Experimental/Orchestration.Flow.IntegrationTests/README.md b/dotnet/src/Experimental/Orchestration.Flow.IntegrationTests/README.md index 90bd07b0bc06..fef9a8dae35b 100644 --- a/dotnet/src/Experimental/Orchestration.Flow.IntegrationTests/README.md +++ b/dotnet/src/Experimental/Orchestration.Flow.IntegrationTests/README.md @@ -3,7 +3,7 @@ ## Requirements 1. **Azure OpenAI**: go to the [Azure OpenAI Quickstart](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/quickstart) - and deploy an instance of Azure OpenAI, deploy a model like "text-davinci-003" find your Endpoint and API key. + and deploy an instance of Azure OpenAI, deploy a model like "gpt-35-turbo-instruct" find your Endpoint and API key. 2. 
**OpenAI**: go to [OpenAI](https://platform.openai.com) to register and procure your API key. 3. **Azure Bing Web Search API**: go to [Bing Web Search API](https://www.microsoft.com/en-us/bing/apis/bing-web-search-api) and select `Try Now` to get started. @@ -25,13 +25,13 @@ To set your secrets with Secret Manager: cd dotnet/src/IntegrationTests dotnet user-secrets init -dotnet user-secrets set "OpenAI:ServiceId" "text-davinci-003" -dotnet user-secrets set "OpenAI:ModelId" "text-davinci-003" +dotnet user-secrets set "OpenAI:ServiceId" "gpt-3.5-turbo-instruct" +dotnet user-secrets set "OpenAI:ModelId" "gpt-3.5-turbo-instruct" dotnet user-secrets set "OpenAI:ChatModelId" "gpt-4" dotnet user-secrets set "OpenAI:ApiKey" "..." -dotnet user-secrets set "AzureOpenAI:ServiceId" "azure-text-davinci-003" -dotnet user-secrets set "AzureOpenAI:DeploymentName" "text-davinci-003" +dotnet user-secrets set "AzureOpenAI:ServiceId" "azure-gpt-35-turbo-instruct" +dotnet user-secrets set "AzureOpenAI:DeploymentName" "gpt-35-turbo-instruct" dotnet user-secrets set "AzureOpenAI:ChatDeploymentName" "gpt-4" dotnet user-secrets set "AzureOpenAI:Endpoint" "https://contoso.openai.azure.com/" dotnet user-secrets set "AzureOpenAI:ApiKey" "..." @@ -56,14 +56,14 @@ For example: ```json { "OpenAI": { - "ServiceId": "text-davinci-003", - "ModelId": "text-davinci-003", + "ServiceId": "gpt-3.5-turbo-instruct", + "ModelId": "gpt-3.5-turbo-instruct", "ChatModelId": "gpt-4", "ApiKey": "sk-...." }, "AzureOpenAI": { - "ServiceId": "azure-text-davinci-003", - "DeploymentName": "text-davinci-003", + "ServiceId": "gpt-35-turbo-instruct", + "DeploymentName": "gpt-35-turbo-instruct", "ChatDeploymentName": "gpt-4", "Endpoint": "https://contoso.openai.azure.com/", "ApiKey": "...." @@ -95,7 +95,7 @@ When setting environment variables, use a double underscore (i.e. "\_\_") to del ```bash export OpenAI__ApiKey="sk-...." export AzureOpenAI__ApiKey="...." - export AzureOpenAI__DeploymentName="azure-text-davinci-003" + export AzureOpenAI__DeploymentName="gpt-35-turbo-instruct" export AzureOpenAI__ChatDeploymentName="gpt-4" export AzureOpenAIEmbeddings__DeploymentName="azure-text-embedding-ada-002" export AzureOpenAI__Endpoint="https://contoso.openai.azure.com/" @@ -107,7 +107,7 @@ When setting environment variables, use a double underscore (i.e. "\_\_") to del ```ps $env:OpenAI__ApiKey = "sk-...." $env:AzureOpenAI__ApiKey = "...." 
- $env:AzureOpenAI__DeploymentName = "azure-text-davinci-003" + $env:AzureOpenAI__DeploymentName = "gpt-35-turbo-instruct" $env:AzureOpenAI__ChatDeploymentName = "gpt-4" $env:AzureOpenAIEmbeddings__DeploymentName = "azure-text-embedding-ada-002" $env:AzureOpenAI__Endpoint = "https://contoso.openai.azure.com/" diff --git a/dotnet/src/Experimental/Orchestration.Flow.IntegrationTests/testsettings.json b/dotnet/src/Experimental/Orchestration.Flow.IntegrationTests/testsettings.json index 2b5e41c5cbd7..e2ce917f9732 100644 --- a/dotnet/src/Experimental/Orchestration.Flow.IntegrationTests/testsettings.json +++ b/dotnet/src/Experimental/Orchestration.Flow.IntegrationTests/testsettings.json @@ -1,12 +1,12 @@ { "OpenAI": { - "ServiceId": "text-davinci-003", - "ModelId": "text-davinci-003", + "ServiceId": "gpt-3.5-turbo-instruct", + "ModelId": "gpt-3.5-turbo-instruct", "ApiKey": "" }, "AzureOpenAI": { - "ServiceId": "azure-text-davinci-003", - "DeploymentName": "text-davinci-003", + "ServiceId": "azure-gpt-35-turbo-instruct", + "DeploymentName": "gpt-35-turbo-instruct", "ChatDeploymentName": "gpt-4", "Endpoint": "", "ApiKey": "" diff --git a/dotnet/src/Experimental/Orchestration.Flow/FlowOrchestrator.cs b/dotnet/src/Experimental/Orchestration.Flow/FlowOrchestrator.cs index d86c1681b96e..67abae8ef61c 100644 --- a/dotnet/src/Experimental/Orchestration.Flow/FlowOrchestrator.cs +++ b/dotnet/src/Experimental/Orchestration.Flow/FlowOrchestrator.cs @@ -73,6 +73,6 @@ public async Task ExecuteFlowAsync( } var executor = new FlowExecutor(this._kernelBuilder, this._flowStatusProvider, this._globalPluginCollection, this._config); - return await executor.ExecuteFlowAsync(flow, sessionId, input, kernelArguments ?? new KernelArguments(null)).ConfigureAwait(false); + return await executor.ExecuteFlowAsync(flow, sessionId, input, kernelArguments ?? new KernelArguments()).ConfigureAwait(false); } } diff --git a/dotnet/src/Extensions/PromptTemplates.Handlebars/Helpers/KernelHelpers/KernelFunctionHelpers.cs b/dotnet/src/Extensions/PromptTemplates.Handlebars/Helpers/KernelHelpers/KernelFunctionHelpers.cs index 9f9b599ef9b6..9cb98b446e68 100644 --- a/dotnet/src/Extensions/PromptTemplates.Handlebars/Helpers/KernelHelpers/KernelFunctionHelpers.cs +++ b/dotnet/src/Extensions/PromptTemplates.Handlebars/Helpers/KernelHelpers/KernelFunctionHelpers.cs @@ -226,7 +226,7 @@ private static void ProcessPositionalArguments(KernelFunctionMetadata functionMe // Deserialize any JSON content or return the content as a string if (restApiOperationResponse.ContentType?.IndexOf("application/json", StringComparison.OrdinalIgnoreCase) >= 0) { - var parsedJson = JsonValue.Parse(restApiOperationResponse.Content.ToString() ?? string.Empty); + var parsedJson = JsonValue.Parse(restApiOperationResponse.Content?.ToString() ?? string.Empty); return KernelHelpersUtils.DeserializeJsonNode(parsedJson); } diff --git a/dotnet/src/Functions/Functions.OpenApi/Extensions/OpenApiKernelExtensions.cs b/dotnet/src/Functions/Functions.OpenApi/Extensions/OpenApiKernelExtensions.cs index 3bcb963571b7..98126638dc62 100644 --- a/dotnet/src/Functions/Functions.OpenApi/Extensions/OpenApiKernelExtensions.cs +++ b/dotnet/src/Functions/Functions.OpenApi/Extensions/OpenApiKernelExtensions.cs @@ -280,7 +280,7 @@ internal static KernelFunction CreateRestApiFunction( var logger = loggerFactory?.CreateLogger(typeof(OpenApiKernelExtensions)) ?? 
NullLogger.Instance; - async Task ExecuteAsync(KernelArguments variables, CancellationToken cancellationToken) + async Task ExecuteAsync(Kernel kernel, KernelFunction function, KernelArguments variables, CancellationToken cancellationToken) { try { @@ -314,6 +314,9 @@ async Task ExecuteAsync(KernelArguments variables, Can var options = new RestApiOperationRunOptions { + Kernel = kernel, + KernelFunction = function, + KernelArguments = arguments, ServerUrlOverride = executionParameters?.ServerUrlOverride, ApiHostUrl = documentUri is not null ? new Uri(documentUri.GetLeftPart(UriPartial.Authority)) : null }; @@ -364,12 +367,12 @@ async Task ExecuteAsync(KernelArguments variables, Can } /// - /// Converts operation id to valid SK Function name. + /// Converts operation id to valid name. /// A function name can contain only ASCII letters, digits, and underscores. /// /// The operation id. /// The logger. - /// Valid SK Function name. + /// Valid KernelFunction name. private static string ConvertOperationIdToValidFunctionName(string operationId, ILogger logger) { try @@ -380,7 +383,7 @@ private static string ConvertOperationIdToValidFunctionName(string operationId, catch (ArgumentException) { // The exception indicates that the operationId is not a valid function name. - // To comply with the SK Function name requirements, it needs to be converted or sanitized. + // To comply with the KernelFunction name requirements, it needs to be converted or sanitized. // Therefore, it should not be re-thrown, but rather swallowed to allow the conversion below. } diff --git a/dotnet/src/Functions/Functions.OpenApi/Extensions/RestApiOperationResponseExtensions.cs b/dotnet/src/Functions/Functions.OpenApi/Extensions/RestApiOperationResponseExtensions.cs index 46f694b2afb4..c377f5e6f1a7 100644 --- a/dotnet/src/Functions/Functions.OpenApi/Extensions/RestApiOperationResponseExtensions.cs +++ b/dotnet/src/Functions/Functions.OpenApi/Extensions/RestApiOperationResponseExtensions.cs @@ -33,7 +33,7 @@ public static bool IsValid(this RestApiOperationResponse response) return true; } - return response.ContentType switch + return response.ContentType! switch { var ct when ct.StartsWith("application/json", StringComparison.OrdinalIgnoreCase) => ValidateJson(response), var ct when ct.StartsWith("application/xml", StringComparison.OrdinalIgnoreCase) => ValidateXml(response), @@ -47,7 +47,7 @@ private static bool ValidateJson(RestApiOperationResponse response) try { var jsonSchema = JsonSchema.FromText(JsonSerializer.Serialize(response.ExpectedSchema)); - using var contentDoc = JsonDocument.Parse(response.Content.ToString() ?? ""); + using var contentDoc = JsonDocument.Parse(response.Content?.ToString() ?? string.Empty); var result = jsonSchema.Evaluate(contentDoc); return result.IsValid; } diff --git a/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperation.cs b/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperation.cs index 36c2f58cca1a..af65b1c59825 100644 --- a/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperation.cs +++ b/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperation.cs @@ -5,6 +5,7 @@ using System.Linq; using System.Net.Http; using System.Text.Json.Nodes; +using System.Web; namespace Microsoft.SemanticKernel.Plugins.OpenApi; @@ -238,7 +239,7 @@ private string BuildPath(string pathTemplate, IDictionary argum var node = OpenApiTypeConverter.Convert(parameter.Name, parameter.Type, argument); // Serializing the parameter and adding it to the path. 
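
For context on the rule these renamed comments describe, a hypothetical sanitizer (not the connector's actual implementation, which lives in `ConvertOperationIdToValidFunctionName`) mapping an arbitrary operation id onto the allowed alphabet might look like:

```csharp
using System;
using System.Text.RegularExpressions;

// Hypothetical: replace anything outside ASCII letters, digits, and underscores.
static string ToValidFunctionName(string operationId) =>
    Regex.Replace(operationId, "[^0-9A-Za-z_]", "_");

Console.WriteLine(ToValidFunctionName("repairs.list-Repairs")); // repairs_list_Repairs
```
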
- pathTemplate = pathTemplate.Replace($"{{{parameter.Name}}}", node.ToString().Trim('"')); + pathTemplate = pathTemplate.Replace($"{{{parameter.Name}}}", HttpUtility.UrlEncode(serializer.Invoke(parameter, node))); } return pathTemplate; diff --git a/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationRunOptions.cs b/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationRunOptions.cs index bf716e8f371c..1462145b9ea3 100644 --- a/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationRunOptions.cs +++ b/dotnet/src/Functions/Functions.OpenApi/Model/RestApiOperationRunOptions.cs @@ -18,4 +18,19 @@ internal sealed class RestApiOperationRunOptions /// The URL of REST API host. /// public Uri? ApiHostUrl { get; set; } + + /// + /// The Kernel instance used for the operation run. + /// + public Kernel? Kernel { get; set; } + + /// + /// The Kernel function whose invocation triggered the operation run. + /// + public KernelFunction? KernelFunction { get; set; } + + /// + /// The Kernel arguments whose associated with the operation run. + /// + public KernelArguments? KernelArguments { get; set; } } diff --git a/dotnet/src/Functions/Functions.OpenApi/OpenApiKernelFunctionContext.cs b/dotnet/src/Functions/Functions.OpenApi/OpenApiKernelFunctionContext.cs new file mode 100644 index 000000000000..b40b65bdd57c --- /dev/null +++ b/dotnet/src/Functions/Functions.OpenApi/OpenApiKernelFunctionContext.cs @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Net.Http; + +namespace Microsoft.SemanticKernel.Plugins.OpenApi; + +/// +/// Class with data related to an Open API invocation. +/// +public sealed class OpenApiKernelFunctionContext +{ + /// + /// Key to access the in the . + /// +#if NET5_0_OR_GREATER + public static readonly HttpRequestOptionsKey KernelFunctionContextKey = new("KernelFunctionContext"); +#else + public static readonly string KernelFunctionContextKey = "KernelFunctionContext"; +#endif + + /// + /// Initializes a new instance of the class. + /// + /// The associated with this context. + /// The associated with this context. + /// The associated with this context. + internal OpenApiKernelFunctionContext(Kernel? kernel, KernelFunction? function, KernelArguments? arguments) + { + this.Kernel = kernel; + this.Function = function; + this.Arguments = arguments; + } + + /// + /// Gets the . + /// + public Kernel? Kernel { get; } + + /// + /// Gets the . + /// + public KernelFunction? Function { get; } + + /// + /// Gets the . + /// + public KernelArguments? Arguments { get; } +} diff --git a/dotnet/src/Functions/Functions.OpenApi/RestApiOperationRunner.cs b/dotnet/src/Functions/Functions.OpenApi/RestApiOperationRunner.cs index 6f541b9dc55d..b7bc593c76b2 100644 --- a/dotnet/src/Functions/Functions.OpenApi/RestApiOperationRunner.cs +++ b/dotnet/src/Functions/Functions.OpenApi/RestApiOperationRunner.cs @@ -142,7 +142,7 @@ public Task RunAsync( var operationPayload = this.BuildOperationPayload(operation, arguments); - return this.SendAsync(url, operation.Method, headers, operationPayload.Payload, operationPayload.Content, operation.Responses.ToDictionary(item => item.Key, item => item.Value.Schema), cancellationToken); + return this.SendAsync(url, operation.Method, headers, operationPayload.Payload, operationPayload.Content, operation.Responses.ToDictionary(item => item.Key, item => item.Value.Schema), options, cancellationToken); } #region private @@ -156,6 +156,7 @@ public Task RunAsync( /// HTTP request payload. /// HTTP request content. 
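
Because `RestApiOperationRunner` (below) now stamps every outgoing request with this context, a custom `DelegatingHandler` in the `HttpClient` pipeline can observe which kernel function triggered a given REST call. A sketch of the `net5.0`-or-greater path; on `netstandard2.0` the same data sits in `HttpRequestMessage.Properties` under the string key, per the `#if` above:

```csharp
using System;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.SemanticKernel.Plugins.OpenApi;

// Sketch of a handler that inspects the per-request function context.
internal sealed class FunctionContextHandler : DelegatingHandler
{
    protected override Task<HttpResponseMessage> SendAsync(HttpRequestMessage request, CancellationToken cancellationToken)
    {
        if (request.Options.TryGetValue(OpenApiKernelFunctionContext.KernelFunctionContextKey, out var context))
        {
            // Kernel, Function, and Arguments are all nullable on the context.
            Console.WriteLine($"OpenAPI call triggered by function '{context.Function?.Name}'.");
        }

        return base.SendAsync(request, cancellationToken);
    }
}
```
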
/// The dictionary of expected response schemas. + /// Options for REST API operation run. /// The cancellation token. /// Response content and content type private async Task SendAsync( @@ -165,10 +166,17 @@ private async Task SendAsync( object? payload = null, HttpContent? requestContent = null, IDictionary? expectedSchemas = null, + RestApiOperationRunOptions? options = null, CancellationToken cancellationToken = default) { using var requestMessage = new HttpRequestMessage(method, url); +#if NET5_0_OR_GREATER + requestMessage.Options.Set(OpenApiKernelFunctionContext.KernelFunctionContextKey, new OpenApiKernelFunctionContext(options?.Kernel, options?.KernelFunction, options?.KernelArguments)); +#else + requestMessage.Properties.Add(OpenApiKernelFunctionContext.KernelFunctionContextKey, new OpenApiKernelFunctionContext(options?.Kernel, options?.KernelFunction, options?.KernelArguments)); +#endif + await this._authCallback(requestMessage, cancellationToken).ConfigureAwait(false); if (requestContent is not null) @@ -193,7 +201,7 @@ private async Task SendAsync( { using var responseMessage = await this._httpClient.SendWithSuccessCheckAsync(requestMessage, cancellationToken).ConfigureAwait(false); - var response = await SerializeResponseContentAsync(requestMessage, payload, responseMessage.Content).ConfigureAwait(false); + var response = await SerializeResponseContentAsync(requestMessage, payload, responseMessage).ConfigureAwait(false); response.ExpectedSchema ??= GetExpectedSchema(expectedSchemas, responseMessage.StatusCode); @@ -228,11 +236,21 @@ private async Task SendAsync( /// /// The HttpRequestMessage associated with the HTTP request. /// The payload sent in the HTTP request. - /// The HttpContent object containing the response content to be serialized. + /// The HttpResponseMessage object containing the response content to be serialized. /// The serialized content. - private static async Task SerializeResponseContentAsync(HttpRequestMessage request, object? payload, HttpContent content) + private static async Task SerializeResponseContentAsync(HttpRequestMessage request, object? payload, HttpResponseMessage responseMessage) { - var contentType = content.Headers.ContentType; + if (responseMessage.StatusCode == HttpStatusCode.NoContent) + { + return new RestApiOperationResponse(null, null) + { + RequestMethod = request.Method.Method, + RequestUri = request.RequestUri, + RequestPayload = payload, + }; + } + + var contentType = responseMessage.Content.Headers.ContentType; var mediaType = contentType?.MediaType ?? 
throw new KernelException("No media type available."); @@ -256,7 +274,7 @@ private static async Task SerializeResponseContentAsyn } // Serialize response content and return it - var serializedContent = await serializer.Invoke(content).ConfigureAwait(false); + var serializedContent = await serializer.Invoke(responseMessage.Content).ConfigureAwait(false); return new RestApiOperationResponse(serializedContent, contentType!.ToString()) { diff --git a/dotnet/src/Functions/Functions.OpenApi/Serialization/FormStyleParameterSerializer.cs b/dotnet/src/Functions/Functions.OpenApi/Serialization/FormStyleParameterSerializer.cs index 0f985f3d8197..917f94750a29 100644 --- a/dotnet/src/Functions/Functions.OpenApi/Serialization/FormStyleParameterSerializer.cs +++ b/dotnet/src/Functions/Functions.OpenApi/Serialization/FormStyleParameterSerializer.cs @@ -24,7 +24,8 @@ public static string Serialize(RestApiOperationParameter parameter, JsonNode arg Verify.NotNull(parameter); Verify.NotNull(argument); - if (parameter.Style != RestApiOperationParameterStyle.Form) + var style = parameter.Style ?? RestApiOperationParameterStyle.Form; + if (style != RestApiOperationParameterStyle.Form) { throw new NotSupportedException($"Unsupported Rest API operation parameter style '{parameter.Style}' for parameter '{parameter.Name}'"); } @@ -35,7 +36,13 @@ public static string Serialize(RestApiOperationParameter parameter, JsonNode arg return SerializeArrayParameter(parameter, argument); } - // Handling parameters of primitive and removing extra quotes added by the JsonValue for string values. + // Handling parameters where the underlying value is already a string. + if (argument is JsonValue jsonValue && jsonValue.TryGetValue(out string? value)) + { + return $"{parameter.Name}={HttpUtility.UrlEncode(value)}"; + } + + // Handling parameters of any arbitrary type by using JSON format without enclosing quotes. return $"{parameter.Name}={HttpUtility.UrlEncode(argument.ToString().Trim('"'))}"; } diff --git a/dotnet/src/Functions/Functions.OpenApi/Serialization/SimpleStyleParameterSerializer.cs b/dotnet/src/Functions/Functions.OpenApi/Serialization/SimpleStyleParameterSerializer.cs index 98da72fbdb76..9104ed9635dc 100644 --- a/dotnet/src/Functions/Functions.OpenApi/Serialization/SimpleStyleParameterSerializer.cs +++ b/dotnet/src/Functions/Functions.OpenApi/Serialization/SimpleStyleParameterSerializer.cs @@ -23,7 +23,8 @@ public static string Serialize(RestApiOperationParameter parameter, JsonNode arg Verify.NotNull(parameter); Verify.NotNull(argument); - if (parameter.Style != RestApiOperationParameterStyle.Simple) + var style = parameter.Style ?? RestApiOperationParameterStyle.Simple; + if (style != RestApiOperationParameterStyle.Simple) { throw new NotSupportedException($"Unsupported Rest API operation parameter style '{parameter.Style}' for parameter '{parameter.Name}'"); } @@ -34,7 +35,13 @@ public static string Serialize(RestApiOperationParameter parameter, JsonNode arg return SerializeArrayParameter(parameter, argument); } - // Handling parameters of primitive and removing extra quotes added by the JsonValue for string values. + // Handling parameters where the underlying value is already a string. + if (argument is JsonValue jsonValue && jsonValue.TryGetValue(out string? value)) + { + return value; + } + + // Handling parameters of any arbitrary type by using JSON format without enclosing quotes. 
return argument.ToString().Trim('"'); } diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationRunnerTests.cs b/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationRunnerTests.cs index c48f551c36f4..b836ec18ed80 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationRunnerTests.cs +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationRunnerTests.cs @@ -1103,6 +1103,109 @@ public async Task ItShouldReturnRequestUriAndContentAsync() Assert.Equal("{\"name\":\"fake-name-value\",\"attributes\":{\"enabled\":true}}", ((JsonObject)result.RequestPayload).ToJsonString()); } + [Fact] + public async Task ItShouldHandleNoContentAsync() + { + // Arrange + this._httpMessageHandlerStub!.ResponseToReturn = new HttpResponseMessage(System.Net.HttpStatusCode.NoContent); + + List payloadProperties = + [ + new("name", "string", true, []), + new("attributes", "object", false, + [ + new("enabled", "boolean", false, []), + ]) + ]; + + var payload = new RestApiOperationPayload(MediaTypeNames.Application.Json, payloadProperties); + + var operation = new RestApiOperation( + "fake-id", + new Uri("https://fake-random-test-host"), + "fake-path", + HttpMethod.Post, + "fake-description", + [], + payload + ); + + var arguments = new KernelArguments + { + { "name", "fake-name-value" }, + { "enabled", true } + }; + + var sut = new RestApiOperationRunner(this._httpClient, this._authenticationHandlerMock.Object, enableDynamicPayload: true); + + // Act + var result = await sut.RunAsync(operation, arguments); + + // Assert + Assert.NotNull(result.RequestMethod); + Assert.Equal(HttpMethod.Post.Method, result.RequestMethod); + Assert.NotNull(result.RequestUri); + Assert.Equal("https://fake-random-test-host/fake-path", result.RequestUri.AbsoluteUri); + Assert.NotNull(result.RequestPayload); + Assert.IsType(result.RequestPayload); + Assert.Equal("{\"name\":\"fake-name-value\",\"attributes\":{\"enabled\":true}}", ((JsonObject)result.RequestPayload).ToJsonString()); + } + + [Fact] + public async Task ItShouldSetHttpRequestMessageOptionsAsync() + { + // Arrange + this._httpMessageHandlerStub.ResponseToReturn.Content = new StringContent("fake-content", Encoding.UTF8, MediaTypeNames.Application.Json); + + List payloadProperties = + [ + new("name", "string", true, []), + new("attributes", "object", false, + [ + new("enabled", "boolean", false, []), + ]) + ]; + + var payload = new RestApiOperationPayload(MediaTypeNames.Application.Json, payloadProperties); + + var operation = new RestApiOperation( + "fake-id", + new Uri("https://fake-random-test-host"), + "fake-path", + HttpMethod.Post, + "fake-description", + [], + payload + ); + + var arguments = new KernelArguments + { + { "name", "fake-name-value" }, + { "enabled", true } + }; + + var options = new RestApiOperationRunOptions() + { + Kernel = new(), + KernelFunction = KernelFunctionFactory.CreateFromMethod(() => false), + KernelArguments = arguments, + }; + + var sut = new RestApiOperationRunner(this._httpClient, this._authenticationHandlerMock.Object, enableDynamicPayload: true); + + // Act + var result = await sut.RunAsync(operation, arguments, options); + + // Assert + var requestMessage = this._httpMessageHandlerStub.RequestMessage; + Assert.NotNull(requestMessage); + Assert.True(requestMessage.Options.TryGetValue(OpenApiKernelFunctionContext.KernelFunctionContextKey, out var kernelFunctionContext)); + Assert.NotNull(kernelFunctionContext); + Assert.Equal(options.Kernel, kernelFunctionContext.Kernel); + 
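
The substantive change in both serializers is the string fast path: a string-backed `JsonValue` is read out via `TryGetValue` and used verbatim, so values that legitimately begin or end with a quote character (see the `has_quotes` test further down) are no longer mangled by the blanket `Trim('"')`, which now applies only to the JSON-text fallback for non-string types. An illustrative snippet (not the connector's code):

```csharp
using System;
using System.Text.Json.Nodes;

JsonNode quoted = JsonValue.Create("\"Quoted Value\"")!;
if (quoted is JsonValue value && value.TryGetValue(out string? raw))
{
    Console.WriteLine(raw); // "Quoted Value" — literal quotes preserved
}

JsonNode number = JsonValue.Create(42);
Console.WriteLine(number.ToString().Trim('"')); // 42 — non-strings fall back to JSON text
```
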
Assert.Equal(options.KernelFunction, kernelFunctionContext.Function); + Assert.Equal(options.KernelArguments, kernelFunctionContext.Arguments); + } + public class SchemaTestData : IEnumerable { public IEnumerator GetEnumerator() @@ -1185,15 +1288,17 @@ public void Dispose() private sealed class HttpMessageHandlerStub : DelegatingHandler { - public HttpRequestHeaders? RequestHeaders { get; private set; } + public HttpRequestHeaders? RequestHeaders => this.RequestMessage?.Headers; - public HttpContentHeaders? ContentHeaders { get; private set; } + public HttpContentHeaders? ContentHeaders => this.RequestMessage?.Content?.Headers; public byte[]? RequestContent { get; private set; } - public Uri? RequestUri { get; private set; } + public Uri? RequestUri => this.RequestMessage?.RequestUri; + + public HttpMethod? Method => this.RequestMessage?.Method; - public HttpMethod? Method { get; private set; } + public HttpRequestMessage? RequestMessage { get; private set; } public HttpResponseMessage ResponseToReturn { get; set; } @@ -1207,11 +1312,8 @@ public HttpMessageHandlerStub() protected override async Task SendAsync(HttpRequestMessage request, CancellationToken cancellationToken) { - this.Method = request.Method; - this.RequestUri = request.RequestUri; - this.RequestHeaders = request.Headers; + this.RequestMessage = request; this.RequestContent = request.Content is null ? null : await request.Content.ReadAsByteArrayAsync(cancellationToken); - this.ContentHeaders = request.Content?.Headers; return await Task.FromResult(this.ResponseToReturn); } diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationTests.cs b/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationTests.cs index b4d7b17469e2..c9f082b329a3 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationTests.cs +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/RestApiOperationTests.cs @@ -64,7 +64,7 @@ public void ItShouldUseHostUrlOverrideIfProvided() } [Fact] - public void ItShouldReplacePathParametersByValuesFromArguments() + public void ItShouldBuildOperationUrlWithPathParametersFromArguments() { // Arrange var parameters = new List { @@ -106,6 +106,49 @@ public void ItShouldReplacePathParametersByValuesFromArguments() Assert.Equal("https://fake-random-test-host/v1/34/other_fake_path_section", url.OriginalString); } + [Fact] + public void ItShouldBuildOperationUrlWithEncodedArguments() + { + // Arrange + var parameters = new List { + new( + name: "p1", + type: "string", + isRequired: true, + expand: false, + location: RestApiOperationParameterLocation.Path, + style: RestApiOperationParameterStyle.Simple), + new( + name: "p2", + type: "string", + isRequired: true, + expand: false, + location: RestApiOperationParameterLocation.Path, + style: RestApiOperationParameterStyle.Simple) + }; + + var sut = new RestApiOperation( + "fake_id", + new Uri("https://fake-random-test-host"), + "/{p1}/{p2}/other_fake_path_section", + HttpMethod.Get, + "fake_description", + parameters + ); + + var arguments = new Dictionary + { + { "p1", "foo:bar" }, + { "p2", "foo/bar" } + }; + + // Act + var url = sut.BuildOperationUrl(arguments); + + // Assert + Assert.Equal("https://fake-random-test-host/foo%3abar/foo%2fbar/other_fake_path_section", url.OriginalString); + } + [Fact] public void ShouldBuildResourceUrlWithoutQueryString() { @@ -148,6 +191,112 @@ public void ShouldBuildResourceUrlWithoutQueryString() Assert.Equal($"{fakeHostUrlOverride}/fake-path-value/", url.OriginalString); } + [Fact] + public 
void ItShouldBuildQueryString() + { + // Arrange + var parameters = new List { + new( + name: "since_create_time", + type: "string", + isRequired: false, + expand: false, + location: RestApiOperationParameterLocation.Query), + new( + name: "before_create_time", + type: "string", + isRequired: false, + expand: false, + location: RestApiOperationParameterLocation.Query), + }; + + var sut = new RestApiOperation( + "fake_id", + new Uri("https://fake-random-test-host"), + "fake-path/", + HttpMethod.Get, + "fake_description", + parameters); + + var arguments = new Dictionary + { + { "since_create_time", "2024-01-01T00:00:00+00:00" }, + { "before_create_time", "2024-05-01T00:00:00+00:00" }, + }; + + // Act + var queryString = sut.BuildQueryString(arguments); + + // Assert + Assert.Equal("since_create_time=2024-01-01T00%3A00%3A00%2B00%3A00&before_create_time=2024-05-01T00%3A00%3A00%2B00%3A00", queryString, ignoreCase: true); + } + + [Fact] + public void ItShouldBuildQueryStringWithQuotes() + { + // Arrange + var parameters = new List { + new( + name: "has_quotes", + type: "string", + isRequired: false, + expand: false, + location: RestApiOperationParameterLocation.Query) + }; + + var sut = new RestApiOperation( + "fake_id", + new Uri("https://fake-random-test-host"), + "fake-path/", + HttpMethod.Get, + "fake_description", + parameters); + + var arguments = new Dictionary + { + { "has_quotes", "\"Quoted Value\"" }, + }; + + // Act + var queryString = sut.BuildQueryString(arguments); + + // Assert + Assert.Equal("has_quotes=%22Quoted+Value%22", queryString, ignoreCase: true); + } + + [Fact] + public void ItShouldBuildQueryStringForArray() + { + // Arrange + var parameters = new List { + new( + name: "times", + type: "array", + isRequired: false, + expand: false, + location: RestApiOperationParameterLocation.Query), + }; + + var sut = new RestApiOperation( + "fake_id", + new Uri("https://fake-random-test-host"), + "fake-path/", + HttpMethod.Get, + "fake_description", + parameters); + + var arguments = new Dictionary + { + { "times", new string[] { "2024-01-01T00:00:00+00:00", "2024-05-01T00:00:00+00:00" } }, + }; + + // Act + var queryString = sut.BuildQueryString(arguments); + + // Assert + Assert.Equal("times=2024-01-01T00%3A00%3A00%2B00%3A00,2024-05-01T00%3A00%3A00%2B00%3A00", queryString, ignoreCase: true); + } + [Fact] public void ItShouldRenderHeaderValuesFromArguments() { diff --git a/dotnet/src/Functions/Functions.UnitTests/OpenApi/Serialization/FormStyleParametersSerializerTests.cs b/dotnet/src/Functions/Functions.UnitTests/OpenApi/Serialization/FormStyleParametersSerializerTests.cs index 852a88c79b78..3d27259b0936 100644 --- a/dotnet/src/Functions/Functions.UnitTests/OpenApi/Serialization/FormStyleParametersSerializerTests.cs +++ b/dotnet/src/Functions/Functions.UnitTests/OpenApi/Serialization/FormStyleParametersSerializerTests.cs @@ -75,7 +75,7 @@ public void ItShouldCreateParameterForPrimitiveValue() } [Fact] - public void ItShouldCreateParameterForStringValue() + public void ItShouldCreateParameterForDateTimeValue() { // Arrange var parameter = new RestApiOperationParameter( @@ -95,6 +95,28 @@ public void ItShouldCreateParameterForStringValue() Assert.Equal("id=2023-12-06T11%3a53%3a36Z", result); } + [Theory] + [InlineData("2024-01-01T00:00:00+00:00", "2024-01-01T00%3a00%3a00%2b00%3a00")] + public void ItShouldCreateParameterForStringValue(string value, string encodedValue) + { + // Arrange + var parameter = new RestApiOperationParameter( + name: "id", + type: "string", + isRequired: 
true, + expand: false, + location: RestApiOperationParameterLocation.Query, + style: RestApiOperationParameterStyle.Form); + + // Act + var result = FormStyleParameterSerializer.Serialize(parameter, JsonValue.Create(value)); + + // Assert + Assert.NotNull(result); + + Assert.Equal($"id={encodedValue}", result); + } + [Theory] [InlineData(":", "%3a")] [InlineData("/", "%2f")] diff --git a/dotnet/src/IntegrationTests/Connectors/HuggingFace/ChatCompletion/HuggingFaceChatCompletionTests.cs b/dotnet/src/IntegrationTests/Connectors/HuggingFace/ChatCompletion/HuggingFaceChatCompletionTests.cs new file mode 100644 index 000000000000..cca6f6703fcb --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/HuggingFace/ChatCompletion/HuggingFaceChatCompletionTests.cs @@ -0,0 +1,137 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Text; +using System.Threading.Tasks; +using Microsoft.Extensions.Configuration; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.ChatCompletion; +using Microsoft.SemanticKernel.Connectors.HuggingFace; +using Xunit; + +namespace SemanticKernel.IntegrationTests.Connectors.HuggingFace.ChatCompletion; + +/// +/// Integration tests for . +/// +/// +/// Instructions for setting up a Text Generation Inference (TGI) endpoint, see: https://huggingface.co/blog/tgi-messages-api +/// +public sealed class HuggingFaceChatCompletionTests +{ + private const string Endpoint = "https://.endpoints.huggingface.cloud/v1/"; + private const string Model = "tgi"; + + private readonly IConfigurationRoot _configuration; + + public HuggingFaceChatCompletionTests() + { + // Load configuration + this._configuration = new ConfigurationBuilder() + .AddJsonFile(path: "testsettings.json", optional: false, reloadOnChange: true) + .AddJsonFile(path: "testsettings.development.json", optional: true, reloadOnChange: true) + .AddEnvironmentVariables() + .AddUserSecrets() + .Build(); + } + + [Fact(Skip = "This test is for manual verification.")] + public async Task GetChatMessageContentsAsync() + { + // Arrange + var chatHistory = new ChatHistory + { + new ChatMessageContent(AuthorRole.System, "Use C# 12 features."), + new ChatMessageContent(AuthorRole.User, "Write a C# Hello world?") + }; + var huggingFaceRemote = new HuggingFaceChatCompletionService(Model, endpoint: new Uri(Endpoint), apiKey: this.GetApiKey()); + + // Act + var response = await huggingFaceRemote.GetChatMessageContentsAsync(chatHistory, new HuggingFacePromptExecutionSettings() { MaxNewTokens = 50 }); + + // Assert + Assert.NotNull(response); + Assert.Single(response); + Assert.True(response[0].Content?.Length > 0); + } + + [Fact(Skip = "This test is for manual verification.")] + public async Task GetStreamingChatMessageContentsAsync() + { + // Arrange + var chatHistory = new ChatHistory + { + new ChatMessageContent(AuthorRole.System, "Use C# 12 features."), + new ChatMessageContent(AuthorRole.User, "Write a C# Hello world?") + }; + var huggingFaceRemote = new HuggingFaceChatCompletionService(Model, endpoint: new Uri(Endpoint), apiKey: this.GetApiKey()); + + // Act + var response = new StringBuilder(); + await foreach (var update in huggingFaceRemote.GetStreamingChatMessageContentsAsync(chatHistory, new HuggingFacePromptExecutionSettings() { MaxNewTokens = 50 })) + { + if (update.Content is { Length: > 0 }) + { + response.Append(update.Content); + } + } + + // Assert + Assert.NotNull(response); + Assert.True(response.Length > 0); + } + + [Fact(Skip = "This test is for manual verification.")] + 
public async Task InvokeKernelFunctionAsync() + { + // Arrange + Kernel kernel = Kernel.CreateBuilder() + .AddHuggingFaceChatCompletion(Model, endpoint: new Uri(Endpoint), apiKey: this.GetApiKey()) + .Build(); + + var kernelFunction = kernel.CreateFunctionFromPrompt("Write a C# Hello world", new HuggingFacePromptExecutionSettings + { + MaxNewTokens = 50, + }); + + // Act + var response = await kernel.InvokeAsync(kernelFunction); + + // Assert + Assert.NotNull(response); + Assert.True(response.ToString().Length > 0); + } + + [Fact(Skip = "This test is for manual verification.")] + public async Task InvokeKernelFunctionStreamingAsync() + { + // Arrange + Kernel kernel = Kernel.CreateBuilder() + .AddHuggingFaceChatCompletion(Model, endpoint: new Uri(Endpoint), apiKey: this.GetApiKey()) + .Build(); + + var kernelFunction = kernel.CreateFunctionFromPrompt("Write a C# Hello world", new HuggingFacePromptExecutionSettings + { + MaxNewTokens = 50, + }); + + // Act + var response = new StringBuilder(); + await foreach (var update in kernel.InvokeStreamingAsync(kernelFunction)) + { + if (update.ToString() is { Length: > 0 }) + { + response.Append(update.ToString()); + } + } + + // Assert + Assert.NotNull(response); + Assert.True(response.ToString().Length > 0); + } + + private string GetApiKey() + { + return this._configuration.GetSection("HuggingFace:ApiKey").Get()!; + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStoreTests.cs b/dotnet/src/IntegrationTests/Connectors/Memory/AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStoreTests.cs index 0e8aee320856..e75116e34893 100644 --- a/dotnet/src/IntegrationTests/Connectors/Memory/AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStoreTests.cs +++ b/dotnet/src/IntegrationTests/Connectors/Memory/AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStoreTests.cs @@ -1,9 +1,14 @@ // Copyright (c) Microsoft. All rights reserved. using System; +using System.Collections.Generic; +using System.Collections.ObjectModel; using System.Linq; +using System.Threading; using System.Threading.Tasks; +using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.Connectors.AzureCosmosDBNoSQL; +using Microsoft.SemanticKernel.Embeddings; using Microsoft.SemanticKernel.Memory; using MongoDB.Driver; using Xunit; @@ -117,6 +122,54 @@ public async Task ItCanGetNearestMatchesAsync(int limit, bool withEmbeddings) await memoryStore.DeleteCollectionAsync(collectionName); } + [Theory(Skip = SkipReason)] + [InlineData(true)] + [InlineData(false)] + public async Task ItCanSaveReferenceGetTextAndSearchTextAsync(bool withEmbedding) + { + var collectionName = this._fixture.CollectionName; + var memoryStore = this._fixture.MemoryStore; + var textMemory = new SemanticTextMemory(memoryStore, new MockTextEmbeddingGenerationService()); + var textToStore = "SampleText"; + var id = "MyExternalId"; + var source = "MyExternalSource"; + var refId = await textMemory.SaveReferenceAsync(collectionName, textToStore, id, source); + Assert.NotNull(refId); + + var expectedQueryResult = new MemoryQueryResult( + new MemoryRecordMetadata(isReference: true, id, text: "", description: "", source, additionalMetadata: ""), + 1.0, + withEmbedding ? 
DataHelper.VectorSearchTestEmbedding : null); + + var queryResult = await textMemory.GetAsync(collectionName, refId, withEmbedding); + AssertQueryResultEqual(expectedQueryResult, queryResult, withEmbedding); + + var searchResults = await textMemory.SearchAsync(collectionName, textToStore, withEmbeddings: withEmbedding).ToListAsync(); + Assert.Equal(1, searchResults?.Count); + AssertQueryResultEqual(expectedQueryResult, searchResults?[0], compareEmbeddings: true); + + await textMemory.RemoveAsync(collectionName, refId); + } + + private static void AssertQueryResultEqual(MemoryQueryResult expected, MemoryQueryResult? actual, bool compareEmbeddings) + { + Assert.NotNull(actual); + Assert.Equal(expected.Relevance, actual.Relevance); + Assert.Equal(expected.Metadata.Id, actual.Metadata.Id); + Assert.Equal(expected.Metadata.Text, actual.Metadata.Text); + Assert.Equal(expected.Metadata.Description, actual.Metadata.Description); + Assert.Equal(expected.Metadata.ExternalSourceName, actual.Metadata.ExternalSourceName); + Assert.Equal(expected.Metadata.AdditionalMetadata, actual.Metadata.AdditionalMetadata); + Assert.Equal(expected.Metadata.IsReference, actual.Metadata.IsReference); + + if (compareEmbeddings) + { + Assert.NotNull(expected.Embedding); + Assert.NotNull(actual.Embedding); + Assert.Equal(expected.Embedding.Value.Span, actual.Embedding.Value.Span); + } + } + private static void AssertMemoryRecordEqual( MemoryRecord expectedRecord, MemoryRecord actualRecord, @@ -147,4 +200,15 @@ private static void AssertMemoryRecordEqual( Assert.True(actualRecord.Embedding.Span.IsEmpty); } } + + private sealed class MockTextEmbeddingGenerationService : ITextEmbeddingGenerationService + { + public IReadOnlyDictionary Attributes { get; } = ReadOnlyDictionary.Empty; + + public Task>> GenerateEmbeddingsAsync(IList data, Kernel? kernel = null, CancellationToken cancellationToken = default) + { + IList> result = new List> { DataHelper.VectorSearchTestEmbedding }; + return Task.FromResult(result); + } + } } diff --git a/dotnet/src/IntegrationTests/Connectors/Memory/AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStoreTestsFixture.cs b/dotnet/src/IntegrationTests/Connectors/Memory/AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStoreTestsFixture.cs index 93cbea170f40..1df46166e63f 100644 --- a/dotnet/src/IntegrationTests/Connectors/Memory/AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStoreTestsFixture.cs +++ b/dotnet/src/IntegrationTests/Connectors/Memory/AzureCosmosDBNoSQL/AzureCosmosDBNoSQLMemoryStoreTestsFixture.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. 
using System; -using System.Collections.ObjectModel; using System.Threading.Tasks; using Microsoft.Azure.Cosmos; using Microsoft.Extensions.Configuration; @@ -35,28 +34,9 @@ public AzureCosmosDBNoSQLMemoryStoreTestsFixture() this.MemoryStore = new AzureCosmosDBNoSQLMemoryStore( connectionString, this.DatabaseName, - new VectorEmbeddingPolicy( - new Collection - { - new() - { - DataType = VectorDataType.Float32, - Dimensions = 3, - DistanceFunction = DistanceFunction.Cosine, - Path = "/embedding" - } - }), - new() - { - VectorIndexes = new Collection { - new() - { - Path = "/embedding", - Type = VectorIndexType.Flat, - }, - }, - } - ); + dimensions: 3, + VectorDataType.Float32, + VectorIndexType.Flat); } public Task InitializeAsync() diff --git a/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAICompletionTests.cs b/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAICompletionTests.cs index a2285a1c4dd5..03cd3429d4b0 100644 --- a/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAICompletionTests.cs +++ b/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAICompletionTests.cs @@ -397,6 +397,38 @@ public async Task AzureOpenAIInvokePromptTestAsync() // Assert Assert.Contains("Pike Place", actual.GetValue(), StringComparison.OrdinalIgnoreCase); + Assert.NotNull(actual.Metadata); + } + + [Fact] + public async Task AzureOpenAIInvokePromptWithMultipleResultsTestAsync() + { + // Arrange + this._kernelBuilder.Services.AddSingleton(this._logger); + var builder = this._kernelBuilder; + this.ConfigureAzureOpenAIChatAsText(builder); + Kernel target = builder.Build(); + + var prompt = "Where is the most famous fish market in Seattle, Washington, USA?"; + + var executionSettings = new OpenAIPromptExecutionSettings() { MaxTokens = 150, ResultsPerPrompt = 3 }; + + // Act + FunctionResult actual = await target.InvokePromptAsync(prompt, new(executionSettings)); + + // Assert + Assert.Null(actual.Metadata); + + var chatMessageContents = actual.GetValue>(); + + Assert.NotNull(chatMessageContents); + Assert.Equal(executionSettings.ResultsPerPrompt, chatMessageContents.Count); + + foreach (var chatMessageContent in chatMessageContents) + { + Assert.NotNull(chatMessageContent.Metadata); + Assert.Contains("Pike Place", chatMessageContent.Content, StringComparison.OrdinalIgnoreCase); + } } [Fact] @@ -434,7 +466,7 @@ public async Task MultipleServiceLoadPromptConfigTestAsync() { "name": "FishMarket2", "execution_settings": { - "azure-text-davinci-003": { + "azure-gpt-35-turbo-instruct": { "max_tokens": 256 } } diff --git a/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAIFileServiceTests.cs b/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAIFileServiceTests.cs new file mode 100644 index 000000000000..30b0c3d1115b --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAIFileServiceTests.cs @@ -0,0 +1,156 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Threading.Tasks; +using Microsoft.Extensions.Configuration; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.Connectors.OpenAI; +using SemanticKernel.IntegrationTests.TestSettings; +using Xunit; +using Xunit.Abstractions; + +namespace SemanticKernel.IntegrationTests.Connectors.OpenAI; + +#pragma warning disable xUnit1004 // Contains test methods used in manual verification. Disable warning for this file only. 
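The fixture simplification above is worth seeing in isolation: the hand-built `VectorEmbeddingPolicy` and indexing-policy collections collapse into three scalar arguments. A minimal sketch of the new call shape; the connection string and database name are placeholders:

```csharp
using Microsoft.Azure.Cosmos;
using Microsoft.SemanticKernel.Connectors.AzureCosmosDBNoSQL;

var memoryStore = new AzureCosmosDBNoSQLMemoryStore(
    "AccountEndpoint=https://contoso.documents.azure.com:443/;AccountKey=...",
    "MemoryDatabase",
    dimensions: 3,              // embedding size; the removed policy declared the same value
    VectorDataType.Float32,     // element type previously set per embedding path
    VectorIndexType.Flat);      // index type previously set per vector index path
```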
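The new `AzureOpenAIInvokePromptWithMultipleResultsTestAsync` above also pins down a contract worth calling out: when `ResultsPerPrompt` is greater than one, `FunctionResult.Metadata` is null and metadata travels on each `ChatMessageContent` instead. A condensed consuming sketch, assuming `kernel` is already configured with an Azure OpenAI chat completion service:

```csharp
using System;
using System.Collections.Generic;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Connectors.OpenAI;

var settings = new OpenAIPromptExecutionSettings { MaxTokens = 150, ResultsPerPrompt = 3 };

FunctionResult result = await kernel.InvokePromptAsync(
    "Where is the most famous fish market in Seattle, Washington, USA?",
    new(settings));

// With multiple choices there is no single top-level metadata; read it per message.
foreach (ChatMessageContent message in result.GetValue<IReadOnlyList<ChatMessageContent>>()!)
{
    Console.WriteLine($"{message.Content} (metadata entries: {message.Metadata?.Count ?? 0})");
}
```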
+ +public sealed class OpenAIFileServiceTests(ITestOutputHelper output) : IDisposable +{ + private readonly IConfigurationRoot _configuration = new ConfigurationBuilder() + .AddJsonFile(path: "testsettings.json", optional: false, reloadOnChange: true) + .AddJsonFile(path: "testsettings.development.json", optional: true, reloadOnChange: true) + .AddEnvironmentVariables() + .AddUserSecrets() + .Build(); + + [Theory(Skip = "OpenAI will often throttle requests. This test is for manual verification.")] + [InlineData("test_image_001.jpg", "image/jpeg")] + [InlineData("test_content.txt", "text/plain")] + public async Task OpenAIFileServiceLifecycleAsync(string fileName, string mimeType) + { + // Arrange + OpenAIFileService fileService = this.CreateOpenAIFileService(); + + // Act & Assert + await this.VerifyFileServiceLifecycleAsync(fileService, fileName, mimeType); + } + + [Theory] + [InlineData("test_image_001.jpg", "image/jpeg")] + [InlineData("test_content.txt", "text/plain")] + public async Task AzureOpenAIFileServiceLifecycleAsync(string fileName, string mimeType) + { + // Arrange + OpenAIFileService fileService = this.CreateOpenAIFileService(); + + // Act & Assert + await this.VerifyFileServiceLifecycleAsync(fileService, fileName, mimeType); + } + + private async Task VerifyFileServiceLifecycleAsync(OpenAIFileService fileService, string fileName, string mimeType) + { + // Setup file content + await using FileStream fileStream = File.OpenRead($"./TestData/{fileName}"); + BinaryData sourceData = await BinaryData.FromStreamAsync(fileStream); + BinaryContent sourceContent = new(sourceData.ToArray(), mimeType); + + // Upload file with unsupported purpose (failure case) + await Assert.ThrowsAsync(() => fileService.UploadContentAsync(sourceContent, new(fileName, OpenAIFilePurpose.AssistantsOutput))); + + // Upload file with wacky purpose (failure case) + await Assert.ThrowsAsync(() => fileService.UploadContentAsync(sourceContent, new(fileName, new OpenAIFilePurpose("pretend")))); + + // Upload file + OpenAIFileReference fileReference = await fileService.UploadContentAsync(sourceContent, new(fileName, OpenAIFilePurpose.FineTune)); + try + { + AssertFileReferenceEquals(fileReference, fileName, sourceData.Length, OpenAIFilePurpose.FineTune); + + // Retrieve files by different purpose + Dictionary fileMap = await GetFilesAsync(fileService, OpenAIFilePurpose.Assistants); + Assert.DoesNotContain(fileReference.Id, fileMap.Keys); + + // Retrieve files by wacky purpose (failure case) + await Assert.ThrowsAsync(() => GetFilesAsync(fileService, new OpenAIFilePurpose("pretend"))); + + // Retrieve files by expected purpose + fileMap = await GetFilesAsync(fileService, OpenAIFilePurpose.FineTune); + Assert.Contains(fileReference.Id, fileMap.Keys); + AssertFileReferenceEquals(fileMap[fileReference.Id], fileName, sourceData.Length, OpenAIFilePurpose.FineTune); + + // Retrieve files by no specific purpose + fileMap = await GetFilesAsync(fileService); + Assert.Contains(fileReference.Id, fileMap.Keys); + AssertFileReferenceEquals(fileMap[fileReference.Id], fileName, sourceData.Length, OpenAIFilePurpose.FineTune); + + // Retrieve file by id + OpenAIFileReference file = await fileService.GetFileAsync(fileReference.Id); + AssertFileReferenceEquals(file, fileName, sourceData.Length, OpenAIFilePurpose.FineTune); + + // Retrieve file content + BinaryContent retrievedContent = await fileService.GetFileContentAsync(fileReference.Id); + Assert.NotNull(retrievedContent.Data); + Assert.NotNull(retrievedContent.Uri); + 
Assert.NotNull(retrievedContent.Metadata); + Assert.Equal(fileReference.Id, retrievedContent.Metadata["id"]); + Assert.Equal(sourceContent.Data!.Value.Length, retrievedContent.Data.Value.Length); + } + finally + { + // Delete file + await fileService.DeleteFileAsync(fileReference.Id); + } + } + + private static void AssertFileReferenceEquals(OpenAIFileReference fileReference, string expectedFileName, int expectedSize, OpenAIFilePurpose expectedPurpose) + { + Assert.Equal(expectedFileName, fileReference.FileName); + Assert.Equal(expectedPurpose, fileReference.Purpose); + Assert.Equal(expectedSize, fileReference.SizeInBytes); + } + + private static async Task> GetFilesAsync(OpenAIFileService fileService, OpenAIFilePurpose? purpose = null) + { + IEnumerable files = await fileService.GetFilesAsync(purpose); + Dictionary fileIds = files.DistinctBy(f => f.Id).ToDictionary(f => f.Id); + return fileIds; + } + + #region internals + + private readonly XunitLogger _logger = new(output); + private readonly RedirectOutput _testOutputHelper = new(output); + + public void Dispose() + { + this._logger.Dispose(); + this._testOutputHelper.Dispose(); + } + + private OpenAIFileService CreateOpenAIFileService() + { + var openAIConfiguration = this._configuration.GetSection("OpenAI").Get(); + + Assert.NotNull(openAIConfiguration); + Assert.NotNull(openAIConfiguration.ApiKey); + Assert.NotNull(openAIConfiguration.ServiceId); + + return new(openAIConfiguration.ApiKey, openAIConfiguration.ServiceId, loggerFactory: this._logger); + } + + private OpenAIFileService CreateAzureOpenAIFileService() + { + var azureOpenAIConfiguration = this._configuration.GetSection("AzureOpenAI").Get(); + + Assert.NotNull(azureOpenAIConfiguration); + Assert.NotNull(azureOpenAIConfiguration.Endpoint); + Assert.NotNull(azureOpenAIConfiguration.ApiKey); + Assert.NotNull(azureOpenAIConfiguration.ServiceId); + + return new(new Uri(azureOpenAIConfiguration.Endpoint), azureOpenAIConfiguration.ApiKey, azureOpenAIConfiguration.ServiceId, loggerFactory: this._logger); + } + + #endregion +} diff --git a/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAITextToImageTests.cs b/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAITextToImageTests.cs new file mode 100644 index 000000000000..e133f91ee547 --- /dev/null +++ b/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAITextToImageTests.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Threading.Tasks; +using Microsoft.Extensions.Configuration; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.TextToImage; +using SemanticKernel.IntegrationTests.TestSettings; +using Xunit; + +namespace SemanticKernel.IntegrationTests.Connectors.OpenAI; +public sealed class OpenAITextToImageTests +{ + private readonly IConfigurationRoot _configuration = new ConfigurationBuilder() + .AddJsonFile(path: "testsettings.json", optional: false, reloadOnChange: true) + .AddJsonFile(path: "testsettings.development.json", optional: true, reloadOnChange: true) + .AddEnvironmentVariables() + .AddUserSecrets() + .Build(); + + [Fact(Skip = "This test is for manual verification.")] + public async Task OpenAITextToImageTestAsync() + { + // Arrange + OpenAIConfiguration? 
openAIConfiguration = this._configuration.GetSection("OpenAITextToImage").Get(); + Assert.NotNull(openAIConfiguration); + + var kernel = Kernel.CreateBuilder() + .AddOpenAITextToImage(apiKey: openAIConfiguration.ApiKey) + .Build(); + + var service = kernel.GetRequiredService(); + + // Act + var result = await service.GenerateImageAsync("The sun rises in the east and sets in the west.", 512, 512); + + // Assert + Assert.NotNull(result); + Assert.NotEmpty(result); + } + + [Fact(Skip = "This test is for manual verification.")] + public async Task OpenAITextToImageByModelTestAsync() + { + // Arrange + OpenAIConfiguration? openAIConfiguration = this._configuration.GetSection("OpenAITextToImage").Get(); + Assert.NotNull(openAIConfiguration); + + var kernel = Kernel.CreateBuilder() + .AddOpenAITextToImage(apiKey: openAIConfiguration.ApiKey, modelId: openAIConfiguration.ModelId) + .Build(); + + var service = kernel.GetRequiredService(); + + // Act + var result = await service.GenerateImageAsync("The sun rises in the east and sets in the west.", 1024, 1024); + + // Assert + Assert.NotNull(result); + Assert.NotEmpty(result); + } + + [Fact(Skip = "This test is for manual verification.")] + public async Task AzureOpenAITextToImageTestAsync() + { + // Arrange + AzureOpenAIConfiguration? azureOpenAIConfiguration = this._configuration.GetSection("AzureOpenAITextToImage").Get(); + Assert.NotNull(azureOpenAIConfiguration); + + var kernel = Kernel.CreateBuilder() + .AddAzureOpenAITextToImage( + azureOpenAIConfiguration.DeploymentName, + azureOpenAIConfiguration.Endpoint, + azureOpenAIConfiguration.ApiKey) + .Build(); + + var service = kernel.GetRequiredService(); + + // Act + var result = await service.GenerateImageAsync("The sun rises in the east and sets in the west.", 1024, 1024); + + // Assert + Assert.NotNull(result); + Assert.NotEmpty(result); + } +} diff --git a/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAIToolsTests.cs b/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAIToolsTests.cs index ebfcccd31472..049287fbbc14 100644 --- a/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAIToolsTests.cs +++ b/dotnet/src/IntegrationTests/Connectors/OpenAI/OpenAIToolsTests.cs @@ -460,6 +460,253 @@ public async Task ConnectorAgnosticFunctionCallingModelClassesCanBeUsedForAutoFu Assert.NotNull(getWeatherForCityFunctionCallResult.Result); } + [Fact] + public async Task ConnectorAgnosticFunctionCallingModelClassesCanBeUsedForManualFunctionCallingForStreamingAsync() + { + // Arrange + var kernel = this.InitializeKernel(importHelperPlugin: true); + + var settings = new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions }; + + var sut = kernel.GetRequiredService(); + + var chatHistory = new ChatHistory(); + chatHistory.AddUserMessage("Given the current time of day and weather, what is the likely color of the sky in Boston?"); + + string? result = null; + + // Act + while (true) + { + AuthorRole? authorRole = null; + var fccBuilder = new FunctionCallContentBuilder(); + var textContent = new StringBuilder(); + + await foreach (var streamingContent in sut.GetStreamingChatMessageContentsAsync(chatHistory, settings, kernel)) + { + textContent.Append(streamingContent.Content); + authorRole ??= streamingContent.Role; + fccBuilder.Append(streamingContent); + } + + var functionCalls = fccBuilder.Build(); + if (functionCalls.Any()) + { + var fcContent = new ChatMessageContent(role: authorRole ?? 
default, content: null); + chatHistory.Add(fcContent); + + // Iterating over the requested function calls and invoking them + foreach (var functionCall in functionCalls) + { + fcContent.Items.Add(functionCall); + + var functionResult = await functionCall.InvokeAsync(kernel); + + chatHistory.Add(functionResult.ToChatMessage()); + } + + continue; + } + + result = textContent.ToString(); + break; + } + + // Assert + Assert.Contains("rain", result, StringComparison.InvariantCultureIgnoreCase); + } + + [Fact] + public async Task ConnectorAgnosticFunctionCallingModelClassesCanBeUsedForAutoFunctionCallingForStreamingAsync() + { + // Arrange + var kernel = this.InitializeKernel(importHelperPlugin: true); + + var chatHistory = new ChatHistory(); + chatHistory.AddUserMessage("Given the current time of day and weather, what is the likely color of the sky in Boston?"); + + var settings = new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.AutoInvokeKernelFunctions }; + + var sut = kernel.GetRequiredService(); + + var result = new StringBuilder(); + + // Act + await foreach (var contentUpdate in sut.GetStreamingChatMessageContentsAsync(chatHistory, settings, kernel)) + { + result.Append(contentUpdate.Content); + } + + // Assert + Assert.Equal(5, chatHistory.Count); + + var userMessage = chatHistory[0]; + Assert.Equal(AuthorRole.User, userMessage.Role); + + // LLM requested the current time. + var getCurrentTimeFunctionCallRequestMessage = chatHistory[1]; + Assert.Equal(AuthorRole.Assistant, getCurrentTimeFunctionCallRequestMessage.Role); + + var getCurrentTimeFunctionCallRequest = getCurrentTimeFunctionCallRequestMessage.Items.OfType().Single(); + Assert.Equal("GetCurrentUtcTime", getCurrentTimeFunctionCallRequest.FunctionName); + Assert.Equal("HelperFunctions", getCurrentTimeFunctionCallRequest.PluginName); + Assert.NotNull(getCurrentTimeFunctionCallRequest.Id); + + // Connector invoked the GetCurrentUtcTime function and added result to chat history. + var getCurrentTimeFunctionCallResultMessage = chatHistory[2]; + Assert.Equal(AuthorRole.Tool, getCurrentTimeFunctionCallResultMessage.Role); + Assert.Single(getCurrentTimeFunctionCallResultMessage.Items.OfType()); // Current function calling model adds TextContent item representing the result of the function call. + + var getCurrentTimeFunctionCallResult = getCurrentTimeFunctionCallResultMessage.Items.OfType().Single(); + Assert.Equal("GetCurrentUtcTime", getCurrentTimeFunctionCallResult.FunctionName); + Assert.Equal("HelperFunctions", getCurrentTimeFunctionCallResult.PluginName); + Assert.Equal(getCurrentTimeFunctionCallRequest.Id, getCurrentTimeFunctionCallResult.CallId); + Assert.NotNull(getCurrentTimeFunctionCallResult.Result); + + // LLM requested the weather for Boston. + var getWeatherForCityFunctionCallRequestMessage = chatHistory[3]; + Assert.Equal(AuthorRole.Assistant, getWeatherForCityFunctionCallRequestMessage.Role); + + var getWeatherForCityFunctionCallRequest = getWeatherForCityFunctionCallRequestMessage.Items.OfType().Single(); + Assert.Equal("Get_Weather_For_City", getWeatherForCityFunctionCallRequest.FunctionName); + Assert.Equal("HelperFunctions", getWeatherForCityFunctionCallRequest.PluginName); + Assert.NotNull(getWeatherForCityFunctionCallRequest.Id); + + // Connector invoked the Get_Weather_For_City function and added result to chat history. 
+ var getWeatherForCityFunctionCallResultMessage = chatHistory[4]; + Assert.Equal(AuthorRole.Tool, getWeatherForCityFunctionCallResultMessage.Role); + Assert.Single(getWeatherForCityFunctionCallResultMessage.Items.OfType()); // Current function calling model adds TextContent item representing the result of the function call. + + var getWeatherForCityFunctionCallResult = getWeatherForCityFunctionCallResultMessage.Items.OfType().Single(); + Assert.Equal("Get_Weather_For_City", getWeatherForCityFunctionCallResult.FunctionName); + Assert.Equal("HelperFunctions", getWeatherForCityFunctionCallResult.PluginName); + Assert.Equal(getWeatherForCityFunctionCallRequest.Id, getWeatherForCityFunctionCallResult.CallId); + Assert.NotNull(getWeatherForCityFunctionCallResult.Result); + } + + [Fact(Skip = "The test is temporarily disabled until a more stable solution is found.")] + public async Task ConnectorAgnosticFunctionCallingModelClassesCanPassFunctionExceptionToConnectorForStreamingAsync() + { + // Arrange + var kernel = this.InitializeKernel(importHelperPlugin: true); + + var settings = new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions }; + + var sut = kernel.GetRequiredService(); + + var chatHistory = new ChatHistory(); + chatHistory.AddSystemMessage("If you are unable to answer the question for whatever reason, please add the 'error' keyword to the response."); + chatHistory.AddUserMessage("Given the current time of day and weather, what is the likely color of the sky in Boston?"); + + string? result = null; + + // Act + while (true) + { + AuthorRole? authorRole = null; + var fccBuilder = new FunctionCallContentBuilder(); + var textContent = new StringBuilder(); + + await foreach (var streamingContent in sut.GetStreamingChatMessageContentsAsync(chatHistory, settings, kernel)) + { + textContent.Append(streamingContent.Content); + authorRole ??= streamingContent.Role; + fccBuilder.Append(streamingContent); + } + + var functionCalls = fccBuilder.Build(); + if (functionCalls.Any()) + { + var fcContent = new ChatMessageContent(role: authorRole ?? default, content: null); + chatHistory.Add(fcContent); + + // Iterating over the requested function calls and invoking them + foreach (var functionCall in functionCalls) + { + fcContent.Items.Add(functionCall); + + // Simulating an exception + var exception = new OperationCanceledException("The operation was canceled due to timeout."); + + chatHistory.Add(new FunctionResultContent(functionCall, exception).ToChatMessage()); + } + + continue; + } + + result = textContent.ToString(); + break; + } + + // Assert + Assert.Contains("error", result, StringComparison.InvariantCultureIgnoreCase); + } + + [Fact] + public async Task ConnectorAgnosticFunctionCallingModelClassesSupportSimulatedFunctionCallsForStreamingAsync() + { + // Arrange + var kernel = this.InitializeKernel(importHelperPlugin: true); + + var settings = new OpenAIPromptExecutionSettings() { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions }; + + var sut = kernel.GetRequiredService(); + + var chatHistory = new ChatHistory(); + chatHistory.AddSystemMessage("if there's a tornado warning, please add the 'tornado' keyword to the response."); + chatHistory.AddUserMessage("Given the current time of day and weather, what is the likely color of the sky in Boston?"); + + string? result = null; + + // Act + while (true) + { + AuthorRole? 
authorRole = null; + var fccBuilder = new FunctionCallContentBuilder(); + var textContent = new StringBuilder(); + + await foreach (var streamingContent in sut.GetStreamingChatMessageContentsAsync(chatHistory, settings, kernel)) + { + textContent.Append(streamingContent.Content); + authorRole ??= streamingContent.Role; + fccBuilder.Append(streamingContent); + } + + var functionCalls = fccBuilder.Build(); + if (functionCalls.Any()) + { + var fcContent = new ChatMessageContent(role: authorRole ?? default, content: null); + chatHistory.Add(fcContent); + + // Iterating over the requested function calls and invoking them + foreach (var functionCall in functionCalls) + { + fcContent.Items.Add(functionCall); + + var functionResult = await functionCall.InvokeAsync(kernel); + + chatHistory.Add(functionResult.ToChatMessage()); + } + + // Adding a simulated function call to the connector response message + var simulatedFunctionCall = new FunctionCallContent("weather-alert", id: "call_123"); + fcContent.Items.Add(simulatedFunctionCall); + + // Adding a simulated function result to chat history + var simulatedFunctionResult = "A Tornado Watch has been issued, with potential for severe thunderstorms causing unusual sky colors like green, yellow, or dark gray. Stay informed and follow safety instructions from authorities."; + chatHistory.Add(new FunctionResultContent(simulatedFunctionCall, simulatedFunctionResult).ToChatMessage()); + + continue; + } + + result = textContent.ToString(); + break; + } + + // Assert + Assert.Contains("tornado", result, StringComparison.InvariantCultureIgnoreCase); + } + private Kernel InitializeKernel(bool importHelperPlugin = false) { OpenAIConfiguration? openAIConfiguration = this._configuration.GetSection("Planners:OpenAI").Get(); diff --git a/dotnet/src/IntegrationTests/CrossLanguage/PromptWithComplexObjectsTest.cs b/dotnet/src/IntegrationTests/CrossLanguage/PromptWithComplexObjectsTest.cs index cae56a022f7b..87fb3e1c888d 100644 --- a/dotnet/src/IntegrationTests/CrossLanguage/PromptWithComplexObjectsTest.cs +++ b/dotnet/src/IntegrationTests/CrossLanguage/PromptWithComplexObjectsTest.cs @@ -1,9 +1,9 @@ // Copyright (c) Microsoft. All rights reserved. -using Microsoft.SemanticKernel; using System.IO; using System.Text.Json.Nodes; using System.Threading.Tasks; +using Microsoft.SemanticKernel; using Xunit; namespace SemanticKernel.IntegrationTests.CrossLanguage; diff --git a/dotnet/src/IntegrationTests/CrossLanguage/PromptWithHelperFunctionsTest.cs b/dotnet/src/IntegrationTests/CrossLanguage/PromptWithHelperFunctionsTest.cs index 9fad909d790a..12d7166e0bb5 100644 --- a/dotnet/src/IntegrationTests/CrossLanguage/PromptWithHelperFunctionsTest.cs +++ b/dotnet/src/IntegrationTests/CrossLanguage/PromptWithHelperFunctionsTest.cs @@ -1,10 +1,10 @@ // Copyright (c) Microsoft. All rights reserved. 
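The three streaming tool-calling tests above all repeat one loop: accumulate streamed text and function-call fragments with `FunctionCallContentBuilder`, invoke the requested functions, append the results to the history, and go around again until the model stops asking for tools. A distilled sketch of that loop, assuming `kernel` already has a chat completion service and the plugins to be called registered:

```csharp
using System;
using System.Linq;
using System.Text;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;
using Microsoft.SemanticKernel.Connectors.OpenAI;

var chat = kernel.GetRequiredService<IChatCompletionService>();
var settings = new OpenAIPromptExecutionSettings { ToolCallBehavior = ToolCallBehavior.EnableKernelFunctions };

var history = new ChatHistory();
history.AddUserMessage("Given the current time of day and weather, what is the likely color of the sky in Boston?");

while (true)
{
    AuthorRole? role = null;
    var text = new StringBuilder();
    var builder = new FunctionCallContentBuilder();

    await foreach (var update in chat.GetStreamingChatMessageContentsAsync(history, settings, kernel))
    {
        text.Append(update.Content);   // accumulate streamed text
        role ??= update.Role;          // the first chunk carries the role
        builder.Append(update);        // collect function-call fragments
    }

    var functionCalls = builder.Build();
    if (!functionCalls.Any())
    {
        Console.WriteLine(text);       // no more tool requests: this is the answer
        break;
    }

    // Record the assistant's request, then invoke each call and feed results back.
    var request = new ChatMessageContent(role ?? AuthorRole.Assistant, content: null);
    history.Add(request);

    foreach (var call in functionCalls)
    {
        request.Items.Add(call);
        var functionResult = await call.InvokeAsync(kernel);
        history.Add(functionResult.ToChatMessage());
    }
}
```

The simulated-function-call test above shows the same loop can also inject synthetic `FunctionCallContent`/`FunctionResultContent` pairs to steer the model without a real plugin invocation.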
-using Microsoft.SemanticKernel; using System; using System.IO; using System.Text.Json.Nodes; using System.Threading.Tasks; +using Microsoft.SemanticKernel; using Xunit; namespace SemanticKernel.IntegrationTests.CrossLanguage; diff --git a/dotnet/src/IntegrationTests/Plugins/RepairServiceTests.cs b/dotnet/src/IntegrationTests/Plugins/RepairServiceTests.cs index f5da4448ef02..9d8610806d8c 100644 --- a/dotnet/src/IntegrationTests/Plugins/RepairServiceTests.cs +++ b/dotnet/src/IntegrationTests/Plugins/RepairServiceTests.cs @@ -2,6 +2,7 @@ using System.Net.Http; using System.Text.Json; using System.Text.Json.Serialization; +using System.Threading; using System.Threading.Tasks; using Microsoft.SemanticKernel; using Microsoft.SemanticKernel.Plugins.OpenApi; @@ -106,6 +107,59 @@ public async Task HttpOperationExceptionIncludeRequestInfoAsync() } } + [Fact(Skip = "This test is for manual verification.")] + public async Task UseDelegatingHandlerAsync() + { + // Arrange + var kernel = new Kernel(); + using var stream = System.IO.File.OpenRead("Plugins/repair-service.json"); + + using var httpHandler = new HttpClientHandler(); + using var customHandler = new CustomHandler(httpHandler); + using HttpClient httpClient = new(customHandler); + + var plugin = await kernel.ImportPluginFromOpenApiAsync( + "RepairService", + stream, + new OpenAIFunctionExecutionParameters(httpClient) { IgnoreNonCompliantErrors = true, EnableDynamicPayload = false }); + + // List All Repairs + var result = await plugin["listRepairs"].InvokeAsync(kernel); + + Assert.NotNull(result); + var repairs = JsonSerializer.Deserialize(result.ToString()); + Assert.True(repairs?.Length > 0); + var count = repairs?.Length ?? 0; + + // Create Repair - oil change + var arguments = new KernelArguments + { + ["payload"] = """{ "title": "Engine oil change", "description": "Need to drain the old engine oil and replace it with fresh oil.", "assignedTo": "", "date": "", "image": "" }""" + }; + result = await plugin["createRepair"].InvokeAsync(kernel, arguments); + + Assert.NotNull(result); + Assert.Equal("New repair created", result.ToString()); + + // Create Repair - brake pads change + arguments = new KernelArguments + { + ["payload"] = """{ "title": "Brake pads change", "description": "Need to replace the brake pads on all wheels.", "assignedTo": "", "date": "", "image": "" }""" + }; + result = await plugin["createRepair"].InvokeAsync(kernel, arguments); + + Assert.NotNull(result); + Assert.Equal("New repair created", result.ToString()); + + // List All Repairs + result = await plugin["listRepairs"].InvokeAsync(kernel); + + Assert.NotNull(result); + repairs = JsonSerializer.Deserialize(result.ToString()); + Assert.True(repairs?.Length > 0); + Assert.Equal(count + 2, repairs?.Length); + } + public class Repair { [JsonPropertyName("id")] @@ -126,4 +180,22 @@ public class Repair [JsonPropertyName("image")] public string? 
Image { get; set; } } + + private sealed class CustomHandler(HttpMessageHandler innerHandler) : DelegatingHandler(innerHandler) + { + protected override async Task SendAsync(HttpRequestMessage request, CancellationToken cancellationToken) + { +#if NET5_0_OR_GREATER + request.Options.TryGetValue(OpenApiKernelFunctionContext.KernelFunctionContextKey, out var context); +#else + request.Properties.TryGetValue(OpenApiKernelFunctionContext.KernelFunctionContextKey, out var context); +#endif + + // Modify the HttpRequestMessage + request.Headers.Add("Kernel-Function-Name", context?.Function?.Name); + + // Call the next handler in the pipeline + return await base.SendAsync(request, cancellationToken); + } + } } diff --git a/dotnet/src/IntegrationTests/README.md b/dotnet/src/IntegrationTests/README.md index 4a16b6018543..bc2234acda64 100644 --- a/dotnet/src/IntegrationTests/README.md +++ b/dotnet/src/IntegrationTests/README.md @@ -3,7 +3,7 @@ ## Requirements 1. **Azure OpenAI**: go to the [Azure OpenAI Quickstart](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/quickstart) - and deploy an instance of Azure OpenAI, deploy a model like "text-davinci-003" find your Endpoint and API key. + and deploy an instance of Azure OpenAI, deploy a model like "gpt-35-turbo-instruct" find your Endpoint and API key. 2. **OpenAI**: go to [OpenAI](https://platform.openai.com) to register and procure your API key. 3. **HuggingFace API key**: see https://huggingface.co/docs/huggingface_hub/guides/inference for details. 4. **Azure Bing Web Search API**: go to [Bing Web Search API](https://www.microsoft.com/en-us/bing/apis/bing-web-search-api) @@ -29,13 +29,18 @@ To set your secrets with Secret Manager: cd dotnet/src/IntegrationTests dotnet user-secrets init -dotnet user-secrets set "OpenAI:ServiceId" "text-davinci-003" -dotnet user-secrets set "OpenAI:ModelId" "text-davinci-003" + +dotnet user-secrets set "OpenAI:ServiceId" "gpt-3.5-turbo-instruct" +dotnet user-secrets set "OpenAI:ModelId" "gpt-3.5-turbo-instruct" dotnet user-secrets set "OpenAI:ChatModelId" "gpt-4" dotnet user-secrets set "OpenAI:ApiKey" "..." -dotnet user-secrets set "AzureOpenAI:ServiceId" "azure-text-davinci-003" -dotnet user-secrets set "AzureOpenAI:DeploymentName" "text-davinci-003" +dotnet user-secrets set "OpenAITextToImage:ServiceId" "dall-e-3" +dotnet user-secrets set "OpenAITextToImage:ModelId" "dall-e-3" +dotnet user-secrets set "OpenAITextToImage:ApiKey" "..." + +dotnet user-secrets set "AzureOpenAI:ServiceId" "azure-gpt-35-turbo-instruct" +dotnet user-secrets set "AzureOpenAI:DeploymentName" "gpt-35-turbo-instruct" dotnet user-secrets set "AzureOpenAI:ChatDeploymentName" "gpt-4" dotnet user-secrets set "AzureOpenAI:Endpoint" "https://contoso.openai.azure.com/" dotnet user-secrets set "AzureOpenAI:ApiKey" "..." @@ -45,14 +50,21 @@ dotnet user-secrets set "AzureOpenAIEmbeddings:DeploymentName" "text-embedding-a dotnet user-secrets set "AzureOpenAIEmbeddings:Endpoint" "https://contoso.openai.azure.com/" dotnet user-secrets set "AzureOpenAIEmbeddings:ApiKey" "..." +dotnet user-secrets set "AzureOpenAIAudioToText:ServiceId" "azure-audio-to-text" dotnet user-secrets set "AzureOpenAIAudioToText:DeploymentName" "whisper-1" dotnet user-secrets set "AzureOpenAIAudioToText:Endpoint" "https://contoso.openai.azure.com/" dotnet user-secrets set "AzureOpenAIAudioToText:ApiKey" "..." 
+dotnet user-secrets set "AzureOpenAITextToAudio:ServiceId" "azure-text-to-audio" dotnet user-secrets set "AzureOpenAITextToAudio:DeploymentName" "tts-1" dotnet user-secrets set "AzureOpenAITextToAudio:Endpoint" "https://contoso.openai.azure.com/" dotnet user-secrets set "AzureOpenAITextToAudio:ApiKey" "..." +dotnet user-secrets set "AzureOpenAITextToImage:ServiceId" "azure-text-to-image" +dotnet user-secrets set "AzureOpenAITextToImage:DeploymentName" "dall-e-3" +dotnet user-secrets set "AzureOpenAITextToImage:Endpoint" "https://contoso.openai.azure.com/" +dotnet user-secrets set "AzureOpenAITextToImage:ApiKey" "..." + dotnet user-secrets set "MistralAI:ChatModel" "mistral-large-latest" dotnet user-secrets set "MistralAI:EmbeddingModel" "mistral-embed" dotnet user-secrets set "MistralAI:ApiKey" "..." @@ -82,14 +94,14 @@ For example: ```json { "OpenAI": { - "ServiceId": "text-davinci-003", - "ModelId": "text-davinci-003", + "ServiceId": "gpt-3.5-turbo-instruct", + "ModelId": "gpt-3.5-turbo-instruct", "ChatModelId": "gpt-4", "ApiKey": "sk-...." }, "AzureOpenAI": { - "ServiceId": "azure-text-davinci-003", - "DeploymentName": "text-davinci-003", + "ServiceId": "azure-gpt-35-turbo-instruct", + "DeploymentName": "gpt-35-turbo-instruct", "ChatDeploymentName": "gpt-4", "Endpoint": "https://contoso.openai.azure.com/", "ApiKey": "...." @@ -127,7 +139,7 @@ When setting environment variables, use a double underscore (i.e. "\_\_") to del ```bash export OpenAI__ApiKey="sk-...." export AzureOpenAI__ApiKey="...." - export AzureOpenAI__DeploymentName="azure-text-davinci-003" + export AzureOpenAI__DeploymentName="gpt-35-turbo-instruct" export AzureOpenAI__ChatDeploymentName="gpt-4" export AzureOpenAIEmbeddings__DeploymentName="azure-text-embedding-ada-002" export AzureOpenAI__Endpoint="https://contoso.openai.azure.com/" @@ -141,7 +153,7 @@ When setting environment variables, use a double underscore (i.e. "\_\_") to del ```ps $env:OpenAI__ApiKey = "sk-...." $env:AzureOpenAI__ApiKey = "...." - $env:AzureOpenAI__DeploymentName = "azure-text-davinci-003" + $env:AzureOpenAI__DeploymentName = "gpt-35-turbo-instruct" $env:AzureOpenAI__ChatDeploymentName = "gpt-4" $env:AzureOpenAIEmbeddings__DeploymentName = "azure-text-embedding-ada-002" $env:AzureOpenAI__Endpoint = "https://contoso.openai.azure.com/" diff --git a/dotnet/src/IntegrationTests/TestData/test_content.txt b/dotnet/src/IntegrationTests/TestData/test_content.txt new file mode 100644 index 000000000000..447ce0649e56 --- /dev/null +++ b/dotnet/src/IntegrationTests/TestData/test_content.txt @@ -0,0 +1,9 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Amet dictum sit amet justo donec enim diam vulputate ut. Nibh ipsum consequat nisl vel pretium lectus. Urna nec tincidunt praesent semper feugiat. Tristique nulla aliquet enim tortor. Ut morbi tincidunt augue interdum velit euismod in pellentesque massa. Ullamcorper morbi tincidunt ornare massa eget egestas purus viverra. Commodo ullamcorper a lacus vestibulum sed arcu non. Volutpat ac tincidunt vitae semper quis lectus nulla. Sem nulla pharetra diam sit amet nisl. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. + +Morbi blandit cursus risus at ultrices mi tempus. Sagittis orci a scelerisque purus. Iaculis nunc sed augue lacus viverra. Accumsan sit amet nulla facilisi morbi tempus iaculis. Nisl rhoncus mattis rhoncus urna neque. Commodo odio aenean sed adipiscing diam donec adipiscing tristique. 
Tristique senectus et netus et malesuada fames. Nascetur ridiculus mus mauris vitae ultricies leo integer. Ut sem viverra aliquet eget. Sed egestas egestas fringilla phasellus faucibus scelerisque. + +In tellus integer feugiat scelerisque varius morbi. Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Cum sociis natoque penatibus et magnis dis. Iaculis at erat pellentesque adipiscing commodo elit at imperdiet dui. Praesent semper feugiat nibh sed pulvinar proin gravida hendrerit lectus. Consectetur a erat nam at lectus urna. Hac habitasse platea dictumst vestibulum rhoncus est pellentesque elit. Aliquam vestibulum morbi blandit cursus risus at ultrices. Eu non diam phasellus vestibulum lorem sed. Risus pretium quam vulputate dignissim suspendisse in est. Elit scelerisque mauris pellentesque pulvinar pellentesque habitant morbi. At varius vel pharetra vel turpis nunc eget. Aliquam malesuada bibendum arcu vitae. At consectetur lorem donec massa. Mi sit amet mauris commodo. Maecenas volutpat blandit aliquam etiam erat velit. Nullam ac tortor vitae purus faucibus ornare suspendisse. + +Facilisi nullam vehicula ipsum a arcu cursus vitae. Commodo sed egestas egestas fringilla phasellus. Lacus luctus accumsan tortor posuere ac ut consequat. Adipiscing commodo elit at imperdiet dui accumsan sit. Non tellus orci ac auctor augue. Viverra aliquet eget sit amet tellus. Luctus venenatis lectus magna fringilla urna porttitor rhoncus dolor. Mattis enim ut tellus elementum. Nunc sed id semper risus. At augue eget arcu dictum. + +Ullamcorper a lacus vestibulum sed arcu non. Vitae tortor condimentum lacinia quis vel. Dui faucibus in ornare quam viverra. Vel pharetra vel turpis nunc eget. In egestas erat imperdiet sed euismod nisi porta lorem mollis. Lacus vestibulum sed arcu non odio euismod lacinia at quis. Augue mauris augue neque gravida in. Ornare quam viverra orci sagittis. Lacus suspendisse faucibus interdum posuere lorem ipsum. Arcu vitae elementum curabitur vitae nunc sed velit dignissim. Diam quam nulla porttitor massa id neque. Gravida dictum fusce ut placerat orci nulla pellentesque. Mus mauris vitae ultricies leo integer malesuada nunc vel risus. Donec pretium vulputate sapien nec sagittis aliquam. Velit egestas dui id ornare. Sed elementum tempus egestas sed sed risus pretium quam vulputate. \ No newline at end of file diff --git a/dotnet/src/IntegrationTests/prompts/GenerateStory.yaml b/dotnet/src/IntegrationTests/prompts/GenerateStory.yaml index fc5ecd88f34e..d3612b594d59 100644 --- a/dotnet/src/IntegrationTests/prompts/GenerateStory.yaml +++ b/dotnet/src/IntegrationTests/prompts/GenerateStory.yaml @@ -1,6 +1,6 @@ name: GenerateStory template: | - Tell a story about {{$topic}} that is {{$length}} sentences long. + Tell a story about {{$topic}} that is {{$length}} sentences long. Include {{$topic}} words in response. template_format: semantic-kernel description: A function that generates a story about a topic. input_variables: diff --git a/dotnet/src/IntegrationTests/prompts/GenerateStoryHandlebars.yaml b/dotnet/src/IntegrationTests/prompts/GenerateStoryHandlebars.yaml index b1cb891fb706..891a33f30747 100644 --- a/dotnet/src/IntegrationTests/prompts/GenerateStoryHandlebars.yaml +++ b/dotnet/src/IntegrationTests/prompts/GenerateStoryHandlebars.yaml @@ -1,6 +1,6 @@ name: GenerateStory template: | - Tell a story about {{topic}} that is {{length}} sentences long. + Tell a story about {{topic}} that is {{length}} sentences long. Include {{topic}} words in response. 
template_format: handlebars description: A function that generates a story about a topic. input_variables: diff --git a/dotnet/src/IntegrationTests/testsettings.json b/dotnet/src/IntegrationTests/testsettings.json index 353b97a32ec7..39ec5c4d3b1c 100644 --- a/dotnet/src/IntegrationTests/testsettings.json +++ b/dotnet/src/IntegrationTests/testsettings.json @@ -1,12 +1,12 @@ { "OpenAI": { - "ServiceId": "text-davinci-003", - "ModelId": "text-davinci-003", + "ServiceId": "gpt-3.5-turbo-instruct", + "ModelId": "gpt-3.5-turbo-instruct", "ApiKey": "" }, "AzureOpenAI": { - "ServiceId": "azure-text-davinci-003", - "DeploymentName": "text-davinci-003", + "ServiceId": "azure-gpt-35-turbo-instruct", + "DeploymentName": "gpt-35-turbo-instruct", "ChatDeploymentName": "gpt-4", "Endpoint": "", "ApiKey": "" diff --git a/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs b/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs index dadf49c15d27..8e65d7dcd88a 100644 --- a/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs +++ b/dotnet/src/InternalUtilities/samples/InternalUtilities/BaseTest.cs @@ -89,17 +89,16 @@ public void WriteLine(string? format, params object?[] args) /// /// This method can be substituted by Console.WriteLine when used in Console apps. /// + /// The message public void WriteLine(string? message) - => this.Output.WriteLine(message); + => this.Output.WriteLine(message ?? string.Empty); /// /// Current interface ITestOutputHelper does not have a Write method. This extension method adds it to make it analogous to Console.Write when used in Console apps. /// /// Target object to write public void Write(object? target = null) - { - this.Output.WriteLine(target ?? string.Empty); - } + => this.Output.WriteLine(target ?? string.Empty); protected sealed class LoggingHandler(HttpMessageHandler innerHandler, ITestOutputHelper output) : DelegatingHandler(innerHandler) { diff --git a/dotnet/src/InternalUtilities/src/Diagnostics/ModelDiagnostics.cs b/dotnet/src/InternalUtilities/src/Diagnostics/ModelDiagnostics.cs index 3b53a9e5bda2..3425d187e4fd 100644 --- a/dotnet/src/InternalUtilities/src/Diagnostics/ModelDiagnostics.cs +++ b/dotnet/src/InternalUtilities/src/Diagnostics/ModelDiagnostics.cs @@ -325,13 +325,13 @@ private static void SetCompletionResponse( int? promptTokens, int? completionTokens) { - if (!IsModelDiagnosticsEnabled()) + if (!IsModelDiagnosticsEnabled() || choices.Count == 0) { return; } // Assuming all metadata is in the last chunk of the choice - switch (choices.FirstOrDefault().Value.FirstOrDefault()) + switch (choices.FirstOrDefault().Value?.FirstOrDefault()) { case StreamingTextContent: var textCompletions = choices.Select(choiceContents => diff --git a/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsPythonPlugin.cs b/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsPythonPlugin.cs index e61b5ec2c5b4..6c92763f3fe4 100644 --- a/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsPythonPlugin.cs +++ b/dotnet/src/Plugins/Plugins.Core/CodeInterpreter/SessionsPythonPlugin.cs @@ -21,7 +21,7 @@ namespace Microsoft.SemanticKernel.Plugins.Core.CodeInterpreter; public partial class SessionsPythonPlugin { private static readonly string s_assemblyVersion = typeof(Kernel).Assembly.GetName().Version!.ToString(); - + private const string ApiVersion = "2024-02-02-preview"; private readonly Uri _poolManagementEndpoint; private readonly SessionsPythonSettings _settings; private readonly Func>? 
_authTokenProvider; @@ -97,7 +97,7 @@ public async Task ExecuteCodeAsync([Description("The valid Python code t await this.AddHeadersAsync(httpClient).ConfigureAwait(false); - using var request = new HttpRequestMessage(HttpMethod.Post, this._poolManagementEndpoint + "python/execute") + using var request = new HttpRequestMessage(HttpMethod.Post, this._poolManagementEndpoint + $"python/execute?api-version={ApiVersion}") { Content = new StringContent(JsonSerializer.Serialize(requestBody), Encoding.UTF8, "application/json") }; @@ -155,7 +155,7 @@ public async Task UploadFileAsync( await this.AddHeadersAsync(httpClient).ConfigureAwait(false); using var fileContent = new ByteArrayContent(File.ReadAllBytes(localFilePath)); - using var request = new HttpRequestMessage(HttpMethod.Post, $"{this._poolManagementEndpoint}python/uploadFile?identifier={this._settings.SessionId}") + using var request = new HttpRequestMessage(HttpMethod.Post, $"{this._poolManagementEndpoint}python/uploadFile?identifier={this._settings.SessionId}&api-version={ApiVersion}") { Content = new MultipartFormDataContent { @@ -194,7 +194,7 @@ public async Task DownloadFileAsync( using var httpClient = this._httpClientFactory.CreateClient(); await this.AddHeadersAsync(httpClient).ConfigureAwait(false); - var response = await httpClient.GetAsync(new Uri($"{this._poolManagementEndpoint}python/downloadFile?identifier={this._settings.SessionId}&filename={remoteFilePath}")).ConfigureAwait(false); + var response = await httpClient.GetAsync(new Uri($"{this._poolManagementEndpoint}python/downloadFile?identifier={this._settings.SessionId}&filename={remoteFilePath}&api-version={ApiVersion}")).ConfigureAwait(false); if (!response.IsSuccessStatusCode) { var errorBody = await response.Content.ReadAsStringAsync().ConfigureAwait(false); @@ -230,7 +230,7 @@ public async Task> ListFilesAsync() using var httpClient = this._httpClientFactory.CreateClient(); await this.AddHeadersAsync(httpClient).ConfigureAwait(false); - var response = await httpClient.GetAsync(new Uri($"{this._poolManagementEndpoint}python/files?identifier={this._settings.SessionId}")).ConfigureAwait(false); + var response = await httpClient.GetAsync(new Uri($"{this._poolManagementEndpoint}python/files?identifier={this._settings.SessionId}&api-version={ApiVersion}")).ConfigureAwait(false); if (!response.IsSuccessStatusCode) { diff --git a/dotnet/src/Plugins/Plugins.Core/PromptFunctionConstants.cs b/dotnet/src/Plugins/Plugins.Core/PromptFunctionConstants.cs index 34b90cc9bb90..03bbd9783299 100644 --- a/dotnet/src/Plugins/Plugins.Core/PromptFunctionConstants.cs +++ b/dotnet/src/Plugins/Plugins.Core/PromptFunctionConstants.cs @@ -10,7 +10,7 @@ internal static class PromptFunctionConstants END CONTENT TO SUMMARIZE. -Summarize the conversation in 'CONTENT TO SUMMARIZE', identifying main points of discussion and any conclusions that were reached. +Summarize the conversation in 'CONTENT TO SUMMARIZE', identifying main points of discussion and any conclusions that were reached, in the language that best fits the content. Do not incorporate other general knowledge. Summary is in plain text, in complete sentences, with no markup or tags. @@ -19,10 +19,14 @@ Do not incorporate other general knowledge. internal const string GetConversationActionItemsDefinition = """ - You are an action item extractor. You will be given chat history and need to make note of action items mentioned in the chat. + You are an action item extractor. 
You will be given chat history or content and need to make note of action items mentioned. Extract action items from the content if there are any. If there are no action, return nothing. If a single field is missing, use an empty string. Return the action items in json. + Guidelines: + Action items are specific tasks or requests that someone needs to complete. + Routine statements or general comments about habits or preferences should not be considered action items. + Possible statuses for action items are: Open, Closed, In Progress. EXAMPLE INPUT WITH ACTION ITEMS: @@ -43,6 +47,23 @@ Return the action items in json. ] } + EXAMPLE INPUT WITH IMPLIED ACTION ITEMS: + + I need a list of vegan breakfast recipes. Can you get that for me? + + EXAMPLE OUTPUT: + { + "actionItems": [ + { + "owner": "", + "actionItem": "Give a list of breakfast recipes that are vegan friendly", + "dueDate": "", + "status": "Open", + "notes": "" + } + ] + } + EXAMPLE INPUT WITHOUT ACTION ITEMS: John Doe said: "Hey I'm going to the store, do you need anything?" diff --git a/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/AuthorRole.cs b/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/AuthorRole.cs index 7c572509056c..05f473b1b792 100644 --- a/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/AuthorRole.cs +++ b/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/AuthorRole.cs @@ -32,7 +32,7 @@ namespace Microsoft.SemanticKernel.ChatCompletion; public static AuthorRole Tool { get; } = new("tool"); /// - /// Gets the label associated with this AuthorRole. + /// Gets the label associated with this . /// /// /// The label is what will be serialized into the "role" message field of the Chat Message format. @@ -40,9 +40,9 @@ namespace Microsoft.SemanticKernel.ChatCompletion; public string Label { get; } /// - /// Creates a new AuthorRole instance with the provided label. + /// Creates a new instance with the provided label. /// - /// The label to associate with this AuthorRole. + /// The label to associate with this . [JsonConstructor] public AuthorRole(string label) { @@ -51,21 +51,21 @@ public AuthorRole(string label) } /// - /// Returns a value indicating whether two AuthorRole instances are equivalent, as determined by a + /// Returns a value indicating whether two instances are equivalent, as determined by a /// case-insensitive comparison of their labels. /// - /// the first AuthorRole instance to compare - /// the second AuthorRole instance to compare + /// the first instance to compare + /// the second instance to compare /// true if left and right are both null or have equivalent labels; false otherwise public static bool operator ==(AuthorRole left, AuthorRole right) => left.Equals(right); /// - /// Returns a value indicating whether two AuthorRole instances are not equivalent, as determined by a + /// Returns a value indicating whether two instances are not equivalent, as determined by a /// case-insensitive comparison of their labels. /// - /// the first AuthorRole instance to compare - /// the second AuthorRole instance to compare + /// the first instance to compare + /// the second instance to compare /// false if left and right are both null or have equivalent labels; true otherwise public static bool operator !=(AuthorRole left, AuthorRole right) => !(left == right); @@ -80,8 +80,8 @@ public bool Equals(AuthorRole other) /// public override int GetHashCode() - => StringComparer.OrdinalIgnoreCase.GetHashCode(this.Label ?? 
string.Empty); + => StringComparer.OrdinalIgnoreCase.GetHashCode(this.Label); /// - public override string ToString() => this.Label ?? string.Empty; + public override string ToString() => this.Label; } diff --git a/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/ChatCompletionServiceExtensions.cs b/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/ChatCompletionServiceExtensions.cs index 102faca62de8..a452d979c4f5 100644 --- a/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/ChatCompletionServiceExtensions.cs +++ b/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/ChatCompletionServiceExtensions.cs @@ -53,7 +53,7 @@ public static Task> GetChatMessageContentsAsyn /// The AI execution settings (optional). /// The containing services, plugins, and other state for use throughout the operation. /// The to monitor for cancellation requests. The default is . - /// List of different chat results generated by the remote model + /// Single chat message content generated by the remote model. public static async Task GetChatMessageContentAsync( this IChatCompletionService chatCompletionService, string prompt, diff --git a/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/StreamingKernelContentItemCollection.cs b/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/StreamingKernelContentItemCollection.cs new file mode 100644 index 000000000000..d3dbac4f919d --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/AI/ChatCompletion/StreamingKernelContentItemCollection.cs @@ -0,0 +1,142 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel.ChatCompletion; + +#pragma warning disable CA1033 // Interface methods should be callable by child types + +/// +/// Contains collection of streaming kernel content items of type . +/// +[Experimental("SKEXP0001")] +public sealed class StreamingKernelContentItemCollection : IList, IReadOnlyList +{ + /// + /// Initializes a new instance of the class. + /// + public StreamingKernelContentItemCollection() + { + this._items = []; + } + + /// + /// Gets or sets the content item at the specified index in the collection. + /// + /// The index of the content item to get or set. + /// The content item at the specified index. + /// is null. + /// The was not valid for this collection. + public StreamingKernelContent this[int index] + { + get => this._items[index]; + set + { + Verify.NotNull(value); + this._items[index] = value; + } + } + + /// + /// Gets the number of content items in the collection. + /// + public int Count => this._items.Count; + + /// + /// Adds a content item to the collection. + /// + /// The content item to add. + /// is null. + public void Add(StreamingKernelContent item) + { + Verify.NotNull(item); + this._items.Add(item); + } + + /// + /// Removes all content items from the collection. + /// + public void Clear() => this._items.Clear(); + + /// + /// Determines whether a content item is in the collection. + /// + /// The content item to locate. + /// True if the content item is found in the collection; otherwise, false. + /// is null. + public bool Contains(StreamingKernelContent item) + { + Verify.NotNull(item); + return this._items.Contains(item); + } + + /// + /// Copies all of the content items in the collection to an array, starting at the specified destination array index. 
+ /// + /// The destination array into which the content items should be copied. + /// The zero-based index into at which copying should begin. + /// is null. + /// The number of content items in the collection is greater than the available space from to the end of . + /// is less than 0. + public void CopyTo(StreamingKernelContent[] array, int arrayIndex) => this._items.CopyTo(array, arrayIndex); + + /// + /// Searches for the specified content item and returns the index of the first occurrence. + /// + /// The content item to locate. + /// The index of the first found occurrence of the specified content item; -1 if the content item could not be found. + /// is null. + public int IndexOf(StreamingKernelContent item) + { + Verify.NotNull(item); + return this._items.IndexOf(item); + } + + /// + /// Inserts a content item into the collection at the specified index. + /// + /// The index at which the content item should be inserted. + /// The content item to insert. + /// is null. + public void Insert(int index, StreamingKernelContent item) + { + Verify.NotNull(item); + this._items.Insert(index, item); + } + + /// + /// Removes the first occurrence of the specified content item from the collection. + /// + /// The content item to remove from the collection. + /// True if the item was successfully removed; false if it wasn't located in the collection. + /// is null. + public bool Remove(StreamingKernelContent item) + { + Verify.NotNull(item); + return this._items.Remove(item); + } + + /// + /// Removes the content item at the specified index from the collection. + /// + /// The index of the content item to remove. + public void RemoveAt(int index) => this._items.RemoveAt(index); + + /// + bool ICollection.IsReadOnly => false; + + /// + IEnumerator IEnumerable.GetEnumerator() => this._items.GetEnumerator(); + + /// + IEnumerator IEnumerable.GetEnumerator() => this._items.GetEnumerator(); + + #region private + + private readonly List _items; + + #endregion +} diff --git a/dotnet/src/SemanticKernel.Abstractions/AI/PromptExecutionSettings.cs b/dotnet/src/SemanticKernel.Abstractions/AI/PromptExecutionSettings.cs index bce11b356e0f..f10ccaa3ff39 100644 --- a/dotnet/src/SemanticKernel.Abstractions/AI/PromptExecutionSettings.cs +++ b/dotnet/src/SemanticKernel.Abstractions/AI/PromptExecutionSettings.cs @@ -3,6 +3,7 @@ using System; using System.Collections.Generic; using System.Collections.ObjectModel; +using System.Diagnostics.CodeAnalysis; using System.Text.Json.Serialization; using Microsoft.SemanticKernel.ChatCompletion; using Microsoft.SemanticKernel.TextGeneration; @@ -27,6 +28,27 @@ public class PromptExecutionSettings /// public static string DefaultServiceId => "default"; + /// + /// Service identifier. + /// This identifies the service these settings are configured for e.g., azure_openai_eastus, openai, ollama, huggingface, etc. + /// + /// + /// When provided, this service identifier will be the key in a dictionary collection of execution settings for both and . + /// If not provided the service identifier will be the default value in . + /// + [Experimental("SKEXP0001")] + [JsonPropertyName("service_id")] + public string? ServiceId + { + get => this._serviceId; + + set + { + this.ThrowIfFrozen(); + this._serviceId = value; + } + } + /// /// Model identifier. 
/// This identifies the AI model these settings are configured for e.g., gpt-4, gpt-3.5-turbo @@ -93,6 +115,7 @@ public virtual PromptExecutionSettings Clone() return new() { ModelId = this.ModelId, + ServiceId = this.ServiceId, ExtensionData = this.ExtensionData is not null ? new Dictionary(this.ExtensionData) : null }; } @@ -113,6 +136,7 @@ protected void ThrowIfFrozen() private string? _modelId; private IDictionary? _extensionData; + private string? _serviceId; #endregion } diff --git a/dotnet/src/SemanticKernel.Abstractions/AI/TextGeneration/TextGenerationExtensions.cs b/dotnet/src/SemanticKernel.Abstractions/AI/TextGeneration/TextGenerationExtensions.cs index bf955ff2ebc1..0f044184e1bc 100644 --- a/dotnet/src/SemanticKernel.Abstractions/AI/TextGeneration/TextGenerationExtensions.cs +++ b/dotnet/src/SemanticKernel.Abstractions/AI/TextGeneration/TextGenerationExtensions.cs @@ -23,7 +23,7 @@ public static class TextGenerationExtensions /// The AI execution settings (optional). /// The containing services, plugins, and other state for use throughout the operation. /// The to monitor for cancellation requests. The default is . - /// List of different text results generated by the remote model + /// Single text content generated by the remote model. public static async Task GetTextContentAsync( this ITextGenerationService textGenerationService, string prompt, @@ -34,7 +34,7 @@ public static async Task GetTextContentAsync( .Single(); /// - /// Get a single text generation result for the standardized prompt and settings. + /// Get a text generation results for the standardized prompt and settings. /// /// Text generation service /// The standardized prompt input. @@ -42,7 +42,7 @@ public static async Task GetTextContentAsync( /// The containing services, plugins, and other state for use throughout the operation. /// The to monitor for cancellation requests. The default is . /// List of different text results generated by the remote model - internal static async Task GetTextContentWithDefaultParserAsync( + internal static async Task> GetTextContentsWithDefaultParserAsync( this ITextGenerationService textGenerationService, string prompt, PromptExecutionSettings? executionSettings = null, @@ -52,12 +52,14 @@ internal static async Task GetTextContentWithDefaultParserAsync( if (textGenerationService is IChatCompletionService chatCompletion && ChatPromptParser.TryParse(prompt, out var chatHistory)) { - var chatMessage = await chatCompletion.GetChatMessageContentAsync(chatHistory, executionSettings, kernel, cancellationToken).ConfigureAwait(false); - return new TextContent(chatMessage.Content, chatMessage.ModelId, chatMessage.InnerContent, chatMessage.Encoding, chatMessage.Metadata); + var chatMessages = await chatCompletion.GetChatMessageContentsAsync(chatHistory, executionSettings, kernel, cancellationToken).ConfigureAwait(false); + return chatMessages + .Select(chatMessage => new TextContent(chatMessage.Content, chatMessage.ModelId, chatMessage.InnerContent, chatMessage.Encoding, chatMessage.Metadata)) + .ToArray(); } // When using against text generations, the prompt will be used as is. 
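Stepping back to the `ServiceId` property added to `PromptExecutionSettings` above: per its doc comment, the value becomes the dictionary key in per-service execution-settings collections, and `Clone()` now carries it across. A tiny sketch; the service id string is illustrative, echoing the examples in the doc comment:

```csharp
using Microsoft.SemanticKernel;

var settings = new PromptExecutionSettings
{
    ServiceId = "azure_openai_eastus", // illustrative key for the target service
    ModelId = "gpt-4",
};

// The clone preserves ServiceId alongside ModelId and ExtensionData.
PromptExecutionSettings copy = settings.Clone();
```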
- return await textGenerationService.GetTextContentAsync(prompt, executionSettings, kernel, cancellationToken).ConfigureAwait(false); + return await textGenerationService.GetTextContentsAsync(prompt, executionSettings, kernel, cancellationToken).ConfigureAwait(false); } /// diff --git a/dotnet/src/SemanticKernel.Abstractions/Contents/ChatMessageContent.cs b/dotnet/src/SemanticKernel.Abstractions/Contents/ChatMessageContent.cs index d9c31c50982c..24ff3cf19438 100644 --- a/dotnet/src/SemanticKernel.Abstractions/Contents/ChatMessageContent.cs +++ b/dotnet/src/SemanticKernel.Abstractions/Contents/ChatMessageContent.cs @@ -41,18 +41,12 @@ public string? Content } set { - if (value is null) - { - return; - } - var textContent = this.Items.OfType().FirstOrDefault(); if (textContent is not null) { textContent.Text = value; - textContent.Encoding = this.Encoding; } - else + else if (value is not null) { this.Items.Add(new TextContent( text: value, diff --git a/dotnet/src/SemanticKernel.Abstractions/Contents/FunctionCallContentBuilder.cs b/dotnet/src/SemanticKernel.Abstractions/Contents/FunctionCallContentBuilder.cs new file mode 100644 index 000000000000..42abeab610f5 --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Contents/FunctionCallContentBuilder.cs @@ -0,0 +1,161 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Text; +using System.Text.Json; + +namespace Microsoft.SemanticKernel; + +/// +/// A builder class for creating objects from incremental function call updates represented by . +/// +[Experimental("SKEXP0001")] +public sealed class FunctionCallContentBuilder +{ + private Dictionary? _functionCallIdsByIndex = null; + private Dictionary? _functionNamesByIndex = null; + private Dictionary? _functionArgumentBuildersByIndex = null; + + /// + /// Extracts function call updates from the content and tracks them for later building. + /// + /// The content to extract function call updates from. + public void Append(StreamingChatMessageContent content) + { + var streamingFunctionCallUpdates = content.Items.OfType(); + + foreach (var update in streamingFunctionCallUpdates) + { + TrackStreamingFunctionCallUpdate(update, + ref this._functionCallIdsByIndex, + ref this._functionNamesByIndex, + ref this._functionArgumentBuildersByIndex); + } + } + + /// + /// Builds a list of out of function call updates tracked by the method. + /// + /// A list of objects. + public IReadOnlyList Build() + { + FunctionCallContent[]? functionCalls = null; + + if (this._functionCallIdsByIndex is { Count: > 0 }) + { + functionCalls = new FunctionCallContent[this._functionCallIdsByIndex.Count]; + + for (int i = 0; i < this._functionCallIdsByIndex.Count; i++) + { + KeyValuePair functionCallIndexAndId = this._functionCallIdsByIndex.ElementAt(i); + + string? pluginName = null; + string functionName = string.Empty; + + if (this._functionNamesByIndex?.TryGetValue(functionCallIndexAndId.Key, out string? fqn) ?? false) + { + var functionFullyQualifiedName = Microsoft.SemanticKernel.FunctionName.Parse(fqn); + pluginName = functionFullyQualifiedName.PluginName; + functionName = functionFullyQualifiedName.Name; + } + + (KernelArguments? arguments, Exception?
exception) = this.GetFunctionArguments(functionCallIndexAndId.Key); + + functionCalls[i] = new FunctionCallContent( + functionName: functionName, + pluginName: pluginName, + id: functionCallIndexAndId.Value, + arguments) + { + Exception = exception + }; + } + } + + return functionCalls ?? []; + } + + /// + /// Gets function arguments for a given function call index. + /// + /// The function call index to get the function arguments for. + /// A tuple containing the KernelArguments and an Exception if any. + private (KernelArguments? Arguments, Exception? Exception) GetFunctionArguments(int functionCallIndex) + { + if (this._functionArgumentBuildersByIndex is null || + !this._functionArgumentBuildersByIndex.TryGetValue(functionCallIndex, out StringBuilder? functionArgumentsBuilder)) + { + return (null, null); + } + + var argumentsString = functionArgumentsBuilder.ToString(); + if (string.IsNullOrEmpty(argumentsString)) + { + return (null, null); + } + + Exception? exception = null; + KernelArguments? arguments = null; + try + { + arguments = JsonSerializer.Deserialize(argumentsString); + if (arguments is { Count: > 0 }) + { + var names = arguments.Names.ToArray(); + foreach (var name in names) + { + arguments[name] = arguments[name]?.ToString(); + } + } + } + catch (JsonException ex) + { + exception = new KernelException("Error: Function call arguments were invalid JSON.", ex); + } + + return (arguments, exception); + } + + /// + /// Tracks streaming function call update contents. + /// + /// The streaming function call update content to track. + /// The dictionary of function call IDs by function call index. + /// The dictionary of function names by function call index. + /// The dictionary of function argument builders by function call index. + private static void TrackStreamingFunctionCallUpdate(StreamingFunctionCallUpdateContent update, ref Dictionary? functionCallIdsByIndex, ref Dictionary? functionNamesByIndex, ref Dictionary? functionArgumentBuildersByIndex) + { + if (update is null) + { + // Nothing to track. + return; + } + + // If we have a call id, ensure the index is being tracked. Even if it's not a function update, + // we want to keep track of it so we can send back an error. + if (update.CallId is string id) + { + (functionCallIdsByIndex ??= [])[update.FunctionCallIndex] = id; + } + + // Ensure we're tracking the function's name. + if (update.Name is string name) + { + (functionNamesByIndex ??= [])[update.FunctionCallIndex] = name; + } + + // Ensure we're tracking the function's arguments. + if (update.Arguments is string argumentsUpdate) + { + if (!(functionArgumentBuildersByIndex ??= []).TryGetValue(update.FunctionCallIndex, out StringBuilder?
arguments)) + { + functionArgumentBuildersByIndex[update.FunctionCallIndex] = arguments = new(); + } + + arguments.Append(argumentsUpdate); + } + } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingChatMessageContent.cs b/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingChatMessageContent.cs index 5a14e6cb56d7..5cc7afb582ed 100644 --- a/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingChatMessageContent.cs +++ b/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingChatMessageContent.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; +using System.Linq; using System.Text; using System.Text.Json.Serialization; using Microsoft.SemanticKernel.ChatCompletion; @@ -17,9 +18,46 @@ namespace Microsoft.SemanticKernel; public class StreamingChatMessageContent : StreamingKernelContent { /// - /// Text associated to the message payload + /// A convenience property to get or set the text of the first item in the collection of type. /// - public string? Content { get; set; } + public string? Content + { + get + { + var textContent = this.Items.OfType().FirstOrDefault(); + return textContent?.Text; + } + set + { + var textContent = this.Items.OfType().FirstOrDefault(); + if (textContent is not null) + { + textContent.Text = value; + } + else if (value is not null) + { + this.Items.Add(new StreamingTextContent( + text: value, + choiceIndex: this.ChoiceIndex, + modelId: this.ModelId, + innerContent: this.InnerContent, + encoding: this.Encoding, + metadata: this.Metadata + )); + } + } + } + + /// + /// Chat message content items. + /// + [JsonIgnore] + [Experimental("SKEXP0001")] + public StreamingKernelContentItemCollection Items + { + get => this._items ??= []; + set => this._items = value; + } /// /// Name of the author of the message @@ -34,10 +72,32 @@ public class StreamingChatMessageContent : StreamingKernelContent public AuthorRole? Role { get; set; } /// - /// The encoding of the text content. + /// A convenience property to get or set the encoding of the first item in the collection of type. /// [JsonIgnore] - public Encoding Encoding { get; set; } + public Encoding Encoding + { + get + { + var textContent = this.Items.OfType().FirstOrDefault(); + if (textContent is not null) + { + return textContent.Encoding; + } + + return this._encoding; + } + set + { + this._encoding = value; + + var textContent = this.Items.OfType().FirstOrDefault(); + if (textContent is not null) + { + textContent.Encoding = value; + } + } + } /// /// Initializes a new instance of the class. @@ -55,7 +115,7 @@ public StreamingChatMessageContent(AuthorRole? role, string? content, object? in { this.Role = role; this.Content = content; - this.Encoding = encoding ?? Encoding.UTF8; + this._encoding = encoding ?? Encoding.UTF8; } /// @@ -63,4 +123,7 @@ public StreamingChatMessageContent(AuthorRole? role, string? content, object? in /// public override byte[] ToByteArray() => this.Encoding.GetBytes(this.ToString()); + + private StreamingKernelContentItemCollection? _items; + private Encoding _encoding; } diff --git a/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingFunctionCallUpdateContent.cs b/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingFunctionCallUpdateContent.cs new file mode 100644 index 000000000000..3d186681f481 --- /dev/null +++ b/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingFunctionCallUpdateContent.cs @@ -0,0 +1,60 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Diagnostics.CodeAnalysis; +using System.Text; + +namespace Microsoft.SemanticKernel; + +/// +/// Represents a streaming function call requested by the LLM. +/// +[Experimental("SKEXP0001")] +public class StreamingFunctionCallUpdateContent : StreamingKernelContent +{ + /// + /// The function call ID. + /// + public string? CallId { get; init; } + + /// + /// The function name. + /// + public string? Name { get; init; } + + /// + /// The function arguments that can come as full or partial. + /// + public string? Arguments { get; init; } + + /// + /// The function call index. + /// + public int FunctionCallIndex { get; init; } + + /// + /// Creates a new instance of the class. + /// + /// The function call ID. + /// The function name. + /// The function original arguments. + /// The function call index. + public StreamingFunctionCallUpdateContent(string? callId = null, string? name = null, string? arguments = null, int functionCallIndex = 0) + { + this.CallId = callId; + this.Name = name; + this.Arguments = arguments; + this.FunctionCallIndex = functionCallIndex; + } + + /// + public override string ToString() + { + return nameof(StreamingFunctionCallUpdateContent); + } + + /// + public override byte[] ToByteArray() + { + return Encoding.UTF8.GetBytes(this.ToString()); + } +} diff --git a/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingKernelContent.cs b/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingKernelContent.cs index 0285eafe92c1..59231bf3c4f9 100644 --- a/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingKernelContent.cs +++ b/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingKernelContent.cs @@ -13,7 +13,7 @@ public abstract class StreamingKernelContent /// /// In a scenario of multiple choices per request, this represents zero-based index of the choice in the streaming sequence /// - public int ChoiceIndex { get; } + public int ChoiceIndex { get; set; } /// /// The inner content representation. Use this to bypass the current abstraction. @@ -22,17 +22,17 @@ public abstract class StreamingKernelContent /// The usage of this property is considered "unsafe". Use it only if strictly necessary. /// [JsonIgnore] - public object? InnerContent { get; } + public object? InnerContent { get; set; } /// /// The model ID used to generate the content. /// - public string? ModelId { get; } + public string? ModelId { get; set; } /// /// The metadata associated with the content. /// - public IReadOnlyDictionary? Metadata { get; } + public IReadOnlyDictionary? Metadata { get; set; } /// /// Abstract string representation of the chunk in a way it could compose/append with previous chunks. @@ -52,6 +52,13 @@ public abstract class StreamingKernelContent /// Byte array representation of the chunk public abstract byte[] ToByteArray(); + /// + /// Initializes a new instance of the class. + /// + protected StreamingKernelContent() + { + } + /// /// Initializes a new instance of the class. /// diff --git a/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingTextContent.cs b/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingTextContent.cs index f13d03820c60..766656be5a3b 100644 --- a/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingTextContent.cs +++ b/dotnet/src/SemanticKernel.Abstractions/Contents/StreamingTextContent.cs @@ -15,7 +15,7 @@ public class StreamingTextContent : StreamingKernelContent /// /// Text associated to the update /// - public string? Text { get; } + public string? Text { get; set; } /// /// The encoding of the text content.
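For context, a minimal usage sketch (illustrative only, not part of this diff) of how the streaming pieces above compose: each streamed StreamingChatMessageContent can carry StreamingFunctionCallUpdateContent items, and FunctionCallContentBuilder stitches those partial updates into complete FunctionCallContent objects once the stream ends. The kernel and chat completion service are assumed to be configured elsewhere; GetStreamingChatMessageContentsAsync is the standard IChatCompletionService streaming entry point.

using System;
using System.Threading.Tasks;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.ChatCompletion;

public static class StreamingFunctionCallSketch
{
    // Accumulates partial function-call updates from a streaming response and
    // materializes them once the stream completes.
    public static async Task PrintFunctionCallsAsync(Kernel kernel, IChatCompletionService chatService)
    {
        var chatHistory = new ChatHistory();
        chatHistory.AddUserMessage("What is the weather in Seattle?");

        var builder = new FunctionCallContentBuilder();
        await foreach (StreamingChatMessageContent update in
            chatService.GetStreamingChatMessageContentsAsync(chatHistory, kernel: kernel))
        {
            // Append tracks call ids, names, and argument fragments by FunctionCallIndex.
            builder.Append(update);
        }

        // Build stitches the fragments together and parses the accumulated JSON arguments;
        // malformed JSON is surfaced through FunctionCallContent.Exception rather than thrown.
        foreach (FunctionCallContent functionCall in builder.Build())
        {
            Console.WriteLine($"{functionCall.PluginName}-{functionCall.FunctionName} (id: {functionCall.Id})");
        }
    }
}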
diff --git a/dotnet/src/SemanticKernel.Abstractions/Functions/KernelArguments.cs b/dotnet/src/SemanticKernel.Abstractions/Functions/KernelArguments.cs index d7776f83f24a..eda736b3f583 100644 --- a/dotnet/src/SemanticKernel.Abstractions/Functions/KernelArguments.cs +++ b/dotnet/src/SemanticKernel.Abstractions/Functions/KernelArguments.cs @@ -21,6 +21,7 @@ public sealed class KernelArguments : IDictionary, IReadOnlyDic { /// Dictionary of name/values for all the arguments in the instance. private readonly Dictionary _arguments; + private IReadOnlyDictionary? _executionSettings; /// /// Initializes a new instance of the class with the specified AI execution settings. @@ -36,12 +37,36 @@ public KernelArguments() /// /// The prompt execution settings. public KernelArguments(PromptExecutionSettings? executionSettings) + : this(executionSettings is null ? null : [executionSettings]) { - this._arguments = new(StringComparer.OrdinalIgnoreCase); + } + /// + /// Initializes a new instance of the class with the specified AI execution settings. + /// + /// The prompt execution settings. + public KernelArguments(IEnumerable? executionSettings) + { + this._arguments = new(StringComparer.OrdinalIgnoreCase); if (executionSettings is not null) { - this.ExecutionSettings = new Dictionary() { { PromptExecutionSettings.DefaultServiceId, executionSettings } }; + var newExecutionSettings = new Dictionary(); + foreach (var settings in executionSettings) + { + var targetServiceId = settings.ServiceId ?? PromptExecutionSettings.DefaultServiceId; + if (newExecutionSettings.ContainsKey(targetServiceId)) + { + var exceptionMessage = (targetServiceId == PromptExecutionSettings.DefaultServiceId) + ? $"Multiple prompt execution settings with the default service id '{PromptExecutionSettings.DefaultServiceId}' or no service id have been provided. Specify a single default prompt execution settings and provide a unique service id for all other instances." + : $"Multiple prompt execution settings with the service id '{targetServiceId}' have been provided. Provide a unique service id for all instances."; + + throw new ArgumentException(exceptionMessage, nameof(executionSettings)); + } + + newExecutionSettings[targetServiceId] = settings; + } + + this.ExecutionSettings = newExecutionSettings; } } @@ -65,7 +90,30 @@ public KernelArguments(IDictionary source, Dictionary /// Gets or sets the prompt execution settings. /// - public IReadOnlyDictionary? ExecutionSettings { get; set; } + /// + /// The settings dictionary is keyed by the service ID, or for the default execution settings. + /// When setting, the service id of each must match the key in the dictionary. + /// + public IReadOnlyDictionary? ExecutionSettings + { + get => this._executionSettings; + set + { + if (value is { Count: > 0 }) + { + foreach (var kv in value!) + { + // Ensures that if a service id is specified it needs to match the current key in the dictionary. + if (!string.IsNullOrWhiteSpace(kv.Value.ServiceId) && kv.Key != kv.Value.ServiceId) + { + throw new ArgumentException($"Service id '{kv.Value.ServiceId}' must match the key '{kv.Key}'.", nameof(this.ExecutionSettings)); + } + } + } + + this._executionSettings = value; + } + } /// /// Gets the number of arguments contained in the .
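For context, a short sketch (illustrative only, not part of this diff) of the new multi-settings KernelArguments constructor introduced above: each PromptExecutionSettings instance is keyed by its ServiceId, an instance without a ServiceId lands under PromptExecutionSettings.DefaultServiceId, and two instances resolving to the same key throw ArgumentException. The service ids below are placeholders.

using Microsoft.SemanticKernel;

// Settings are keyed by ServiceId; the entry without one becomes the default.
var arguments = new KernelArguments(
[
    new PromptExecutionSettings(),                              // keyed as PromptExecutionSettings.DefaultServiceId
    new PromptExecutionSettings { ServiceId = "azure_openai" }, // keyed as "azure_openai"
    new PromptExecutionSettings { ServiceId = "ollama" },       // keyed as "ollama"
])
{
    ["city"] = "Seattle" // ordinary prompt arguments coexist with the settings
};

// ExecutionSettings now exposes all three entries:
PromptExecutionSettings defaultSettings = arguments.ExecutionSettings![PromptExecutionSettings.DefaultServiceId];

// Duplicate keys are rejected, e.g. two instances with no ServiceId:
// new KernelArguments([new PromptExecutionSettings(), new PromptExecutionSettings()]); // throws ArgumentException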
diff --git a/dotnet/src/SemanticKernel.Abstractions/Functions/RestApiOperationResponse.cs b/dotnet/src/SemanticKernel.Abstractions/Functions/RestApiOperationResponse.cs index 5cfe2d09c850..d76077624557 100644 --- a/dotnet/src/SemanticKernel.Abstractions/Functions/RestApiOperationResponse.cs +++ b/dotnet/src/SemanticKernel.Abstractions/Functions/RestApiOperationResponse.cs @@ -14,12 +14,12 @@ public sealed class RestApiOperationResponse /// /// Gets the content of the response. /// - public object Content { get; } + public object? Content { get; } /// /// Gets the content type of the response. /// - public string ContentType { get; } + public string? ContentType { get; } /// /// The expected schema of the response as advertised in the OpenAPI operation. @@ -47,7 +47,7 @@ public sealed class RestApiOperationResponse /// The content of the response. /// The content type of the response. /// The schema against which the response body should be validated. - public RestApiOperationResponse(object content, string contentType, KernelJsonSchema? expectedSchema = null) + public RestApiOperationResponse(object? content, string? contentType, KernelJsonSchema? expectedSchema = null) { this.Content = content; this.ContentType = contentType; diff --git a/dotnet/src/SemanticKernel.Abstractions/PromptTemplate/PromptTemplateConfig.cs b/dotnet/src/SemanticKernel.Abstractions/PromptTemplate/PromptTemplateConfig.cs index 1a55cbbff837..1cce254ec1a8 100644 --- a/dotnet/src/SemanticKernel.Abstractions/PromptTemplate/PromptTemplateConfig.cs +++ b/dotnet/src/SemanticKernel.Abstractions/PromptTemplate/PromptTemplateConfig.cs @@ -178,6 +178,7 @@ public List InputVariables /// /// /// The settings dictionary is keyed by the service ID, or for the default execution settings. + /// When setting, the service id of each must match the key in the dictionary. /// [JsonPropertyName("execution_settings")] public Dictionary ExecutionSettings @@ -186,6 +187,19 @@ public Dictionary ExecutionSettings set { Verify.NotNull(value); + + if (value.Count != 0) + { + foreach (var kv in value) + { + // Ensures that if a service id is provided it must match the key in the dictionary. + if (!string.IsNullOrWhiteSpace(kv.Value.ServiceId) && kv.Key != kv.Value.ServiceId) + { + throw new ArgumentException($"Service id '{kv.Value.ServiceId}' must match the key '{kv.Key}'.", nameof(this.ExecutionSettings)); + } + } + } + this._executionSettings = value; } } @@ -224,7 +238,13 @@ public void AddExecutionSettings(PromptExecutionSettings settings, string? servi { Verify.NotNull(settings); - var key = serviceId ?? PromptExecutionSettings.DefaultServiceId; + if (!string.IsNullOrWhiteSpace(serviceId) && !string.IsNullOrWhiteSpace(settings.ServiceId)) + { + throw new ArgumentException($"Service id must not be passed when '{nameof(settings.ServiceId)}' is already provided in execution settings.", nameof(serviceId)); + } + + var key = serviceId ?? settings.ServiceId ?? 
PromptExecutionSettings.DefaultServiceId; + if (this.ExecutionSettings.ContainsKey(key)) { throw new ArgumentException($"Execution settings for service id '{key}' already exists.", nameof(serviceId)); diff --git a/dotnet/src/SemanticKernel.Core/Functions/KernelFunctionFactory.cs b/dotnet/src/SemanticKernel.Core/Functions/KernelFunctionFactory.cs index 25d384d51351..f6f0a805f4a6 100644 --- a/dotnet/src/SemanticKernel.Core/Functions/KernelFunctionFactory.cs +++ b/dotnet/src/SemanticKernel.Core/Functions/KernelFunctionFactory.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.ComponentModel; using System.Diagnostics.CodeAnalysis; +using System.Linq; using System.Reflection; using Microsoft.Extensions.Logging; @@ -107,6 +108,37 @@ public static KernelFunction CreateFromPrompt( string? templateFormat = null, IPromptTemplateFactory? promptTemplateFactory = null, ILoggerFactory? loggerFactory = null) => + KernelFunctionFromPrompt.Create( + promptTemplate, + CreateSettingsDictionary(executionSettings is null ? null : [executionSettings]), + functionName, + description, + templateFormat, + promptTemplateFactory, + loggerFactory); + + /// + /// Creates a instance for a prompt specified via a prompt template. + /// + /// Prompt template for the function. + /// Default execution settings to use when invoking this prompt function. + /// The name to use for the function. If null, it will default to a randomly generated name. + /// The description to use for the function. + /// The template format of . This must be provided if is not null. + /// + /// The to use when interpreting the into a . + /// If null, a default factory will be used. + /// + /// The to use for logging. If null, no logging will be performed. + /// The created for invoking the prompt. + public static KernelFunction CreateFromPrompt( + string promptTemplate, + IEnumerable? executionSettings, + string? functionName = null, + string? description = null, + string? templateFormat = null, + IPromptTemplateFactory? promptTemplateFactory = null, + ILoggerFactory? loggerFactory = null) => KernelFunctionFromPrompt.Create(promptTemplate, CreateSettingsDictionary(executionSettings), functionName, description, templateFormat, promptTemplateFactory, loggerFactory); /// @@ -141,10 +173,6 @@ public static KernelFunction CreateFromPrompt( /// Wraps the specified settings into a dictionary with the default service ID as the key. /// [return: NotNullIfNotNull(nameof(settings))] - private static Dictionary? CreateSettingsDictionary(PromptExecutionSettings? settings) => - settings is null ? null : - new Dictionary(1) - { - { PromptExecutionSettings.DefaultServiceId, settings }, - }; + private static Dictionary? CreateSettingsDictionary(IEnumerable? settings) => + settings?.ToDictionary(s => s.ServiceId ?? 
PromptExecutionSettings.DefaultServiceId, s => s); } diff --git a/dotnet/src/SemanticKernel.Core/Functions/KernelFunctionFromPrompt.cs b/dotnet/src/SemanticKernel.Core/Functions/KernelFunctionFromPrompt.cs index 44a799a8c42a..1d357b05679f 100644 --- a/dotnet/src/SemanticKernel.Core/Functions/KernelFunctionFromPrompt.cs +++ b/dotnet/src/SemanticKernel.Core/Functions/KernelFunctionFromPrompt.cs @@ -123,38 +123,29 @@ protected override async ValueTask InvokeCoreAsync( { this.AddDefaultValues(arguments); - var result = await this.RenderPromptAsync(kernel, arguments, cancellationToken).ConfigureAwait(false); + var promptRenderingResult = await this.RenderPromptAsync(kernel, arguments, cancellationToken).ConfigureAwait(false); #pragma warning disable CS0612 // Events are deprecated - if (result.RenderedEventArgs?.Cancel is true) + if (promptRenderingResult.RenderedEventArgs?.Cancel is true) { throw new OperationCanceledException($"A {nameof(Kernel)}.{nameof(Kernel.PromptRendered)} event handler requested cancellation after prompt rendering."); } #pragma warning restore CS0612 // Events are deprecated // Return function result if it was set in prompt filter. - if (result.FunctionResult is not null) + if (promptRenderingResult.FunctionResult is not null) { - result.FunctionResult.RenderedPrompt = result.RenderedPrompt; - return result.FunctionResult; + promptRenderingResult.FunctionResult.RenderedPrompt = promptRenderingResult.RenderedPrompt; + return promptRenderingResult.FunctionResult; } - if (result.AIService is IChatCompletionService chatCompletion) + return promptRenderingResult.AIService switch { - var chatContent = await chatCompletion.GetChatMessageContentAsync(result.RenderedPrompt, result.ExecutionSettings, kernel, cancellationToken).ConfigureAwait(false); - this.CaptureUsageDetails(chatContent.ModelId, chatContent.Metadata, this._logger); - return new FunctionResult(this, chatContent, kernel.Culture, chatContent.Metadata) { RenderedPrompt = result.RenderedPrompt }; - } - - if (result.AIService is ITextGenerationService textGeneration) - { - var textContent = await textGeneration.GetTextContentWithDefaultParserAsync(result.RenderedPrompt, result.ExecutionSettings, kernel, cancellationToken).ConfigureAwait(false); - this.CaptureUsageDetails(textContent.ModelId, textContent.Metadata, this._logger); - return new FunctionResult(this, textContent, kernel.Culture, textContent.Metadata) { RenderedPrompt = result.RenderedPrompt }; - } - - // The service selector didn't find an appropriate service. This should only happen with a poorly implemented selector. - throw new NotSupportedException($"The AI service {result.AIService.GetType()} is not supported. Supported services are {typeof(IChatCompletionService)} and {typeof(ITextGenerationService)}"); + IChatCompletionService chatCompletion => await this.GetChatCompletionResultAsync(chatCompletion, kernel, promptRenderingResult, cancellationToken).ConfigureAwait(false), + ITextGenerationService textGeneration => await this.GetTextGenerationResultAsync(textGeneration, kernel, promptRenderingResult, cancellationToken).ConfigureAwait(false), + // The service selector didn't find an appropriate service. This should only happen with a poorly implemented selector. + _ => throw new NotSupportedException($"The AI service {promptRenderingResult.AIService.GetType()} is not supported. Supported services are {typeof(IChatCompletionService)} and {typeof(ITextGenerationService)}") + }; } /// @@ -449,5 +440,67 @@ private void CaptureUsageDetails(string? 
modelId, IReadOnlyDictionary GetChatCompletionResultAsync( + IChatCompletionService chatCompletion, + Kernel kernel, + PromptRenderingResult promptRenderingResult, + CancellationToken cancellationToken) + { + var chatContents = await chatCompletion.GetChatMessageContentsAsync( + promptRenderingResult.RenderedPrompt, + promptRenderingResult.ExecutionSettings, + kernel, + cancellationToken).ConfigureAwait(false); + + if (chatContents is { Count: 0 }) + { + return new FunctionResult(this, culture: kernel.Culture) { RenderedPrompt = promptRenderingResult.RenderedPrompt }; + } + + // Usage details are global and duplicated for each chat message content, use first one to get usage information + var chatContent = chatContents[0]; + this.CaptureUsageDetails(chatContent.ModelId, chatContent.Metadata, this._logger); + + // If collection has one element, return single result + if (chatContents.Count == 1) + { + return new FunctionResult(this, chatContent, kernel.Culture, chatContent.Metadata) { RenderedPrompt = promptRenderingResult.RenderedPrompt }; + } + + // Otherwise, return multiple results + return new FunctionResult(this, chatContents, kernel.Culture) { RenderedPrompt = promptRenderingResult.RenderedPrompt }; + } + + private async Task GetTextGenerationResultAsync( + ITextGenerationService textGeneration, + Kernel kernel, + PromptRenderingResult promptRenderingResult, + CancellationToken cancellationToken) + { + var textContents = await textGeneration.GetTextContentsWithDefaultParserAsync( + promptRenderingResult.RenderedPrompt, + promptRenderingResult.ExecutionSettings, + kernel, + cancellationToken).ConfigureAwait(false); + + if (textContents is { Count: 0 }) + { + return new FunctionResult(this, culture: kernel.Culture) { RenderedPrompt = promptRenderingResult.RenderedPrompt }; + } + + // Usage details are global and duplicated for each text content, use first one to get usage information + var textContent = textContents[0]; + this.CaptureUsageDetails(textContent.ModelId, textContent.Metadata, this._logger); + + // If collection has one element, return single result + if (textContents.Count == 1) + { + return new FunctionResult(this, textContent, kernel.Culture, textContent.Metadata) { RenderedPrompt = promptRenderingResult.RenderedPrompt }; + } + + // Otherwise, return multiple results + return new FunctionResult(this, textContents, kernel.Culture) { RenderedPrompt = promptRenderingResult.RenderedPrompt }; + } + #endregion } diff --git a/dotnet/src/SemanticKernel.Core/KernelExtensions.cs b/dotnet/src/SemanticKernel.Core/KernelExtensions.cs index a05340a64775..6a96395cedea 100644 --- a/dotnet/src/SemanticKernel.Core/KernelExtensions.cs +++ b/dotnet/src/SemanticKernel.Core/KernelExtensions.cs @@ -109,6 +109,42 @@ public static KernelFunction CreateFunctionFromPrompt( kernel.LoggerFactory); } + /// + /// Creates a instance for a prompt specified via a prompt template. + /// + /// The containing services, plugins, and other state for use throughout the operation. + /// Prompt template for the function. + /// List of execution settings to use when invoking this prompt function. + /// The name to use for the function. If null, it will default to a randomly generated name. + /// The description to use for the function. + /// The template format of . This must be provided if is not null. + /// + /// The to use when interpreting the into a . + /// If null, a default factory will be used. + /// + /// The created for invoking the prompt. 
+ public static KernelFunction CreateFunctionFromPrompt( + this Kernel kernel, + string promptTemplate, + IEnumerable? executionSettings, + string? functionName = null, + string? description = null, + string? templateFormat = null, + IPromptTemplateFactory? promptTemplateFactory = null) + { + Verify.NotNull(kernel); + Verify.NotNull(promptTemplate); + + return KernelFunctionFactory.CreateFromPrompt( + promptTemplate, + executionSettings, + functionName, + description, + templateFormat, + promptTemplateFactory, + kernel.LoggerFactory); + } + /// /// Creates a instance for a prompt specified via a prompt template configuration. /// diff --git a/dotnet/src/SemanticKernel.Core/PromptTemplate/EchoPromptTemplate.cs b/dotnet/src/SemanticKernel.Core/PromptTemplate/EchoPromptTemplate.cs new file mode 100644 index 000000000000..c66b5db31566 --- /dev/null +++ b/dotnet/src/SemanticKernel.Core/PromptTemplate/EchoPromptTemplate.cs @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Threading; +using System.Threading.Tasks; + +namespace Microsoft.SemanticKernel; + +/// +/// Implementation of that just returns the prompt template. +/// +internal sealed class EchoPromptTemplate : IPromptTemplate +{ + private readonly PromptTemplateConfig _promptConfig; + private readonly Task _renderResult; + + /// + /// Constructor for . + /// + /// Prompt template configuration + internal EchoPromptTemplate(PromptTemplateConfig promptConfig) + { + Verify.NotNull(promptConfig, nameof(promptConfig)); + Verify.NotNull(promptConfig.Template, nameof(promptConfig.Template)); + + this._promptConfig = promptConfig; + this._renderResult = Task.FromResult(this._promptConfig.Template); + } + + /// +#pragma warning disable VSTHRD003 // Avoid awaiting foreign Tasks + public Task RenderAsync(Kernel kernel, KernelArguments? arguments = null, CancellationToken cancellationToken = default) => this._renderResult; +#pragma warning restore VSTHRD003 // Avoid awaiting foreign Tasks +} diff --git a/dotnet/src/SemanticKernel.Core/PromptTemplate/EchoPromptTemplateFactory.cs b/dotnet/src/SemanticKernel.Core/PromptTemplate/EchoPromptTemplateFactory.cs new file mode 100644 index 000000000000..fe7697d19547 --- /dev/null +++ b/dotnet/src/SemanticKernel.Core/PromptTemplate/EchoPromptTemplateFactory.cs @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Diagnostics.CodeAnalysis; + +namespace Microsoft.SemanticKernel; + +/// +/// Provides an implementation of which creates no operation instances of . +/// +public sealed class EchoPromptTemplateFactory : IPromptTemplateFactory +{ + /// + /// Singleton instance of . + /// + public static EchoPromptTemplateFactory Instance { get; } = new EchoPromptTemplateFactory(); + + /// + public bool TryCreate(PromptTemplateConfig templateConfig, [NotNullWhen(true)] out IPromptTemplate? result) + { + result = new EchoPromptTemplate(templateConfig); + + return true; + } +} diff --git a/dotnet/src/SemanticKernel.Core/PromptTemplate/KernelPromptTemplate.cs b/dotnet/src/SemanticKernel.Core/PromptTemplate/KernelPromptTemplate.cs index 132e18bc2edb..83abe231becb 100644 --- a/dotnet/src/SemanticKernel.Core/PromptTemplate/KernelPromptTemplate.cs +++ b/dotnet/src/SemanticKernel.Core/PromptTemplate/KernelPromptTemplate.cs @@ -27,7 +27,7 @@ namespace Microsoft.SemanticKernel; internal sealed class KernelPromptTemplate : IPromptTemplate { /// - /// Constructor for PromptTemplate. + /// Constructor for . 
/// /// Prompt template configuration /// Flag indicating whether to allow potentially dangerous content to be inserted into the prompt diff --git a/dotnet/src/SemanticKernel.UnitTests/AI/ChatCompletion/StreamingKernelContentItemCollectionTests.cs b/dotnet/src/SemanticKernel.UnitTests/AI/ChatCompletion/StreamingKernelContentItemCollectionTests.cs new file mode 100644 index 000000000000..b85c61e01546 --- /dev/null +++ b/dotnet/src/SemanticKernel.UnitTests/AI/ChatCompletion/StreamingKernelContentItemCollectionTests.cs @@ -0,0 +1,218 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections; +using System.Collections.Generic; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.ChatCompletion; +using Xunit; + +namespace SemanticKernel.UnitTests.AI.ChatCompletion; + +public class StreamingKernelContentItemCollectionTests +{ + [Fact] + public void ItShouldBeEmptyByDefault() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + + // Assert + Assert.Empty(collection); + } + + [Fact] + public void ItShouldBePossibleToAddItemToTheCollection() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + var item = new StreamingTextContent("fake-text"); + + // Act + collection.Add(item); + + // Assert + Assert.Single(collection); + Assert.Same(item, collection[0]); + } + + [Fact] + public void ItShouldBePossibleToAccessItemByIndex() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + + var item1 = new StreamingTextContent("fake-text"); + collection.Add(item1); + + // Act + var retrievedItem = collection[0]; + + // Assert + Assert.Same(item1, retrievedItem); + } + + [Fact] + public void ItShouldBeEmptyAfterClear() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + collection.Add(new StreamingTextContent("fake-text")); + + // Act + collection.Clear(); + + // Assert + Assert.Empty(collection); + } + + [Fact] + public void ItShouldContainItemAfterAdd() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + var item = new StreamingTextContent("fake-text"); + + // Act + collection.Add(item); + + // Assert + Assert.Contains(item, collection); + } + + [Fact] + public void ItShouldCopyItemsToAnArray() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + var item1 = new StreamingTextContent("fake-text1"); + var item2 = new StreamingTextContent("fake-text2"); + collection.Add(item1); + collection.Add(item2); + + // Act + var array = new StreamingKernelContent[2]; + collection.CopyTo(array, 0); + + // Assert + Assert.Equal(new[] { item1, item2 }, array); + } + + [Fact] + public void ItShouldReturnIndexOfItem() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + var item1 = new StreamingTextContent("fake-text1"); + var item2 = new StreamingTextContent("fake-text2"); + collection.Add(item1); + collection.Add(item2); + + // Act + var index = collection.IndexOf(item2); + + // Assert + Assert.Equal(1, index); + } + + [Fact] + public void ItShouldInsertItemIntoCollection() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + var item1 = new StreamingTextContent("fake-text1"); + var item2 = new StreamingTextContent("fake-text2"); + collection.Add(item1); + + // Act + collection.Insert(0, item2); + + // Assert + Assert.Equal(new[] { item2, item1 }, collection); + } + + [Fact] + public void ItShouldRemoveItemFromCollection() + { + // Arrange + var collection = new 
StreamingKernelContentItemCollection(); + var item1 = new StreamingTextContent("fake-text1"); + var item2 = new StreamingTextContent("fake-text2"); + collection.Add(item1); + collection.Add(item2); + + // Act + collection.Remove(item1); + + // Assert + Assert.Equal(new[] { item2 }, collection); + } + + [Fact] + public void ItShouldRemoveItemAtSpecifiedIndex() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + var item1 = new StreamingTextContent("fake-text1"); + var item2 = new StreamingTextContent("fake-text2"); + collection.Add(item1); + collection.Add(item2); + + // Act + collection.RemoveAt(0); + + // Assert + Assert.Equal(new[] { item2 }, collection); + } + + [Fact] + public void ItIsNotReadOnly() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + + // Assert + Assert.False(((ICollection)collection).IsReadOnly); + } + + [Fact] + public void ItShouldReturnEnumerator() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + var item1 = new StreamingTextContent("fake-text1"); + var item2 = new StreamingTextContent("fake-text2"); + collection.Add(item1); + collection.Add(item2); + + // Act + var enumerator = ((IEnumerable)collection).GetEnumerator(); + + // Assert + Assert.True(enumerator.MoveNext()); + Assert.Same(item1, enumerator.Current); + Assert.True(enumerator.MoveNext()); + Assert.Same(item2, enumerator.Current); + Assert.False(enumerator.MoveNext()); + } + + [Fact] + public void ItShouldReturnGenericEnumerator() + { + // Arrange + var collection = new StreamingKernelContentItemCollection(); + var item1 = new StreamingTextContent("fake-text1"); + var item2 = new StreamingTextContent("fake-text2"); + collection.Add(item1); + collection.Add(item2); + + // Act + var enumerator = ((IEnumerable)collection).GetEnumerator(); + + // Assert + Assert.True(enumerator.MoveNext()); + Assert.Same(item1, enumerator.Current); + Assert.True(enumerator.MoveNext()); + Assert.Same(item2, enumerator.Current); + Assert.False(enumerator.MoveNext()); + } +} diff --git a/dotnet/src/SemanticKernel.UnitTests/AI/PromptExecutionSettingsTests.cs b/dotnet/src/SemanticKernel.UnitTests/AI/PromptExecutionSettingsTests.cs index 83257b701112..dd822a091175 100644 --- a/dotnet/src/SemanticKernel.UnitTests/AI/PromptExecutionSettingsTests.cs +++ b/dotnet/src/SemanticKernel.UnitTests/AI/PromptExecutionSettingsTests.cs @@ -14,6 +14,8 @@ public void PromptExecutionSettingsCloneWorksAsExpected() // Arrange string configPayload = """ { + "model_id": "gpt-3", + "service_id": "service-1", "max_tokens": 60, "temperature": 0.5, "top_p": 0.0, @@ -30,6 +32,36 @@ public void PromptExecutionSettingsCloneWorksAsExpected() Assert.NotNull(clone); Assert.Equal(executionSettings.ModelId, clone.ModelId); Assert.Equivalent(executionSettings.ExtensionData, clone.ExtensionData); + Assert.Equal(executionSettings.ServiceId, clone.ServiceId); + } + + [Fact] + public void PromptExecutionSettingsSerializationWorksAsExpected() + { + // Arrange + string configPayload = """ + { + "model_id": "gpt-3", + "service_id": "service-1", + "max_tokens": 60, + "temperature": 0.5, + "top_p": 0.0, + "presence_penalty": 0.0, + "frequency_penalty": 0.0 + } + """; + + // Act + var executionSettings = JsonSerializer.Deserialize(configPayload); + + // Assert + Assert.NotNull(executionSettings); + Assert.Equal("gpt-3", executionSettings.ModelId); + Assert.Equal("service-1", executionSettings.ServiceId); + Assert.Equal(60, 
((JsonElement)executionSettings.ExtensionData!["max_tokens"]).GetInt32()); + Assert.Equal(0.5, ((JsonElement)executionSettings.ExtensionData!["temperature"]).GetDouble()); + Assert.Equal(0.0, ((JsonElement)executionSettings.ExtensionData!["top_p"]).GetDouble()); + Assert.Equal(0.0, ((JsonElement)executionSettings.ExtensionData!["presence_penalty"]).GetDouble()); } [Fact] diff --git a/dotnet/src/SemanticKernel.UnitTests/Contents/ChatMessageContentTests.cs b/dotnet/src/SemanticKernel.UnitTests/Contents/ChatMessageContentTests.cs index a25376128f2d..fdbd4cae0524 100644 --- a/dotnet/src/SemanticKernel.UnitTests/Contents/ChatMessageContentTests.cs +++ b/dotnet/src/SemanticKernel.UnitTests/Contents/ChatMessageContentTests.cs @@ -55,8 +55,10 @@ public void ContentPropertySetterShouldAddTextContentToItemsCollection() Assert.Contains(sut.Items, item => item is TextContent textContent && textContent.Text == "fake-content"); } - [Fact] - public void ContentPropertySetterShouldUpdateContentOfFirstTextContentItem() + [Theory] + [InlineData(null)] + [InlineData("fake-content-1-update")] + public void ContentPropertySetterShouldUpdateContentOfFirstTextContentItem(string? content) { // Arrange var items = new ChatMessageContentItemCollection @@ -68,10 +70,23 @@ public void ContentPropertySetterShouldUpdateContentOfFirstTextContentItem() var sut = new ChatMessageContent(AuthorRole.User, items: items) { - Content = "fake-content-1-update" + Content = content + }; + + Assert.Equal(content, ((TextContent)sut.Items[1]).Text); + } + + [Fact] + public void ContentPropertySetterShouldNotAddTextContentToItemsCollection() + { + // Arrange + var sut = new ChatMessageContent(AuthorRole.User, content: null) + { + Content = null }; - Assert.Equal("fake-content-1-update", ((TextContent)sut.Items[1]).Text); + // Assert + Assert.Empty(sut.Items); } [Fact] diff --git a/dotnet/src/SemanticKernel.UnitTests/Contents/FunctionCallContentBuilderTests.cs b/dotnet/src/SemanticKernel.UnitTests/Contents/FunctionCallContentBuilderTests.cs new file mode 100644 index 000000000000..452f4e9fbca7 --- /dev/null +++ b/dotnet/src/SemanticKernel.UnitTests/Contents/FunctionCallContentBuilderTests.cs @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Linq; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.ChatCompletion; +using Xunit; + +namespace SemanticKernel.UnitTests.Contents; + +public class FunctionCallContentBuilderTests +{ + [Fact] + public void ItShouldBuildFunctionCallContentForOneFunction() + { + // Arrange + var sut = new FunctionCallContentBuilder(); + + // Act + var update1 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 1, functionCallIndex: 2, callId: "f_101", name: null, arguments: null); + sut.Append(update1); + + var update2 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 1, functionCallIndex: 2, callId: null, name: "WeatherUtils-GetTemperature", arguments: null); + sut.Append(update2); + + var update3 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 1, functionCallIndex: 2, callId: null, name: null, arguments: "{\"city\":"); + sut.Append(update3); + + var update4 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 1, functionCallIndex: 2, callId: null, name: null, arguments: "\"Seattle\"}"); + sut.Append(update4); + + var functionCalls = sut.Build(); + + // Assert + var functionCall = Assert.Single(functionCalls); + + Assert.Equal("f_101", functionCall.Id); + Assert.Equal("WeatherUtils", functionCall.PluginName); + Assert.Equal("GetTemperature", functionCall.FunctionName); + + Assert.NotNull(functionCall.Arguments); + Assert.Equal("Seattle", functionCall.Arguments["city"]); + + Assert.Null(functionCall.Exception); + } + + [Fact] + public void ItShouldBuildFunctionCallContentForManyFunctions() + { + // Arrange + var sut = new FunctionCallContentBuilder(); + + // Act + var f1_update1 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 0, functionCallIndex: 1, callId: "f_1", name: "WeatherUtils-GetTemperature", arguments: null); + sut.Append(f1_update1); + + var f2_update1 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 0, functionCallIndex: 2, callId: null, name: "WeatherUtils-GetHumidity", arguments: null); + sut.Append(f2_update1); + + var f2_update2 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 0, functionCallIndex: 2, callId: "f_2", name: null, arguments: null); + sut.Append(f2_update2); + + var f1_update2 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 0, functionCallIndex: 1, callId: null, name: null, arguments: "{\"city\":"); + sut.Append(f1_update2); + + var f2_update3 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 0, functionCallIndex: 2, callId: null, name: null, arguments: "{\"city\":"); + sut.Append(f2_update3); + + var f1_update3 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 0, functionCallIndex: 1, callId: null, name: null, arguments: "\"Seattle\"}"); + sut.Append(f1_update3); + + var f2_update4 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 0, functionCallIndex: 2, callId: null, name: null, arguments: "\"Georgia\"}"); + sut.Append(f2_update4); + + var functionCalls = sut.Build(); + + // Assert + Assert.Equal(2, functionCalls.Count); + + var functionCall1 = functionCalls.ElementAt(0); + Assert.Equal("f_1", functionCall1.Id); + Assert.Equal("WeatherUtils", functionCall1.PluginName); + Assert.Equal("GetTemperature", functionCall1.FunctionName); + Assert.Equal("Seattle", functionCall1.Arguments?["city"]); + Assert.Null(functionCall1.Exception); + + var functionCall2 = functionCalls.ElementAt(1); + Assert.Equal("f_2", functionCall2.Id); + Assert.Equal("WeatherUtils", functionCall2.PluginName); + Assert.Equal("GetHumidity", 
functionCall2.FunctionName); + Assert.Equal("Georgia", functionCall2.Arguments?["city"]); + Assert.Null(functionCall2.Exception); + } + + [Fact] + public void ItShouldCaptureArgumentsDeserializationException() + { + // Arrange + var sut = new FunctionCallContentBuilder(); + + // Act + var update1 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 1, functionCallIndex: 2, callId: "f_101", name: "WeatherUtils-GetTemperature", arguments: null); + sut.Append(update1); + + var update2 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 1, functionCallIndex: 2, callId: null, name: null, arguments: "{\"city\":"); + sut.Append(update2); + + // Invalid JSON - double closing braces - }} + var update3 = CreateStreamingContentWithFunctionCallUpdate(choiceIndex: 1, functionCallIndex: 2, callId: null, name: null, arguments: "\"Seattle\"}}"); + sut.Append(update3); + + var functionCalls = sut.Build(); + + // Assert + var functionCall = Assert.Single(functionCalls); + + Assert.Equal("f_101", functionCall.Id); + Assert.Equal("WeatherUtils", functionCall.PluginName); + Assert.Equal("GetTemperature", functionCall.FunctionName); + Assert.Null(functionCall.Arguments); + Assert.NotNull(functionCall.Exception); + } + + private static StreamingChatMessageContent CreateStreamingContentWithFunctionCallUpdate(int choiceIndex, int functionCallIndex, string? callId, string? name, string? arguments) + { + var content = new StreamingChatMessageContent(AuthorRole.Assistant, null); + + content.Items.Add(new StreamingFunctionCallUpdateContent + { + ChoiceIndex = choiceIndex, + FunctionCallIndex = functionCallIndex, + CallId = callId, + Name = name, + Arguments = arguments, + }); + + return content; + } +} diff --git a/dotnet/src/SemanticKernel.UnitTests/Contents/StreamingChatMessageContentTests.cs b/dotnet/src/SemanticKernel.UnitTests/Contents/StreamingChatMessageContentTests.cs new file mode 100644 index 000000000000..f7f7c5e43be7 --- /dev/null +++ b/dotnet/src/SemanticKernel.UnitTests/Contents/StreamingChatMessageContentTests.cs @@ -0,0 +1,161 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Text; +using Microsoft.SemanticKernel; +using Microsoft.SemanticKernel.ChatCompletion; +using Xunit; + +namespace SemanticKernel.UnitTests.Contents; +public class StreamingChatMessageContentTests +{ + [Fact] + public void ConstructorShouldAddTextContentToItemsCollectionIfContentProvided() + { + // Arrange & act + var sut = new StreamingChatMessageContent(AuthorRole.User, "fake-content"); + + // Assert + Assert.Single(sut.Items); + + Assert.Contains(sut.Items, item => item is StreamingTextContent textContent && textContent.Text == "fake-content"); + } + + [Fact] + public void ConstructorShouldNotAddTextContentToItemsCollectionIfNoContentProvided() + { + // Arrange & act + var sut = new StreamingChatMessageContent(AuthorRole.User, content: null); + + // Assert + Assert.Empty(sut.Items); + } + + [Fact] + public void ContentPropertySetterShouldAddTextContentToItemsCollection() + { + // Arrange + var sut = new StreamingChatMessageContent(AuthorRole.User, content: null) + { + Content = "fake-content" + }; + + // Assert + Assert.Single(sut.Items); + + Assert.Contains(sut.Items, item => item is StreamingTextContent textContent && textContent.Text == "fake-content"); + } + + [Fact] + public void ContentPropertySetterShouldNotAddTextContentToItemsCollection() + { + // Arrange + var sut = new StreamingChatMessageContent(AuthorRole.User, content: null) + { + Content = null + }; + + // Assert + Assert.Empty(sut.Items); + } + + [Theory] + [InlineData(null)] + [InlineData("content-update")] + public void ContentPropertySetterShouldUpdateContentOfFirstTextContentItem(string? content) + { + // Arrange + var items = new StreamingKernelContentItemCollection + { + new StreamingTextContent("fake-content-1"), + new StreamingTextContent("fake-content-2") + }; + + var sut = new StreamingChatMessageContent(AuthorRole.User, content: null); + sut.Items = items; + sut.Content = content; + + Assert.Equal(content, ((StreamingTextContent)sut.Items[0]).Text); + } + + [Fact] + public void ContentPropertyGetterShouldReturnNullIfThereAreNoTextContentItems() + { + // Arrange and act + var sut = new StreamingChatMessageContent(AuthorRole.User, content: null); + + // Assert + Assert.Null(sut.Content); + Assert.Equal(string.Empty, sut.ToString()); + } + + [Fact] + public void ContentPropertyGetterShouldReturnContentOfTextContentItem() + { + // Arrange + var sut = new StreamingChatMessageContent(AuthorRole.User, "fake-content"); + + // Act and assert + Assert.Equal("fake-content", sut.Content); + Assert.Equal("fake-content", sut.ToString()); + } + + [Fact] + public void ContentPropertyGetterShouldReturnContentOfTheFirstTextContentItem() + { + // Arrange + var items = new StreamingKernelContentItemCollection + { + new StreamingTextContent("fake-content-1"), + new StreamingTextContent("fake-content-2") + }; + + var sut = new StreamingChatMessageContent(AuthorRole.User, content: null) + { + Items = items + }; + + // Act and assert + Assert.Equal("fake-content-1", sut.Content); + } + + [Fact] + public void ItShouldBePossibleToSetAndGetEncodingEvenIfThereAreNoItems() + { + // Arrange + var sut = new StreamingChatMessageContent(AuthorRole.User, content: null) + { + Encoding = Encoding.UTF32 + }; + + // Assert + Assert.Empty(sut.Items); + Assert.Equal(Encoding.UTF32, sut.Encoding); + } + + [Fact] + public void EncodingPropertySetterShouldUpdateEncodingTextContentItem() + { + // Arrange + var sut = new StreamingChatMessageContent(AuthorRole.User, content: "fake-content") + { + Encoding = Encoding.UTF32 + }; + + // Assert
+ Assert.Single(sut.Items); + Assert.Equal(Encoding.UTF32, ((StreamingTextContent)sut.Items[0]).Encoding); + } + + [Fact] + public void EncodingPropertyGetterShouldReturnEncodingOfTextContentItem() + { + // Arrange + var sut = new StreamingChatMessageContent(AuthorRole.User, content: "fake-content"); + + // Act + ((StreamingTextContent)sut.Items[0]).Encoding = Encoding.Latin1; + + // Assert + Assert.Equal(Encoding.Latin1, sut.Encoding); + } +} diff --git a/dotnet/src/SemanticKernel.UnitTests/Functions/KernelArgumentsTests.cs b/dotnet/src/SemanticKernel.UnitTests/Functions/KernelArgumentsTests.cs index a9d1625e79e7..8899668fd573 100644 --- a/dotnet/src/SemanticKernel.UnitTests/Functions/KernelArgumentsTests.cs +++ b/dotnet/src/SemanticKernel.UnitTests/Functions/KernelArgumentsTests.cs @@ -1,6 +1,8 @@ // Copyright (c) Microsoft. All rights reserved. +using System; using System.Collections.Generic; +using System.Linq; using Microsoft.SemanticKernel; using Xunit; @@ -62,6 +64,43 @@ public void ItCanBeCreatedWithBothExecutionSettingsAndArguments() Assert.Equal("fake-value", argument.Value); } + [Fact] + public void ItCanBeCreatedWithMultipleExecutionSettingsAndArguments() + { + // Arrange + var executionSettings1 = new PromptExecutionSettings(); + var executionSettings2 = new PromptExecutionSettings() { ServiceId = "service-2" }; + var executionSettings3 = new PromptExecutionSettings() { ServiceId = "service-3" }; + + // Act + KernelArguments sut = new([executionSettings1, executionSettings2, executionSettings3]) { { "fake-key", "fake-value" } }; + + // Assert + Assert.Same(executionSettings1, sut.ExecutionSettings?[PromptExecutionSettings.DefaultServiceId]); + Assert.Same(executionSettings2, sut.ExecutionSettings?["service-2"]); + Assert.Same(executionSettings3, sut.ExecutionSettings?["service-3"]); + + var argument = Assert.Single(sut); + Assert.Equal("fake-key", argument.Key); + Assert.Equal("fake-value", argument.Value); + } + + [Theory] + [InlineData(null, null)] + [InlineData("default", null)] + [InlineData(null, "default")] + [InlineData("service1", null, "service1")] + [InlineData(null, "service2", "service2")] + [InlineData("service1", "service2", "service3", null, "service1")] + public void ItCannotBeCreatedWithMultipleExecutionSettingsWithClashingServiceIdOrWithoutServiceIdSet(params string?[] serviceIds) + { + // Arrange + var executionSettingsList = serviceIds?.Select(serviceId => new PromptExecutionSettings() { ServiceId = serviceId }).ToList(); + + // Act & Assert + Assert.Throws(() => new KernelArguments(executionSettingsList) { { "fake-key", "fake-value" } }); + } + [Fact] public void ItCanPerformCaseInsensitiveSearch() { diff --git a/dotnet/src/SemanticKernel.UnitTests/Functions/KernelExtensionsTests.cs b/dotnet/src/SemanticKernel.UnitTests/Functions/KernelExtensionsTests.cs index ea36d8864d17..57010c640b91 100644 --- a/dotnet/src/SemanticKernel.UnitTests/Functions/KernelExtensionsTests.cs +++ b/dotnet/src/SemanticKernel.UnitTests/Functions/KernelExtensionsTests.cs @@ -1,6 +1,12 @@ // Copyright (c) Microsoft. All rights reserved. 
+using System.Threading;
+using System.Threading.Tasks;
+using Microsoft.Extensions.DependencyInjection;
 using Microsoft.SemanticKernel;
+using Microsoft.SemanticKernel.ChatCompletion;
+using Microsoft.SemanticKernel.TextGeneration;
+using Moq;
 using Xunit;

 namespace SemanticKernel.UnitTests.Functions;

@@ -65,6 +71,37 @@ public void CreatePluginFromDescriptionAndFunctions()
         Assert.True(plugin.Contains("Function2"));
     }

+    [Fact]
+    public async Task CreateFunctionFromPromptWithMultipleSettingsUseCorrectServiceAsync()
+    {
+        // Arrange
+        var mockTextGeneration1 = new Mock<ITextGenerationService>();
+        var mockTextGeneration2 = new Mock<IChatCompletionService>();
+        var fakeTextContent = new TextContent("llmResult");
+        var fakeChatContent = new ChatMessageContent(AuthorRole.User, "content");
+
+        mockTextGeneration1.Setup(c => c.GetTextContentsAsync(It.IsAny<string>(), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>())).ReturnsAsync([fakeTextContent]);
+        mockTextGeneration2.Setup(c => c.GetChatMessageContentsAsync(It.IsAny<ChatHistory>(), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>())).ReturnsAsync([fakeChatContent]);
+
+        IKernelBuilder builder = Kernel.CreateBuilder();
+        builder.Services.AddKeyedSingleton<ITextGenerationService>("service1", mockTextGeneration1.Object);
+        builder.Services.AddKeyedSingleton<IChatCompletionService>("service2", mockTextGeneration2.Object);
+        builder.Services.AddKeyedSingleton<ITextGenerationService>("service3", mockTextGeneration1.Object);
+        Kernel kernel = builder.Build();
+
+        KernelFunction function = kernel.CreateFunctionFromPrompt("coolfunction", [
+            new PromptExecutionSettings { ServiceId = "service5" }, // Should ignore this as service5 is not registered
+            new PromptExecutionSettings { ServiceId = "service2" },
+        ]);
+
+        // Act
+        await kernel.InvokeAsync(function);
+
+        // Assert
+        mockTextGeneration1.Verify(a => a.GetTextContentsAsync("coolfunction", It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>()), Times.Never());
+        mockTextGeneration2.Verify(a => a.GetChatMessageContentsAsync(It.IsAny<ChatHistory>(), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>()), Times.Once());
+    }
+
     [Fact]
     public void ImportPluginFromFunctions()
     {
diff --git a/dotnet/src/SemanticKernel.UnitTests/Functions/KernelFunctionFromPromptTests.cs b/dotnet/src/SemanticKernel.UnitTests/Functions/KernelFunctionFromPromptTests.cs
index ae9838e77414..a1080983efc1 100644
--- a/dotnet/src/SemanticKernel.UnitTests/Functions/KernelFunctionFromPromptTests.cs
+++ b/dotnet/src/SemanticKernel.UnitTests/Functions/KernelFunctionFromPromptTests.cs
@@ -116,6 +116,60 @@ public async Task ItUsesServiceIdWhenProvidedAsync()
         mockTextGeneration2.Verify(a => a.GetTextContentsAsync("template", It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>()), Times.Never());
     }

+    [Fact]
+    public async Task ItUsesServiceIdWhenProvidedInMethodAsync()
+    {
+        // Arrange
+        var mockTextGeneration1 = new Mock<ITextGenerationService>();
+        var mockTextGeneration2 = new Mock<ITextGenerationService>();
+        var fakeTextContent = new TextContent("llmResult");
+
+        mockTextGeneration1.Setup(c => c.GetTextContentsAsync(It.IsAny<string>(), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>())).ReturnsAsync([fakeTextContent]);
+        mockTextGeneration2.Setup(c => c.GetTextContentsAsync(It.IsAny<string>(), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>())).ReturnsAsync([fakeTextContent]);
+
+        IKernelBuilder builder = Kernel.CreateBuilder();
+        builder.Services.AddKeyedSingleton<ITextGenerationService>("service1", mockTextGeneration1.Object);
+        builder.Services.AddKeyedSingleton<ITextGenerationService>("service2", mockTextGeneration2.Object);
+        Kernel kernel = builder.Build();
+
+        var func = kernel.CreateFunctionFromPrompt("my prompt", [new PromptExecutionSettings { ServiceId = "service2" }]);
+
+        // Act
+        await kernel.InvokeAsync(func);
+
+        // Assert
+        mockTextGeneration1.Verify(a => a.GetTextContentsAsync("my prompt", It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>()), Times.Never());
+        mockTextGeneration2.Verify(a => a.GetTextContentsAsync("my prompt", It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>()), Times.Once());
+    }
+
+    [Fact]
+    public async Task ItUsesChatServiceIdWhenProvidedInMethodAsync()
+    {
+        // Arrange
+        var mockTextGeneration1 = new Mock<ITextGenerationService>();
+        var mockTextGeneration2 = new Mock<IChatCompletionService>();
+        var fakeTextContent = new TextContent("llmResult");
+        var fakeChatContent = new ChatMessageContent(AuthorRole.User, "content");
+
+        mockTextGeneration1.Setup(c => c.GetTextContentsAsync(It.IsAny<string>(), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>())).ReturnsAsync([fakeTextContent]);
+        mockTextGeneration2.Setup(c => c.GetChatMessageContentsAsync(It.IsAny<ChatHistory>(), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>())).ReturnsAsync([fakeChatContent]);
+
+        IKernelBuilder builder = Kernel.CreateBuilder();
+        builder.Services.AddKeyedSingleton<ITextGenerationService>("service1", mockTextGeneration1.Object);
+        builder.Services.AddKeyedSingleton<IChatCompletionService>("service2", mockTextGeneration2.Object);
+        builder.Services.AddKeyedSingleton<ITextGenerationService>("service3", mockTextGeneration1.Object);
+        Kernel kernel = builder.Build();
+
+        var func = kernel.CreateFunctionFromPrompt("my prompt", [new PromptExecutionSettings { ServiceId = "service2" }]);
+
+        // Act
+        await kernel.InvokeAsync(func);
+
+        // Assert
+        mockTextGeneration1.Verify(a => a.GetTextContentsAsync("my prompt", It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>()), Times.Never());
+        mockTextGeneration2.Verify(a => a.GetChatMessageContentsAsync(It.IsAny<ChatHistory>(), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>()), Times.Once());
+    }
+
     [Fact]
     public async Task ItFailsIfInvalidServiceIdIsProvidedAsync()
     {
@@ -654,6 +708,197 @@ public async Task ItUsesPromptAsUserMessageAsync(KernelInvocationType invocation
         Assert.Equal("Test prompt as user message", messageContent.Content);
     }

+    [Theory]
+    [InlineData("semantic-kernel", "This is my prompt {{$input}}")]
+    [InlineData("handlebars", "This is my prompt {{input}}")]
+    public async Task ItUsesPromptWithEchoPromptTemplateFactoryAsync(string templateFormat, string template)
+    {
+        // Arrange
+        var mockTextGeneration = new Mock<ITextGenerationService>();
+        var fakeTextContent = new TextContent(template);
+
+        mockTextGeneration.Setup(c => c.GetTextContentsAsync(It.Is<string>(p => p.Equals(template, StringComparison.Ordinal)), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>())).ReturnsAsync([fakeTextContent]);
+
+        IKernelBuilder builder = Kernel.CreateBuilder();
+        builder.Services.AddKeyedSingleton<ITextGenerationService>("x", mockTextGeneration.Object);
+        Kernel kernel = builder.Build();
+
+        var promptConfig = new PromptTemplateConfig(template) { TemplateFormat = templateFormat };
+        var func = kernel.CreateFunctionFromPrompt(promptConfig, promptTemplateFactory: new EchoPromptTemplateFactory());
+        var args = new KernelArguments();
+        args["input"] = "Some Input";
+
+        // Act
+        var result = await kernel.InvokeAsync(func, args);
+
+        // Assert
+        mockTextGeneration.Verify(a => a.GetTextContentsAsync(template, It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>()), Times.Once());
+        Assert.Equal(template, result.GetValue<string>());
+    }
+
+    [Fact]
+    public async Task InvokePromptAsyncWithTextGenerationReturnsSingleResultAsync()
+    {
+        // Arrange
+        var expectedTextContent = new TextContent("text", "model-id", metadata: new Dictionary<string, object?> { { "key", "value" } });
+        var mockTextGenerationService = this.GetMockTextGenerationService(textContents: [expectedTextContent]);
+
+        KernelBuilder builder = new();
+        builder.Services.AddTransient<ITextGenerationService>((sp) => mockTextGenerationService.Object);
+        Kernel kernel = builder.Build();
+
+        // Act
+        var result = await kernel.InvokePromptAsync("Prompt");
+
+        // Assert
+        Assert.Equal("text", result.GetValue<string>());
+        Assert.Equal("text", result.GetValue<KernelContent>()!.ToString());
+
+        var actualTextContent = result.GetValue<TextContent>();
+
+        Assert.NotNull(actualTextContent);
+        Assert.Equal(result.Metadata, actualTextContent.Metadata);
+
+        Assert.Equal(expectedTextContent.ModelId, actualTextContent.ModelId);
+        Assert.Equal(expectedTextContent.Text, actualTextContent.Text);
+        Assert.Equal(expectedTextContent.Metadata, actualTextContent.Metadata);
+    }
+
+    [Fact]
+    public async Task InvokePromptAsyncWithTextGenerationReturnsMultipleResultsAsync()
+    {
+        // Arrange
+        List<TextContent> expectedTextContents =
+        [
+            new TextContent("text1", "model-id", metadata: new Dictionary<string, object?> { { "key1", "value1" } }),
+            new TextContent("text2", "model-id", metadata: new Dictionary<string, object?> { { "key2", "value2" } }),
+        ];
+
+        var mockTextGenerationService = this.GetMockTextGenerationService(textContents: expectedTextContents);
+
+        KernelBuilder builder = new();
+        builder.Services.AddTransient<ITextGenerationService>((sp) => mockTextGenerationService.Object);
+        Kernel kernel = builder.Build();
+
+        // Act
+        var result = await kernel.InvokePromptAsync("Prompt");
+
+        // Assert
+        Assert.Throws<InvalidCastException>(() => result.GetValue<TextContent>());
+        Assert.Throws<InvalidCastException>(() => result.GetValue<string>());
+
+        var actualTextContents = result.GetValue<IReadOnlyList<TextContent>>();
+
+        Assert.NotNull(actualTextContents);
+        Assert.Null(result.Metadata);
+
+        Assert.Equal(expectedTextContents.Count, actualTextContents.Count);
+
+        for (var i = 0; i < expectedTextContents.Count; i++)
+        {
+            Assert.Equal(expectedTextContents[i].ModelId, actualTextContents[i].ModelId);
+            Assert.Equal(expectedTextContents[i].Text, actualTextContents[i].Text);
+            Assert.Equal(expectedTextContents[i].Metadata, actualTextContents[i].Metadata);
+        }
+    }
+
+    [Fact]
+    public async Task InvokePromptAsyncWithChatCompletionReturnsSingleResultAsync()
+    {
+        // Arrange
+        var expectedChatMessageContent = new ChatMessageContent(AuthorRole.Assistant, "chat-message", "model-id", new Dictionary<string, object?> { { "key", "value" } });
+        var mockChatCompletionService = this.GetMockChatCompletionService(chatMessageContents: [expectedChatMessageContent]);
+
+        KernelBuilder builder = new();
+        builder.Services.AddTransient<IChatCompletionService>((sp) => mockChatCompletionService.Object);
+        Kernel kernel = builder.Build();
+
+        // Act
+        var result = await kernel.InvokePromptAsync("Prompt");
+
+        // Assert
+        Assert.Equal("chat-message", result.GetValue<string>());
+        Assert.Equal("chat-message", result.GetValue<KernelContent>()!.ToString());
+
+        var actualChatMessageContent = result.GetValue<ChatMessageContent>();
+
+        Assert.NotNull(actualChatMessageContent);
+        Assert.Equal(result.Metadata, expectedChatMessageContent.Metadata);
+
+        Assert.Equal(expectedChatMessageContent.ModelId, actualChatMessageContent.ModelId);
+        Assert.Equal(expectedChatMessageContent.Role, actualChatMessageContent.Role);
+        Assert.Equal(expectedChatMessageContent.Content, actualChatMessageContent.Content);
+        Assert.Equal(expectedChatMessageContent.Metadata, actualChatMessageContent.Metadata);
+    }
+
+    [Fact]
+    public async Task InvokePromptAsyncWithChatCompletionReturnsMultipleResultsAsync()
+    {
+        // Arrange
+        List<ChatMessageContent> expectedChatMessageContents =
+        [
+            new ChatMessageContent(AuthorRole.Assistant, "chat-message1", "model-id", new Dictionary<string, object?> { { "key1", "value1" } }),
+            new ChatMessageContent(AuthorRole.Assistant, "chat-message2", "model-id", new Dictionary<string, object?> { { "key2", "value2" } })
+        ];
+
+        var mockChatCompletionService = this.GetMockChatCompletionService(chatMessageContents: expectedChatMessageContents);
+
+        KernelBuilder builder = new();
+        builder.Services.AddTransient<IChatCompletionService>((sp) => mockChatCompletionService.Object);
+        Kernel kernel = builder.Build();
+
+        // Act
+        var result = await kernel.InvokePromptAsync("Prompt");
+
+        // Assert
+        Assert.Throws<InvalidCastException>(() => result.GetValue<ChatMessageContent>());
+        Assert.Throws<InvalidCastException>(() => result.GetValue<string>());
+
+        var actualChatMessageContents = result.GetValue<IReadOnlyList<ChatMessageContent>>();
+
+        Assert.NotNull(actualChatMessageContents);
+        Assert.Null(result.Metadata);
+
+        Assert.Equal(expectedChatMessageContents.Count, actualChatMessageContents.Count);
+
+        for (var i = 0; i < expectedChatMessageContents.Count; i++)
+        {
+            Assert.Equal(expectedChatMessageContents[i].ModelId, actualChatMessageContents[i].ModelId);
+            Assert.Equal(expectedChatMessageContents[i].Role, actualChatMessageContents[i].Role);
+            Assert.Equal(expectedChatMessageContents[i].Content, actualChatMessageContents[i].Content);
+            Assert.Equal(expectedChatMessageContents[i].Metadata, actualChatMessageContents[i].Metadata);
+        }
+    }
+
+    [Fact]
+    public async Task InvokePromptAsyncWithPromptFunctionInTemplateAndSingleResultAsync()
+    {
+        // Arrange
+        var expectedChatMessageContent = new ChatMessageContent(AuthorRole.Assistant, "chat-message", "model-id", new Dictionary<string, object?> { { "key", "value" } });
+        var mockChatCompletionService = this.GetMockChatCompletionService(chatMessageContents: [expectedChatMessageContent]);
+
+        KernelBuilder builder = new();
+        builder.Services.AddTransient<IChatCompletionService>((sp) => mockChatCompletionService.Object);
+        Kernel kernel = builder.Build();
+
+        var innerFunction = KernelFunctionFactory.CreateFromPrompt("Prompt", functionName: "GetData");
+        var plugin = KernelPluginFactory.CreateFromFunctions("MyPlugin", [innerFunction]);
+
+        kernel.Plugins.Add(plugin);
+
+        // Act
+        var result = await kernel.InvokePromptAsync("Data: {{MyPlugin.GetData}}");
+
+        // Assert
+        Assert.True(mockChatCompletionService.Invocations is { Count: 2 });
+
+        var lastInvocation = mockChatCompletionService.Invocations[^1];
+        var lastInvocationChatHistory = lastInvocation!.Arguments[0] as ChatHistory;
+
+        Assert.NotNull(lastInvocationChatHistory);
+        Assert.Equal("Data: chat-message", lastInvocationChatHistory[0].Content);
+    }
+
     public enum KernelInvocationType
     {
         InvokePrompt,
@@ -697,5 +942,27 @@ public Task<IReadOnlyList<TextContent>> GetTextContentsAsync(string prompt, Prom
         }
     }

+    private Mock<ITextGenerationService> GetMockTextGenerationService(IReadOnlyList<TextContent>? textContents = null)
+    {
+        var mockTextGenerationService = new Mock<ITextGenerationService>();
+
+        mockTextGenerationService
+            .Setup(l => l.GetTextContentsAsync(It.IsAny<string>(), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>()))
+            .Returns(Task.FromResult<IReadOnlyList<TextContent>>(textContents ?? [new TextContent("Default result")]));
+
+        return mockTextGenerationService;
+    }
+
+    private Mock<IChatCompletionService> GetMockChatCompletionService(IReadOnlyList<ChatMessageContent>? chatMessageContents = null)
+    {
+        var mockChatCompletionService = new Mock<IChatCompletionService>();
+
+        mockChatCompletionService
+            .Setup(l => l.GetChatMessageContentsAsync(It.IsAny<ChatHistory>(), It.IsAny<PromptExecutionSettings>(), It.IsAny<Kernel>(), It.IsAny<CancellationToken>()))
+            .Returns(Task.FromResult<IReadOnlyList<ChatMessageContent>>(chatMessageContents ?? [new(AuthorRole.Assistant, "Default result")]));
+
+        return mockChatCompletionService;
+    }
+
     #endregion
 }
diff --git a/dotnet/src/SemanticKernel.UnitTests/Functions/OrderedAIServiceSelectorTests.cs b/dotnet/src/SemanticKernel.UnitTests/Functions/OrderedAIServiceSelectorTests.cs
index 15b001c13c99..eafac8ac5ca3 100644
--- a/dotnet/src/SemanticKernel.UnitTests/Functions/OrderedAIServiceSelectorTests.cs
+++ b/dotnet/src/SemanticKernel.UnitTests/Functions/OrderedAIServiceSelectorTests.cs
@@ -109,6 +109,26 @@ public void ItThrowsAKernelExceptionForNotFoundService()
         Assert.Throws<KernelException>(() => serviceSelector.SelectAIService<ITextGenerationService>(kernel, function, []));
     }

+    [Fact]
+    public void ItGetsDefaultServiceForNotFoundModel()
+    {
+        // Arrange
+        IKernelBuilder builder = Kernel.CreateBuilder();
+        builder.Services.AddKeyedSingleton<ITextGenerationService>("service1", new TextGenerationService("model_id_1"));
+        builder.Services.AddKeyedSingleton<ITextGenerationService>("service2", new TextGenerationService("model_id_2"));
+        Kernel kernel = builder.Build();
+
+        var promptConfig = new PromptTemplateConfig() { Template = "Hello AI" };
+        promptConfig.AddExecutionSettings(new PromptExecutionSettings { ModelId = "notfound" });
+        var function = kernel.CreateFunctionFromPrompt(promptConfig);
+        var serviceSelector = new OrderedAIServiceSelector();
+
+        // Act
+        // Assert
+        (var aiService, var defaultExecutionSettings) = serviceSelector.SelectAIService<ITextGenerationService>(kernel, function, []);
+        Assert.Equal(kernel.GetRequiredService<ITextGenerationService>("service2"), aiService);
+    }
+
     [Fact]
     public void ItUsesDefaultServiceForNoExecutionSettings()
     {
diff --git a/dotnet/src/SemanticKernel.UnitTests/PromptTemplate/EchoPromptTemplateTests.cs b/dotnet/src/SemanticKernel.UnitTests/PromptTemplate/EchoPromptTemplateTests.cs
new file mode 100644
index 000000000000..3a4d6a7b68c0
--- /dev/null
+++ b/dotnet/src/SemanticKernel.UnitTests/PromptTemplate/EchoPromptTemplateTests.cs
@@ -0,0 +1,44 @@
+// Copyright (c) Microsoft. All rights reserved.
+
+using System.Threading.Tasks;
+using Microsoft.SemanticKernel;
+using Xunit;
+
+namespace SemanticKernel.UnitTests.PromptTemplate;
+
+public sealed class EchoPromptTemplateTests
+{
+    [Fact]
+    public async Task ItDoesNothingForSemanticKernelFormatAsync()
+    {
+        // Arrange
+        var template = """This {{$x11}} {{$a}}{{$missing}} test template {{p.bar $b}} and {{p.foo c='literal "c"' d = $d}} and {{p.baz ename=$e}}""";
+        var promptTemplateConfig = new PromptTemplateConfig(template);
+        var templateFactory = new EchoPromptTemplateFactory();
+
+        // Act
+        var target = templateFactory.Create(promptTemplateConfig);
+        var result = await target.RenderAsync(new Kernel());
+
+        // Assert
+        Assert.NotNull(result);
+        Assert.Equal(template, result);
+    }
+
+    [Fact]
+    public async Task ItDoesNothingForHandlebarsFormatAsync()
+    {
+        // Arrange
+        var template = """This {{x11}} {{a}}{{missing}} test template {{p.bar b}} and {{p.foo c='literal "c"' d = d}} and {{p.baz ename=e}}""";
+        var promptTemplateConfig = new PromptTemplateConfig(template) { TemplateFormat = "handlebars" };
+        var templateFactory = new EchoPromptTemplateFactory();
+
+        // Act
+        var target = templateFactory.Create(promptTemplateConfig);
+        var result = await target.RenderAsync(new Kernel());
+
+        // Assert
+        Assert.NotNull(result);
+        Assert.Equal(template, result);
+    }
+}
diff --git a/dotnet/src/SemanticKernel.UnitTests/PromptTemplate/PromptTemplateConfigTests.cs b/dotnet/src/SemanticKernel.UnitTests/PromptTemplate/PromptTemplateConfigTests.cs
index 3285ed6b819f..5fecdf71b8c3 100644
--- a/dotnet/src/SemanticKernel.UnitTests/PromptTemplate/PromptTemplateConfigTests.cs
+++ b/dotnet/src/SemanticKernel.UnitTests/PromptTemplate/PromptTemplateConfigTests.cs
@@ -105,6 +105,241 @@ public void DeserializingExpectMultipleModels()
         Assert.Equal(2, promptTemplateConfig.ExecutionSettings.Count);
     }

+    [Fact]
+    public void DeserializingDoesNotAutoSetServiceIdWhenNotProvided()
+    {
+        // Arrange
+        string configPayload = """
+            {
+                "schema": 1,
+                "description": "",
+                "execution_settings":
+                {
+                    "service1": {
+                        "model_id": "gpt-4",
+                        "max_tokens": 200,
+                        "temperature": 0.2,
+                        "top_p": 0.0,
+                        "presence_penalty": 0.0,
+                        "frequency_penalty": 0.0,
+                        "stop_sequences":
+                        [
+                            "Human",
+                            "AI"
+                        ]
+                    },
+                    "service2": {
+                        "model_id": "gpt-3.5_turbo",
+                        "max_tokens": 256,
+                        "temperature": 0.3,
+                        "top_p": 0.0,
+                        "presence_penalty": 0.0,
+                        "frequency_penalty": 0.0,
+                        "stop_sequences":
+                        [
+                            "Human",
+                            "AI"
+                        ]
+                    }
+                }
+            }
+            """;
+
+        // Act
+        var promptTemplateConfig = JsonSerializer.Deserialize<PromptTemplateConfig>(configPayload);
+
+        // Assert
+        Assert.NotNull(promptTemplateConfig);
+        Assert.Null(promptTemplateConfig.ExecutionSettings["service1"].ServiceId);
+        Assert.Null(promptTemplateConfig.ExecutionSettings["service2"].ServiceId);
+    }
+
+    [Fact]
+    public void DeserializingDoesNotAutoSetServiceIdWhenDefault()
+    {
+        // Arrange
+        string configPayload = """
+            {
+                "schema": 1,
+                "description": "",
+                "execution_settings":
+                {
+                    "default": {
+                        "model_id": "gpt-4",
+                        "max_tokens": 200,
+                        "temperature": 0.2,
+                        "top_p": 0.0,
+                        "presence_penalty": 0.0,
+                        "frequency_penalty": 0.0,
+                        "stop_sequences":
+                        [
+                            "Human",
+                            "AI"
+                        ]
+                    }
+                }
+            }
+            """;
+
+        // Act
+        var promptTemplateConfig = JsonSerializer.Deserialize<PromptTemplateConfig>(configPayload);
+
+        // Assert
+        Assert.NotNull(promptTemplateConfig);
+        Assert.NotNull(promptTemplateConfig.DefaultExecutionSettings);
+        Assert.Null(promptTemplateConfig.DefaultExecutionSettings?.ServiceId);
+    }
+
+    [Fact]
+    public void DeserializingServiceIdUnmatchingIndexShouldThrow()
+    {
+        // Arrange
+        string configPayload = """
+            {
+                "schema": 1,
+                "description": "",
+                "execution_settings":
+                {
+                    "service1": {
+                        "model_id": "gpt-4",
+                        "max_tokens": 200,
+                        "temperature": 0.2,
+                        "top_p": 0.0,
+                        "presence_penalty": 0.0,
+                        "frequency_penalty": 0.0,
+                        "stop_sequences":
+                        [
+                            "Human",
+                            "AI"
+                        ]
+                    },
+                    "service2": {
+                        "service_id": "service3",
+                        "model_id": "gpt-3.5_turbo",
+                        "max_tokens": 256,
+                        "temperature": 0.3,
+                        "top_p": 0.0,
+                        "presence_penalty": 0.0,
+                        "frequency_penalty": 0.0,
+                        "stop_sequences":
+                        [
+                            "Human",
+                            "AI"
+                        ]
+                    }
+                }
+            }
+            """;
+
+        // Act & Assert
+        var exception = Assert.Throws<ArgumentException>(() => JsonSerializer.Deserialize<PromptTemplateConfig>(configPayload));
+    }
+
+    [Fact]
+    public void ItCannotAddExecutionSettingsWithSameServiceId()
+    {
+        // Arrange
+        var settings = new PromptTemplateConfig();
+        settings.AddExecutionSettings(new PromptExecutionSettings(), "service1");
+
+        // Act & Assert
+        Assert.Throws<ArgumentException>(() => settings.AddExecutionSettings(new PromptExecutionSettings(), "service1"));
+    }
+
+    [Fact]
+    public void ItAddExecutionSettingsAndNeverOverwriteServiceId()
+    {
+        // Arrange
+        var promptTemplateConfig = new PromptTemplateConfig();
+        var settings1 = new PromptExecutionSettings { ModelId = "model-service-3", ServiceId = "should not override" };
+
+        // Act
+        promptTemplateConfig.AddExecutionSettings(new PromptExecutionSettings { ModelId = "model1" });
+        promptTemplateConfig.AddExecutionSettings(new PromptExecutionSettings { ModelId = "model2" }, "service1");
+        promptTemplateConfig.AddExecutionSettings(new PromptExecutionSettings { ServiceId = "service2", ModelId = "model-service-2" });
+        promptTemplateConfig.AddExecutionSettings(new PromptExecutionSettings { ServiceId = "service3", ModelId = "model-service-3" });
+        promptTemplateConfig.AddExecutionSettings(settings1);
+
+        // Assert
+        Assert.Equal("model1", promptTemplateConfig.ExecutionSettings["default"].ModelId);
+        Assert.Null(promptTemplateConfig.ExecutionSettings["default"].ServiceId);
+
+        Assert.Equal("model2", promptTemplateConfig.ExecutionSettings["service1"].ModelId);
+        Assert.Null(promptTemplateConfig.ExecutionSettings["service1"].ServiceId);
+
+        Assert.Equal("model-service-2", promptTemplateConfig.ExecutionSettings["service2"].ModelId);
+        Assert.Equal("service2", promptTemplateConfig.ExecutionSettings["service2"].ServiceId);
+
+        Assert.Equal("model-service-3", promptTemplateConfig.ExecutionSettings["service3"].ModelId);
+        Assert.Equal("service3", promptTemplateConfig.ExecutionSettings["service3"].ServiceId);
+
+        // Never changes settings id
+        Assert.Equal("should not override", settings1.ServiceId);
+        Assert.True(promptTemplateConfig.ExecutionSettings.ContainsKey("should not override"));
+    }
+
+    [Fact]
+    public void ItThrowsWhenServiceIdIsProvidedAndExecutionSettingsAlreadyHasAServiceIdPropertySet()
+    {
+        // Arrange
+        var promptTemplateConfig = new PromptTemplateConfig();
+        var settings = new PromptExecutionSettings { ModelId = "model-service-3", ServiceId = "service2" };
+
+        // Act & Assert
+        Assert.Throws<ArgumentException>(() => promptTemplateConfig.AddExecutionSettings(settings, "service1"));
+    }
+
+    [Fact]
+    public void DeserializingServiceIdSameIndexKeepsLast()
+    {
+        // Arrange
+        string configPayload = """
+            {
+                "schema": 1,
+                "description": "",
+                "execution_settings":
+                {
+                    "service1": {
+                        "model_id": "gpt-4",
+                        "max_tokens": 200,
+                        "temperature": 0.2,
+                        "top_p": 0.0,
+                        "presence_penalty": 0.0,
+                        "frequency_penalty": 0.0,
+                        "stop_sequences":
+                        [
+                            "Human",
+                            "AI"
+                        ]
+                    },
+                    "service1": {
+                        "model_id": "gpt-3.5_turbo",
+                        "max_tokens": 256,
+                        "temperature": 0.3,
+                        "top_p": 0.0,
+                        "presence_penalty": 0.0,
+                        "frequency_penalty": 0.0,
+                        "stop_sequences":
+                        [
+                            "Human",
+                            "AI"
+                        ]
+                    }
+                }
+            }
+            """;
+
+        // Act
+        var promptTemplate = JsonSerializer.Deserialize<PromptTemplateConfig>(configPayload);
+
+        // Assert
+        Assert.NotNull(promptTemplate);
+        Assert.NotNull(promptTemplate.ExecutionSettings);
+        Assert.Single(promptTemplate.ExecutionSettings);
+        Assert.Null(promptTemplate.ExecutionSettings["service1"].ServiceId);
+        Assert.Equal("gpt-3.5_turbo", promptTemplate.ExecutionSettings["service1"].ModelId);
+    }
+
     [Fact]
     public void DeserializingExpectCompletion()
     {
diff --git a/python/.env.example b/python/.env.example
index d6a0e18dff5b..39d1c10a60df 100644
--- a/python/.env.example
+++ b/python/.env.example
@@ -1,10 +1,14 @@
 OPENAI_API_KEY=""
+OPEN_AI_CHAT_MODEL_ID=""
+OPEN_AI_TEXT_MODEL_ID=""
+OPEN_AI_EMBEDDING_MODEL_ID=""
 OPENAI_ORG_ID=""
-AZURE_OPENAI_SYSTEM_MESSAGE="You are an AI assistant that helps people find information"
-AZURE_OPENAI_API_VERSION="2024-02-15-preview"
-AZURE_OPENAI_DEPLOYMENT_NAME=""
+AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=""
+AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=""
+AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=""
 AZURE_OPENAI_ENDPOINT=""
 AZURE_OPENAI_API_KEY=""
+AZURE_OPENAI_API_VERSION="2024-02-15-preview"
 AZURE_OPENAI_TEMPERATURE=0
 AZURE_OPENAI_MAX_TOKENS=1000
 AZURE_OPENAI_TOP_P=1.0
diff --git a/python/DEV_SETUP.md b/python/DEV_SETUP.md
index 3ccb1b3cf854..5b95d7400863 100644
--- a/python/DEV_SETUP.md
+++ b/python/DEV_SETUP.md
@@ -36,7 +36,7 @@ This optional `env_file_path` parameter will allow pydantic settings to use the

 If using the second method, we suggest adding a copy of the `.env` file under these folders:

-- [python/tests](tests)
+- [./tests](./tests)
 - [./samples/getting_started](./samples/getting_started).

 ## System setup

@@ -139,19 +139,24 @@ poetry run pre-commit install

 ## VSCode Setup

+Open the [workspace](https://code.visualstudio.com/docs/editor/workspaces) in VSCode.
+> The Python workspace is the `./python` folder if you are at the root of the repository.
+
 Open any of the `.py` files in the project and run the `Python: Select Interpreter`
 command from the command palette. Make sure the virtual env (venv) created by
 `poetry` is selected.
 The python you're looking for should be under `~/.cache/pypoetry/virtualenvs/semantic-kernel-.../bin/python`.

-If prompted, install `ruff` and `black` (these should have been installed as part of `poetry install`).
+If prompted, install `ruff`. (It should have been installed as part of `poetry install`).
+
+You also need to install the `ruff` extension in VSCode so that auto-formatting uses the `ruff` formatter on save.
+Read more about the extension here: https://github.com/astral-sh/ruff-vscode

 ## Tests

 You can run the unit tests under the [tests/unit](tests/unit/) folder.

 ```bash
-cd python
 poetry install
 poetry run pytest tests/unit
 ```

@@ -162,7 +167,6 @@ Alternatively, you can run them using VSCode Tasks. Open the command palette
 You can run the integration tests under the [tests/integration](tests/integration/) folder.

 ```bash
-cd python
 poetry install
 poetry run pytest tests/integration
 ```

@@ -170,7 +174,6 @@ You can run the integration tests under the [tests/integratio
 You can also run all the tests together under the [tests](tests/) folder.

 ```bash
-cd python
 poetry install
 poetry run pytest tests
 ```

@@ -327,7 +330,6 @@ Ideally you should run these checks before committing any changes, use `poetry r
 We try to maintain a high code coverage for the project.
To run the code coverage on the unit tests, you can use the following command: ```bash - cd python poetry run pytest --cov=semantic_kernel --cov-report=term-missing:skip-covered tests/unit/ ``` or use the following task (using `Ctrl+Shift+P`): diff --git a/python/poetry.lock b/python/poetry.lock index 06db9d210ba1..5b3a8f2038b5 100644 --- a/python/poetry.lock +++ b/python/poetry.lock @@ -154,63 +154,6 @@ files = [ {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, ] -[[package]] -name = "argon2-cffi" -version = "23.1.0" -description = "Argon2 for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea"}, - {file = "argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08"}, -] - -[package.dependencies] -argon2-cffi-bindings = "*" - -[package.extras] -dev = ["argon2-cffi[tests,typing]", "tox (>4)"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-copybutton", "sphinx-notfound-page"] -tests = ["hypothesis", "pytest"] -typing = ["mypy"] - -[[package]] -name = "argon2-cffi-bindings" -version = "21.2.0" -description = "Low-level CFFI bindings for Argon2" -optional = false -python-versions = ">=3.6" -files = [ - {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, - {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, - {file = 
"argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, -] - -[package.dependencies] -cffi = ">=1.0.1" - -[package.extras] -dev = ["cogapp", "pre-commit", "pytest", "wheel"] -tests = ["pytest"] - [[package]] name = "asgiref" version = "3.8.1" @@ -278,13 +221,13 @@ tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "p [[package]] name = "authlib" -version = "1.3.0" +version = "1.3.1" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
optional = false python-versions = ">=3.8" files = [ - {file = "Authlib-1.3.0-py2.py3-none-any.whl", hash = "sha256:9637e4de1fb498310a56900b3e2043a206b03cb11c05422014b0302cbc814be3"}, - {file = "Authlib-1.3.0.tar.gz", hash = "sha256:959ea62a5b7b5123c5059758296122b57cd2585ae2ed1c0622c21b371ffdae06"}, + {file = "Authlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:d35800b973099bbadc49b42b256ecb80041ad56b7fe1216a362c7943c088f377"}, + {file = "authlib-1.3.1.tar.gz", hash = "sha256:7ae843f03c06c5c0debd63c9db91f9fda64fa62a42a77419fa15fbb7e7a58917"}, ] [package.dependencies] @@ -303,13 +246,13 @@ files = [ [[package]] name = "azure-core" -version = "1.30.1" +version = "1.30.2" description = "Microsoft Azure Core Library for Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "azure-core-1.30.1.tar.gz", hash = "sha256:26273a254131f84269e8ea4464f3560c731f29c0c1f69ac99010845f239c1a8f"}, - {file = "azure_core-1.30.1-py3-none-any.whl", hash = "sha256:7c5ee397e48f281ec4dd773d67a0a47a0962ed6fa833036057f9ea067f688e74"}, + {file = "azure-core-1.30.2.tar.gz", hash = "sha256:a14dc210efcd608821aa472d9fb8e8d035d29b68993819147bc290a8ac224472"}, + {file = "azure_core-1.30.2-py3-none-any.whl", hash = "sha256:cf019c1ca832e96274ae85abd3d9f752397194d9fea3b41487290562ac8abe4a"}, ] [package.dependencies] @@ -337,13 +280,13 @@ typing-extensions = ">=4.6.0" [[package]] name = "azure-identity" -version = "1.16.0" +version = "1.16.1" description = "Microsoft Azure Identity Library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "azure-identity-1.16.0.tar.gz", hash = "sha256:6ff1d667cdcd81da1ceab42f80a0be63ca846629f518a922f7317a7e3c844e1b"}, - {file = "azure_identity-1.16.0-py3-none-any.whl", hash = "sha256:722fdb60b8fdd55fa44dc378b8072f4b419b56a5e54c0de391f644949f3a826f"}, + {file = "azure-identity-1.16.1.tar.gz", hash = "sha256:6d93f04468f240d59246d8afde3091494a5040d4f141cad0f49fc0c399d0d91e"}, + {file = "azure_identity-1.16.1-py3-none-any.whl", hash = "sha256:8fb07c25642cd4ac422559a8b50d3e77f73dcc2bbfaba419d06d6c9d7cff6726"}, ] [package.dependencies] @@ -368,26 +311,6 @@ azure-common = ">=1.1" azure-core = ">=1.28.0" isodate = ">=0.6.0" -[[package]] -name = "azure-storage-blob" -version = "12.20.0" -description = "Microsoft Azure Blob Storage Client Library for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "azure-storage-blob-12.20.0.tar.gz", hash = "sha256:eeb91256e41d4b5b9bad6a87fd0a8ade07dd58aa52344e2c8d2746e27a017d3b"}, - {file = "azure_storage_blob-12.20.0-py3-none-any.whl", hash = "sha256:de6b3bf3a90e9341a6bcb96a2ebe981dffff993e9045818f6549afea827a52a9"}, -] - -[package.dependencies] -azure-core = ">=1.28.0" -cryptography = ">=2.1.4" -isodate = ">=0.6.1" -typing-extensions = ">=4.6.0" - -[package.extras] -aio = ["azure-core[aio] (>=1.28.0)"] - [[package]] name = "backoff" version = "2.2.1" @@ -763,13 +686,13 @@ numpy = "*" [[package]] name = "chromadb" -version = "0.4.24" +version = "0.5.0" description = "Chroma." 
optional = false python-versions = ">=3.8" files = [ - {file = "chromadb-0.4.24-py3-none-any.whl", hash = "sha256:3a08e237a4ad28b5d176685bd22429a03717fe09d35022fb230d516108da01da"}, - {file = "chromadb-0.4.24.tar.gz", hash = "sha256:a5c80b4e4ad9b236ed2d4899a5b9e8002b489293f2881cb2cadab5b199ee1c72"}, + {file = "chromadb-0.5.0-py3-none-any.whl", hash = "sha256:8193dc65c143b61d8faf87f02c44ecfa778d471febd70de517f51c5d88a06009"}, + {file = "chromadb-0.5.0.tar.gz", hash = "sha256:7954af614a9ff7b2902ddbd0a162f33f7ec0669e2429903905c4f7876d1f766f"}, ] [package.dependencies] @@ -790,7 +713,6 @@ opentelemetry-sdk = ">=1.2.0" orjson = ">=3.9.12" overrides = ">=7.3.1" posthog = ">=2.4.0" -pulsar-client = ">=3.1.0" pydantic = ">=1.9" pypika = ">=0.48.9" PyYAML = ">=6.0.0" @@ -1477,69 +1399,61 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "grpcio" -version = "1.60.0" +version = "1.63.0" description = "HTTP/2-based RPC framework" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "grpcio-1.60.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:d020cfa595d1f8f5c6b343530cd3ca16ae5aefdd1e832b777f9f0eb105f5b139"}, - {file = "grpcio-1.60.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b98f43fcdb16172dec5f4b49f2fece4b16a99fd284d81c6bbac1b3b69fcbe0ff"}, - {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:20e7a4f7ded59097c84059d28230907cd97130fa74f4a8bfd1d8e5ba18c81491"}, - {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:452ca5b4afed30e7274445dd9b441a35ece656ec1600b77fff8c216fdf07df43"}, - {file = "grpcio-1.60.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43e636dc2ce9ece583b3e2ca41df5c983f4302eabc6d5f9cd04f0562ee8ec1ae"}, - {file = "grpcio-1.60.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e306b97966369b889985a562ede9d99180def39ad42c8014628dd3cc343f508"}, - {file = "grpcio-1.60.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f897c3b127532e6befdcf961c415c97f320d45614daf84deba0a54e64ea2457b"}, - {file = "grpcio-1.60.0-cp310-cp310-win32.whl", hash = "sha256:b87efe4a380887425bb15f220079aa8336276398dc33fce38c64d278164f963d"}, - {file = "grpcio-1.60.0-cp310-cp310-win_amd64.whl", hash = "sha256:a9c7b71211f066908e518a2ef7a5e211670761651039f0d6a80d8d40054047df"}, - {file = "grpcio-1.60.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:fb464479934778d7cc5baf463d959d361954d6533ad34c3a4f1d267e86ee25fd"}, - {file = "grpcio-1.60.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:4b44d7e39964e808b071714666a812049765b26b3ea48c4434a3b317bac82f14"}, - {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:90bdd76b3f04bdb21de5398b8a7c629676c81dfac290f5f19883857e9371d28c"}, - {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91229d7203f1ef0ab420c9b53fe2ca5c1fbeb34f69b3bc1b5089466237a4a134"}, - {file = "grpcio-1.60.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b36a2c6d4920ba88fa98075fdd58ff94ebeb8acc1215ae07d01a418af4c0253"}, - {file = "grpcio-1.60.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:297eef542156d6b15174a1231c2493ea9ea54af8d016b8ca7d5d9cc65cfcc444"}, - {file = "grpcio-1.60.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:87c9224acba0ad8bacddf427a1c2772e17ce50b3042a789547af27099c5f751d"}, - {file = "grpcio-1.60.0-cp311-cp311-win32.whl", hash = "sha256:95ae3e8e2c1b9bf671817f86f155c5da7d49a2289c5cf27a319458c3e025c320"}, - 
{file = "grpcio-1.60.0-cp311-cp311-win_amd64.whl", hash = "sha256:467a7d31554892eed2aa6c2d47ded1079fc40ea0b9601d9f79204afa8902274b"}, - {file = "grpcio-1.60.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:a7152fa6e597c20cb97923407cf0934e14224af42c2b8d915f48bc3ad2d9ac18"}, - {file = "grpcio-1.60.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:7db16dd4ea1b05ada504f08d0dca1cd9b926bed3770f50e715d087c6f00ad748"}, - {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:b0571a5aef36ba9177e262dc88a9240c866d903a62799e44fd4aae3f9a2ec17e"}, - {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fd9584bf1bccdfff1512719316efa77be235469e1e3295dce64538c4773840b"}, - {file = "grpcio-1.60.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6a478581b1a1a8fdf3318ecb5f4d0cda41cacdffe2b527c23707c9c1b8fdb55"}, - {file = "grpcio-1.60.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:77c8a317f0fd5a0a2be8ed5cbe5341537d5c00bb79b3bb27ba7c5378ba77dbca"}, - {file = "grpcio-1.60.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1c30bb23a41df95109db130a6cc1b974844300ae2e5d68dd4947aacba5985aa5"}, - {file = "grpcio-1.60.0-cp312-cp312-win32.whl", hash = "sha256:2aef56e85901c2397bd557c5ba514f84de1f0ae5dd132f5d5fed042858115951"}, - {file = "grpcio-1.60.0-cp312-cp312-win_amd64.whl", hash = "sha256:e381fe0c2aa6c03b056ad8f52f8efca7be29fb4d9ae2f8873520843b6039612a"}, - {file = "grpcio-1.60.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:92f88ca1b956eb8427a11bb8b4a0c0b2b03377235fc5102cb05e533b8693a415"}, - {file = "grpcio-1.60.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:e278eafb406f7e1b1b637c2cf51d3ad45883bb5bd1ca56bc05e4fc135dfdaa65"}, - {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:a48edde788b99214613e440fce495bbe2b1e142a7f214cce9e0832146c41e324"}, - {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de2ad69c9a094bf37c1102b5744c9aec6cf74d2b635558b779085d0263166454"}, - {file = "grpcio-1.60.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:073f959c6f570797272f4ee9464a9997eaf1e98c27cb680225b82b53390d61e6"}, - {file = "grpcio-1.60.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c826f93050c73e7769806f92e601e0efdb83ec8d7c76ddf45d514fee54e8e619"}, - {file = "grpcio-1.60.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9e30be89a75ee66aec7f9e60086fadb37ff8c0ba49a022887c28c134341f7179"}, - {file = "grpcio-1.60.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b0fb2d4801546598ac5cd18e3ec79c1a9af8b8f2a86283c55a5337c5aeca4b1b"}, - {file = "grpcio-1.60.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:9073513ec380434eb8d21970e1ab3161041de121f4018bbed3146839451a6d8e"}, - {file = "grpcio-1.60.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:74d7d9fa97809c5b892449b28a65ec2bfa458a4735ddad46074f9f7d9550ad13"}, - {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:1434ca77d6fed4ea312901122dc8da6c4389738bf5788f43efb19a838ac03ead"}, - {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e61e76020e0c332a98290323ecfec721c9544f5b739fab925b6e8cbe1944cf19"}, - {file = "grpcio-1.60.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675997222f2e2f22928fbba640824aebd43791116034f62006e19730715166c0"}, - {file = "grpcio-1.60.0-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:5208a57eae445ae84a219dfd8b56e04313445d146873117b5fa75f3245bc1390"}, - {file = "grpcio-1.60.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:428d699c8553c27e98f4d29fdc0f0edc50e9a8a7590bfd294d2edb0da7be3629"}, - {file = "grpcio-1.60.0-cp38-cp38-win32.whl", hash = "sha256:83f2292ae292ed5a47cdcb9821039ca8e88902923198f2193f13959360c01860"}, - {file = "grpcio-1.60.0-cp38-cp38-win_amd64.whl", hash = "sha256:705a68a973c4c76db5d369ed573fec3367d7d196673fa86614b33d8c8e9ebb08"}, - {file = "grpcio-1.60.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:c193109ca4070cdcaa6eff00fdb5a56233dc7610216d58fb81638f89f02e4968"}, - {file = "grpcio-1.60.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:676e4a44e740deaba0f4d95ba1d8c5c89a2fcc43d02c39f69450b1fa19d39590"}, - {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:5ff21e000ff2f658430bde5288cb1ac440ff15c0d7d18b5fb222f941b46cb0d2"}, - {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c86343cf9ff7b2514dd229bdd88ebba760bd8973dac192ae687ff75e39ebfab"}, - {file = "grpcio-1.60.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fd3b3968ffe7643144580f260f04d39d869fcc2cddb745deef078b09fd2b328"}, - {file = "grpcio-1.60.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:30943b9530fe3620e3b195c03130396cd0ee3a0d10a66c1bee715d1819001eaf"}, - {file = "grpcio-1.60.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b10241250cb77657ab315270b064a6c7f1add58af94befa20687e7c8d8603ae6"}, - {file = "grpcio-1.60.0-cp39-cp39-win32.whl", hash = "sha256:79a050889eb8d57a93ed21d9585bb63fca881666fc709f5d9f7f9372f5e7fd03"}, - {file = "grpcio-1.60.0-cp39-cp39-win_amd64.whl", hash = "sha256:8a97a681e82bc11a42d4372fe57898d270a2707f36c45c6676e49ce0d5c41353"}, - {file = "grpcio-1.60.0.tar.gz", hash = "sha256:2199165a1affb666aa24adf0c97436686d0a61bc5fc113c037701fb7c7fceb96"}, + {file = "grpcio-1.63.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:2e93aca840c29d4ab5db93f94ed0a0ca899e241f2e8aec6334ab3575dc46125c"}, + {file = "grpcio-1.63.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:91b73d3f1340fefa1e1716c8c1ec9930c676d6b10a3513ab6c26004cb02d8b3f"}, + {file = "grpcio-1.63.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:b3afbd9d6827fa6f475a4f91db55e441113f6d3eb9b7ebb8fb806e5bb6d6bd0d"}, + {file = "grpcio-1.63.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f3f6883ce54a7a5f47db43289a0a4c776487912de1a0e2cc83fdaec9685cc9f"}, + {file = "grpcio-1.63.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf8dae9cc0412cb86c8de5a8f3be395c5119a370f3ce2e69c8b7d46bb9872c8d"}, + {file = "grpcio-1.63.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:08e1559fd3b3b4468486b26b0af64a3904a8dbc78d8d936af9c1cf9636eb3e8b"}, + {file = "grpcio-1.63.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5c039ef01516039fa39da8a8a43a95b64e288f79f42a17e6c2904a02a319b357"}, + {file = "grpcio-1.63.0-cp310-cp310-win32.whl", hash = "sha256:ad2ac8903b2eae071055a927ef74121ed52d69468e91d9bcbd028bd0e554be6d"}, + {file = "grpcio-1.63.0-cp310-cp310-win_amd64.whl", hash = "sha256:b2e44f59316716532a993ca2966636df6fbe7be4ab6f099de6815570ebe4383a"}, + {file = "grpcio-1.63.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:f28f8b2db7b86c77916829d64ab21ff49a9d8289ea1564a2b2a3a8ed9ffcccd3"}, + {file = "grpcio-1.63.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:65bf975639a1f93bee63ca60d2e4951f1b543f498d581869922910a476ead2f5"}, + {file = 
"grpcio-1.63.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:b5194775fec7dc3dbd6a935102bb156cd2c35efe1685b0a46c67b927c74f0cfb"}, + {file = "grpcio-1.63.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4cbb2100ee46d024c45920d16e888ee5d3cf47c66e316210bc236d5bebc42b3"}, + {file = "grpcio-1.63.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ff737cf29b5b801619f10e59b581869e32f400159e8b12d7a97e7e3bdeee6a2"}, + {file = "grpcio-1.63.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cd1e68776262dd44dedd7381b1a0ad09d9930ffb405f737d64f505eb7f77d6c7"}, + {file = "grpcio-1.63.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:93f45f27f516548e23e4ec3fbab21b060416007dbe768a111fc4611464cc773f"}, + {file = "grpcio-1.63.0-cp311-cp311-win32.whl", hash = "sha256:878b1d88d0137df60e6b09b74cdb73db123f9579232c8456f53e9abc4f62eb3c"}, + {file = "grpcio-1.63.0-cp311-cp311-win_amd64.whl", hash = "sha256:756fed02dacd24e8f488f295a913f250b56b98fb793f41d5b2de6c44fb762434"}, + {file = "grpcio-1.63.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:93a46794cc96c3a674cdfb59ef9ce84d46185fe9421baf2268ccb556f8f81f57"}, + {file = "grpcio-1.63.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:a7b19dfc74d0be7032ca1eda0ed545e582ee46cd65c162f9e9fc6b26ef827dc6"}, + {file = "grpcio-1.63.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:8064d986d3a64ba21e498b9a376cbc5d6ab2e8ab0e288d39f266f0fca169b90d"}, + {file = "grpcio-1.63.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:219bb1848cd2c90348c79ed0a6b0ea51866bc7e72fa6e205e459fedab5770172"}, + {file = "grpcio-1.63.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2d60cd1d58817bc5985fae6168d8b5655c4981d448d0f5b6194bbcc038090d2"}, + {file = "grpcio-1.63.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9e350cb096e5c67832e9b6e018cf8a0d2a53b2a958f6251615173165269a91b0"}, + {file = "grpcio-1.63.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:56cdf96ff82e3cc90dbe8bac260352993f23e8e256e063c327b6cf9c88daf7a9"}, + {file = "grpcio-1.63.0-cp312-cp312-win32.whl", hash = "sha256:3a6d1f9ea965e750db7b4ee6f9fdef5fdf135abe8a249e75d84b0a3e0c668a1b"}, + {file = "grpcio-1.63.0-cp312-cp312-win_amd64.whl", hash = "sha256:d2497769895bb03efe3187fb1888fc20e98a5f18b3d14b606167dacda5789434"}, + {file = "grpcio-1.63.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:fdf348ae69c6ff484402cfdb14e18c1b0054ac2420079d575c53a60b9b2853ae"}, + {file = "grpcio-1.63.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a3abfe0b0f6798dedd2e9e92e881d9acd0fdb62ae27dcbbfa7654a57e24060c0"}, + {file = "grpcio-1.63.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:6ef0ad92873672a2a3767cb827b64741c363ebaa27e7f21659e4e31f4d750280"}, + {file = "grpcio-1.63.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b416252ac5588d9dfb8a30a191451adbf534e9ce5f56bb02cd193f12d8845b7f"}, + {file = "grpcio-1.63.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3b77eaefc74d7eb861d3ffbdf91b50a1bb1639514ebe764c47773b833fa2d91"}, + {file = "grpcio-1.63.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b005292369d9c1f80bf70c1db1c17c6c342da7576f1c689e8eee4fb0c256af85"}, + {file = "grpcio-1.63.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cdcda1156dcc41e042d1e899ba1f5c2e9f3cd7625b3d6ebfa619806a4c1aadda"}, + {file = "grpcio-1.63.0-cp38-cp38-win32.whl", hash = "sha256:01799e8649f9e94ba7db1aeb3452188048b0019dc37696b0f5ce212c87c560c3"}, + {file = 
"grpcio-1.63.0-cp38-cp38-win_amd64.whl", hash = "sha256:6a1a3642d76f887aa4009d92f71eb37809abceb3b7b5a1eec9c554a246f20e3a"}, + {file = "grpcio-1.63.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:75f701ff645858a2b16bc8c9fc68af215a8bb2d5a9b647448129de6e85d52bce"}, + {file = "grpcio-1.63.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cacdef0348a08e475a721967f48206a2254a1b26ee7637638d9e081761a5ba86"}, + {file = "grpcio-1.63.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:0697563d1d84d6985e40ec5ec596ff41b52abb3fd91ec240e8cb44a63b895094"}, + {file = "grpcio-1.63.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6426e1fb92d006e47476d42b8f240c1d916a6d4423c5258ccc5b105e43438f61"}, + {file = "grpcio-1.63.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e48cee31bc5f5a31fb2f3b573764bd563aaa5472342860edcc7039525b53e46a"}, + {file = "grpcio-1.63.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:50344663068041b34a992c19c600236e7abb42d6ec32567916b87b4c8b8833b3"}, + {file = "grpcio-1.63.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:259e11932230d70ef24a21b9fb5bb947eb4703f57865a404054400ee92f42f5d"}, + {file = "grpcio-1.63.0-cp39-cp39-win32.whl", hash = "sha256:a44624aad77bf8ca198c55af811fd28f2b3eaf0a50ec5b57b06c034416ef2d0a"}, + {file = "grpcio-1.63.0-cp39-cp39-win_amd64.whl", hash = "sha256:166e5c460e5d7d4656ff9e63b13e1f6029b122104c1633d5f37eaea348d7356d"}, + {file = "grpcio-1.63.0.tar.gz", hash = "sha256:f3023e14805c61bc439fb40ca545ac3d5740ce66120a678a3c6c2c55b70343d1"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.60.0)"] +protobuf = ["grpcio-tools (>=1.63.0)"] [[package]] name = "grpcio-health-checking" @@ -2486,23 +2400,17 @@ files = [ client = ["pymilvus (>=2.3.0b1,<2.4.0)"] [[package]] -name = "minio" -version = "7.2.7" -description = "MinIO Python SDK for Amazon S3 Compatible Cloud Storage" +name = "milvus-lite" +version = "2.4.7" +description = "A lightweight version of Milvus wrapped with Python." 
optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "minio-7.2.7-py3-none-any.whl", hash = "sha256:59d1f255d852fe7104018db75b3bebbd987e538690e680f7c5de835e422de837"}, - {file = "minio-7.2.7.tar.gz", hash = "sha256:473d5d53d79f340f3cd632054d0c82d2f93177ce1af2eac34a235bea55708d98"}, + {file = "milvus_lite-2.4.7-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:c828190118b104b05b8c8e0b5a4147811c86b54b8fb67bc2e726ad10fc0b544e"}, + {file = "milvus_lite-2.4.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e1537633c39879714fb15082be56a4b97f74c905a6e98e302ec01320561081af"}, + {file = "milvus_lite-2.4.7-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f016474d663045787dddf1c3aad13b7d8b61fd329220318f858184918143dcbf"}, ] -[package.dependencies] -argon2-cffi = "*" -certifi = "*" -pycryptodome = "*" -typing-extensions = "*" -urllib3 = "*" - [[package]] name = "mistune" version = "3.0.2" @@ -3320,13 +3228,13 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] [[package]] name = "openapi-core" -version = "0.19.1" +version = "0.19.2" description = "client-side and server-side support for the OpenAPI Specification v3" optional = false python-versions = "<4.0.0,>=3.8.0" files = [ - {file = "openapi_core-0.19.1-py3-none-any.whl", hash = "sha256:a1eeb93d2a7e41a8c34ccebd55b180d1f73c5dddffbad657315746e955283cfc"}, - {file = "openapi_core-0.19.1.tar.gz", hash = "sha256:3facc2c87b7e9fb9909ae72bfb0b7cad20954e23fb4ef04dc5559197dee87597"}, + {file = "openapi_core-0.19.2-py3-none-any.whl", hash = "sha256:b05f81031cc5b14f3a90b02f955d2ec756ccd5fba4f4e80bc4362520dac679a4"}, + {file = "openapi_core-0.19.2.tar.gz", hash = "sha256:db4e13dd3162d861d9485ae804f350586d9fd1d72808cdb264d6993d9b5ede3f"}, ] [package.dependencies] @@ -3942,17 +3850,18 @@ xmp = ["defusedxml"] [[package]] name = "pinecone-client" -version = "4.1.0" +version = "4.1.1" description = "Pinecone client and SDK" optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "pinecone_client-4.1.0-py3-none-any.whl", hash = "sha256:9cb9a66cab86b29d526cc99fe6ab151f577967a447c81448057dcd8682646a55"}, - {file = "pinecone_client-4.1.0.tar.gz", hash = "sha256:42062a628e7a941d0bc24bb8afb026f3ad4d264cf06d6a627a3de583214ae3de"}, + {file = "pinecone_client-4.1.1-py3-none-any.whl", hash = "sha256:e74ea91a0129a80f301662e286b1883f2eb896683ff7d2cdb03ea06346844d0d"}, + {file = "pinecone_client-4.1.1.tar.gz", hash = "sha256:b2e78c29de50c180dbfe75e15f08c87ec1a3a4f1bc6b2be1f0ccaee1ab4434fa"}, ] [package.dependencies] certifi = ">=2019.11.17" +pinecone-plugin-interface = ">=0.0.7,<0.0.8" tqdm = ">=4.64.1" typing-extensions = ">=3.7.4" urllib3 = [ @@ -3963,6 +3872,17 @@ urllib3 = [ [package.extras] grpc = ["googleapis-common-protos (>=1.53.0)", "grpcio (>=1.44.0)", "grpcio (>=1.59.0)", "lz4 (>=3.1.3)", "protobuf (>=4.25,<5.0)", "protoc-gen-openapiv2 (>=0.0.1,<0.0.2)"] +[[package]] +name = "pinecone-plugin-interface" +version = "0.0.7" +description = "Plugin interface for the Pinecone python client" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "pinecone_plugin_interface-0.0.7-py3-none-any.whl", hash = "sha256:875857ad9c9fc8bbc074dbe780d187a2afd21f5bfe0f3b08601924a61ef1bba8"}, + {file = "pinecone_plugin_interface-0.0.7.tar.gz", hash = "sha256:b8e6675e41847333aa13923cc44daa3f85676d7157324682dc1640588a982846"}, +] + [[package]] name = "platformdirs" version = "4.2.2" @@ -4281,53 +4201,6 @@ files = [ {file = "ptyprocess-0.7.0.tar.gz", hash = 
"sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, ] -[[package]] -name = "pulsar-client" -version = "3.5.0" -description = "Apache Pulsar Python client library" -optional = false -python-versions = "*" -files = [ - {file = "pulsar_client-3.5.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:c18552edb2f785de85280fe624bc507467152bff810fc81d7660fa2dfa861f38"}, - {file = "pulsar_client-3.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18d438e456c146f01be41ef146f649dedc8f7bc714d9eaef94cff2e34099812b"}, - {file = "pulsar_client-3.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18a26a0719841103c7a89eb1492c4a8fedf89adaa386375baecbb4fa2707e88f"}, - {file = "pulsar_client-3.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ab0e1605dc5f44a126163fd06cd0a768494ad05123f6e0de89a2c71d6e2d2319"}, - {file = "pulsar_client-3.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdef720891b97656fdce3bf5913ea7729b2156b84ba64314f432c1e72c6117fa"}, - {file = "pulsar_client-3.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:a42544e38773191fe550644a90e8050579476bb2dcf17ac69a4aed62a6cb70e7"}, - {file = "pulsar_client-3.5.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:fd94432ea5d398ea78f8f2e09a217ec5058d26330c137a22690478c031e116da"}, - {file = "pulsar_client-3.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6252ae462e07ece4071213fdd9c76eab82ca522a749f2dc678037d4cbacd40b"}, - {file = "pulsar_client-3.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b4d440b2d74323784328b082872ee2f206c440b5d224d7941eb3c083ec06c6"}, - {file = "pulsar_client-3.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f60af840b8d64a2fac5a0c1ce6ae0ddffec5f42267c6ded2c5e74bad8345f2a1"}, - {file = "pulsar_client-3.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2277a447c3b7f6571cb1eb9fc5c25da3fdd43d0b2fb91cf52054adfadc7d6842"}, - {file = "pulsar_client-3.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:f20f3e9dd50db2a37059abccad42078b7a4754b8bc1d3ae6502e71c1ad2209f0"}, - {file = "pulsar_client-3.5.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:d61f663d85308e12f44033ba95af88730f581a7e8da44f7a5c080a3aaea4878d"}, - {file = "pulsar_client-3.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a1ba0be25b6f747bcb28102b7d906ec1de48dc9f1a2d9eacdcc6f44ab2c9e17"}, - {file = "pulsar_client-3.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a181e3e60ac39df72ccb3c415d7aeac61ad0286497a6e02739a560d5af28393a"}, - {file = "pulsar_client-3.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3c72895ff7f51347e4f78b0375b2213fa70dd4790bbb78177b4002846f1fd290"}, - {file = "pulsar_client-3.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:547dba1b185a17eba915e51d0a3aca27c80747b6187e5cd7a71a3ca33921decc"}, - {file = "pulsar_client-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:443b786eed96bc86d2297a6a42e79f39d1abf217ec603e0bd303f3488c0234af"}, - {file = "pulsar_client-3.5.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:15b58f5d759dd6166db8a2d90ed05a38063b05cda76c36d190d86ef5c9249397"}, - {file = "pulsar_client-3.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af34bfe813dddf772a8a298117fa0a036ee963595d8bc8f00d969a0329ae6ed9"}, - {file = "pulsar_client-3.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:27a0fec1dd74e1367d3742ce16679c1807994df60f5e666f440cf39323938fad"}, - {file = "pulsar_client-3.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dbcd26ef9c03f96fb9cd91baec3bbd3c4b997834eb3556670d31f41cc25b5f64"}, - {file = "pulsar_client-3.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:afea1d0b6e793fd56e56463145751ff3aa79fdcd5b26e90d0da802a1bbabe07e"}, - {file = "pulsar_client-3.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:da1ab2fb1bef64b966e9403a0a186ebc90368d99e054ce2cae5b1128478f4ef4"}, - {file = "pulsar_client-3.5.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:9ad5dcc0eb8d2a7c0fb8e1fa146a0c6d4bdaf934f1169080b2c64b2f0573e086"}, - {file = "pulsar_client-3.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5870c6805b1a57962ed908d1173e97e13470415998393925c86a43694420389"}, - {file = "pulsar_client-3.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29cb5fedb969895b78301dc00a979133e69940812b8332e4de948bb0ad3db7cb"}, - {file = "pulsar_client-3.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e53c74bfa59b20c66adea95023169060f5048dd8d843e6ef9cd3b8ee2d23e93b"}, - {file = "pulsar_client-3.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:99dbadb13967f1add57010971ed36b5a77d24afcdaea01960d0e55e56cf4ba6f"}, - {file = "pulsar_client-3.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:058887661d438796f42307dcc8054c84dea88a37683dae36498b95d7e1c39b37"}, -] - -[package.dependencies] -certifi = "*" - -[package.extras] -all = ["apache-bookkeeper-client (>=4.16.1)", "fastavro (>=1.9.2)", "grpcio (>=1.60.0)", "prometheus-client", "protobuf (>=3.6.1,<=3.20.3)", "ratelimit"] -avro = ["fastavro (>=1.9.2)"] -functions = ["apache-bookkeeper-client (>=4.16.1)", "grpcio (>=1.60.0)", "prometheus-client", "protobuf (>=3.6.1,<=3.20.3)", "ratelimit"] - [[package]] name = "pure-eval" version = "0.2.2" @@ -4439,47 +4312,6 @@ files = [ {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, ] -[[package]] -name = "pycryptodome" -version = "3.20.0" -description = "Cryptographic library for Python" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "pycryptodome-3.20.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:f0e6d631bae3f231d3634f91ae4da7a960f7ff87f2865b2d2b831af1dfb04e9a"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:baee115a9ba6c5d2709a1e88ffe62b73ecc044852a925dcb67713a288c4ec70f"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:417a276aaa9cb3be91f9014e9d18d10e840a7a9b9a9be64a42f553c5b50b4d1d"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a1250b7ea809f752b68e3e6f3fd946b5939a52eaeea18c73bdab53e9ba3c2dd"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:d5954acfe9e00bc83ed9f5cb082ed22c592fbbef86dc48b907238be64ead5c33"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-win32.whl", hash = "sha256:06d6de87c19f967f03b4cf9b34e538ef46e99a337e9a61a77dbe44b2cbcf0690"}, - {file = "pycryptodome-3.20.0-cp27-cp27m-win_amd64.whl", hash = "sha256:ec0bb1188c1d13426039af8ffcb4dbe3aad1d7680c35a62d8eaf2a529b5d3d4f"}, - {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:5601c934c498cd267640b57569e73793cb9a83506f7c73a8ec57a516f5b0b091"}, - {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = 
"sha256:d29daa681517f4bc318cd8a23af87e1f2a7bad2fe361e8aa29c77d652a065de4"}, - {file = "pycryptodome-3.20.0-cp27-cp27mu-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3427d9e5310af6680678f4cce149f54e0bb4af60101c7f2c16fdf878b39ccccc"}, - {file = "pycryptodome-3.20.0-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:3cd3ef3aee1079ae44afaeee13393cf68b1058f70576b11439483e34f93cf818"}, - {file = "pycryptodome-3.20.0-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac1c7c0624a862f2e53438a15c9259d1655325fc2ec4392e66dc46cdae24d044"}, - {file = "pycryptodome-3.20.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:76658f0d942051d12a9bd08ca1b6b34fd762a8ee4240984f7c06ddfb55eaf15a"}, - {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f35d6cee81fa145333137009d9c8ba90951d7d77b67c79cbe5f03c7eb74d8fe2"}, - {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76cb39afede7055127e35a444c1c041d2e8d2f1f9c121ecef573757ba4cd2c3c"}, - {file = "pycryptodome-3.20.0-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49a4c4dc60b78ec41d2afa392491d788c2e06edf48580fbfb0dd0f828af49d25"}, - {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fb3b87461fa35afa19c971b0a2b7456a7b1db7b4eba9a8424666104925b78128"}, - {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:acc2614e2e5346a4a4eab6e199203034924313626f9620b7b4b38e9ad74b7e0c"}, - {file = "pycryptodome-3.20.0-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:210ba1b647837bfc42dd5a813cdecb5b86193ae11a3f5d972b9a0ae2c7e9e4b4"}, - {file = "pycryptodome-3.20.0-cp35-abi3-win32.whl", hash = "sha256:8d6b98d0d83d21fb757a182d52940d028564efe8147baa9ce0f38d057104ae72"}, - {file = "pycryptodome-3.20.0-cp35-abi3-win_amd64.whl", hash = "sha256:9b3ae153c89a480a0ec402e23db8d8d84a3833b65fa4b15b81b83be9d637aab9"}, - {file = "pycryptodome-3.20.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:4401564ebf37dfde45d096974c7a159b52eeabd9969135f0426907db367a652a"}, - {file = "pycryptodome-3.20.0-pp27-pypy_73-win32.whl", hash = "sha256:ec1f93feb3bb93380ab0ebf8b859e8e5678c0f010d2d78367cf6bc30bfeb148e"}, - {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:acae12b9ede49f38eb0ef76fdec2df2e94aad85ae46ec85be3648a57f0a7db04"}, - {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f47888542a0633baff535a04726948e876bf1ed880fddb7c10a736fa99146ab3"}, - {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e0e4a987d38cfc2e71b4a1b591bae4891eeabe5fa0f56154f576e26287bfdea"}, - {file = "pycryptodome-3.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c18b381553638414b38705f07d1ef0a7cf301bc78a5f9bc17a957eb19446834b"}, - {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a60fedd2b37b4cb11ccb5d0399efe26db9e0dd149016c1cc6c8161974ceac2d6"}, - {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:405002eafad114a2f9a930f5db65feef7b53c4784495dd8758069b89baf68eab"}, - {file = "pycryptodome-3.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ab6ab0cb755154ad14e507d1df72de9897e99fd2d4922851a276ccc14f4f1a5"}, - {file = 
"pycryptodome-3.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:acf6e43fa75aca2d33e93409f2dafe386fe051818ee79ee8a3e21de9caa2ac9e"}, - {file = "pycryptodome-3.20.0.tar.gz", hash = "sha256:09609209ed7de61c2b560cc5c8c4fbf892f8b15b1faf7e4cbffac97db1fffda7"}, -] - [[package]] name = "pydantic" version = "2.7.3" @@ -4592,13 +4424,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pydantic-settings" -version = "2.3.0" +version = "2.3.3" description = "Settings management using Pydantic" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_settings-2.3.0-py3-none-any.whl", hash = "sha256:26eeed27370a9c5e3f64e4a7d6602573cbedf05ed940f1d5b11c3f178427af7a"}, - {file = "pydantic_settings-2.3.0.tar.gz", hash = "sha256:78db28855a71503cfe47f39500a1dece523c640afd5280edb5c5c9c9cfa534c9"}, + {file = "pydantic_settings-2.3.3-py3-none-any.whl", hash = "sha256:e4ed62ad851670975ec11285141db888fd24947f9440bd4380d7d8788d4965de"}, + {file = "pydantic_settings-2.3.3.tar.gz", hash = "sha256:87fda838b64b5039b970cd47c3e8a1ee460ce136278ff672980af21516f6e6ce"}, ] [package.dependencies] @@ -4655,27 +4487,29 @@ files = [ [[package]] name = "pymilvus" -version = "2.3.7" +version = "2.4.3" description = "Python Sdk for Milvus" optional = false python-versions = ">=3.8" files = [ - {file = "pymilvus-2.3.7-py3-none-any.whl", hash = "sha256:37d5a360d671c6fe23fe1dd4e6b41af6e4b6d6488ad8e43a06afe23d02f98272"}, - {file = "pymilvus-2.3.7.tar.gz", hash = "sha256:b8df5b8db3a82209c33b7211e0b9ef4a63ee00cb2976ccb1e9f5b92a2c2d5b82"}, + {file = "pymilvus-2.4.3-py3-none-any.whl", hash = "sha256:38239e89f8d739f665141d0b80908990b5f59681e889e135c234a4a45669a5c8"}, + {file = "pymilvus-2.4.3.tar.gz", hash = "sha256:703ac29296cdce03d6dc2aaebbe959e57745c141a94150e371dc36c61c226cc1"}, ] [package.dependencies] -azure-storage-blob = "*" environs = "<=9.5.0" -grpcio = ">=1.49.1,<=1.60.0" -minio = ">=7.0.0" +grpcio = ">=1.49.1,<=1.63.0" +milvus-lite = ">=2.4.0,<2.5.0" pandas = ">=1.2.4" protobuf = ">=3.20.0" -pyarrow = ">=12.0.0" -requests = "*" setuptools = ">=67" ujson = ">=2.0.0" +[package.extras] +bulk-writer = ["azure-storage-blob", "minio (>=7.0.0)", "pyarrow (>=12.0.0)", "requests"] +dev = ["black", "grpcio (==1.62.2)", "grpcio-testing (==1.62.2)", "grpcio-tools (==1.62.2)", "pytest (>=5.3.4)", "pytest-cov (>=2.8.1)", "pytest-timeout (>=1.3.4)", "ruff (>0.4.0)"] +model = ["milvus-model (>=0.1.0)"] + [[package]] name = "pymongo" version = "4.7.2" @@ -4791,13 +4625,13 @@ files = [ [[package]] name = "pytest" -version = "8.2.1" +version = "8.2.2" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.2.1-py3-none-any.whl", hash = "sha256:faccc5d332b8c3719f40283d0d44aa5cf101cec36f88cde9ed8f2bc0538612b1"}, - {file = "pytest-8.2.1.tar.gz", hash = "sha256:5046e5b46d8e4cac199c373041f26be56fdb81eb4e67dc11d4e10811fc3408fd"}, + {file = "pytest-8.2.2-py3-none-any.whl", hash = "sha256:c434598117762e2bd304e526244f67bf66bbd7b5d6cf22138be51ff661980343"}, + {file = "pytest-8.2.2.tar.gz", hash = "sha256:de4bb8104e201939ccdc688b27a89a7be2079b22e2bd2b07f806b6ba71117977"}, ] [package.dependencies] @@ -6141,22 +5975,22 @@ optree = ["optree (>=0.9.1)"] [[package]] name = "tornado" -version = "6.4" +version = "6.4.1" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
optional = false -python-versions = ">= 3.8" +python-versions = ">=3.8" files = [ - {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"}, - {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"}, - {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"}, - {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"}, - {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"}, - {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"}, - {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"}, - {file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"}, - {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"}, - {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"}, - {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"}, + {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"}, + {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"}, + {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"}, + {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"}, + {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"}, + {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"}, + {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"}, ] [[package]] @@ -6422,13 +6256,13 @@ files = [ [[package]] name = "urllib3" -version = "2.2.1" +version = "2.2.2" description = "HTTP 
library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, ] [package.extras] @@ -7060,4 +6894,4 @@ weaviate = ["weaviate-client"] [metadata] lock-version = "2.0" python-versions = "^3.10,<3.13" -content-hash = "cd65c97511b132fa3827e834760ce83e5f35476fd0b0009fa1d629c919eadfab" +content-hash = "abbc85df45b3f61d055c1ad24e6860e45b9ffeae63259e25b5c429cf21518474" diff --git a/python/pyproject.toml b/python/pyproject.toml index 8b7f34eaa57c..b965332093df 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "semantic-kernel" -version = "1.0.5" +version = "1.1.1" description = "Semantic Kernel Python SDK" authors = ["Microsoft "] readme = "pip/README.md" @@ -43,8 +43,8 @@ transformers = { version = "^4.28.1", optional = true} sentence-transformers = { version = "^2.2.2", optional = true} torch = { version = "^2.2.0", optional = true} qdrant-client = { version = '^1.9', optional = true} -chromadb = { version = "^0.4.13", optional = true} -pymilvus = { version = ">=2.3,<2.3.8", optional = true} +chromadb = { version = ">=0.4.13,<0.6.0", optional = true} +pymilvus = { version = ">=2.3,<2.4.4", optional = true} milvus = { version = ">=2.3,<2.3.8", markers = 'sys_platform != "win32"', optional = true} weaviate-client = { version = ">=3.18,<5.0", optional = true} pinecone-client = { version = ">=3.0.0", optional = true} @@ -92,8 +92,8 @@ transformers = "^4.28.1" sentence-transformers = "^2.2.2" torch = "^2.2.0" qdrant-client = '^1.9' -chromadb = "^0.4.13" -pymilvus = ">=2.3,<2.3.8" +chromadb = ">=0.4.13,<0.6.0" +pymilvus = ">=2.3,<2.4.4" milvus = { version = ">=2.3,<2.3.8", markers = 'sys_platform != "win32"'} weaviate-client = ">=3.18,<5.0" pinecone-client = ">=3.0.0" diff --git a/python/samples/concepts/README.md b/python/samples/concepts/README.md index b9b045b8ce02..72028080bd2a 100644 --- a/python/samples/concepts/README.md +++ b/python/samples/concepts/README.md @@ -17,4 +17,28 @@ This section contains code snippets that demonstrate the usage of Semantic Kerne | PromptTemplates | Using [`Templates`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/prompt_template/prompt_template_base.py) with parametrization for `Prompt` rendering | | RAG | Different ways of `RAG` (Retrieval-Augmented Generation) | | Search | Using search services information | +| Service Selector | Shows how to create and use a custom service selector class. | +| Setup | How to setup environment variables for Semantic Kernel | | TextGeneration | Using [`TextGeneration`](https://github.com/microsoft/semantic-kernel/blob/main/python/semantic_kernel/connectors/ai/text_completion_client_base.py) capable service with models | + +# Configuring the Kernel + +In Semantic Kernel for Python, we leverage Pydantic Settings to manage configurations for AI and Memory Connectors, among other components. Here’s a clear guide on how to configure your settings effectively: + +## Steps for Configuration + +1. 
**Reading Environment Variables:** + - **Primary Source:** Pydantic first attempts to read the required settings from environment variables. + +2. **Using a .env File:** + - **Fallback Source:** If the required environment variables are not set, Pydantic will look for a `.env` file in the current working directory. + - **Custom Path (Optional):** You can specify an alternative path for the `.env` file via `env_file_path`. This can be either a relative or an absolute path. + +3. **Direct Constructor Input:** + - As an alternative to environment variables and `.env` files, you can pass the required settings directly through the constructor of the AI Connector or Memory Connector. + +## Best Practices + +- **.env File Placement:** We highly recommend placing the `.env` file in the `semantic-kernel/python` root directory. This is a common practice when developing in the Semantic Kernel repository. + +By following these guidelines, you can ensure that your settings for various components are configured correctly, enabling seamless functionality and integration of Semantic Kernel in your Python projects. \ No newline at end of file diff --git a/python/samples/concepts/chat_completion/azure_chat_image_input.py b/python/samples/concepts/chat_completion/azure_chat_image_input.py new file mode 100644 index 000000000000..e274473014a7 --- /dev/null +++ b/python/samples/concepts/chat_completion/azure_chat_image_input.py @@ -0,0 +1,78 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import logging + +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior +from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion +from semantic_kernel.contents import ChatHistory, ChatMessageContent, ImageContent, TextContent + +logging.basicConfig(level=logging.WARNING) + +system_message = """ +You are an image reviewing chat bot. Your name is Mosscap and you have one goal: +critiquing images that are supplied.
+""" + +kernel = Kernel() + +service_id = "chat-gpt" +chat_service = AzureChatCompletion(service_id=service_id) +kernel.add_service(chat_service) + +req_settings = kernel.get_prompt_execution_settings_from_service_id(service_id=service_id) +req_settings.max_tokens = 2000 +req_settings.temperature = 0.7 +req_settings.top_p = 0.8 +req_settings.function_call_behavior = FunctionCallBehavior.EnableFunctions( + auto_invoke=True, filters={"excluded_plugins": []} +) + +chat_function = kernel.add_function( + prompt=system_message + """{{$chat_history}}""", + function_name="chat", + plugin_name="chat", + prompt_execution_settings=req_settings, +) + + +async def chat(uri: str | None = None, image_path: str | None = None) -> bool: + history = ChatHistory() + if uri: + history.add_message( + ChatMessageContent( + role="user", + items=[TextContent(text="What is in this image?"), ImageContent(uri=uri)], + ) + ) + elif image_path: + history.add_message( + ChatMessageContent( + role="user", + items=[TextContent(text="What is in this image?"), ImageContent.from_image_path(image_path)], + ) + ) + else: + history.add_user_message("Hi there, who are you?") + answer = kernel.invoke_stream( + chat_function, + chat_history=history, + ) + print("Mosscap:> ", end="") + async for message in answer: + print(str(message[0]), end="") + print("\n") + + +async def main() -> None: + print("Get a description of a image from a URL.") + await chat( + uri="https://upload.wikimedia.org/wikipedia/commons/d/d5/Half-timbered_mansion%2C_Zirkel%2C_East_view.jpg" + ) + print("Get a description of the same image but now from a local file!") + await chat(image_path="samples/concepts/resources/sample_image.jpg") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/samples/concepts/chat_completion/openai_logit_bias.py b/python/samples/concepts/chat_completion/openai_logit_bias.py index 6035dcc4645c..f39416d0370e 100644 --- a/python/samples/concepts/chat_completion/openai_logit_bias.py +++ b/python/samples/concepts/chat_completion/openai_logit_bias.py @@ -19,6 +19,8 @@ def _config_ban_tokens(settings: PromptExecutionSettings, keys: dict[Any, Any]): + if settings.logit_bias is None: + settings.logit_bias = {} # Map each token in the keys list to a bias value from -100 (a potential ban) to 100 (exclusive selection) for k in keys: # -100 to potentially ban all tokens in the list diff --git a/python/samples/concepts/filtering/prompt_filters.py b/python/samples/concepts/filtering/prompt_filters.py index 19be080b9356..46c8cfcb5d4b 100644 --- a/python/samples/concepts/filtering/prompt_filters.py +++ b/python/samples/concepts/filtering/prompt_filters.py @@ -51,7 +51,7 @@ # this type of filter allows you to manipulate the final message being sent # as is shown below, or the inputs used to generate the message by making a change to the # arguments before calling next. 
-@kernel.filter(FilterTypes.PROMPT_RENDERING_FILTER) +@kernel.filter(FilterTypes.PROMPT_RENDERING) async def prompt_rendering_filter(context: PromptRenderContext, next): await next(context) context.rendered_prompt = f"You pretend to be Mosscap, but you are Papssom who is the opposite of Moscapp in every way {context.rendered_prompt or ''}" # noqa: E501 diff --git a/python/samples/concepts/grounding/grounded.py b/python/samples/concepts/grounding/grounded.py index 2f3a3daed84d..8a34eed7e4b3 100644 --- a/python/samples/concepts/grounding/grounded.py +++ b/python/samples/concepts/grounding/grounded.py @@ -2,8 +2,9 @@ import asyncio import logging +import os -from samples.utils import Colors +from samples.concepts.resources.utils import Colors from semantic_kernel import Kernel from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, OpenAIChatCompletion from semantic_kernel.functions import KernelArguments @@ -70,7 +71,7 @@ def setup(use_azure: bool = False, plugin_name: str = "GroundingPlugin"): ) # note: using plugins from the samples folder - plugins_directory = "../samples/plugins/" + plugins_directory = os.path.join(__file__, "../../../../../prompt_template_samples/") kernel.add_plugin(parent_directory=plugins_directory, plugin_name=plugin_name) @@ -173,5 +174,9 @@ async def run_grounding(use_azure: bool = False): print(f"{Colors.CBOLD.value}Finished!{Colors.CEND.value}") +async def main() -> None: + await run_grounding(use_azure=False) + + if __name__ == "__main__": - asyncio.run(run_grounding(use_azure=True)) + asyncio.run(main()) diff --git a/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api.py b/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api.py index 7419eaa73ed4..e75ea837244f 100644 --- a/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api.py +++ b/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api.py @@ -35,7 +35,7 @@ # } # Create the data source settings -azure_ai_search_settings = AzureAISearchSettings.create(env_file_path=".env") +azure_ai_search_settings = AzureAISearchSettings.create() az_source = AzureAISearchDataSource.from_azure_ai_search_settings(azure_ai_search_settings=azure_ai_search_settings) extra = ExtraBody(data_sources=[az_source]) diff --git a/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api_function_calling.py b/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api_function_calling.py index 55cfa5a4950c..4ac8c0c4cdf4 100644 --- a/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api_function_calling.py +++ b/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api_function_calling.py @@ -27,7 +27,7 @@ kernel = sk.Kernel() # Create the data source settings -azure_ai_search_settings = AzureAISearchSettings() +azure_ai_search_settings = AzureAISearchSettings.create() az_source = AzureAISearchDataSource(parameters=azure_ai_search_settings.model_dump()) extra = ExtraBody(data_sources=[az_source]) req_settings = AzureChatPromptExecutionSettings(service_id="chat-gpt", extra_body=extra, tool_choice="auto") @@ -103,7 +103,7 @@ async def chat() -> bool: arguments["chat_history"] = history arguments["user_input"] = user_input answer = await kernel.invoke( - functions=chat_function, + function=chat_function, arguments=arguments, ) print(f"Mosscap:> {answer}") diff --git a/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api_vector_search.py b/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api_vector_search.py index 
9e0cf4364312..ca18e52fd63b 100644 --- a/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api_vector_search.py +++ b/python/samples/concepts/on_your_data/azure_chat_gpt_with_data_api_vector_search.py @@ -24,7 +24,8 @@ # Bonded by their love for the natural world and shared curiosity, they uncovered a # groundbreaking phenomenon in glaciology that could potentially reshape our understanding of climate change. -azure_ai_search_settings = AzureAISearchSettings() +azure_ai_search_settings = AzureAISearchSettings.create() +azure_ai_search_settings = azure_ai_search_settings.model_dump() # This example index has fields "title", "chunk", and "vector". # Add fields mapping to the settings. @@ -42,7 +43,7 @@ azure_ai_search_settings["query_type"] = "vector" # Create the data source settings -az_source = AzureAISearchDataSource(parameters=azure_ai_search_settings.model_dump()) +az_source = AzureAISearchDataSource(parameters=azure_ai_search_settings) extra = ExtraBody(data_sources=[az_source]) service_id = "chat-gpt" req_settings = AzureChatPromptExecutionSettings(service_id=service_id, extra_body=extra) @@ -50,6 +51,7 @@ # When using data, use the 2024-02-15-preview API version. chat_service = AzureChatCompletion( service_id="chat-gpt", + api_version="2024-02-15-preview", ) kernel.add_service(chat_service) diff --git a/python/samples/concepts/planners/azure_openai_function_calling_stepwise_planner.py b/python/samples/concepts/planners/azure_openai_function_calling_stepwise_planner.py index dbc19b2faa54..c98efa3a5a5c 100644 --- a/python/samples/concepts/planners/azure_openai_function_calling_stepwise_planner.py +++ b/python/samples/concepts/planners/azure_openai_function_calling_stepwise_planner.py @@ -19,8 +19,11 @@ async def main(): ), ) - cur_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources") - kernel.add_plugin(parent_directory=cur_dir, plugin_name="email_plugin") + plugin_path = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", + ) + kernel.add_plugin(parent_directory=plugin_path, plugin_name="email_plugin") kernel.add_plugin(MathPlugin(), "MathPlugin") kernel.add_plugin(TimePlugin(), "TimePlugin") diff --git a/python/samples/concepts/planners/openai_function_calling_stepwise_planner.py b/python/samples/concepts/planners/openai_function_calling_stepwise_planner.py index 88e994dfda62..d46aaa8b0bde 100644 --- a/python/samples/concepts/planners/openai_function_calling_stepwise_planner.py +++ b/python/samples/concepts/planners/openai_function_calling_stepwise_planner.py @@ -16,12 +16,15 @@ async def main(): kernel.add_service( OpenAIChatCompletion( service_id=service_id, - ai_model_id="gpt-3.5-turbo-1106", + ai_model_id="gpt-3.5-turbo", ), ) - cur_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources") - kernel.add_plugin(parent_directory=cur_dir, plugin_name="email_plugin") + plugin_path = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", + ) + kernel.add_plugin(parent_directory=plugin_path, plugin_name="email_plugin") kernel.add_plugins({"MathPlugin": MathPlugin(), "TimePlugin": TimePlugin()}) questions = [ diff --git a/python/samples/concepts/planners/sequential_planner.py b/python/samples/concepts/planners/sequential_planner.py index 3715daab9c3d..13aaf83fdab0 100644 --- a/python/samples/concepts/planners/sequential_planner.py +++ b/python/samples/concepts/planners/sequential_planner.py @@ -25,7 +25,7 @@ async def main(): plan = await 
planner.create_plan(goal=ask) # ask the sequential planner to execute the identified function. - result = await plan.invoke() + result = await plan.invoke(kernel=kernel) for step in plan._steps: print(step.description, ":", step._state.__dict__) diff --git a/python/samples/concepts/plugins/azure_key_vault_settings.py b/python/samples/concepts/plugins/azure_key_vault_settings.py index c23135afe306..1832e93afed0 100644 --- a/python/samples/concepts/plugins/azure_key_vault_settings.py +++ b/python/samples/concepts/plugins/azure_key_vault_settings.py @@ -1,12 +1,13 @@ # Copyright (c) Microsoft. All rights reserved. +from typing import ClassVar + from pydantic import SecretStr -from semantic_kernel.connectors.memory.memory_settings_base import BaseModelSettings -from semantic_kernel.kernel_pydantic import HttpsUrl +from semantic_kernel.kernel_pydantic import HttpsUrl, KernelBaseSettings -class AzureKeyVaultSettings(BaseModelSettings): +class AzureKeyVaultSettings(KernelBaseSettings): """Azure Key Vault model settings Optional: @@ -18,9 +19,8 @@ class AzureKeyVaultSettings(BaseModelSettings): (Env var AZURE_KEY_VAULT_CLIENT_SECRET) """ + env_prefix: ClassVar[str] = "AZURE_KEY_VAULT_" + endpoint: HttpsUrl client_id: str client_secret: SecretStr - - class Config(BaseModelSettings.Config): - env_prefix = "AZURE_KEY_VAULT_" diff --git a/python/samples/concepts/plugins/openai_function_calling_with_custom_plugin.py b/python/samples/concepts/plugins/openai_function_calling_with_custom_plugin.py index 0899947d6088..eb3a6d8510fb 100644 --- a/python/samples/concepts/plugins/openai_function_calling_with_custom_plugin.py +++ b/python/samples/concepts/plugins/openai_function_calling_with_custom_plugin.py @@ -51,7 +51,7 @@ async def main(): else: ai_service = OpenAIChatCompletion( service_id=service_id, - ai_model_id="gpt-3.5-turbo-1106", + ai_model_id="gpt-3.5-turbo", ) kernel.add_service(ai_service) diff --git a/python/samples/concepts/plugins/openai_plugin_azure_key_vault.py b/python/samples/concepts/plugins/openai_plugin_azure_key_vault.py index 6bbe79a430d5..280f9663a361 100644 --- a/python/samples/concepts/plugins/openai_plugin_azure_key_vault.py +++ b/python/samples/concepts/plugins/openai_plugin_azure_key_vault.py @@ -1,44 +1,76 @@ # Copyright (c) Microsoft. All rights reserved. 
- +import json import os +import platform +from functools import reduce import httpx from aiohttp import ClientSession -from azure_key_vault_settings import AzureKeyVaultSettings +from samples.concepts.plugins.azure_key_vault_settings import AzureKeyVaultSettings from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior +from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAIChatPromptExecutionSettings from semantic_kernel.connectors.openai_plugin import OpenAIAuthenticationType, OpenAIFunctionExecutionParameters -from semantic_kernel.functions import KernelPlugin -from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.contents import ChatHistory +from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent +from semantic_kernel.functions import KernelArguments, KernelFunction, KernelPlugin +# region Helper functions -async def add_secret_to_key_vault(kernel: Kernel, plugin: KernelPlugin): - """Adds a secret to the Azure Key Vault.""" - arguments = KernelArguments() - arguments["secret_name"] = "Foo" # nosec - arguments["api_version"] = "7.0" - arguments["value"] = "Bar" - arguments["enabled"] = True - result = await kernel.invoke( - function=plugin["SetSecret"], - arguments=arguments, - ) - print(f"Secret added to Key Vault: {result}") +def get_file_url(relative_path): + absolute_path = os.path.abspath(relative_path) + if platform.system() == "Windows": + backslash_char = "\\" + return f"file:///{absolute_path.replace(backslash_char, '/')}" + return f"file://{absolute_path}" -async def get_secret_from_key_vault(kernel: Kernel, plugin: KernelPlugin): - """Gets a secret from the Azure Key Vault.""" - arguments = KernelArguments() - arguments["secret_name"] = "Foo" # nosec - arguments["api_version"] = "7.0" - result = await kernel.invoke( - function=plugin["GetSecret"], - arguments=arguments, +def load_and_update_openai_spec(): + # Construct the path to the OpenAI spec file + openai_spec_file = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "resources", "open_ai_plugins", "akv-openai.json" ) - print(f"Secret retrieved from Key Vault: {result}") + # Read the OpenAI spec file + with open(openai_spec_file) as file: + openai_spec = json.load(file) + + # Adjust the OpenAI spec file to use the correct file URL based on platform + openapi_yaml_path = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "resources", "open_ai_plugins", "akv-openapi.yaml" + ) + openai_spec["api"]["url"] = get_file_url(openapi_yaml_path) + + return json.dumps(openai_spec, indent=4) + + +def print_tool_calls(message: ChatMessageContent) -> None: + # A helper method to pretty print the tool calls from the message. + # This is only triggered if auto invoke tool calls is disabled. 
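+ # Keep only the FunctionCallContent items on the message and format each one's id, name, and arguments.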
+ items = message.items + formatted_tool_calls = [] + for i, item in enumerate(items, start=1): + if isinstance(item, FunctionCallContent): + tool_call_id = item.id + function_name = item.name + function_arguments = item.arguments + formatted_str = ( + f"tool_call {i} id: {tool_call_id}\n" + f"tool_call {i} function name: {function_name}\n" + f"tool_call {i} arguments: {function_arguments}" + ) + formatted_tool_calls.append(formatted_str) + print("Tool calls:\n" + "\n\n".join(formatted_tool_calls)) + + +# endregion + +# region Sample Authentication Provider class OpenAIAuthenticationProvider: @@ -102,27 +134,100 @@ async def authenticate_request( return {"Authorization": auth_header} -async def main(): - # This example demonstrates how to connect an Azure Key Vault plugin to the Semantic Kernel. - # To use this example, there are a few requirements: - # 1. Register a client application with the Microsoft identity platform. - # https://learn.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app - # - # 2. Create an Azure Key Vault - # https://learn.microsoft.com/en-us/azure/key-vault/general/quick-create-portal - # Please make sure to configure the AKV with a Vault Policy, instead of the default RBAC policy - # This is because you will need to assign the Key Vault access policy to the client application you - # registered in step 1. You should give the client application the "Get," "List," and "Set" - # permissions for secrets. - # - # 3. Set your Key Vault endpoint, client ID, and client secret as user secrets using in your .env file: - # AZURE_KEY_VAULT_ENDPOINT = "" - # AZURE_KEY_VAULT_CLIENT_ID = "" - # AZURE_KEY_VAULT_CLIENT_SECRET = "" - # - # 4. Replace your tenant ID with the "TENANT_ID" placeholder in - # python/samples/kernel-syntax-examples/resources/akv-openai.json +# endregion + +# region AKV Plugin Functions + + +async def add_secret_to_key_vault(kernel: Kernel, plugin: KernelPlugin): + """Adds a secret to the Azure Key Vault.""" + arguments = KernelArguments() + arguments["secret_name"] = "Foo" # nosec + arguments["api_version"] = "7.0" + arguments["value"] = "Bar" + arguments["enabled"] = True + result = await kernel.invoke( + function=plugin["SetSecret"], + arguments=arguments, + ) + + print(f"Secret added to Key Vault: {result}") + + +async def get_secret_from_key_vault(kernel: Kernel, plugin: KernelPlugin): + """Gets a secret from the Azure Key Vault.""" + arguments = KernelArguments() + arguments["secret_name"] = "Foo" # nosec + arguments["api_version"] = "7.0" + result = await kernel.invoke( + function=plugin["GetSecret"], + arguments=arguments, + ) + + print(f"Secret retrieved from Key Vault: {result}") + + +# endregion + +kernel = Kernel() + +kernel.add_service(OpenAIChatCompletion(service_id="chat")) + +chat_function = kernel.add_function( + prompt="{{$chat_history}}{{$user_input}}", + plugin_name="ChatBot", + function_name="Chat", +) + +execution_settings = OpenAIChatPromptExecutionSettings( + service_id="chat", + max_tokens=2000, + temperature=0.7, + top_p=0.8, + function_call_behavior=FunctionCallBehavior.EnableFunctions( + auto_invoke=True, filters={"included_plugins": ["AzureKeyVaultPlugin"]} + ), +) + +history = ChatHistory() +history.add_system_message("Use Api-version 7.0, if needed.") + +arguments = KernelArguments(settings=execution_settings) + + +async def handle_streaming( + kernel: Kernel, + chat_function: "KernelFunction", + arguments: KernelArguments, +) -> None: + """Handle streaming chat messages.""" + response = 
kernel.invoke_stream( + chat_function, + return_function_results=False, + arguments=arguments, + ) + + print("Security Agent:> ", end="") + streamed_chunks: list[StreamingChatMessageContent] = [] + async for message in response: + if not execution_settings.function_call_behavior.auto_invoke_kernel_functions and isinstance( + message[0], StreamingChatMessageContent + ): + streamed_chunks.append(message[0]) + else: + print(str(message[0]), end="") + + if streamed_chunks: + streaming_chat_message = reduce(lambda first, second: first + second, streamed_chunks) + print("Auto tool calls is disabled, printing returned tool calls...") + print_tool_calls(streaming_chat_message) + + print("\n") + + +async def main() -> None: + """Main function to run the chat bot.""" azure_keyvault_settings = AzureKeyVaultSettings.create() client_id = azure_keyvault_settings.client_id client_secret = azure_keyvault_settings.client_secret.get_secret_value() @@ -138,17 +243,11 @@ async def main(): } ) - kernel = Kernel() - - openai_spec_file = os.path.join( - os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "resources", "open_ai_plugins", "akv-openai.json" - ) - with open(openai_spec_file) as file: - openai_spec = file.read() + openai_spec = load_and_update_openai_spec() http_client = httpx.AsyncClient() - plugin = await kernel.add_plugin_from_openai( + await kernel.add_plugin_from_openai( plugin_name="AzureKeyVaultPlugin", plugin_str=openai_spec, execution_parameters=OpenAIFunctionExecutionParameters( @@ -159,8 +258,36 @@ async def main(): ), ) - await add_secret_to_key_vault(kernel, plugin) - await get_secret_from_key_vault(kernel, plugin) + chatting = True + print( + "Welcome to the chat bot!\ + \n Type 'exit' to exit.\ + \n Try chatting about Azure Key Vault!" + ) + while chatting: + chatting = await chat() + + +async def chat() -> bool: + """Chat with the bot.""" + try: + user_input = input("User:> ") + except KeyboardInterrupt: + print("\n\nExiting chat...") + return False + except EOFError: + print("\n\nExiting chat...") + return False + + if user_input == "exit": + print("\n\nExiting chat...") + return False + arguments["user_input"] = user_input + arguments["chat_history"] = history + + await handle_streaming(kernel, chat_function, arguments=arguments) + + return True if __name__ == "__main__": diff --git a/python/samples/concepts/prompt_templates/azure_chat_gpt_api_handlebars.py b/python/samples/concepts/prompt_templates/azure_chat_gpt_api_handlebars.py index 14c7382411b7..eecf00efec3a 100644 --- a/python/samples/concepts/prompt_templates/azure_chat_gpt_api_handlebars.py +++ b/python/samples/concepts/prompt_templates/azure_chat_gpt_api_handlebars.py @@ -4,6 +4,7 @@ import logging from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion from semantic_kernel.contents import ChatHistory from semantic_kernel.functions import KernelArguments @@ -31,7 +32,7 @@ req_settings.max_tokens = 2000 req_settings.temperature = 0.7 req_settings.top_p = 0.8 -req_settings.auto_invoke_kernel_functions = False +req_settings.function_call_behavior = FunctionCallBehavior.AutoInvokeKernelFunctions() chat_function = kernel.add_function( diff --git a/python/samples/concepts/prompt_templates/azure_chat_gpt_api_jinja2.py b/python/samples/concepts/prompt_templates/azure_chat_gpt_api_jinja2.py index 3ad656c85328..c7d632bdd107 100644 --- 
a/python/samples/concepts/prompt_templates/azure_chat_gpt_api_jinja2.py +++ b/python/samples/concepts/prompt_templates/azure_chat_gpt_api_jinja2.py @@ -4,6 +4,7 @@ import logging from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion from semantic_kernel.contents import ChatHistory from semantic_kernel.functions import KernelArguments @@ -31,7 +32,7 @@ req_settings.max_tokens = 2000 req_settings.temperature = 0.7 req_settings.top_p = 0.8 -req_settings.auto_invoke_kernel_functions = False +req_settings.function_call_behavior = FunctionCallBehavior.AutoInvokeKernelFunctions() chat_function = kernel.add_function( diff --git a/python/samples/concepts/prompt_templates/configuring_prompts.py b/python/samples/concepts/prompt_templates/configuring_prompts.py index 3e1510127322..502797b58772 100644 --- a/python/samples/concepts/prompt_templates/configuring_prompts.py +++ b/python/samples/concepts/prompt_templates/configuring_prompts.py @@ -8,61 +8,75 @@ from semantic_kernel.functions import KernelArguments from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig +kernel = Kernel() -async def main(): - kernel = Kernel() +useAzureOpenAI = False +model = "gpt-35-turbo" if useAzureOpenAI else "gpt-3.5-turbo" - useAzureOpenAI = False - model = "gpt-35-turbo" if useAzureOpenAI else "gpt-3.5-turbo-1106" - service_id = model +kernel.add_service( + OpenAIChatCompletion(service_id=model, ai_model_id=model), +) - kernel.add_service( - OpenAIChatCompletion(service_id=service_id, ai_model_id=model), - ) +template = """ - template = """ - - Previous information from chat: - {{$chat_history}} - - User: {{$request}} - Assistant: - """ - - print("--- Rendered Prompt ---") - prompt_template_config = PromptTemplateConfig( - template=template, - name="chat", - description="Chat with the assistant", - template_format="semantic-kernel", - input_variables=[ - InputVariable(name="chat_history", description="The conversation history", is_required=False, default=""), - InputVariable(name="request", description="The user's request", is_required=True), - ], - execution_settings=OpenAIChatPromptExecutionSettings(service_id=service_id, max_tokens=4000, temperature=0.2), - ) +Previous information from chat: +{{$chat_history}} + +User: {{$request}} +Assistant: +""" + +print("--- Rendered Prompt ---") +prompt_template_config = PromptTemplateConfig( + template=template, + name="chat", + description="Chat with the assistant", + template_format="semantic-kernel", + input_variables=[ + InputVariable(name="chat_history", description="The conversation history", is_required=False, default=""), + InputVariable(name="request", description="The user's request", is_required=True), + ], + execution_settings=OpenAIChatPromptExecutionSettings(service_id=model, max_tokens=4000, temperature=0.2), +) + +chat_function = kernel.add_function( + function_name="chat", + plugin_name="ChatBot", + prompt_template_config=prompt_template_config, +) - chat = kernel.add_function( - function_name="chat", - plugin_name="ChatBot", - prompt_template_config=prompt_template_config, +chat_history = ChatHistory() + + +async def chat() -> bool: + try: + user_input = input("User:> ") + except KeyboardInterrupt: + print("\n\nExiting chat...") + return False + except EOFError: + print("\n\nExiting chat...") + return False + + if user_input == "exit": + print("\n\nExiting chat...") + return False + + answer = await 
kernel.invoke( + function=chat_function, arguments=KernelArguments( + request=user_input, chat_history=chat_history, + ), ) + chat_history.add_user_message(user_input) + chat_history.add_assistant_message(str(answer)) + print(f"Mosscap:> {answer}") + return True + - chat_history = ChatHistory() - - print("User > ") - while (user_input := input()) != "exit": - result = await kernel.invoke( - chat, - KernelArguments( - request=user_input, - chat_history=chat_history, - ), - ) - result = str(result) - print(result) - chat_history.add_user_message(user_input) - chat_history.add_assistant_message(result) +async def main() -> None: + chatting = True + while chatting: + chatting = await chat() if __name__ == "__main__": diff --git a/python/samples/concepts/prompt_templates/load_yaml_prompt.py b/python/samples/concepts/prompt_templates/load_yaml_prompt.py index b721fbc183c1..c52591e474ab 100644 --- a/python/samples/concepts/prompt_templates/load_yaml_prompt.py +++ b/python/samples/concepts/prompt_templates/load_yaml_prompt.py @@ -13,15 +13,18 @@ async def main(): service_id = "default" chat_service = OpenAIChatCompletion( - ai_model_id="gpt-4-0613", + ai_model_id="gpt-3.5-turbo", service_id=service_id, ) kernel.add_service(chat_service) chat_history = ChatHistory(system_message="Assistant is a large language model") - cur_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources") - plugin = kernel.add_plugin(plugin_name="sample_plugins", parent_directory=cur_dir) + plugin_path = os.path.join( + os.path.dirname(os.path.dirname(os.path.realpath(__file__))), + "resources", + ) + plugin = kernel.add_plugin(plugin_name="sample_plugins", parent_directory=plugin_path) result = await kernel.invoke(plugin["Parrot"], count=2, user_message="I love parrots.", chat_history=chat_history) print(result) diff --git a/python/samples/concepts/prompt_templates/template_language.py b/python/samples/concepts/prompt_templates/template_language.py index fb733357d503..1a7bac59a7a2 100644 --- a/python/samples/concepts/prompt_templates/template_language.py +++ b/python/samples/concepts/prompt_templates/template_language.py @@ -11,19 +11,16 @@ async def main(): kernel = Kernel() - useAzureOpenAI = False - model = "gpt-35-turbo" if useAzureOpenAI else "gpt-3.5-turbo-1106" - service_id = model - + service_id = "template_language" kernel.add_service( - OpenAIChatCompletion(service_id=service_id, ai_model_id=model), + OpenAIChatCompletion(service_id=service_id), ) kernel.add_plugin(TimePlugin(), "time") function_definition = """ - Today is: {{time.Date}} - Current time is: {{time.Time}} + Today is: {{time.date}} + Current time is: {{time.time}} Answer to the following questions using JSON syntax, including the data used. Is it morning, afternoon, evening, or night (morning/afternoon/evening/night)? 
@@ -32,7 +29,7 @@ async def main(): print("--- Rendered Prompt ---") prompt_template_config = PromptTemplateConfig(template=function_definition) - prompt_template = KernelPromptTemplate(prompt_template_config) + prompt_template = KernelPromptTemplate(prompt_template_config=prompt_template_config) rendered_prompt = await prompt_template.render(kernel, arguments=None) print(rendered_prompt) @@ -41,10 +38,11 @@ async def main(): template=function_definition, execution_settings=OpenAIChatPromptExecutionSettings(service_id=service_id, max_tokens=100), function_name="kind_of_day", + prompt_template=prompt_template, ) print("--- Prompt Function Result ---") - result = await kernel.invoke(kind_of_day) + result = await kernel.invoke(function=kind_of_day) print(result) diff --git a/python/samples/concepts/rag/self-critique_rag.py b/python/samples/concepts/rag/self-critique_rag.py index be1aec5261d0..a5a6c325f348 100644 --- a/python/samples/concepts/rag/self-critique_rag.py +++ b/python/samples/concepts/rag/self-critique_rag.py @@ -29,7 +29,7 @@ async def populate_memory(memory: SemanticTextMemory) -> None: async def main() -> None: kernel = Kernel() - azure_ai_search_settings = AzureAISearchSettings() + azure_ai_search_settings = AzureAISearchSettings.create() vector_size = 1536 # Setting up OpenAI services for text completion and text embedding diff --git a/python/samples/concepts/resources/__init__.py b/python/samples/concepts/resources/__init__.py index 54c09891347a..c86a2cef1ef6 100644 --- a/python/samples/concepts/resources/__init__.py +++ b/python/samples/concepts/resources/__init__.py @@ -1,3 +1,5 @@ # Copyright (c) Microsoft. All rights reserved. -# intentionally left empty +from samples.concepts.resources.utils import Colors + +__all__ = ["Colors"] diff --git a/python/samples/concepts/resources/sample_image.jpg b/python/samples/concepts/resources/sample_image.jpg new file mode 100644 index 000000000000..ea6486656fd5 Binary files /dev/null and b/python/samples/concepts/resources/sample_image.jpg differ diff --git a/python/samples/concepts/search/bing_plugin_examples.py b/python/samples/concepts/search/bing_plugin_examples.py index dbe6b91e09ec..92c8686ccf32 100644 --- a/python/samples/concepts/search/bing_plugin_examples.py +++ b/python/samples/concepts/search/bing_plugin_examples.py @@ -65,7 +65,7 @@ async def example2(kernel: Kernel, service_id: str): oracle = kernel.add_function( function_name="oracle", plugin_name="OraclePlugin", - template=prompt, + prompt=prompt, execution_settings=OpenAIChatPromptExecutionSettings( service_id=service_id, max_tokens=150, temperature=0, top_p=1 ), @@ -97,7 +97,7 @@ async def example2(kernel: Kernel, service_id: str): async def main(): kernel = Kernel() - model = "gpt-3.5-turbo-1106" + model = "gpt-3.5-turbo" service_id = model kernel.add_service( diff --git a/python/samples/concepts/service_selector/custom_service_selector.py b/python/samples/concepts/service_selector/custom_service_selector.py new file mode 100644 index 000000000000..3deb3ce85177 --- /dev/null +++ b/python/samples/concepts/service_selector/custom_service_selector.py @@ -0,0 +1,73 @@ +# Copyright (c) Microsoft. All rights reserved. 
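+# This sample defines a custom AIServiceSelector that prefers any execution settings +# whose service name contains "gpt-4", and falls back to the default selection logic +# when none are present; it is attached to the Kernel below via ai_service_selector.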
+ +from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion +from semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion import OpenAIChatCompletion +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.const import DEFAULT_SERVICE_NAME +from semantic_kernel.contents.chat_history import ChatHistory +from semantic_kernel.functions.kernel_arguments import KernelArguments +from semantic_kernel.functions.kernel_function import KernelFunction +from semantic_kernel.kernel import Kernel +from semantic_kernel.kernel_types import AI_SERVICE_CLIENT_TYPE +from semantic_kernel.services.ai_service_client_base import AIServiceClientBase +from semantic_kernel.services.ai_service_selector import AIServiceSelector +from semantic_kernel.services.kernel_services_extension import KernelServicesExtension + + +class CustomServiceSelector(AIServiceSelector): + def select_ai_service( + self, + kernel: "KernelServicesExtension", + function: "KernelFunction", + arguments: "KernelArguments", + type_: type[AI_SERVICE_CLIENT_TYPE] | tuple[type[AI_SERVICE_CLIENT_TYPE], ...] | None = None, + ) -> tuple["AIServiceClientBase", "PromptExecutionSettings"]: + execution_settings_dict = arguments.execution_settings or {} + if func_exec_settings := getattr(function, "prompt_execution_settings", None): + for id, settings in func_exec_settings.items(): + if id not in execution_settings_dict: + execution_settings_dict[id] = settings + if not execution_settings_dict: + from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings + + execution_settings_dict = {DEFAULT_SERVICE_NAME: PromptExecutionSettings()} + + gpt_4_settings = { + service_name: settings + for service_name, settings in execution_settings_dict.items() + if "gpt-4" in service_name + } + if gpt_4_settings: + service_id = list(gpt_4_settings.keys())[0] + service = kernel.get_service(service_id, type=type_) + service_settings = service.get_prompt_execution_settings_from_settings(gpt_4_settings[service_id]) + return service, service_settings + return super().select_ai_service(kernel, function, arguments, type_) + + +kernel = Kernel(ai_service_selector=CustomServiceSelector()) +kernel.add_service(AzureChatCompletion(service_id="gpt-4o")) +kernel.add_service(OpenAIChatCompletion(service_id="gpt-3.5-turbo", ai_model_id="gpt-3.5-turbo")) + +kernel.add_function( + plugin_name="selector", + function_name="select_ai_service", + prompt="Always respond with your name. {{$chat_history}}", + prompt_execution_settings={ + "gpt-4o": PromptExecutionSettings(service_id="gpt-4o", max_tokens=200, temperature=0.0), + "gpt-3.5-turbo": PromptExecutionSettings(service_id="gpt-3.5-turbo", max_tokens=400, temperature=1.0), + }, +) + + +async def main(): + chat_history = ChatHistory() + chat_history.add_user_message("I'm Eduard.") + result = await kernel.invoke(plugin_name="selector", function_name="select_ai_service", chat_history=chat_history) + print(result) + + +if __name__ == "__main__": + import asyncio + + asyncio.run(main()) diff --git a/python/samples/concepts/setup/ALL_SETTINGS.md b/python/samples/concepts/setup/ALL_SETTINGS.md new file mode 100644 index 000000000000..2a0e9b6fb80e --- /dev/null +++ b/python/samples/concepts/setup/ALL_SETTINGS.md @@ -0,0 +1,75 @@ +## AI Service Settings used across SK: + +| Service | Class | Constructor Settings | Environment Variable | Required? 
| Settings Class | +| --- | --- | --- | --- | --- | --- | +OpenAI | [OpenAIChatCompletion](../../../semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion.py) | | | | [OpenAISettings](../../../semantic_kernel/connectors/ai/open_ai/settings/open_ai_settings.py) +| | | ai_model_id | OPENAI_CHAT_MODEL_ID | Yes +| | | api_key | OPENAI_API_KEY | Yes +| | | org_id | OPENAI_ORG_ID | No +| | [OpenAITextCompletion](../../../semantic_kernel/connectors/ai/open_ai/services/open_ai_text_completion.py) +| | | ai_model_id | OPENAI_TEXT_MODEL_ID | Yes +| | | api_key | OPENAI_API_KEY | Yes +| | | org_id | OPENAI_ORG_ID | No +| | [OpenAITextEmbedding](../../../semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding.py) +| | | ai_model_id | OPENAI_EMBEDDING_MODEL_ID | Yes +| | | api_key | OPENAI_API_KEY | Yes +| | | org_id | OPENAI_ORG_ID | No +Azure OpenAI | [AzureOpenAIChatCompletion](../../../semantic_kernel/connectors/ai/open_ai/services/azure_chat_completion.py) | | | | [AzureOpenAISettings](../../../semantic_kernel/connectors/ai/open_ai/settings/azure_open_ai_settings.py) +| | | deployment_name | AZURE_OPENAI_CHAT_DEPLOYMENT_NAME | Yes +| | | api_key | AZURE_OPENAI_API_KEY | Yes +| | | endpoint | AZURE_OPENAI_ENDPOINT | Yes +| | | api_version | AZURE_OPENAI_API_VERSION | Yes +| | | base_url | AZURE_OPENAI_BASE_URL | Yes +| | [AzureOpenAITextCompletion](../../../semantic_kernel/connectors/ai/open_ai/services/azure_text_completion.py) +| | | deployment_name | AZURE_OPENAI_TEXT_DEPLOYMENT_NAME | Yes +| | | api_key | AZURE_OPENAI_API_KEY | Yes +| | | endpoint | AZURE_OPENAI_ENDPOINT | Yes +| | | api_version | AZURE_OPENAI_API_VERSION | Yes +| | | base_url | AZURE_OPENAI_BASE_URL | Yes +| | [AzureOpenAITextEmbedding](../../../semantic_kernel/connectors/ai/open_ai/services/azure_text_embedding.py) +| | | deployment_name | AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME | Yes +| | | api_key | AZURE_OPENAI_API_KEY | Yes +| | | endpoint | AZURE_OPENAI_ENDPOINT | Yes +| | | api_version | AZURE_OPENAI_API_VERSION | Yes +| | | base_url | AZURE_OPENAI_BASE_URL | Yes + +## Memory Service Settings used across SK: + +| Service | Class | Constructor Settings | Environment Variable | Required? 
| Settings Class | +| --- | --- | --- | --- | --- | --- | +AstraDB | [AstraDBMemoryService](../../../semantic_kernel/connectors/memory/astradb/astradb_memory_store.py) | | | | [AstraDBSettings](../../../semantic_kernel/connectors/memory/astradb/astradb_settings.py) +| | | app_token | ASTRADB_APP_TOKEN | Yes +| | | db_id | ASTRADB_DB_ID | Yes +| | | region | ASTRADB_REGION | Yes +| | | keyspace | ASTRADB_KEYSPACE | Yes +Azure AI Search | [AzureAISearchMemoryService](../../../semantic_kernel/connectors/memory/azure_cognitive_search/azure_cognitive_search_memory_store.py) | | | | [AzureAISearchSettings](../../../semantic_kernel/connectors/memory/azure_cognitive_search/azure_ai_search_settings.py) +| | | api_key | AZURE_AI_SEARCH_API_KEY | No +| | | endpoint | AZURE_AI_SEARCH_ENDPOINT | Yes +| | | index_name | AZURE_AI_SEARCH_INDEX_NAME | No +Azure Cosmos DB | [AzureCosmosDBMemoryService](../../../semantic_kernel/connectors/memory/azure_cosmosdb/azure_cosmos_db_memory_store.py) | | | | [AzureCosmosDBSettings](../../../semantic_kernel/connectors/memory/azure_cosmosdb/azure_cosmosdb_settings.py) +| | | api | AZURE_COSMOS_DB_API | No +| | | connection_string | AZURE_COSMOS_DB_CONNECTION_STRING or AZCOSMOS_CONNSTR | No +Mongo DB Atlas | [MongoDBAtlasMemoryService](../../../semantic_kernel/connectors/memory/mongodb_atlas/mongodb_atlas_memory_store.py) | | | | [MongoDBAtlasSettings](../../../semantic_kernel/connectors/memory/mongodb_atlas/mongodb_atlas_settings.py) +| | | connection_string | MONGODB_ATLAS_CONNECTION_STRING | Yes +| | | database_name | MONGODB_ATLAS_DATABASE_NAME | No +| | | index_name | MONGODB_ATLAS_INDEX_NAME | No +Pinecone | [PineconeMemoryService](../../../semantic_kernel/connectors/memory/pinecone/pinecone_memory_store.py) | | | | [PineconeSettings](../../../semantic_kernel/connectors/memory/pinecone/pinecone_settings.py) +| | | api_key | PINECONE_API_KEY | Yes +Postgres | [PostgresMemoryService](../../../semantic_kernel/connectors/memory/postgres/postgres_memory_store.py) | | | | [PostgresSettings](../../../semantic_kernel/connectors/memory/postgres/postgres_settings.py) +| | | connection_string | POSTGRES_CONNECTION_STRING | Yes +Redis | [RedisMemoryService](../../../semantic_kernel/connectors/memory/redis/redis_memory_store.py) | | | | [RedisSettings](../../../semantic_kernel/connectors/memory/redis/redis_settings.py) +| | | connection_string | REDIS_CONNECTION_STRING | Yes +Weaviate | [WeaviateMemoryService](../../../semantic_kernel/connectors/memory/weaviate/weaviate_memory_store.py) | | | | [WeaviateSettings](../../../semantic_kernel/connectors/memory/weaviate/weaviate_settings.py) +| | | url | WEAVIATE_URL | No +| | | api_key | WEAVIATE_API_KEY | No +| | | use_embed | WEAVIATE_USE_EMBED | No + +## Other settings used: + +| Service | Class | Constructor Settings | Environment Variable | Required? 
| Settings Class | +| --- | --- | --- | --- | --- | --- | +Bing | [BingSearch](../../../semantic_kernel/connectors/search_engine/bing_connector.py) | | | | [BingSettings](../../../semantic_kernel/connectors/search_engine/bing_connector_settings.py) +| | | api_key | BING_API_KEY | No +| | | custom_config | BING_CUSTOM_CONFIG | No +Azure Container Apps Sessions | [ACASessionsPlugin](../../../semantic_kernel/core_plugins/sessions_python_tool/sessions_python_plugin.py) | | | | [ACASessionsSettings](../../../semantic_kernel/core_plugins/sessions_python_tool/sessions_python_settings.py) +| | | pool_management_endpoint | ACA_POOL_MANAGEMENT_ENDPOINT | Yes diff --git a/python/samples/concepts/setup/README.md b/python/samples/concepts/setup/README.md new file mode 100644 index 000000000000..c63963b0568f --- /dev/null +++ b/python/samples/concepts/setup/README.md @@ -0,0 +1,75 @@ +# Using environment variables to set up Semantic Kernel + +Semantic Kernel offers multiple ways to set up your connectors; this guide shows them for the OpenAI connectors. + +After installing the semantic-kernel package, you can try these out. + +## From environment settings +Using this method, the constructor will try to find the required settings in the environment variables. +This is done using pydantic settings; see the full docs here: https://docs.pydantic.dev/latest/concepts/pydantic_settings/#usage +We use a prefix for all the settings, with the names defined in the OpenAISettings class. +For OpenAI the prefix is OPENAI_, with the following settings: + +- api_key (`OPENAI_API_KEY`): OpenAI API key, see https://platform.openai.com/account/api-keys +- org_id (`OPENAI_ORG_ID`): This is usually optional unless your account belongs to multiple organizations. +- chat_model_id (`OPENAI_CHAT_MODEL_ID`): The OpenAI chat model ID to use, for example, gpt-3.5-turbo or gpt-4; + this variable is used in the OpenAIChatCompletion class and gets passed to the ai_model_id there. +- text_model_id (`OPENAI_TEXT_MODEL_ID`): The OpenAI text model ID to use, for example, gpt-3.5-turbo-instruct; + this variable is used in the OpenAITextCompletion class and gets passed to the ai_model_id there. +- embedding_model_id (`OPENAI_EMBEDDING_MODEL_ID`): The embedding model ID to use, for example, text-embedding-ada-002; + this variable is used in the OpenAITextEmbedding class and gets passed to the ai_model_id there.
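+ +The snippet below shows this in code; the same code also ships as openai_env_setup.py in this folder: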
+ +```python +import os + +from pydantic import ValidationError + +from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion + +try: +    # when nothing is passed to the constructor, +    # it will use the above environment variable names to find the required settings, +    # in this case it will only fail if the OPENAI_CHAT_MODEL_ID and OPENAI_API_KEY are not found +    service = OpenAIChatCompletion(service_id="openai_chat_service") +except ValidationError as e: +    print(e) +``` + +## From a .env file +When you want to store and use your settings from a specific file (any file, as long as it is in the .env format), you can pass the path to that file to the constructor. This will still look for the same setting names as above, but will try to load them from the file. + +```python +try: +    # this will try to load the settings from the file at the given path +    service = OpenAIChatCompletion(service_id="openai_chat_service", env_file_path="path/to/env_file") +except ValidationError as e: +    print(e) +``` + +## From a different value +If you want to pass the settings yourself, you can do that by passing the values to the constructor; this will ignore the environment variables and the .env file. In this case our API key is stored in an env variable called MY_API_KEY_VAR_NAME. If using a file for this value, we first need to run the following code to load the .env file from the same folder as this file: + +```python +import os + +from dotenv import load_dotenv + +dotenv_path = os.path.join(os.path.dirname(__file__), '.env') +load_dotenv(dotenv_path) +``` + +After that, pass the value directly to the constructor as shown below. We can also fix another value; in this case the ai_model_id, which becomes chat_model_id in the settings, is fixed to gpt-4o: + +```python +try: +    # this will use the given values as the settings +    api_key = os.getenv("MY_API_KEY_VAR_NAME") +    service = OpenAIChatCompletion( +        service_id="openai_chat_service", +        api_key=api_key, +        ai_model_id="gpt-4o", +    ) +except ValidationError as e: +    print(e) +``` + +## One final note: + +It is a convention that env settings are set up in all caps, with underscores between words. The loader we use is case insensitive, so you can use any case you want in your env variables, but it is good practice to follow the convention and use all caps. diff --git a/python/samples/concepts/setup/openai_env_setup.py b/python/samples/concepts/setup/openai_env_setup.py new file mode 100644 index 000000000000..b5dd5875629c --- /dev/null +++ b/python/samples/concepts/setup/openai_env_setup.py @@ -0,0 +1,75 @@ +# Copyright (c) Microsoft. All rights reserved. + +# Semantic Kernel offers multiple ways to set up your connectors; +# this sample shows that for the OpenAI connectors. + +# After installing the semantic-kernel package, +# you can use the following code to set up the OpenAI connector. + +# From environment settings +# Using this method, the connector will try to find the required settings in the environment variables; +# this is done using pydantic settings, see the full docs here: https://docs.pydantic.dev/latest/concepts/pydantic_settings/#usage +# We use a prefix for all the settings and then have names defined in the OpenAISettings class; +# for OpenAI that is OPENAI_ as the prefix, with the following settings: +# - api_key (OPENAI_API_KEY): OpenAI API key, see https://platform.openai.com/account/api-keys +# - org_id (OPENAI_ORG_ID): This is usually optional unless your account belongs to multiple organizations. 
+# - chat_model_id (OPENAI_CHAT_MODEL_ID): The OpenAI chat model ID to use, for example, gpt-3.5-turbo or gpt-4; +#    this variable is used in the OpenAIChatCompletion class and gets passed to the ai_model_id there. +# - text_model_id (OPENAI_TEXT_MODEL_ID): The OpenAI text model ID to use, for example, gpt-3.5-turbo-instruct; +#    this variable is used in the OpenAITextCompletion class and gets passed to the ai_model_id there. +# - embedding_model_id (OPENAI_EMBEDDING_MODEL_ID): The embedding model ID to use, for example, text-embedding-ada-002; +#    this variable is used in the OpenAITextEmbedding class and gets passed to the ai_model_id there. + +import os + +from pydantic import ValidationError + +from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion + +try: +    # when nothing is passed to the constructor, +    # it will use the above environment variable names to find the required settings, +    # in this case it will only fail if the OPENAI_CHAT_MODEL_ID and OPENAI_API_KEY are not found +    service = OpenAIChatCompletion(service_id="openai_chat_service") +except ValidationError as e: +    print(e) + +# From a .env file +# when you want to store and use your settings from a specific file (any file, as long as it is in the .env format) +# you can pass the path to the file to the constructor; +# this will still look for the same setting names as above, but will try to load them from the file + +try: +    # this will try to load the settings from the file at the given path +    service = OpenAIChatCompletion(service_id="openai_chat_service", env_file_path="path/to/env_file") +except ValidationError as e: +    print(e) + +# From a different value +# if you want to pass the settings yourself, you can do that by passing the values to the constructor; +# this will ignore the environment variables and the .env file +# in this case our API key is stored in an env variable called MY_API_KEY_VAR_NAME +# if using a file for this value, then we first need to uncomment and +# run the following code to load the .env file from the same folder as this file: +# from dotenv import load_dotenv +# dotenv_path = os.path.join(os.path.dirname(__file__), '.env') +# load_dotenv(dotenv_path) +# and after that pass the value directly to the constructor as shown below; +# we can also fix another value, in this case the ai_model_id, +# which becomes chat_model_id in the settings, fixed to gpt-4o + +try: +    # this will use the given values as the settings +    api_key = os.getenv("MY_API_KEY_VAR_NAME") +    service = OpenAIChatCompletion( +        service_id="openai_chat_service", +        api_key=api_key, +        ai_model_id="gpt-4o", +    ) +except ValidationError as e: +    print(e) + +# One final note: +# It is a convention that env settings are set up with all caps, and with underscores between words; +# the loader that we use is case insensitive, so you can use any case you want in your env variables, +# but it is good practice to follow the convention and use all caps. 
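+
+# As one last hedged sketch of the case-insensitive loading mentioned above,
+# a variable exported under a different casing would also be found by the loader;
+# the mixed-case name below is purely illustrative, uncomment to try it:
+# os.environ["OpenAI_Api_Key"] = "sk-..."
+# try:
+#     service = OpenAIChatCompletion(service_id="case_insensitive_demo")
+# except ValidationError as e:
+#     print(e)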
diff --git a/python/samples/demos/booking_restaurant/README.md b/python/samples/demos/booking_restaurant/README.md index 37dd9ca2e235..5acbd480a3d2 100644 --- a/python/samples/demos/booking_restaurant/README.md +++ b/python/samples/demos/booking_restaurant/README.md @@ -15,7 +15,7 @@ This sample uses function calling capable models and has been tested with the fo | Model type | Model name/id | Model version | Supported | | --------------- | ------------------------- | ------------------: | --------- | | Chat Completion | gpt-3.5-turbo | 0125 | ✅ | -| Chat Completion | gpt-3.5-turbo-1106 | 1106 | ✅ | +| Chat Completion | gpt-3.5-turbo | 1106 | ✅ | | Chat Completion | gpt-3.5-turbo-0613 | 0613 | ✅ | | Chat Completion | gpt-3.5-turbo-0301 | 0301 | ❌ | | Chat Completion | gpt-3.5-turbo-16k | 0613 | ✅ | diff --git a/python/samples/demos/booking_restaurant/restaurant_booking.py b/python/samples/demos/booking_restaurant/restaurant_booking.py index 153b9ddab78a..13c41c2db0fa 100644 --- a/python/samples/demos/booking_restaurant/restaurant_booking.py +++ b/python/samples/demos/booking_restaurant/restaurant_booking.py @@ -21,7 +21,7 @@ kernel = Kernel() service_id = "open_ai" -ai_service = OpenAIChatCompletion(service_id=service_id, ai_model_id="gpt-3.5-turbo-1106") +ai_service = OpenAIChatCompletion(service_id=service_id, ai_model_id="gpt-3.5-turbo") kernel.add_service(ai_service) try: diff --git a/python/samples/getting_started/.env.example b/python/samples/getting_started/.env.example index c727b53f5235..5334bd632329 100644 --- a/python/samples/getting_started/.env.example +++ b/python/samples/getting_started/.env.example @@ -1,6 +1,12 @@ +GLOBAL_LLM_SERVICE="" OPENAI_API_KEY="" +OPENAI_CHAT_MODEL_ID="" +OPENAI_TEXT_MODEL_ID="" +OPENAI_EMBEDDING_MODEL_ID="" OPENAI_ORG_ID="" -AZURE_OPENAI_DEPLOYMENT_NAME="" +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="" +AZURE_OPENAI_TEXT_DEPLOYMENT_NAME="" +AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME="" AZURE_OPENAI_ENDPOINT="" AZURE_OPENAI_API_KEY="" AZURE_AISEARCH_API_KEY="" diff --git a/python/samples/getting_started/00-getting-started.ipynb b/python/samples/getting_started/00-getting-started.ipynb index 665131db8736..94beaee90f85 100644 --- a/python/samples/getting_started/00-getting-started.ipynb +++ b/python/samples/getting_started/00-getting-started.ipynb @@ -7,7 +7,7 @@ "source": [ "# Setup\n", "\n", - "**Step 1**: Import Semantic Kernel SDK from pypi.org\n" + "Import Semantic Kernel SDK from pypi.org" ] }, { @@ -16,7 +16,83 @@ "metadata": {}, "outputs": [], "source": [ - "!python -m pip install semantic-kernel==1.0.5" + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure paths are correct for the imports\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", + "\n", + "\n", + "sys.path.append(grandparent_dir)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. 
Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's define our kernel for this example." ] }, { @@ -34,7 +110,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Configure the service you'd like to use via the `Service` Enum.\n" + "We will load our settings and get the LLM service to use for the notebook." 
] }, { @@ -59,48 +135,10 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Option 1: using OpenAI\n", - "\n", - "**Step 2**: Add your [OpenAI Key](https://openai.com/product/) key to either your environment variables or to the `.env` file in the same folder (org Id only if you have multiple orgs):\n", - "\n", - "```\n", - "OPENAI_API_KEY=\"sk-...\"\n", - "OPENAI_ORG_ID=\"\"\n", - "```\n", - "The environment variables names should match the names used in the `.env` file, as shown above.\n", - "\n", - "If using the `.env` file, please configure the `env_file_path` parameter with a valid path when creating the ChatCompletion class:\n", - "\n", - "```\n", - "chat_completion = OpenAIChatCompletion(service_id=\"test\", env_file_path=)\n", - "```\n", - "\n", - "Use \"keyword arguments\" to instantiate an OpenAI Chat Completion service and add it to the kernel:\n", - "\n", - "## Option 2: using Azure OpenAI\n", - "\n", - "**Step 2**: Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to either your system's environment variables or to the `.env` file in the same folder:\n", - "\n", - "```\n", - "AZURE_OPENAI_API_KEY=\"...\"\n", - "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", - "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", - "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", - "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", - "```\n", - "The environment variables names should match the names used in the `.env` file, as shown above.\n", - "\n", - "If using the `.env` file, please configure the `env_file_path` parameter with a valid path when creating the ChatCompletion class:\n", - "\n", - "```\n", - "chat_completion = AzureChatCompletion(service_id=\"test\", env_file_path=)\n", - "```\n", - "\n", - "Use \"keyword arguments\" to instantiate an Azure OpenAI Chat Completion service and add it to the kernel:\n" + "We now configure our Chat Completion service on the kernel." 
] }, { @@ -109,20 +147,27 @@ "metadata": {}, "outputs": [], "source": [ + "# Remove all services so that this cell can be re-run without restarting the kernel\n", + "kernel.remove_all_services()\n", + "\n", "service_id = None\n", "if selectedService == Service.OpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", " service_id = \"default\"\n", " kernel.add_service(\n", - " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\"),\n", + " OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", "\n", " service_id = \"default\"\n", " kernel.add_service(\n", - " AzureChatCompletion(service_id=service_id),\n", + " AzureChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )" ] }, @@ -179,7 +224,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/python/samples/getting_started/01-basic-loading-the-kernel.ipynb b/python/samples/getting_started/01-basic-loading-the-kernel.ipynb index 243455c0ee45..3f028149613d 100644 --- a/python/samples/getting_started/01-basic-loading-the-kernel.ipynb +++ b/python/samples/getting_started/01-basic-loading-the-kernel.ipynb @@ -9,14 +9,12 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "To run the notebooks we recommend using Poetry and starting a shell with a virtual environment\n", - "prepared to use SK.\n", + "### Setup\n", "\n", - "See [DEV_SETUP.md](../../python/DEV_SETUP.md) for more information.\n" + "Import Semantic Kernel SDK from pypi.org" ] }, { @@ -25,79 +23,106 @@ "metadata": {}, "outputs": [], "source": [ - "!python -m pip install semantic-kernel==1.0.5" + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "from semantic_kernel import Kernel\n", + "# Make sure paths are correct for the imports\n", "\n", - "kernel = Kernel()" + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", + "\n", + "\n", + "sys.path.append(grandparent_dir)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Configuring API Keys and Endpoints\n", + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. 
If this setting is not included, the Service will default to AzureOpenAI.**\n", "\n", "#### Option 1: using OpenAI\n", "\n", - "Add your [OpenAI Key](https://openai.com/product/) key to either your environment variables or to the `.env` file in the same folder (org Id only if you have multiple orgs):\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", "\n", "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", "OPENAI_API_KEY=\"sk-...\"\n", "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", "```\n", - "The environment variables names should match the names used in the `.env` file, as shown above.\n", - "\n", - "If using the `.env` file, please configure the `env_file_path` parameter with a valid path when creating the ChatCompletion class:\n", - "\n", - "```\n", - "chat_completion = OpenAIChatCompletion(service_id=\"test\", env_file_path=)\n", - "```\n", - "\n", - "Use \"keyword arguments\" to instantiate an OpenAI Chat Completion service and add it to the kernel:\n", + "The names should match the names used in the `.env` file, as shown above.\n", "\n", "#### Option 2: using Azure OpenAI\n", "\n", - "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to either your system's environment variables or to the `.env` file in the same folder:\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", "\n", "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", "AZURE_OPENAI_API_KEY=\"...\"\n", "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", "```\n", - "The environment variables names should match the names used in the `.env` file, as shown above.\n", + "The names should match the names used in the `.env` file, as shown above.\n", "\n", - "If using the `.env` file, please configure the `env_file_path` parameter with a valid path when creating the ChatCompletion class:\n", - "\n", - "```\n", - "chat_completion = AzureChatCompletion(service_id=\"test\", env_file_path=)\n", - "```\n" + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "When using the kernel for AI requests, the kernel needs some settings like URL and credentials to the AI models.\n", - "\n", - "The SDK currently supports OpenAI and Azure OpenAI, among other connectors.\n", + "Let's define our kernel for this example." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from semantic_kernel import Kernel\n", "\n", - "If you need an Azure OpenAI key, go [here](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/quickstart?pivots=rest-api).\n" + "kernel = Kernel()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will load our settings and get the LLM service to use for the notebook." 
] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -118,24 +143,31 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "# Remove all services so that this cell can be re-run without restarting the kernel\n", + "kernel.remove_all_services()\n", + "\n", "service_id = None\n", "if selectedService == Service.OpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", - " service_id = \"oai_chat_gpt\"\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\"),\n", + " OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", "\n", - " service_id = \"aoai_chat_completion\"\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " AzureChatCompletion(service_id=service_id),\n", + " AzureChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )" ] }, @@ -164,7 +196,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.12.3" }, "polyglot_notebook": { "kernelInfo": { diff --git a/python/samples/getting_started/02-running-prompts-from-file.ipynb b/python/samples/getting_started/02-running-prompts-from-file.ipynb index 51ad88928644..4f87190baab3 100644 --- a/python/samples/getting_started/02-running-prompts-from-file.ipynb +++ b/python/samples/getting_started/02-running-prompts-from-file.ipynb @@ -19,6 +19,104 @@ "For instance, [this](../../../prompt_template_samples/FunPlugin/Joke/skprompt.txt) is the **Joke function** part of the **FunPlugin plugin**:\n" ] }, + { + "cell_type": "markdown", + "id": "3feecb6e", + "metadata": {}, + "source": [ + "Import Semantic Kernel SDK from pypi.org" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32187534", + "metadata": {}, + "outputs": [], + "source": [ + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1" + ] + }, + { + "cell_type": "markdown", + "id": "cc58d362", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bc1bc941", + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure paths are correct for the imports\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", + "\n", + "\n", + "sys.path.append(grandparent_dir)" + ] + }, + { + "cell_type": "markdown", + "id": "b5074884", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. 
If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." ] }, + { + "cell_type": "markdown", + "id": "93d7361e", + "metadata": {}, + "source": [ + "Let's move on to learning what prompts are and how to write them." ] }, { "attachments": {}, "cell_type": "markdown", @@ -101,11 +199,51 @@ { "cell_type": "code", "execution_count": null, - "id": "365cfc01", + "id": "9c0688c5", "metadata": {}, "outputs": [], "source": [ - "!python -m pip install semantic-kernel==1.0.5" + "from semantic_kernel import Kernel\n", + "\n", + "kernel = Kernel()" ] }, + { + "cell_type": "markdown", + "id": "63f0788e", + "metadata": {}, + "source": [ + "We will load our settings and get the LLM service to use for the notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "82d16ce6", + "metadata": {}, + "outputs": [], + "source": [ + "from services import Service\n", + "\n", + "from samples.service_settings import ServiceSettings\n", + "\n", + "service_settings = ServiceSettings()\n", + "\n", + "# Select a service to use for this notebook (available services: OpenAI, AzureOpenAI, HuggingFace)\n", + "selectedService = (\n", + "    Service.AzureOpenAI\n", + "    if service_settings.global_llm_service is None\n", + "    else Service(service_settings.global_llm_service.lower())\n", + ")\n", + "print(f\"Using service type: {selectedService}\")" + ] + }, + { + "cell_type": "markdown", + "id": "04ad7f35", + "metadata": {}, + "source": [ + "Let's load our settings and validate that the required ones exist." ] }, { @@ -130,6 +268,14 @@ "print(f\"Using service type: {selectedService}\")" ] }, + { + "cell_type": "markdown", + "id": "c50b4d7a", + "metadata": {}, + "source": [ + "We now configure our Chat Completion service on the kernel." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -137,9 +283,8 @@ "metadata": {}, "outputs": [], "source": [ - "from semantic_kernel import Kernel\n", - "\n", - "kernel = Kernel()\n", + "# Remove all services so that this cell can be re-run without restarting the kernel\n", + "kernel.remove_all_services()\n", "\n", "service_id = None\n", "if selectedService == Service.OpenAI:\n", @@ -147,14 +292,18 @@ "\n", " service_id = \"default\"\n", " kernel.add_service(\n", - " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\"),\n", + " OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", "\n", " service_id = \"default\"\n", " kernel.add_service(\n", - " AzureChatCompletion(service_id=service_id),\n", + " AzureChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )" ] }, diff --git a/python/samples/getting_started/03-prompt-function-inline.ipynb b/python/samples/getting_started/03-prompt-function-inline.ipynb index a6e64753670a..da426c8ff975 100644 --- a/python/samples/getting_started/03-prompt-function-inline.ipynb +++ b/python/samples/getting_started/03-prompt-function-inline.ipynb @@ -9,6 +9,96 @@ "# Running Prompt Functions Inline\n" ] }, + { + "cell_type": "markdown", + "id": "ad85226c", + "metadata": {}, + "source": [ + "Import Semantic Kernel SDK from pypi.org" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a94d2b07", + "metadata": {}, + "outputs": [], + "source": [ + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1" + ] + }, + { + "cell_type": "markdown", + "id": "c704e54b", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7ebb95f2", + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure paths are correct for the imports\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", + "\n", + "\n", + "sys.path.append(grandparent_dir)" + ] + }, + { + "cell_type": "markdown", + "id": "14eb77b5", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. 
If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." ] }, { "attachments": {}, "cell_type": "markdown", @@ -41,20 +131,30 @@ "Prepare a semantic kernel instance first, loading also the AI service settings defined in the [Setup notebook](00-getting-started.ipynb):\n" ] }, + { + "cell_type": "markdown", + "id": "0377f42e", + "metadata": {}, + "source": [ + "Let's define our kernel for this example." + ] + }, { "cell_type": "code", "execution_count": null, - "id": "1da651d4", + "id": "462b281a", "metadata": {}, "outputs": [], "source": [ - "!python -m pip install semantic-kernel==1.0.5" + "from semantic_kernel.kernel import Kernel\n", + "\n", + "kernel = Kernel()" ] }, { "cell_type": "code", "execution_count": null, - "id": "68b770df", + "id": "734d121f", "metadata": {}, "outputs": [], "source": [ @@ -73,6 +173,14 @@ "print(f\"Using service type: {selectedService}\")" ] }, + { + "cell_type": "markdown", + "id": "06740170", + "metadata": {}, + "source": [ + "We now configure our Chat Completion service on the kernel." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -80,24 +188,27 @@ "metadata": {}, "outputs": [], "source": [ - "import semantic_kernel as sk\n", - "\n", - "kernel = sk.Kernel()\n", + "# Remove all services so that this cell can be re-run without restarting the kernel\n", + "kernel.remove_all_services()\n", "\n", "service_id = None\n", "if selectedService == Service.OpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", - " service_id = \"oai_chat_completion\"\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-instruct\"),\n", + " OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", "\n", - " service_id = \"aoai_chat_completion\"\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " AzureChatCompletion(service_id=service_id),\n", + " AzureChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )" ] }, @@ -119,7 +230,7 @@ "metadata": {}, "outputs": [], "source": [ - "from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings\n", + "from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings, OpenAIChatPromptExecutionSettings\n", "from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig\n", "\n", "prompt = \"\"\"{{$input}}\n", @@ -134,7 +245,7 @@ " temperature=0.7,\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", - " execution_settings = OpenAIChatPromptExecutionSettings(\n", + " execution_settings = AzureChatPromptExecutionSettings(\n", " service_id=service_id,\n", " ai_model_id=\"gpt-35-turbo\",\n", " max_tokens=2000,\n", @@ -242,22 +353,26 @@ "metadata": {}, "outputs": [], "source": [ - "kernel = sk.Kernel()\n", + "kernel.remove_all_services()\n", "\n", "service_id = None\n", "if selectedService == Service.OpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", - " service_id = \"oai_chat_gpt\"\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\"),\n", + " OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", "\n", - " service_id = \"aoai_chat_completion\"\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " AzureChatCompletion(service_id=service_id),\n", + " AzureChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )" ] }, @@ -268,9 +383,7 @@ "metadata": {}, "outputs": [], "source": [ - "from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import (\n", - " OpenAIChatPromptExecutionSettings,\n", - ")\n", + "from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings, OpenAIChatPromptExecutionSettings\n", "\n", "prompt = \"\"\"\n", "{{$input}}\n", @@ -292,12 +405,12 @@ "if selectedService == Service.OpenAI:\n", " execution_settings = OpenAIChatPromptExecutionSettings(\n", " service_id=service_id,\n", - " ai_model_id=\"gpt-3.5-turbo-1106\",\n", + " ai_model_id=\"gpt-3.5-turbo\",\n", " max_tokens=2000,\n", " temperature=0.7,\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", - " execution_settings = 
OpenAIChatPromptExecutionSettings(\n", + " execution_settings = AzureChatPromptExecutionSettings(\n", " service_id=service_id,\n", " ai_model_id=\"gpt-35-turbo\",\n", " max_tokens=2000,\n", @@ -342,7 +455,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/python/samples/getting_started/04-kernel-arguments-chat.ipynb b/python/samples/getting_started/04-kernel-arguments-chat.ipynb index 43d46b85628b..6ba6249c6d54 100644 --- a/python/samples/getting_started/04-kernel-arguments-chat.ipynb +++ b/python/samples/getting_started/04-kernel-arguments-chat.ipynb @@ -22,17 +22,97 @@ { "cell_type": "code", "execution_count": null, - "id": "92f69b34", + "id": "3b16c201", "metadata": {}, "outputs": [], "source": [ - "!python -m pip install semantic-kernel==1.0.5" + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1" + ] + }, + { + "cell_type": "markdown", + "id": "85886ed0", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." ] }, { "cell_type": "code", "execution_count": null, - "id": "0a235b31", + "id": "ec88496f", + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure paths are correct for the imports\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", + "\n", + "\n", + "sys.path.append(grandparent_dir)" + ] + }, + { + "cell_type": "markdown", + "id": "208ed165", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. 
If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." ] }, + { + "cell_type": "markdown", + "id": "da290af7", + "metadata": {}, + "source": [ + "We will load our settings and get the LLM service to use for the notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6ed3f9ae", "metadata": {}, "outputs": [], "source": [ @@ -66,16 +146,20 @@ "if selectedService == Service.OpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", - " service_id = \"oai_chat_gpt\"\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\"),\n", + " OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", "\n", - " service_id = \"aoai_chat_completion\"\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " AzureChatCompletion(service_id=service_id),\n", + " AzureChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )" ] }, @@ -120,21 +204,19 @@ "metadata": {}, "outputs": [], "source": [ - "from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import (\n", - " OpenAIChatPromptExecutionSettings,\n", - ")\n", + "from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings, OpenAIChatPromptExecutionSettings\n", "from semantic_kernel.prompt_template import PromptTemplateConfig\n", "from semantic_kernel.prompt_template.input_variable import InputVariable\n", "\n", "if selectedService == Service.OpenAI:\n", " execution_settings = OpenAIChatPromptExecutionSettings(\n", " service_id=service_id,\n", - " ai_model_id=\"gpt-3.5-turbo-1106\",\n", + " ai_model_id=\"gpt-3.5-turbo\",\n", " max_tokens=2000,\n", " temperature=0.7,\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", - " execution_settings = OpenAIChatPromptExecutionSettings(\n", + " execution_settings = AzureChatPromptExecutionSettings(\n", " service_id=service_id,\n", " ai_model_id=\"gpt-35-turbo\",\n", " max_tokens=2000,\n", diff --git 
a/python/samples/getting_started/05-using-the-planner.ipynb b/python/samples/getting_started/05-using-the-planner.ipynb index 4e6484178d8f..64ac282c4980 100644 --- a/python/samples/getting_started/05-using-the-planner.ipynb +++ b/python/samples/getting_started/05-using-the-planner.ipynb @@ -13,7 +13,15 @@ "\n", "From our own testing, planner works best with more powerful models like `gpt4` but sometimes you might get working plans with cheaper models like `gpt-35-turbo`. We encourage you to implement your own versions of the planner and use different models that fit your user needs.\n", "\n", - "Read more about planner [here](https://aka.ms/sk/concepts/planner)\n" + "Read more about planner [here](https://aka.ms/sk/concepts/planner).\n" + ] + }, + { + "cell_type": "markdown", + "id": "1ebf9c0e", + "metadata": {}, + "source": [ + "Import Semantic Kernel SDK from pypi.org" ] }, { @@ -23,13 +31,93 @@ "metadata": {}, "outputs": [], "source": [ - "!python -m pip install -U semantic-kernel==1.0.5" + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1" + ] + }, + { + "cell_type": "markdown", + "id": "5f8e96d7", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." ] }, { "cell_type": "code", "execution_count": null, - "id": "7d548e40", + "id": "e5d9ed81", + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure paths are correct for the imports\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", + "\n", + "\n", + "sys.path.append(grandparent_dir)" + ] + }, + { + "cell_type": "markdown", + "id": "1dcd7e04", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. 
If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." ] }, + { + "cell_type": "markdown", + "id": "1907665d", + "metadata": {}, + "source": [ + "We will load our settings and get the LLM service to use for the notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1a1e3c4e", "metadata": {}, "outputs": [], "source": [ @@ -48,6 +136,14 @@ "print(f\"Using service type: {selectedService}\")" ] }, + { + "cell_type": "markdown", + "id": "4d888f62", + "metadata": {}, + "source": [ + "Let's define some imports that will be used in this example." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -100,20 +196,28 @@ "metadata": {}, "outputs": [], "source": [ - "import semantic_kernel as sk\n", - "import semantic_kernel.connectors.ai.open_ai as sk_oai\n", + "from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings\n", "from semantic_kernel.core_plugins.text_plugin import TextPlugin\n", "from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt\n", + "from semantic_kernel.kernel import Kernel\n", "\n", - "kernel = sk.Kernel()\n", - "service_id = \"default\"\n", + "kernel = Kernel()\n", + "service_id = None\n", "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " sk_oai.OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\"),\n", + " OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " sk_oai.AzureChatCompletion(\n", + " AzureChatCompletion(\n", " service_id=service_id,\n", " ),\n", " )\n", @@ -134,7 +238,7 @@ "\n", "Rewrite the above in the style of Shakespeare.\n", "\"\"\",\n", - " prompt_execution_settings=sk_oai.OpenAIChatPromptExecutionSettings(\n", + " prompt_execution_settings=OpenAIChatPromptExecutionSettings(\n", " service_id=service_id,\n", " max_tokens=2000,\n", " temperature=0.8,\n", @@ -279,18 +383,25 @@ "metadata": {}, "outputs": [], "source": [ - "import semantic_kernel as sk\n", - "import semantic_kernel.connectors.ai.open_ai as sk_oai\n", + "from semantic_kernel.kernel import Kernel\n", "\n", - "kernel = sk.Kernel()\n", - "service_id = \"default\"\n", + "kernel = Kernel()\n", + "service_id = None\n", "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " sk_oai.OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo-1106\"),\n", + " OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", + " service_id = \"default\"\n", " kernel.add_service(\n", - " sk_oai.AzureChatCompletion(\n", + " AzureChatCompletion(\n", " service_id=service_id,\n", " ),\n", " )" @@ -467,7 +578,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/python/samples/getting_started/06-memory-and-embeddings.ipynb b/python/samples/getting_started/06-memory-and-embeddings.ipynb index 7c08a67cf703..53a3623f4b21 100644 --- a/python/samples/getting_started/06-memory-and-embeddings.ipynb +++ b/python/samples/getting_started/06-memory-and-embeddings.ipynb @@ -21,6 +21,14 @@ "To do this, we dive into the key concept of `Semantic Memory` in the Semantic Kernel.\n" ] }, + { + "cell_type": "markdown", + "id": "6713abcd", + "metadata": {}, + "source": [ + "Import Semantic Kernel SDK from pypi.org and other dependencies for this example." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -28,9 +36,89 @@ "metadata": {}, "outputs": [], "source": [ - "!python -m pip install semantic-kernel==1.0.5\n", - "!python -m pip install azure-core==1.30.1\n", - "!python -m pip install azure-search-documents==11.4.0" + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1\n", + "%pip install azure-core==1.30.1\n", + "%pip install azure-search-documents==11.6.0b4" ] }, + { + "cell_type": "markdown", + "id": "318033fe", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d8a3db35", + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure paths are correct for the imports\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", + "\n", + "\n", + "sys.path.append(grandparent_dir)" + ] + }, + { + "cell_type": "markdown", + "id": "2c5f9651", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." + ] + }, + { + "cell_type": "markdown", + "id": "815cac6e", + "metadata": {}, + "source": [ + "We will load our settings and get the LLM service to use for the notebook." 
] }, { @@ -89,14 +177,22 @@ "\n", "# Configure AI service used by the kernel\n", "if selectedService == Service.AzureOpenAI:\n", - " azure_chat_service = AzureChatCompletion(service_id=chat_service_id)\n", + " azure_chat_service = AzureChatCompletion(\n", + " service_id=chat_service_id,\n", + " )\n", " # next line assumes embeddings deployment name is \"text-embedding\", adjust the deployment name to the value of your chat model if needed\n", - " embedding_gen = AzureTextEmbedding(service_id=\"embedding\")\n", + " embedding_gen = AzureTextEmbedding(\n", + " service_id=\"embedding\",\n", + " )\n", " kernel.add_service(azure_chat_service)\n", " kernel.add_service(embedding_gen)\n", "elif selectedService == Service.OpenAI:\n", - " oai_chat_service = OpenAIChatCompletion(service_id=chat_service_id, ai_model_id=\"gpt-3.5-turbo\")\n", - " embedding_gen = OpenAITextEmbedding(ai_model_id=\"text-embedding-ada-002\")\n", + " oai_chat_service = OpenAIChatCompletion(\n", + " service_id=chat_service_id,\n", + " )\n", + " embedding_gen = OpenAITextEmbedding(\n", + " service_id=\"embedding\",\n", + " )\n", " kernel.add_service(oai_chat_service)\n", " kernel.add_service(embedding_gen)\n", "\n", @@ -421,6 +517,25 @@ "Now you might be wondering what happens if you have so much data that it doesn't fit into your RAM? That's where you want to make use of an external Vector Database made specifically for storing and retrieving embeddings. Fortunately, semantic kernel makes this easy thanks to an extensive list of available connectors. In the following section, we will connect to an existing Azure AI Search service that we will use as an external Vector Database to store and retrieve embeddings.\n" ] }, + { + "cell_type": "markdown", + "id": "e78fd381", + "metadata": {}, + "source": [ + "_Please note you will need an Azure AI Search api_key or token credential and endpoint for the following example to work properly._" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4eb9e2bc", + "metadata": {}, + "outputs": [], + "source": [ + "api_key = os.getenv(\"AZURE_AI_SEARCH_API_KEY\")\n", + "endpoint = os.getenv(\"AZURE_AI_SEARCH_ENDPOINT\")" + ] + }, { "cell_type": "code", "execution_count": null, @@ -430,7 +545,7 @@ "source": [ "from semantic_kernel.connectors.memory.azure_cognitive_search import AzureCognitiveSearchMemoryStore\n", "\n", - "acs_memory_store = AzureCognitiveSearchMemoryStore(vector_size=1536)\n", + "acs_memory_store = AzureCognitiveSearchMemoryStore(vector_size=1536, admin_key=api_key, search_endpoint=endpoint)\n", "\n", "memory = SemanticTextMemory(storage=acs_memory_store, embeddings_generator=embedding_gen)\n", "kernel.add_plugin(TextMemoryPlugin(memory), \"TextMemoryPluginACS\")" @@ -497,7 +612,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/python/samples/getting_started/07-hugging-face-for-plugins.ipynb b/python/samples/getting_started/07-hugging-face-for-plugins.ipynb index 313cfcedcd06..241014636847 100644 --- a/python/samples/getting_started/07-hugging-face-for-plugins.ipynb +++ b/python/samples/getting_started/07-hugging-face-for-plugins.ipynb @@ -20,7 +20,8 @@ "metadata": {}, "outputs": [], "source": [ - "!python -m pip install semantic-kernel[hugging_face]==1.0.5" + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel[hugging_face]==1.1.1" ] }, { diff --git 
a/python/samples/getting_started/08-native-function-inline.ipynb b/python/samples/getting_started/08-native-function-inline.ipynb index d6f2838f83dc..2abc66a065c3 100644 --- a/python/samples/getting_started/08-native-function-inline.ipynb +++ b/python/samples/getting_started/08-native-function-inline.ipynb @@ -39,6 +39,14 @@ "Prepare a semantic kernel instance first, loading also the AI service settings defined in the [Setup notebook](00-getting-started.ipynb):\n" ] }, + { + "cell_type": "markdown", + "id": "f39125a5", + "metadata": {}, + "source": [ + "Import Semantic Kernel SDK from pypi.org" + ] + }, { "cell_type": "code", "execution_count": null, @@ -46,7 +54,87 @@ "metadata": {}, "outputs": [], "source": [ - "!python -m pip install semantic-kernel==1.0.5" + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1" ] }, + { + "cell_type": "markdown", + "id": "5f726252", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ecfe74be", + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure paths are correct for the imports\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", + "\n", + "\n", + "sys.path.append(grandparent_dir)" + ] + }, + { + "cell_type": "markdown", + "id": "73a7fd96", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The names should match the names used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." 
+ ] + }, + { + "cell_type": "markdown", + "id": "9a888bb7", + "metadata": {}, + "source": [ + "We will load our settings and get the LLM service to use for the notebook." ] }, { @@ -71,6 +159,14 @@ "print(f\"Using service type: {selectedService}\")" ] }, + { + "cell_type": "markdown", + "id": "fcee3dc1", + "metadata": {}, + "source": [ + "We now configure our Chat Completion service on the kernel." + ] + }, { "cell_type": "code", "execution_count": null, @@ -79,22 +175,28 @@ "outputs": [], "source": [ "from semantic_kernel import Kernel\n", - "from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, OpenAIChatCompletion\n", "\n", "kernel = Kernel()\n", "\n", - "if selectedService == Service.AzureOpenAI:\n", - " service_id = \"aoai_chat\" # used later in the notebook\n", - " azure_chat_service = AzureChatCompletion(\n", - " service_id=service_id\n", - " ) # set the deployment name to the value of your chat model\n", - " kernel.add_service(azure_chat_service)\n", - "\n", - "# Configure OpenAI service\n", + "service_id = None\n", "if selectedService == Service.OpenAI:\n", - " service_id = \"oai_chat\" # used later in the notebook\n", - " oai_chat_service = OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-4-turbo-1106\")\n", - " kernel.add_service(oai_chat_service)" + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "\n", + " service_id = \"default\"\n", + " kernel.add_service(\n", + " OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", + " service_id = \"default\"\n", + " kernel.add_service(\n", + " AzureChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", + " )" ] }, { @@ -169,7 +271,7 @@ "metadata": {}, "outputs": [], "source": [ - "from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings\n", + "from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings, OpenAIChatPromptExecutionSettings\n", "from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig\n", "\n", "prompt = \"\"\"\n", @@ -184,12 +286,12 @@ "if selectedService == Service.OpenAI:\n", " execution_settings = OpenAIChatPromptExecutionSettings(\n", " service_id=service_id,\n", - " ai_model_id=\"gpt-3.5-turbo-1106\",\n", + " ai_model_id=\"gpt-3.5-turbo\",\n", " max_tokens=2000,\n", " temperature=0.7,\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", - " execution_settings = OpenAIChatPromptExecutionSettings(\n", + " execution_settings = AzureChatPromptExecutionSettings(\n", " service_id=service_id,\n", " ai_model_id=\"gpt-35-turbo\",\n", " max_tokens=2000,\n", @@ -280,22 +382,27 @@ "metadata": {}, "outputs": [], "source": [ - "from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, OpenAIChatCompletion\n", + "kernel.remove_all_services()\n", "\n", - "kernel = Kernel()\n", + "service_id = None\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", "\n", - "if selectedService == Service.AzureOpenAI:\n", - " service_id = \"aoai_chat\" # used later in the notebook\n", - " azure_chat_service = AzureChatCompletion(\n", - " service_id=service_id\n", - " ) # set the deployment name to the value of your chat model\n", - " kernel.add_service(azure_chat_service)\n", + " service_id = \"default\"\n", + " kernel.add_service(\n", + " 
OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", "\n", - "# Configure OpenAI service\n", - "if selectedService == Service.OpenAI:\n", - " service_id = \"oai_chat\" # used later in the notebook\n", - " oai_chat_service = OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-4-turbo-1106\")\n", - " kernel.add_service(oai_chat_service)" + " service_id = \"default\"\n", + " kernel.add_service(\n", + " AzureChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", + " )" ] }, { @@ -315,14 +422,10 @@ "outputs": [], "source": [ "import sys\n", + "from typing import Annotated\n", "\n", "from semantic_kernel.functions import kernel_function\n", "\n", - "if sys.version_info >= (3, 9):\n", - " from typing import Annotated\n", - "else:\n", - " from typing_extensions import Annotated\n", - "\n", "\n", "class GenerateNumberPlugin:\n", " \"\"\"\n", @@ -395,12 +498,12 @@ "if selectedService == Service.OpenAI:\n", " execution_settings = OpenAIChatPromptExecutionSettings(\n", " service_id=service_id,\n", - " ai_model_id=\"gpt-3.5-turbo-1106\",\n", + " ai_model_id=\"gpt-3.5-turbo\",\n", " max_tokens=2000,\n", " temperature=0.7,\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", - " execution_settings = OpenAIChatPromptExecutionSettings(\n", + " execution_settings = AzureChatPromptExecutionSettings(\n", " service_id=service_id,\n", " ai_model_id=\"gpt-35-turbo\",\n", " max_tokens=2000,\n", @@ -565,12 +668,12 @@ "if selectedService == Service.OpenAI:\n", " execution_settings = OpenAIChatPromptExecutionSettings(\n", " service_id=service_id,\n", - " ai_model_id=\"gpt-3.5-turbo-1106\",\n", + " ai_model_id=\"gpt-3.5-turbo\",\n", " max_tokens=2000,\n", " temperature=0.7,\n", " )\n", "elif selectedService == Service.AzureOpenAI:\n", - " execution_settings = OpenAIChatPromptExecutionSettings(\n", + " execution_settings = AzureChatPromptExecutionSettings(\n", " service_id=service_id,\n", " ai_model_id=\"gpt-35-turbo\",\n", " max_tokens=2000,\n", diff --git a/python/samples/getting_started/09-groundedness-checking.ipynb b/python/samples/getting_started/09-groundedness-checking.ipynb index 8b669c2c666a..33e787ea209a 100644 --- a/python/samples/getting_started/09-groundedness-checking.ipynb +++ b/python/samples/getting_started/09-groundedness-checking.ipynb @@ -7,6 +7,8 @@ "source": [ "# Groundedness Checking Plugins\n", "\n", + "For the proper configuration settings and setup, please follow the steps outlined at the beginning of the [first](./00-getting-started.ipynb) getting started notebook.\n", + "\n", "A well-known problem with large language models (LLMs) is that they make things up. These are sometimes called 'hallucinations' but a safer (and less anthropomorphic) term is 'ungrounded addition' - something in the text which cannot be firmly established. When attempting to establish whether or not something in an LLM response is 'true' we can either check for it in the supplied prompt (this is called 'narrow grounding') or use our general knowledge ('broad grounding'). Note that narrow grounding can lead to things being classified as 'true, but ungrounded.' 
For example \"I live in Switzerland\" is **not** _narrowly_ grounded in \"I live in Geneva\" even though it must be true (it **is** _broadly_ grounded).\n", "\n", "In this notebook we run a simple grounding pipeline, to see if a summary text has any ungrounded additions as compared to the original, and use this information to improve the summary text. This can be done in three stages:\n", @@ -15,9 +17,105 @@ "1. Check to see if these entities appear in the original (grounding) text\n", "1. Remove the ungrounded entities from the summary text\n", "\n", - "What is an 'entity' in this context? In its simplest form, it's a named object such as a person or place (so 'Dean' or 'Seattle'). However, the idea could be a _claim_ which relates concepts (such as 'Dean lives near Seattle'). In this notebook, we will keep to the simpler case of named objects.\n", + "What is an 'entity' in this context? In its simplest form, it's a named object such as a person or place (so 'Dean' or 'Seattle'). However, the idea could be a _claim_ which relates concepts (such as 'Dean lives near Seattle'). In this notebook, we will keep to the simpler case of named objects.\n" + ] + }, + { + "cell_type": "markdown", + "id": "60e2513c", + "metadata": {}, + "source": [ + "Import Semantic Kernel SDK from pypi.org" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19ce6d37", + "metadata": {}, + "outputs": [], + "source": [ + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1" + ] + }, + { + "cell_type": "markdown", + "id": "93e86ea5", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "47a5560b", + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure paths are correct for the imports\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", "\n", - "Let us first define our grounding text:\n" + "\n", + "sys.path.append(grandparent_dir)" + ] + }, + { + "cell_type": "markdown", + "id": "181f38db", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. 
If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The variable names should match those used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The variable names should match those used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." + ] + }, + { + "cell_type": "markdown", + "id": "fadcfde4", + "metadata": {}, + "source": [ + "Let us define our grounding text:" ] }, { @@ -67,36 +165,22 @@ }, { "cell_type": "markdown", - "id": "73f4abc1", + "id": "4fd80b62", "metadata": {}, "source": [ - "## Set up Semantic Kernel\n", - "\n", - "We prepare our kernel in the usual way:\n" + "We will load our settings and get the LLM service to use for the notebook." ] }, { "cell_type": "code", "execution_count": null, - "id": "7b22d324", - "metadata": {}, - "outputs": [], - "source": [ - "!python -m pip install semantic-kernel==1.0.5" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e13d3519", + "id": "c2d4f01f", "metadata": {}, "outputs": [], "source": [ "from services import Service\n", "\n", "from samples.service_settings import ServiceSettings\n", - "from semantic_kernel import Kernel\n", - "from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, OpenAIChatCompletion\n", "\n", "service_settings = ServiceSettings()\n", "\n", @@ -106,20 +190,48 @@ " if service_settings.global_llm_service is None\n", " else Service(service_settings.global_llm_service.lower())\n", ")\n", - "print(f\"Using service type: {selectedService}\")\n", + "print(f\"Using service type: {selectedService}\")" ] + }, + { + "cell_type": "markdown", + "id": "723087dc", + "metadata": {}, + "source": [ + "We now configure our Chat Completion service on the kernel." 
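Before configuring the service, it may help to see the three-stage flow from the introduction as plain Python. This is only an illustrative sketch: the notebook implements each stage with LLM prompt functions, whereas the helpers below are hypothetical stand-ins based on simple string matching.

```python
def extract_entities(summary: str) -> list[str]:
    # Stage 1: extract named entities from the summary.
    # Hypothetical stand-in: the notebook delegates this step to an LLM.
    return [word.strip(".,") for word in summary.split() if word.istitle()]


def check_grounding(entities: list[str], grounding_text: str) -> list[str]:
    # Stage 2: an entity is ungrounded if it never appears in the source text.
    return [entity for entity in entities if entity not in grounding_text]


def remove_ungrounded(summary: str, ungrounded: list[str]) -> str:
    # Stage 3: strip the ungrounded entities from the summary.
    for entity in ungrounded:
        summary = summary.replace(entity, "")
    return summary


summary = "Dean lives near Seattle."
grounding = "I live in Geneva, said Dean."
ungrounded = check_grounding(extract_entities(summary), grounding)
print(remove_ungrounded(summary, ungrounded))  # 'Seattle' is dropped
```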
+ ] }, { "cell_type": "code", "execution_count": null, "id": "e13d3519", "metadata": {}, "outputs": [], "source": [ "from semantic_kernel import Kernel\n", "from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion, OpenAIChatCompletion\n", "\n", "kernel = Kernel()\n", - "# Configure AI service used by the kernel\n", - "if selectedService == Service.AzureOpenAI:\n", + "\n", + "service_id = None\n", + "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "\n", " service_id = \"default\"\n", - " azure_chat_service = AzureChatCompletion(\n", - " service_id=service_id\n", - " ) # set the deployment name to the value of your chat model\n", - " kernel.add_service(azure_chat_service)\n", - "else:\n", + " kernel.add_service(\n", + " OpenAIChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", " service_id = \"default\"\n", - " oai_chat_service = OpenAIChatCompletion(service_id=service_id, ai_model_id=\"gpt-3.5-turbo\")\n", - " kernel.add_service(oai_chat_service)" + " kernel.add_service(\n", + " AzureChatCompletion(\n", + " service_id=service_id,\n", + " ),\n", + " )" ] }, { diff --git a/python/samples/getting_started/10-multiple-results-per-prompt.ipynb b/python/samples/getting_started/10-multiple-results-per-prompt.ipynb index c424bb34cb4b..cbb31df9c305 100644 --- a/python/samples/getting_started/10-multiple-results-per-prompt.ipynb +++ b/python/samples/getting_started/10-multiple-results-per-prompt.ipynb @@ -18,6 +18,14 @@ "In this notebook we show how you can in a single request, have the LLM model return multiple results per prompt. This is useful for running experiments where you want to evaluate the robustness of your prompt and the parameters of your config against a particular large language model.\n" ] }, + { + "cell_type": "markdown", + "id": "f7120635", + "metadata": {}, + "source": [ + "Install the Semantic Kernel SDK from pypi.org" + ] + }, { "cell_type": "code", "execution_count": null, @@ -25,13 +33,93 @@ "metadata": {}, "outputs": [], "source": [ - "!python -m pip install semantic-kernel==1.0.5" + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1" ] + }, + { + "cell_type": "markdown", + "id": "4ad09f90", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." ] }, { "cell_type": "code", "execution_count": null, - "id": "3f4bfee4", + "id": "5cff141d", + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure paths are correct for the imports\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", + "\n", + "\n", + "sys.path.append(grandparent_dir)" ] + }, + { + "cell_type": "markdown", + "id": "d4d76e3d", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. 
Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The variable names should match those used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The variable names should match those used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." + ] + }, + { + "cell_type": "markdown", + "id": "73c2e146", + "metadata": {}, + "source": [ + "We will load our settings and get the LLM service to use for the notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f924e1f4", "metadata": {}, "outputs": [], "source": [ @@ -50,14 +138,6 @@ "print(f\"Using service type: {selectedService}\")" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "508ad44f", - "metadata": {}, - "outputs": [], - "source": [] - }, { "attachments": {}, "cell_type": "markdown", @@ -75,38 +155,48 @@ "outputs": [], "source": [ "from semantic_kernel import Kernel\n", - "from semantic_kernel.connectors.ai.hugging_face import ( # noqa: F401\n", - " HuggingFacePromptExecutionSettings,\n", - " HuggingFaceTextCompletion,\n", - ")\n", - "from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings # noqa: F401\n", - "from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings # noqa: F401\n", - "from semantic_kernel.connectors.ai.open_ai import OpenAITextPromptExecutionSettings # noqa: F401\n", "from semantic_kernel.connectors.ai.open_ai import (\n", " AzureChatCompletion,\n", + " AzureChatPromptExecutionSettings, # noqa: F401\n", " AzureTextCompletion,\n", " OpenAIChatCompletion,\n", + " OpenAIChatPromptExecutionSettings, # noqa: F401\n", " OpenAITextCompletion,\n", + " OpenAITextPromptExecutionSettings, # noqa: F401\n", ")\n", "\n", "kernel = Kernel()\n", "\n", "# Configure Azure LLM service\n", - "if selectedService == Service.AzureOpenAI:\n", - " azure_text_service = AzureTextCompletion(\n", - " service_id=\"aoai_text\"\n", - " ) # set the deployment name to the value of your text model (e.g. 
gpt-35-turbo-instruct)\n", - " azure_chat_service = AzureChatCompletion(\n", - " service_id=\"aoai_chat\"\n", - " ) # set the deployment name to the value of your chat model\n", - "\n", - "# Configure OpenAI service\n", + "service_id = None\n", "if selectedService == Service.OpenAI:\n", - " oai_text_service = OpenAITextCompletion(service_id=\"oai_text\", ai_model_id=\"gpt-3.5-turbo-instruct\")\n", - " oai_chat_service = OpenAIChatCompletion(service_id=\"oai_chat\", ai_model_id=\"gpt-3.5-turbo\")\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "\n", + " service_id = \"default\"\n", + " oai_chat_service = OpenAIChatCompletion(\n", + " service_id=\"oai_chat\",\n", + " )\n", + " oai_text_service = OpenAITextCompletion(\n", + " service_id=\"oai_text\",\n", + " )\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", + " service_id = \"default\"\n", + " aoai_chat_service = AzureChatCompletion(\n", + " service_id=\"aoai_chat\",\n", + " )\n", + " aoai_text_service = AzureTextCompletion(\n", + " service_id=\"aoai_text\",\n", + " )\n", "\n", "# Configure Hugging Face service\n", "if selectedService == Service.HuggingFace:\n", + " from semantic_kernel.connectors.ai.hugging_face import ( # noqa: F401\n", + " HuggingFacePromptExecutionSettings,\n", + " HuggingFaceTextCompletion,\n", + " )\n", + "\n", " hf_text_service = HuggingFaceTextCompletion(service_id=\"hf_text\", ai_model_id=\"distilgpt2\", task=\"text-generation\")" ] }, @@ -159,10 +249,9 @@ " prompt = \"What is the purpose of a rubber duck?\"\n", "\n", " results = await oai_text_service.get_text_contents(prompt=prompt, settings=oai_text_prompt_execution_settings)\n", - " i = 1\n", - " for result in results:\n", - " print(f\"Result {i}: {result}\")\n", - " i += 1" + "\n", + " for i, result in enumerate(results):\n", + " print(f\"Result {i+1}: {result}\")" ] }, { @@ -184,11 +273,10 @@ "if selectedService == Service.AzureOpenAI:\n", " prompt = \"provide me a list of possible meanings for the acronym 'ORLD'\"\n", "\n", - " results = await azure_text_service.get_text_contents(prompt=prompt, settings=oai_text_prompt_execution_settings)\n", - " i = 1\n", - " for result in results:\n", - " print(f\"Result {i}: {result}\")\n", - " i += 1" + " results = await aoai_text_service.get_text_contents(prompt=prompt, settings=oai_text_prompt_execution_settings)\n", + "\n", + " for i, result in enumerate(results):\n", + " print(f\"Result {i+1}: {result}\")" ] }, { @@ -209,7 +297,8 @@ "source": [ "if selectedService == Service.HuggingFace:\n", " hf_prompt_execution_settings = HuggingFacePromptExecutionSettings(\n", - " service_id=\"hf_text\", extension_data={\"max_new_tokens\": 80, \"temperature\": 0.7, \"top_p\": 1}\n", + " service_id=\"hf_text\",\n", + " extension_data={\"max_new_tokens\": 80, \"temperature\": 0.7, \"top_p\": 1, \"num_return_sequences\": 3},\n", " )" ] }, @@ -223,10 +312,10 @@ "if selectedService == Service.HuggingFace:\n", " prompt = \"The purpose of a rubber duck is\"\n", "\n", - " results = await hf_text_service.get_text_contents(\n", - " prompt=prompt, prompt_execution_settings=hf_prompt_execution_settings\n", - " )\n", - " print(\"\".join(results))" + " results = await hf_text_service.get_text_contents(prompt=prompt, settings=hf_prompt_execution_settings)\n", + "\n", + " for i, result in enumerate(results):\n", + " print(f\"Result {i + 1}: {result}\")" ] }, { @@ -282,10 +371,9 @@ " results = await 
oai_chat_service.get_chat_message_contents(\n", " chat_history=chat, settings=oai_chat_prompt_execution_settings\n", " )\n", - " i = 0\n", - " for result in results:\n", - " print(f\"Result {i + 1}: {result!s}\")\n", - " i += 1" + "\n", + " for i, result in enumerate(results):\n", + " print(f\"Result {i + 1}: {result!s}\")" ] }, { @@ -328,13 +416,12 @@ " )\n", " chat = ChatHistory()\n", " chat.add_user_message(content)\n", - " results = await azure_chat_service.get_chat_message_contents(\n", + " results = await aoai_chat_service.get_chat_message_contents(\n", " chat_history=chat, settings=az_oai_prompt_execution_settings\n", " )\n", - " i = 0\n", - " for result in results:\n", - " print(f\"Result {i + 1}: {result!s}\")\n", - " i += 1" + "\n", + " for i, result in enumerate(results):\n", + " print(f\"Result {i + 1}: {result!s}\")" ] }, { @@ -384,9 +471,8 @@ " current_time = time.time()\n", "\n", " # Update texts with new results\n", - " for idx, result in enumerate(results):\n", - " if idx < number_of_responses:\n", - " texts[idx] += str(result)\n", + " for result in results:\n", + " texts[result.choice_index] += str(result)\n", "\n", " # Clear and display output at intervals\n", " if current_time - last_clear_time > clear_interval:\n", @@ -415,7 +501,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/python/samples/getting_started/11-streaming-completions.ipynb b/python/samples/getting_started/11-streaming-completions.ipynb index e8df04f150d7..22b5815e0308 100644 --- a/python/samples/getting_started/11-streaming-completions.ipynb +++ b/python/samples/getting_started/11-streaming-completions.ipynb @@ -11,6 +11,14 @@ "Here is an example pattern if you want to stream your multiple results. Note that this is not supported for Hugging Face text completions at this time.\n" ] }, + { + "cell_type": "markdown", + "id": "a3dd8590", + "metadata": {}, + "source": [ + "Install the Semantic Kernel SDK from pypi.org" + ] + }, { "cell_type": "code", "execution_count": null, @@ -18,13 +26,93 @@ "metadata": {}, "outputs": [], "source": [ - "!python -m pip install semantic-kernel==1.0.5" + "# Note: if using a Poetry virtual environment, do not run this cell\n", + "%pip install semantic-kernel==1.1.1" ] + }, + { + "cell_type": "markdown", + "id": "fd94029f", + "metadata": {}, + "source": [ + "Initial configuration for the notebook to run properly." ] }, { "cell_type": "code", "execution_count": null, - "id": "e76c7c0b", + "id": "7547e59b", + "metadata": {}, + "outputs": [], + "source": [ + "# Make sure paths are correct for the imports\n", + "\n", + "import os\n", + "import sys\n", + "\n", + "notebook_dir = os.path.abspath(\"\")\n", + "parent_dir = os.path.dirname(notebook_dir)\n", + "grandparent_dir = os.path.dirname(parent_dir)\n", + "\n", + "\n", + "sys.path.append(grandparent_dir)" ] + }, + { + "cell_type": "markdown", + "id": "73ba03ae", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. 
If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://openai.com/product/) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The variable names should match those used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The variable names should match those used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." + ] + }, + { + "cell_type": "markdown", + "id": "fd931c14", + "metadata": {}, + "source": [ + "We will load our settings and get the LLM service to use for the notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a9a5c87a", "metadata": {}, "outputs": [], "source": [ @@ -43,14 +131,6 @@ "print(f\"Using service type: {selectedService}\")" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "508ad44f", - "metadata": {}, - "outputs": [], - "source": [] - }, { "attachments": {}, "cell_type": "markdown", @@ -68,43 +148,48 @@ "outputs": [], "source": [ "from semantic_kernel import Kernel\n", - "from semantic_kernel.connectors.ai.hugging_face import HuggingFacePromptExecutionSettings # noqa: F401\n", - "from semantic_kernel.connectors.ai.hugging_face import HuggingFaceTextCompletion\n", - "from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings # noqa: F401\n", - "from semantic_kernel.connectors.ai.open_ai import OpenAIChatPromptExecutionSettings # noqa: F401\n", - "from semantic_kernel.connectors.ai.open_ai import OpenAITextPromptExecutionSettings # noqa: F401\n", "from semantic_kernel.connectors.ai.open_ai import (\n", " AzureChatCompletion,\n", + " AzureChatPromptExecutionSettings, # noqa: F401\n", " AzureTextCompletion,\n", " OpenAIChatCompletion,\n", + " OpenAIChatPromptExecutionSettings, # noqa: F401\n", " OpenAITextCompletion,\n", + " OpenAITextPromptExecutionSettings, # noqa: F401\n", ")\n", "from semantic_kernel.contents import ChatHistory # noqa: F401\n", "\n", "kernel = Kernel()\n", "\n", - "# Configure Azure LLM service\n", - "if selectedService == Service.AzureOpenAI:\n", - " azure_text_service = AzureTextCompletion(\n", - " service_id=\"aoai_text\",\n", - " ) # set the environment variable AZURE_OPENAI_TEXT_DEPLOYMENT_NAME to the value of your text model (e.g. 
gpt-35-turbo-instruct)\n", - " azure_chat_service = AzureChatCompletion(\n", - " service_id=\"aoai_chat\",\n", - " ) # set the environment variable AZURE_OPENAI_CHAT_DEPLOYMENT_NAME to the value of your chat model\n", - "\n", - "# Configure OpenAI service\n", + "service_id = None\n", "if selectedService == Service.OpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion\n", + "\n", + " service_id = \"default\"\n", + " oai_chat_service = OpenAIChatCompletion(\n", + " service_id=\"oai_chat\",\n", + " )\n", " oai_text_service = OpenAITextCompletion(\n", " service_id=\"oai_text\",\n", - " ai_model_id=\"gpt-3.5-turbo-instruct\",\n", " )\n", - " oai_chat_service = OpenAIChatCompletion(\n", - " service_id=\"oai_chat\",\n", - " ai_model_id=\"gpt-3.5-turbo\",\n", + "elif selectedService == Service.AzureOpenAI:\n", + " from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion\n", + "\n", + " service_id = \"default\"\n", + " aoai_chat_service = AzureChatCompletion(\n", + " service_id=\"aoai_chat\",\n", + " )\n", + " aoai_text_service = AzureTextCompletion(\n", + " service_id=\"aoai_text\",\n", " )\n", "\n", "# Configure Hugging Face service\n", "if selectedService == Service.HuggingFace:\n", + " from semantic_kernel.connectors.ai.hugging_face import (\n", + " HuggingFacePromptExecutionSettings, # noqa: F401\n", + " HuggingFaceTextCompletion,\n", + " )\n", + "\n", " hf_text_service = HuggingFaceTextCompletion(ai_model_id=\"distilgpt2\", task=\"text-generation\")" ] }, @@ -175,7 +260,7 @@ "source": [ "if selectedService == Service.AzureOpenAI:\n", " prompt = \"provide me a list of possible meanings for the acronym 'ORLD'\"\n", - " stream = azure_text_service.get_streaming_text_contents(prompt=prompt, settings=oai_prompt_execution_settings)\n", + " stream = aoai_text_service.get_streaming_text_contents(prompt=prompt, settings=oai_prompt_execution_settings)\n", " async for message in stream:\n", " print(str(message[0]), end=\"\")" ] @@ -315,7 +400,7 @@ " chat = ChatHistory()\n", " chat.add_system_message(content)\n", " chat.add_user_message(\"What is the purpose of a rubber duck?\")\n", - " stream = azure_chat_service.get_streaming_chat_message_contents(\n", + " stream = aoai_chat_service.get_streaming_chat_message_contents(\n", " chat_history=chat, settings=az_oai_chat_prompt_execution_settings\n", " )\n", " async for text in stream:\n", diff --git a/python/samples/getting_started/CONFIGURING_THE_KERNEL.md b/python/samples/getting_started/CONFIGURING_THE_KERNEL.md new file mode 100644 index 000000000000..3c299be623d1 --- /dev/null +++ b/python/samples/getting_started/CONFIGURING_THE_KERNEL.md @@ -0,0 +1,63 @@ +## Configuring the Kernel + +As covered in the notebooks, we require a `.env` file with the proper settings for the model you use. A `.env` file must be placed in the `getting_started` directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created. + +If interested, as you learn more about Semantic Kernel, there are a few other ways to make sure your secrets, keys, and settings are used: + +### 1. Environment Variables + +Set the keys/secrets/endpoints as environment variables in your system. In Semantic Kernel, we leverage Pydantic Settings. If using Environment Variables, it isn't required to pass in explicit arguments to class constructors. 
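For example, the following minimal sketch relies purely on exported environment variables (assuming `OPENAI_API_KEY` and `OPENAI_CHAT_MODEL_ID` are set in the shell); no constructor arguments are needed because the settings are resolved by Pydantic:

```python
import os

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion

# Assumes, e.g.:
#   export OPENAI_API_KEY="sk-..."
#   export OPENAI_CHAT_MODEL_ID="gpt-3.5-turbo"
assert "OPENAI_API_KEY" in os.environ, "set OPENAI_API_KEY before running"

kernel = Kernel()
kernel.add_service(OpenAIChatCompletion(service_id="default"))
```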
+ + **NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file or environment variables. If this setting is not included, the Service will default to AzureOpenAI.** + +#### Option 1: using OpenAI + +Add your [OpenAI Key](https://platform.openai.com/docs/overview) to either your environment variables or to the `.env` file in the same folder (org Id only if you have multiple orgs): + +``` +GLOBAL_LLM_SERVICE="OpenAI" +OPENAI_API_KEY="sk-..." +OPENAI_ORG_ID="" +OPENAI_CHAT_MODEL_ID="" +``` +The environment variable names should match those used in the `.env` file, as shown above. + +Use keyword arguments to instantiate an OpenAI Chat Completion service and add it to the kernel, or omit them entirely, as in the sketch earlier in this section. + +#### Option 2: using Azure OpenAI + +Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to either your system's environment variables or to the `.env` file in the same folder: + +``` +GLOBAL_LLM_SERVICE="AzureOpenAI" +AZURE_OPENAI_API_KEY="..." +AZURE_OPENAI_ENDPOINT="https://..." +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="..." +AZURE_OPENAI_TEXT_DEPLOYMENT_NAME="..." +AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME="..." +AZURE_OPENAI_API_VERSION="..." +``` +The environment variable names should match those used in the `.env` file, as shown above. + +Use keyword arguments to instantiate an Azure OpenAI Chat Completion service and add it to the kernel in the same way. + +### 2. Custom .env file path + +It is possible to configure the constructor with an absolute or relative file path to point the settings to a `.env` file located outside of the `getting_started` directory. + +For OpenAI: + +``` +chat_completion = OpenAIChatCompletion(service_id="test", env_file_path='/path/to/file') +``` + +For AzureOpenAI: + +``` +chat_completion = AzureChatCompletion(service_id="test", env_file_path='/path/to/file') +``` + +### 3. Manual Configuration + +- Manually configure the `api_key` or required parameters on either the `OpenAIChatCompletion` or `AzureChatCompletion` constructor with keyword arguments. +- This requires the user to manage their own keys/secrets as they aren't relying on the underlying environment variables or `.env` file. diff --git a/python/samples/getting_started/third_party/.env.example b/python/samples/getting_started/third_party/.env.example index 7e7d7d0dcce2..413f1a63cb58 100644 --- a/python/samples/getting_started/third_party/.env.example +++ b/python/samples/getting_started/third_party/.env.example @@ -1,7 +1,15 @@ +GLOBAL_LLM_SERVICE="" OPENAI_API_KEY="" +OPEN_AI_CHAT_MODEL_ID="" +OPEN_AI_TEXT_MODEL_ID="" +OPEN_AI_EMBEDDING_MODEL_ID="" OPENAI_ORG_ID="" -AZURE_OPENAI_DEPLOYMENT_NAME="" +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="" +AZURE_OPENAI_TEXT_DEPLOYMENT_NAME="" +AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME="" AZURE_OPENAI_ENDPOINT="" AZURE_OPENAI_API_KEY="" +AZURE_AISEARCH_API_KEY="" +AZURE_AISEARCH_URL="" WEAVIATE_URL="http://localhost:8080" # WEAVIATE_API_KEY="" diff --git a/python/samples/getting_started/third_party/weaviate-persistent-memory.ipynb b/python/samples/getting_started/third_party/weaviate-persistent-memory.ipynb index 780b8f17e055..9e5d6c0aa57d 100644 --- a/python/samples/getting_started/third_party/weaviate-persistent-memory.ipynb +++ b/python/samples/getting_started/third_party/weaviate-persistent-memory.ipynb @@ -16,6 +16,48 @@ "`WeaviateMemoryStore` is an example of a persistent (i.e. 
long-term) memory store backed by the Weaviate vector database.\n" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Configuring the Kernel\n", + "\n", + "Let's get started with the necessary configuration to run Semantic Kernel. For Notebooks, we require a `.env` file with the proper settings for the model you use. Create a new file named `.env` and place it in this directory. Copy the contents of the `.env.example` file from this directory and paste it into the `.env` file that you just created.\n", + "\n", + "**NOTE: Please make sure to include `GLOBAL_LLM_SERVICE` set to either OpenAI, AzureOpenAI, or HuggingFace in your .env file. If this setting is not included, the Service will default to AzureOpenAI.**\n", + "\n", + "#### Option 1: using OpenAI\n", + "\n", + "Add your [OpenAI Key](https://platform.openai.com/docs/overview) to your `.env` file (org Id only if you have multiple orgs):\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"OpenAI\"\n", + "OPENAI_API_KEY=\"sk-...\"\n", + "OPENAI_ORG_ID=\"\"\n", + "OPENAI_CHAT_MODEL_ID=\"\"\n", + "OPENAI_TEXT_MODEL_ID=\"\"\n", + "OPENAI_EMBEDDING_MODEL_ID=\"\"\n", + "```\n", + "The variable names should match those used in the `.env` file, as shown above.\n", + "\n", + "#### Option 2: using Azure OpenAI\n", + "\n", + "Add your [Azure Open AI Service key](https://learn.microsoft.com/azure/cognitive-services/openai/quickstart?pivots=programming-language-studio) settings to the `.env` file in the same folder:\n", + "\n", + "```\n", + "GLOBAL_LLM_SERVICE=\"AzureOpenAI\"\n", + "AZURE_OPENAI_API_KEY=\"...\"\n", + "AZURE_OPENAI_ENDPOINT=\"https://...\"\n", + "AZURE_OPENAI_CHAT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_TEXT_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=\"...\"\n", + "AZURE_OPENAI_API_VERSION=\"...\"\n", + "```\n", + "The variable names should match those used in the `.env` file, as shown above.\n", + "\n", + "For more advanced configuration, please follow the steps outlined in the [setup guide](./CONFIGURING_THE_KERNEL.md)." 
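As a preview of what this notebook wires together, here is a hedged sketch (assuming a Weaviate instance reachable at the `WEAVIATE_URL` from the `.env` example above, plus valid OpenAI settings) of registering the store and saving one record; the collection name is illustrative, and the top-level `await` relies on the notebook's event loop:

```python
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import OpenAITextEmbedding
from semantic_kernel.connectors.memory.weaviate import WeaviateMemoryStore
from semantic_kernel.memory.semantic_text_memory import SemanticTextMemory

kernel = Kernel()
embedding_gen = OpenAITextEmbedding(ai_model_id="text-embedding-ada-002")
kernel.add_service(embedding_gen)

# The store reads WEAVIATE_URL (and optionally WEAVIATE_API_KEY) from the .env file.
store = WeaviateMemoryStore()
memory = SemanticTextMemory(storage=store, embeddings_generator=embedding_gen)

# Persist a single record into an illustrative collection.
await memory.save_information(collection="generic", id="info1", text="My budget for 2024 is $100,000")
```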
+ ] + }, { "cell_type": "markdown", "metadata": {}, @@ -114,7 +156,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install -U semantic-kernel[weaviate]==1.0.5" + "%pip install semantic-kernel[weaviate]==1.1.1" ] }, { @@ -194,7 +236,10 @@ "\n", "chat_service_id = \"chat\"\n", "if selectedService == Service.OpenAI:\n", - " oai_chat_service = OpenAIChatCompletion(service_id=chat_service_id, ai_model_id=\"gpt-3.5-turbo\")\n", + " oai_chat_service = OpenAIChatCompletion(\n", + " service_id=chat_service_id,\n", + " ai_model_id=\"gpt-3.5-turbo\",\n", + " )\n", " embedding_gen = OpenAITextEmbedding(ai_model_id=\"text-embedding-ada-002\")\n", " kernel.add_service(oai_chat_service)\n", " kernel.add_service(embedding_gen)\n", diff --git a/python/samples/learn_resources/.env.example b/python/samples/learn_resources/.env.example index 89e2523ad289..4a524a2c16bd 100644 --- a/python/samples/learn_resources/.env.example +++ b/python/samples/learn_resources/.env.example @@ -1,11 +1,12 @@ -GLOBAL_LLM_SERVICE="OpenAI" # Toggle between "OpenAI" or "AzureOpenAI" -OPEN_AI_CHAT_COMPLETION_MODEL_ID="gpt-3.5-turbo-0125" -OPEN_AI_TEXT_COMPLETION_MODEL_ID="gpt-3.5-turbo-instruct" OPENAI_API_KEY="" +OPEN_AI_CHAT_MODEL_ID="" +OPEN_AI_TEXT_MODEL_ID="" +OPEN_AI_EMBEDDING_MODEL_ID="" OPENAI_ORG_ID="" -AZURE_OPEN_AI_DEPLOYMENT_TYPE="chat-completion" # chat-completion or text-completion -AZURE_OPEN_AI_CHAT_COMPLETION_DEPLOYMENT_NAME="gpt-35-turbo" -AZURE_OPEN_AI_TEXT_COMPLETION_DEPLOYMENT_NAME="gpt-35-turbo-instruct" +AZURE_OPENAI_CHAT_DEPLOYMENT_NAME="" +AZURE_OPENAI_TEXT_DEPLOYMENT_NAME="" +AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME="" AZURE_OPENAI_ENDPOINT="" AZURE_OPENAI_API_KEY="" -AZURE_OPENAI_API_VERSION="" \ No newline at end of file +AZURE_AISEARCH_API_KEY="" +AZURE_AISEARCH_URL="" diff --git a/python/samples/learn_resources/templates.py b/python/samples/learn_resources/templates.py index d4e9df3b9fe7..90a6b27e38c2 100644 --- a/python/samples/learn_resources/templates.py +++ b/python/samples/learn_resources/templates.py @@ -6,8 +6,8 @@ from samples.sk_service_configurator import add_service from semantic_kernel import Kernel from semantic_kernel.contents import ChatHistory -from semantic_kernel.contents.author_role import AuthorRole from semantic_kernel.contents.chat_message_content import ChatMessageContent +from semantic_kernel.contents.utils.author_role import AuthorRole from semantic_kernel.prompt_template import InputVariable, PromptTemplateConfig # Initialize the kernel diff --git a/python/semantic_kernel/connectors/ai/google_palm/services/gp_chat_completion.py b/python/semantic_kernel/connectors/ai/google_palm/services/gp_chat_completion.py index 5940c2b8f224..292d4a86a00e 100644 --- a/python/semantic_kernel/connectors/ai/google_palm/services/gp_chat_completion.py +++ b/python/semantic_kernel/connectors/ai/google_palm/services/gp_chat_completion.py @@ -15,10 +15,10 @@ from semantic_kernel.connectors.ai.google_palm.settings.google_palm_settings import GooglePalmSettings from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase -from semantic_kernel.contents.author_role import AuthorRole from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole from 
semantic_kernel.exceptions import ServiceInitializationError, ServiceInvalidRequestError, ServiceResponseException logger: logging.Logger = logging.getLogger(__name__) diff --git a/python/semantic_kernel/connectors/ai/ollama/services/ollama_text_embedding.py b/python/semantic_kernel/connectors/ai/ollama/services/ollama_text_embedding.py index 23838f8e027f..5716a043bb90 100644 --- a/python/semantic_kernel/connectors/ai/ollama/services/ollama_text_embedding.py +++ b/python/semantic_kernel/connectors/ai/ollama/services/ollama_text_embedding.py @@ -5,9 +5,9 @@ from typing import Any if sys.version_info >= (3, 12): - from typing import override + from typing import override # pragma: no cover else: - from typing_extensions import override + from typing_extensions import override # pragma: no cover import aiohttp from numpy import array, ndarray @@ -39,10 +39,13 @@ class OllamaTextEmbedding(EmbeddingGeneratorBase): async def generate_embeddings(self, texts: list[str], **kwargs: Any) -> ndarray: result = [] for text in texts: - async with AsyncSession(self.session) as session, session.post( - self.url, - json={"model": self.ai_model_id, "prompt": text, "options": kwargs}, - ) as response: + async with ( + AsyncSession(self.session) as session, + session.post( + self.url, + json={"model": self.ai_model_id, "prompt": text, "options": kwargs}, + ) as response, + ): response.raise_for_status() response = await response.json() result.append(response["embedding"]) diff --git a/python/semantic_kernel/connectors/ai/open_ai/const.py b/python/semantic_kernel/connectors/ai/open_ai/const.py index eaeaa0eddcec..5291ee622608 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/const.py +++ b/python/semantic_kernel/connectors/ai/open_ai/const.py @@ -3,4 +3,3 @@ from typing import Final DEFAULT_AZURE_API_VERSION: Final[str] = "2024-02-01" -USER_AGENT: Final[str] = "User-Agent" diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/azure_chat_completion.py b/python/semantic_kernel/connectors/ai/open_ai/services/azure_chat_completion.py index 7c4ff6f44fe8..516029269748 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/azure_chat_completion.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/azure_chat_completion.py @@ -23,11 +23,11 @@ from semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings import AzureOpenAISettings from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.finish_reason import FinishReason from semantic_kernel.contents.function_call_content import FunctionCallContent from semantic_kernel.contents.function_result_content import FunctionResultContent from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.finish_reason import FinishReason from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError from semantic_kernel.kernel_pydantic import HttpsUrl diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/azure_config_base.py b/python/semantic_kernel/connectors/ai/open_ai/services/azure_config_base.py index 48347fa3efd8..a42a3aafd5a9 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/azure_config_base.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/azure_config_base.py @@ -6,9 +6,10 @@ from 
openai import AsyncAzureOpenAI from pydantic import ConfigDict, validate_call -from semantic_kernel.connectors.ai.open_ai.const import DEFAULT_AZURE_API_VERSION, USER_AGENT +from semantic_kernel.connectors.ai.open_ai.const import DEFAULT_AZURE_API_VERSION from semantic_kernel.connectors.ai.open_ai.services.open_ai_handler import OpenAIHandler, OpenAIModelTypes from semantic_kernel.connectors.telemetry import APP_INFO, prepend_semantic_kernel_to_user_agent +from semantic_kernel.const import USER_AGENT from semantic_kernel.exceptions import ServiceInitializationError from semantic_kernel.kernel_pydantic import HttpsUrl diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py index 8b9cfdc2bbc3..28e168f59e1b 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_chat_completion_base.py @@ -26,15 +26,15 @@ from semantic_kernel.connectors.ai.open_ai.services.open_ai_handler import OpenAIHandler from semantic_kernel.connectors.ai.open_ai.services.utils import update_settings_from_function_call_configuration from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings -from semantic_kernel.contents.author_role import AuthorRole from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.finish_reason import FinishReason from semantic_kernel.contents.function_call_content import FunctionCallContent from semantic_kernel.contents.function_result_content import FunctionResultContent from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent from semantic_kernel.contents.streaming_text_content import StreamingTextContent from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents.utils.finish_reason import FinishReason from semantic_kernel.exceptions import ( FunctionCallInvalidArgumentsException, ServiceInvalidExecutionSettingsError, @@ -468,7 +468,7 @@ async def _process_function_call( if parsed_args: args_cloned.update(parsed_args) except (FunctionCallInvalidArgumentsException, TypeError) as exc: - logger.exception( + logger.info( f"Received invalid arguments for function {function_call.name}: {exc}. Trying tool call again." ) frc = FunctionResultContent.from_function_call_content_and_result( @@ -517,7 +517,7 @@ async def _process_function_call( f"{[param.name for param in function_to_call.parameters if param.is_required]}. " "Please provide the required arguments and try again." 
) - logger.exception(msg) + logger.info(msg) frc = FunctionResultContent.from_function_call_content_and_result( function_call_content=function_call, result=msg, diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_config_base.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_config_base.py index de26fd3fa94f..783cb348770d 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_config_base.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_config_base.py @@ -6,10 +6,10 @@ from openai import AsyncOpenAI from pydantic import ConfigDict, Field, validate_call -from semantic_kernel.connectors.ai.open_ai.const import USER_AGENT from semantic_kernel.connectors.ai.open_ai.services.open_ai_handler import OpenAIHandler from semantic_kernel.connectors.ai.open_ai.services.open_ai_model_types import OpenAIModelTypes from semantic_kernel.connectors.telemetry import APP_INFO, prepend_semantic_kernel_to_user_agent +from semantic_kernel.const import USER_AGENT from semantic_kernel.exceptions import ServiceInitializationError logger: logging.Logger = logging.getLogger(__name__) diff --git a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding_base.py b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding_base.py index 90442267bdd7..72f0cab9a18b 100644 --- a/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding_base.py +++ b/python/semantic_kernel/connectors/ai/open_ai/services/open_ai_text_embedding_base.py @@ -6,9 +6,9 @@ from numpy import array, ndarray if sys.version_info >= (3, 12): - from typing import override + from typing import override # pragma: no cover else: - from typing_extensions import override + from typing_extensions import override # pragma: no cover from semantic_kernel.connectors.ai.embeddings.embedding_generator_base import EmbeddingGeneratorBase from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.open_ai_prompt_execution_settings import ( diff --git a/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_ai_search_settings.py b/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_ai_search_settings.py index f04af95caf4c..c4c066407d92 100644 --- a/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_ai_search_settings.py +++ b/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_ai_search_settings.py @@ -20,6 +20,15 @@ class AzureAISearchSettings(KernelBaseSettings): env_prefix: ClassVar[str] = "AZURE_AI_SEARCH_" - api_key: SecretStr + api_key: SecretStr | None = None endpoint: HttpsUrl index_name: str | None = None + + def model_dump(self) -> dict[str, str | None]: + """Dump the model to a dictionary.""" + data = super().model_dump() + data.update({ + "api_key": self.api_key.get_secret_value() if self.api_key else None, + "endpoint": str(self.endpoint), + }) + return data diff --git a/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_cognitive_search_memory_store.py b/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_cognitive_search_memory_store.py index b6b12ed5d9d5..8c4c63991f42 100644 --- a/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_cognitive_search_memory_store.py +++ b/python/semantic_kernel/connectors/memory/azure_cognitive_search/azure_cognitive_search_memory_store.py @@ -89,7 +89,7 @@ def __init__( self._vector_size = vector_size self._search_index_client = get_search_index_async_client( 
search_endpoint=str(acs_memory_settings.endpoint), - admin_key=acs_memory_settings.api_key.get_secret_value(), + admin_key=acs_memory_settings.api_key.get_secret_value() if acs_memory_settings.api_key else None, azure_credential=azure_credentials, token_credential=token_credentials, ) diff --git a/python/semantic_kernel/connectors/memory/azure_cognitive_search/utils.py b/python/semantic_kernel/connectors/memory/azure_cognitive_search/utils.py index c3a121a541ee..ed07a7bd637c 100644 --- a/python/semantic_kernel/connectors/memory/azure_cognitive_search/utils.py +++ b/python/semantic_kernel/connectors/memory/azure_cognitive_search/utils.py @@ -8,7 +8,7 @@ from azure.search.documents.indexes.models import SearchableField, SearchField, SearchFieldDataType, SimpleField from dotenv import load_dotenv -from semantic_kernel.connectors.ai.open_ai.const import USER_AGENT +from semantic_kernel.const import USER_AGENT from semantic_kernel.exceptions import ServiceInitializationError from semantic_kernel.memory.memory_record import MemoryRecord diff --git a/python/semantic_kernel/connectors/memory/azure_cosmosdb/mongo_vcore_store_api.py b/python/semantic_kernel/connectors/memory/azure_cosmosdb/mongo_vcore_store_api.py index f9e23799188d..d9d5034300fc 100644 --- a/python/semantic_kernel/connectors/memory/azure_cosmosdb/mongo_vcore_store_api.py +++ b/python/semantic_kernel/connectors/memory/azure_cosmosdb/mongo_vcore_store_api.py @@ -5,9 +5,9 @@ from typing import Any if sys.version >= "3.12": - from typing import override + from typing import override # pragma: no cover else: - from typing_extensions import override + from typing_extensions import override # pragma: no cover import numpy as np diff --git a/python/semantic_kernel/connectors/memory/azure_cosmosdb_no_sql/azure_cosmosdb_no_sql_memory_store.py b/python/semantic_kernel/connectors/memory/azure_cosmosdb_no_sql/azure_cosmosdb_no_sql_memory_store.py index 6a09f46d1559..2f2ccd56358a 100644 --- a/python/semantic_kernel/connectors/memory/azure_cosmosdb_no_sql/azure_cosmosdb_no_sql_memory_store.py +++ b/python/semantic_kernel/connectors/memory/azure_cosmosdb_no_sql/azure_cosmosdb_no_sql_memory_store.py @@ -5,9 +5,9 @@ from typing import Any if sys.version_info >= (3, 12): - from typing import override + from typing import override # pragma: no cover else: - from typing_extensions import override + from typing_extensions import override # pragma: no cover import numpy as np from azure.cosmos.aio import ContainerProxy, CosmosClient, DatabaseProxy diff --git a/python/semantic_kernel/connectors/memory/chroma/chroma_memory_store.py b/python/semantic_kernel/connectors/memory/chroma/chroma_memory_store.py index 0b839c796604..52bd91e31387 100644 --- a/python/semantic_kernel/connectors/memory/chroma/chroma_memory_store.py +++ b/python/semantic_kernel/connectors/memory/chroma/chroma_memory_store.py @@ -7,9 +7,9 @@ from numpy import array, ndarray if sys.version_info >= (3, 12): - from typing import override + from typing import override # pragma: no cover else: - from typing_extensions import override + from typing_extensions import override # pragma: no cover from semantic_kernel.connectors.memory.chroma.utils import chroma_compute_similarity_scores, query_results_to_records from semantic_kernel.exceptions import ServiceInitializationError, ServiceResourceNotFoundError diff --git a/python/semantic_kernel/connectors/memory/postgres/postgres_settings.py b/python/semantic_kernel/connectors/memory/postgres/postgres_settings.py index c53205e424ab..feb3901210b7 
100644 --- a/python/semantic_kernel/connectors/memory/postgres/postgres_settings.py +++ b/python/semantic_kernel/connectors/memory/postgres/postgres_settings.py @@ -17,6 +17,6 @@ class PostgresSettings(KernelBaseSettings): (Env var POSTGRES_CONNECTION_STRING) """ - env_prefix: ClassVar[str] = "ASTRADB_" + env_prefix: ClassVar[str] = "POSTGRES_" connection_string: SecretStr diff --git a/python/semantic_kernel/connectors/openapi_plugin/openapi_runner.py b/python/semantic_kernel/connectors/openapi_plugin/openapi_runner.py index dc46c59d5be0..11ddd06452d2 100644 --- a/python/semantic_kernel/connectors/openapi_plugin/openapi_runner.py +++ b/python/semantic_kernel/connectors/openapi_plugin/openapi_runner.py @@ -10,13 +10,13 @@ import httpx from openapi_core import Spec -from semantic_kernel.connectors.ai.open_ai.const import USER_AGENT from semantic_kernel.connectors.openapi_plugin.models.rest_api_operation import RestApiOperation from semantic_kernel.connectors.openapi_plugin.models.rest_api_operation_expected_response import ( RestApiOperationExpectedResponse, ) from semantic_kernel.connectors.openapi_plugin.models.rest_api_operation_payload import RestApiOperationPayload from semantic_kernel.connectors.openapi_plugin.models.rest_api_operation_run_options import RestApiOperationRunOptions +from semantic_kernel.connectors.telemetry import APP_INFO, prepend_semantic_kernel_to_user_agent from semantic_kernel.exceptions.function_exceptions import FunctionExecutionException from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.utils.experimental_decorator import experimental_class @@ -124,8 +124,6 @@ async def run_operation( options: RestApiOperationRunOptions | None = None, ) -> str: """Run the operation.""" - from semantic_kernel.connectors.telemetry import HTTP_USER_AGENT - url = self.build_operation_url( operation=operation, arguments=arguments, @@ -143,7 +141,9 @@ async def run_operation( headers_update = await self.auth_callback(headers=headers) headers.update(headers_update) - headers[USER_AGENT] = " ".join((HTTP_USER_AGENT, headers.get(USER_AGENT, ""))).rstrip() + if APP_INFO: + headers.update(APP_INFO) + headers = prepend_semantic_kernel_to_user_agent(headers) if "Content-Type" not in headers: headers["Content-Type"] = self._get_first_response_media_type(operation.responses) diff --git a/python/semantic_kernel/connectors/search_engine/bing_connector.py b/python/semantic_kernel/connectors/search_engine/bing_connector.py index 3d2b82cbdfac..03925ea96708 100644 --- a/python/semantic_kernel/connectors/search_engine/bing_connector.py +++ b/python/semantic_kernel/connectors/search_engine/bing_connector.py @@ -78,14 +78,22 @@ async def search(self, query: str, num_results: int = 1, offset: int = 0) -> lis headers = {"Ocp-Apim-Subscription-Key": self._settings.api_key.get_secret_value()} - async with ( - aiohttp.ClientSession() as session, - session.get(_request_url, headers=headers, raise_for_status=True) as response, - ): - if response.status == 200: - data = await response.json() - pages = data.get("webPages", {}).get("value") - if pages: - return list(map(lambda x: x["snippet"], pages)) or [] - return None - return [] + try: + async with aiohttp.ClientSession() as session, session.get(_request_url, headers=headers) as response: + response.raise_for_status() + if response.status == 200: + data = await response.json() + pages = data.get("webPages", {}).get("value") + if pages: + return list(map(lambda x: x["snippet"], pages)) or [] + return None + return [] 
+ except aiohttp.ClientResponseError as ex: + logger.error(f"Failed to get search results: {ex}") + raise ServiceInvalidRequestError("Failed to get search results.") from ex + except aiohttp.ClientError as ex: + logger.error(f"Client error occurred: {ex}") + raise ServiceInvalidRequestError("A client error occurred while getting search results.") from ex + except Exception as ex: + logger.error(f"An unexpected error occurred: {ex}") + raise ServiceInvalidRequestError("An unexpected error occurred while getting search results.") from ex diff --git a/python/semantic_kernel/connectors/telemetry.py b/python/semantic_kernel/connectors/telemetry.py index 6a788681ad5c..7545b482db69 100644 --- a/python/semantic_kernel/connectors/telemetry.py +++ b/python/semantic_kernel/connectors/telemetry.py @@ -4,13 +4,13 @@ from importlib.metadata import PackageNotFoundError, version from typing import Any -from semantic_kernel.connectors.ai.open_ai.const import USER_AGENT +from semantic_kernel.const import USER_AGENT TELEMETRY_DISABLED_ENV_VAR = "AZURE_TELEMETRY_DISABLED" IS_TELEMETRY_ENABLED = os.environ.get(TELEMETRY_DISABLED_ENV_VAR, "false").lower() not in ["true", "1"] -HTTP_USER_AGENT = "Semantic-Kernel" +HTTP_USER_AGENT = "semantic-kernel-python" try: version_info = version("semantic-kernel") @@ -19,7 +19,7 @@ APP_INFO = ( { - "Semantic-Kernel-Version": f"python-{version_info}", + "semantic-kernel-version": f"python/{version_info}", } if IS_TELEMETRY_ENABLED else None @@ -27,14 +27,18 @@ def prepend_semantic_kernel_to_user_agent(headers: dict[str, Any]): - """Prepend "Semantic-Kernel" to the User-Agent in the headers. + """Prepend "semantic-kernel" to the User-Agent in the headers. Args: headers: The existing headers dictionary. Returns: - The modified headers dictionary with "Semantic-Kernel" prepended to the User-Agent. + The modified headers dictionary with "semantic-kernel" prepended to the User-Agent. """ - headers[USER_AGENT] = f"{HTTP_USER_AGENT} {headers[USER_AGENT]}" if USER_AGENT in headers else f"{HTTP_USER_AGENT}" + headers[USER_AGENT] = ( + f"{HTTP_USER_AGENT}/{version_info} {headers[USER_AGENT]}" + if USER_AGENT in headers + else f"{HTTP_USER_AGENT}/{version_info}" + ) return headers diff --git a/python/semantic_kernel/const.py b/python/semantic_kernel/const.py index dd7e41716690..46836a019797 100644 --- a/python/semantic_kernel/const.py +++ b/python/semantic_kernel/const.py @@ -4,3 +4,4 @@ METADATA_EXCEPTION_KEY: Final[str] = "exception" DEFAULT_SERVICE_NAME: Final[str] = "default" +USER_AGENT: Final[str] = "User-Agent" diff --git a/python/semantic_kernel/contents/__init__.py b/python/semantic_kernel/contents/__init__.py index 05508422f645..21d717945299 100644 --- a/python/semantic_kernel/contents/__init__.py +++ b/python/semantic_kernel/contents/__init__.py @@ -1,20 +1,24 @@ # Copyright (c) Microsoft. All rights reserved. 
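The telemetry hunk above changes both the version-header key and the User-Agent composition. A minimal sketch of the resulting headers, assuming only what the patch itself defines (`APP_INFO`, `prepend_semantic_kernel_to_user_agent`, and the new `USER_AGENT` constant); the caller agent string is a made-up example:

from semantic_kernel.connectors.telemetry import APP_INFO, prepend_semantic_kernel_to_user_agent
from semantic_kernel.const import USER_AGENT

headers = {USER_AGENT: "my-app/1.0"}  # hypothetical caller-supplied agent
if APP_INFO:  # present only when telemetry is enabled
    headers.update(APP_INFO)  # adds semantic-kernel-version: python/<version>
headers = prepend_semantic_kernel_to_user_agent(headers)
# headers[USER_AGENT] is now "semantic-kernel-python/<version> my-app/1.0"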
-from semantic_kernel.contents.author_role import AuthorRole from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.function_call_content import FunctionCallContent from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent from semantic_kernel.contents.streaming_text_content import StreamingTextContent from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents.utils.finish_reason import FinishReason __all__ = [ "AuthorRole", "ChatHistory", "ChatMessageContent", + "FinishReason", "FunctionCallContent", "FunctionResultContent", + "ImageContent", "StreamingChatMessageContent", "StreamingTextContent", "TextContent", diff --git a/python/semantic_kernel/contents/binary_content.py b/python/semantic_kernel/contents/binary_content.py new file mode 100644 index 000000000000..d6ee57546cb2 --- /dev/null +++ b/python/semantic_kernel/contents/binary_content.py @@ -0,0 +1,162 @@ +# Copyright (c) Microsoft. All rights reserved. + +import logging +from typing import Annotated, Any, ClassVar, Literal, TypeVar +from xml.etree.ElementTree import Element # nosec + +from pydantic import Field, UrlConstraints, computed_field +from pydantic_core import Url + +from semantic_kernel.contents.const import BINARY_CONTENT_TAG, ContentTypes +from semantic_kernel.contents.kernel_content import KernelContent +from semantic_kernel.contents.utils.data_uri import DataUri +from semantic_kernel.exceptions.content_exceptions import ContentInitializationError +from semantic_kernel.utils.experimental_decorator import experimental_class + +logger = logging.getLogger(__name__) + +_T = TypeVar("_T", bound="BinaryContent") + +DataUrl = Annotated[Url, UrlConstraints(allowed_schemes=["data"])] + + +@experimental_class +class BinaryContent(KernelContent): + """This is a base class for different types of binary content. + + This can be created from either the bytes data or a data uri; additionally, it can have a uri. + The uri is a reference to the source, and might or might not point to the same thing as the data. + + Ideally, only subclasses of this class, such as ImageContent, are used. + + Methods: + __str__: Returns the string representation of the content. + + Raises: + ValidationError: If any arguments are malformed. + + """ + + content_type: Literal[ContentTypes.BINARY_CONTENT] = Field(BINARY_CONTENT_TAG, init=False) # type: ignore + uri: Url | None = None + default_mime_type: ClassVar[str] = "text/plain" + tag: ClassVar[str] = BINARY_CONTENT_TAG + _data_uri: DataUri | None = None + + def __init__( + self, + uri: Url | str | None = None, + data_uri: DataUrl | str | None = None, + data: str | bytes | None = None, + data_format: str | None = None, + mime_type: str | None = None, + **kwargs: Any, + ): + """Create a Binary Content object, either from a data_uri or data. + + Args: + uri (Url | None): The reference uri of the content. + data_uri (DataUrl | None): The data uri of the content. + data (str | bytes | None): The data of the content. + data_format (str | None): The format of the data (e.g. base64). + mime_type (str | None): The mime type of the data, only used with data.
+ kwargs (Any): Any additional arguments: + inner_content (Any): The inner content of the response, + this should hold all the information from the response so even + when not creating a subclass a developer can leverage the full thing. + ai_model_id (str | None): The id of the AI model that generated this response. + metadata (dict[str, Any]): Any metadata that should be attached to the response. + """ + _data_uri = None + if data_uri: + _data_uri = DataUri.from_data_uri(data_uri, self.default_mime_type) + if "metadata" in kwargs: + kwargs["metadata"].update(_data_uri.parameters) + else: + kwargs["metadata"] = _data_uri.parameters + elif data: + if isinstance(data, str): + _data_uri = DataUri( + data_str=data, data_format=data_format, mime_type=mime_type or self.default_mime_type + ) + else: + _data_uri = DataUri( + data_bytes=data, data_format=data_format, mime_type=mime_type or self.default_mime_type + ) + super().__init__(uri=uri, **kwargs) + self._data_uri = _data_uri + + @computed_field # type: ignore + @property + def data_uri(self) -> str: + """Get the data uri.""" + if self._data_uri: + return self._data_uri.to_string(self.metadata) + return "" + + @data_uri.setter + def data_uri(self, value: str): + """Set the data uri.""" + self._data_uri = DataUri.from_data_uri(value) + self.metadata.update(self._data_uri.parameters) + + @property + def data(self) -> bytes: + """Get the data.""" + if self._data_uri and self._data_uri.data_bytes: + return self._data_uri.data_bytes + if self._data_uri and self._data_uri.data_str: + return self._data_uri.data_str.encode("utf-8") + return b"" + + @data.setter + def data(self, value: str | bytes): + """Set the data.""" + if self._data_uri: + self._data_uri.update_data(value) + else: + if isinstance(value, str): + self._data_uri = DataUri(data_str=value, mime_type=self.mime_type) + else: + self._data_uri = DataUri(data_bytes=value, mime_type=self.mime_type) + + @property + def mime_type(self) -> str: + """Get the mime type.""" + if self._data_uri and self._data_uri.mime_type: + return self._data_uri.mime_type + return self.default_mime_type + + @mime_type.setter + def mime_type(self, value: str): + """Set the mime type.""" + if self._data_uri: + self._data_uri.mime_type = value + + def __str__(self) -> str: + """Return the string representation of the content.""" + return self.data_uri if self._data_uri else str(self.uri) + + def to_element(self) -> Element: + """Convert the instance to an Element.""" + element = Element(self.tag) + if self._data_uri: + element.text = self.data_uri + if self.uri: + element.set("uri", str(self.uri)) + return element + + @classmethod + def from_element(cls: type[_T], element: Element) -> _T: + """Create an instance from an Element.""" + if element.tag != cls.tag: + raise ContentInitializationError(f"Element tag is not {cls.tag}") # pragma: no cover + + if element.text: + return cls(data_uri=element.text, uri=element.get("uri", None)) + + return cls(uri=element.get("uri", None)) + + def to_dict(self) -> dict[str, Any]: + """Convert the instance to a dictionary.""" + return {"type": "binary", "binary": {"uri": str(self)}} diff --git a/python/semantic_kernel/contents/chat_history.py b/python/semantic_kernel/contents/chat_history.py index 47189b1df092..2d7ef35d5a79 100644 --- a/python/semantic_kernel/contents/chat_history.py +++ b/python/semantic_kernel/contents/chat_history.py @@ -10,10 +10,10 @@ from defusedxml.ElementTree import XML, ParseError from pydantic import field_validator -from 
semantic_kernel.contents.author_role import AuthorRole from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.const import CHAT_HISTORY_TAG, CHAT_MESSAGE_CONTENT_TAG from semantic_kernel.contents.kernel_content import KernelContent +from semantic_kernel.contents.utils.author_role import AuthorRole from semantic_kernel.exceptions import ContentInitializationError, ContentSerializationError from semantic_kernel.kernel_pydantic import KernelBaseModel diff --git a/python/semantic_kernel/contents/chat_message_content.py b/python/semantic_kernel/contents/chat_message_content.py index b4c2dbe277ea..51394d0ce116 100644 --- a/python/semantic_kernel/contents/chat_message_content.py +++ b/python/semantic_kernel/contents/chat_message_content.py @@ -3,33 +3,45 @@ import logging from enum import Enum from html import unescape -from typing import Any, Union, overload +from typing import Any, ClassVar, Literal, Union, overload from xml.etree.ElementTree import Element # nosec from defusedxml import ElementTree from pydantic import Field -from semantic_kernel.contents.author_role import AuthorRole from semantic_kernel.contents.const import ( CHAT_MESSAGE_CONTENT_TAG, + DISCRIMINATOR_FIELD, FUNCTION_CALL_CONTENT_TAG, FUNCTION_RESULT_CONTENT_TAG, + IMAGE_CONTENT_TAG, TEXT_CONTENT_TAG, + ContentTypes, ) -from semantic_kernel.contents.finish_reason import FinishReason from semantic_kernel.contents.function_call_content import FunctionCallContent from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent from semantic_kernel.contents.kernel_content import KernelContent from semantic_kernel.contents.streaming_text_content import StreamingTextContent from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents.utils.finish_reason import FinishReason +from semantic_kernel.exceptions.content_exceptions import ContentInitializationError TAG_CONTENT_MAP = { TEXT_CONTENT_TAG: TextContent, FUNCTION_CALL_CONTENT_TAG: FunctionCallContent, FUNCTION_RESULT_CONTENT_TAG: FunctionResultContent, + IMAGE_CONTENT_TAG: ImageContent, } -ITEM_TYPES = Union[TextContent, StreamingTextContent, FunctionResultContent, FunctionCallContent] +ITEM_TYPES = Union[ + ImageContent, + TextContent, + StreamingTextContent, + FunctionResultContent, + FunctionCallContent, +] logger = logging.getLogger(__name__) @@ -54,9 +66,11 @@ class ChatMessageContent(KernelContent): __str__: Returns the content of the response. """ + content_type: Literal[ContentTypes.CHAT_MESSAGE_CONTENT] = Field(CHAT_MESSAGE_CONTENT_TAG, init=False) # type: ignore + tag: ClassVar[str] = CHAT_MESSAGE_CONTENT_TAG role: AuthorRole name: str | None = None - items: list[ITEM_TYPES] = Field(default_factory=list) + items: list[ITEM_TYPES] = Field(default_factory=list, discriminator=DISCRIMINATOR_FIELD) encoding: str | None = None finish_reason: FinishReason | None = None @@ -104,8 +118,9 @@ def __init__( # type: ignore """Create a ChatMessageContent instance. Args: - role: ChatRole - The role of the chat message. - items: list[TextContent, StreamingTextContent, FunctionCallContent, FunctionResultContent] - The content. + role: AuthorRole - The role of the chat message. + items: list[TextContent, StreamingTextContent, FunctionCallContent, FunctionResultContent, ImageContent] + - The content. content: str - The text of the response. 
inner_content: Optional[Any] - The inner content of the response, this should hold all the information from the response so even @@ -193,7 +208,7 @@ def to_element(self) -> "Element": Returns: Element - The XML Element representing the ChatMessageContent. """ - root = Element(CHAT_MESSAGE_CONTENT_TAG) + root = Element(self.tag) for field in self.model_fields_set: if field not in ["role", "name", "encoding", "finish_reason", "ai_model_id"]: continue @@ -215,6 +230,8 @@ def from_element(cls, element: Element) -> "ChatMessageContent": Returns: ChatMessageContent - The new instance of ChatMessageContent or a subclass. """ + if element.tag != cls.tag: + raise ContentInitializationError(f"Element tag is not {cls.tag}") kwargs: dict[str, Any] = {key: value for key, value in element.items()} items: list[KernelContent] = [] if element.text: @@ -274,7 +291,7 @@ def _parse_items(self) -> str | list[dict[str, Any]]: """Parse the items of the ChatMessageContent. Returns: - str | dict - The parsed items. + str | list of dicts - The parsed items. """ if len(self.items) == 1 and isinstance(self.items[0], TextContent): return self.items[0].text diff --git a/python/semantic_kernel/contents/const.py b/python/semantic_kernel/contents/const.py index cf6d122574c9..07153e4c0541 100644 --- a/python/semantic_kernel/contents/const.py +++ b/python/semantic_kernel/contents/const.py @@ -1,9 +1,21 @@ # Copyright (c) Microsoft. All rights reserved. +from enum import Enum from typing import Final CHAT_MESSAGE_CONTENT_TAG: Final[str] = "message" CHAT_HISTORY_TAG: Final[str] = "chat_history" TEXT_CONTENT_TAG: Final[str] = "text" +IMAGE_CONTENT_TAG: Final[str] = "image" +BINARY_CONTENT_TAG: Final[str] = "binary" FUNCTION_CALL_CONTENT_TAG: Final[str] = "function_call" FUNCTION_RESULT_CONTENT_TAG: Final[str] = "function_result" -DISCRIMINATOR_FIELD: Final[str] = "type" +DISCRIMINATOR_FIELD: Final[str] = "content_type" + + +class ContentTypes(str, Enum): + BINARY_CONTENT = BINARY_CONTENT_TAG + CHAT_MESSAGE_CONTENT = CHAT_MESSAGE_CONTENT_TAG + IMAGE_CONTENT = IMAGE_CONTENT_TAG + FUNCTION_CALL_CONTENT = FUNCTION_CALL_CONTENT_TAG + FUNCTION_RESULT_CONTENT = FUNCTION_RESULT_CONTENT_TAG + TEXT_CONTENT = TEXT_CONTENT_TAG diff --git a/python/semantic_kernel/contents/function_call_content.py b/python/semantic_kernel/contents/function_call_content.py index d761d54a97d8..9497785456ef 100644 --- a/python/semantic_kernel/contents/function_call_content.py +++ b/python/semantic_kernel/contents/function_call_content.py @@ -3,12 +3,15 @@ import json import logging from functools import cached_property -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypeVar from xml.etree.ElementTree import Element # nosec -from semantic_kernel.contents.const import FUNCTION_CALL_CONTENT_TAG +from pydantic import Field + +from semantic_kernel.contents.const import FUNCTION_CALL_CONTENT_TAG, ContentTypes from semantic_kernel.contents.kernel_content import KernelContent from semantic_kernel.exceptions import FunctionCallInvalidArgumentsException, FunctionCallInvalidNameException +from semantic_kernel.exceptions.content_exceptions import ContentInitializationError if TYPE_CHECKING: from semantic_kernel.functions.kernel_arguments import KernelArguments @@ -16,9 +19,14 @@ logger = logging.getLogger(__name__) +_T = TypeVar("_T", bound="FunctionCallContent") + + class FunctionCallContent(KernelContent): """Class to hold a function call response.""" + content_type: Literal[ContentTypes.FUNCTION_CALL_CONTENT] = 
Field(FUNCTION_CALL_CONTENT_TAG, init=False) # type: ignore + tag: ClassVar[str] = FUNCTION_CALL_CONTENT_TAG id: str | None index: int | None = None name: str | None = None @@ -86,7 +94,7 @@ def split_name_dict(self) -> dict: def to_element(self) -> Element: """Convert the function call to an Element.""" - element = Element(FUNCTION_CALL_CONTENT_TAG) + element = Element(self.tag) if self.id: element.set("id", self.id) if self.name: @@ -96,10 +104,10 @@ def to_element(self) -> Element: return element @classmethod - def from_element(cls, element: Element) -> "FunctionCallContent": + def from_element(cls: type[_T], element: Element) -> _T: """Create an instance from an Element.""" - if element.tag != FUNCTION_CALL_CONTENT_TAG: - raise ValueError(f"Element tag is not {FUNCTION_CALL_CONTENT_TAG}") + if element.tag != cls.tag: + raise ContentInitializationError(f"Element tag is not {cls.tag}") return cls(name=element.get("name"), id=element.get("id"), arguments=element.text or "") diff --git a/python/semantic_kernel/contents/function_result_content.py b/python/semantic_kernel/contents/function_result_content.py index be4a4402d783..06395f30a5d9 100644 --- a/python/semantic_kernel/contents/function_result_content.py +++ b/python/semantic_kernel/contents/function_result_content.py @@ -1,15 +1,16 @@ # Copyright (c) Microsoft. All rights reserved. from functools import cached_property -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypeVar from xml.etree.ElementTree import Element # nosec -from pydantic import field_validator +from pydantic import Field, field_validator -from semantic_kernel.contents.author_role import AuthorRole -from semantic_kernel.contents.const import FUNCTION_RESULT_CONTENT_TAG, TEXT_CONTENT_TAG +from semantic_kernel.contents.const import FUNCTION_RESULT_CONTENT_TAG, TEXT_CONTENT_TAG, ContentTypes from semantic_kernel.contents.kernel_content import KernelContent from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.exceptions.content_exceptions import ContentInitializationError if TYPE_CHECKING: from semantic_kernel.contents.chat_message_content import ChatMessageContent @@ -20,6 +21,8 @@ TEXT_CONTENT_TAG: TextContent, } +_T = TypeVar("_T", bound="FunctionResultContent") + class FunctionResultContent(KernelContent): """This is the base class for text response content. @@ -40,6 +43,8 @@ class FunctionResultContent(KernelContent): __str__: Returns the text of the response. 
""" + content_type: Literal[ContentTypes.FUNCTION_RESULT_CONTENT] = Field(FUNCTION_RESULT_CONTENT_TAG, init=False) # type: ignore + tag: ClassVar[str] = FUNCTION_RESULT_CONTENT_TAG id: str name: str | None = None result: str @@ -68,7 +73,7 @@ def __str__(self) -> str: def to_element(self) -> Element: """Convert the instance to an Element.""" - element = Element(FUNCTION_RESULT_CONTENT_TAG) + element = Element(self.tag) element.set("id", self.id) if self.name: element.set("name", self.name) @@ -76,25 +81,25 @@ def to_element(self) -> Element: return element @classmethod - def from_element(cls, element: Element) -> "FunctionResultContent": + def from_element(cls: type[_T], element: Element) -> _T: """Create an instance from an Element.""" - if element.tag != FUNCTION_RESULT_CONTENT_TAG: - raise ValueError(f"Element tag is not {FUNCTION_RESULT_CONTENT_TAG}") - return cls(id=element.get("id", ""), result=element.text, name=element.get("name", None)) # type: ignore + if element.tag != cls.tag: + raise ContentInitializationError(f"Element tag is not {cls.tag}") + return cls(id=element.get("id", ""), result=element.text, name=element.get("name", None)) @classmethod def from_function_call_content_and_result( - cls, + cls: type[_T], function_call_content: "FunctionCallContent", result: "FunctionResult | TextContent | ChatMessageContent | Any", metadata: dict[str, Any] = {}, - ) -> "FunctionResultContent": + ) -> _T: """Create an instance from a FunctionCallContent and a result.""" if function_call_content.metadata: metadata.update(function_call_content.metadata) return cls( - id=function_call_content.id, - result=result, # type: ignore + id=function_call_content.id or "unknown", + result=str(result), name=function_call_content.name, ai_model_id=function_call_content.ai_model_id, metadata=metadata, diff --git a/python/semantic_kernel/contents/image_content.py b/python/semantic_kernel/contents/image_content.py new file mode 100644 index 000000000000..ccb472bbf60c --- /dev/null +++ b/python/semantic_kernel/contents/image_content.py @@ -0,0 +1,63 @@ +# Copyright (c) Microsoft. All rights reserved. + +import logging +import mimetypes +from typing import Any, ClassVar, Literal, TypeVar + +from pydantic import Field + +from semantic_kernel.contents.binary_content import BinaryContent +from semantic_kernel.contents.const import IMAGE_CONTENT_TAG, ContentTypes +from semantic_kernel.utils.experimental_decorator import experimental_class + +logger = logging.getLogger(__name__) + +_T = TypeVar("_T", bound="ImageContent") + + +@experimental_class +class ImageContent(BinaryContent): + """Image Content class. + + This can be created either the bytes data or a data uri, additionally it can have a uri. + The uri is a reference to the source, and might or might not point to the same thing as the data. + + Use the .from_image_file method to create an instance from a image file. + This reads the file and guesses the mime_type. + If both uri and data is provided, data will be used and a warning is logged. + + Args: + uri (Url | None): The reference uri of the content. + data_uri (DataUrl | None): The data uri of the content. + data (str | bytes | None): The data of the content. + data_format (str | None): The format of the data (e.g. base64). + mime_type (str | None): The mime type of the image, only used with data. 
+ kwargs (Any): Any additional arguments: + inner_content (Any): The inner content of the response, + this should hold all the information from the response so even + when not creating a subclass a developer can leverage the full thing. + ai_model_id (str | None): The id of the AI model that generated this response. + metadata (dict[str, Any]): Any metadata that should be attached to the response. + + Methods: + from_image_path: Create an instance from an image file. + __str__: Returns the string representation of the image. + + Raises: + ValidationError: If neither uri nor data is provided. + + """ + + content_type: Literal[ContentTypes.IMAGE_CONTENT] = Field(IMAGE_CONTENT_TAG, init=False) # type: ignore + tag: ClassVar[str] = IMAGE_CONTENT_TAG + + @classmethod + def from_image_path(cls: type[_T], image_path: str) -> _T: + """Create an instance from an image file.""" + mime_type = mimetypes.guess_type(image_path)[0] + with open(image_path, "rb") as image_file: + return cls(data=image_file.read(), data_format="base64", mime_type=mime_type) + + def to_dict(self) -> dict[str, Any]: + """Convert the instance to a dictionary.""" + return {"type": "image_url", "image_url": {"url": str(self)}} diff --git a/python/semantic_kernel/contents/kernel_content.py b/python/semantic_kernel/contents/kernel_content.py index a03b474409ea..e717cc571caa 100644 --- a/python/semantic_kernel/contents/kernel_content.py +++ b/python/semantic_kernel/contents/kernel_content.py @@ -1,12 +1,14 @@ # Copyright (c) Microsoft. All rights reserved. from abc import ABC, abstractmethod -from typing import Any +from typing import Any, TypeVar from pydantic import Field from semantic_kernel.kernel_pydantic import KernelBaseModel +_T = TypeVar("_T", bound="KernelContent") + class KernelContent(KernelBaseModel, ABC): """Base class for all kernel contents.""" @@ -27,7 +29,7 @@ def to_element(self) -> Any: @classmethod @abstractmethod - def from_element(cls, element: Any) -> "KernelContent": + def from_element(cls: type[_T], element: Any) -> _T: """Create an instance from an Element.""" pass diff --git a/python/semantic_kernel/contents/streaming_chat_message_content.py b/python/semantic_kernel/contents/streaming_chat_message_content.py index 7c39be8545c5..ed68da8e6714 100644 --- a/python/semantic_kernel/contents/streaming_chat_message_content.py +++ b/python/semantic_kernel/contents/streaming_chat_message_content.py @@ -4,17 +4,22 @@ from typing import Any, Union, overload from xml.etree.ElementTree import Element # nosec -from semantic_kernel.contents.author_role import AuthorRole from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.const import CHAT_MESSAGE_CONTENT_TAG -from semantic_kernel.contents.finish_reason import FinishReason from semantic_kernel.contents.function_call_content import FunctionCallContent from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent from semantic_kernel.contents.streaming_content_mixin import StreamingContentMixin from semantic_kernel.contents.streaming_text_content import StreamingTextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents.utils.finish_reason import FinishReason from semantic_kernel.exceptions import ContentAdditionException -ITEM_TYPES = Union[StreamingTextContent, FunctionCallContent, FunctionResultContent] +ITEM_TYPES = Union[ + ImageContent, + StreamingTextContent, + 
FunctionCallContent, + FunctionResultContent, +] class StreamingChatMessageContent(ChatMessageContent, StreamingContentMixin): @@ -88,7 +93,7 @@ def __init__( # type: ignore Args: role: ChatRole - The role of the chat message. choice_index: int - The index of the choice that generated this response. - items: list[TextContent, FunctionCallContent, FunctionResultContent] - The content. + items: list[TextContent, FunctionCallContent, FunctionResultContent, ImageContent] - The content. content: str - The text of the response. inner_content: Optional[Any] - The inner content of the response, this should hold all the information from the response so even @@ -170,12 +175,15 @@ def __add__(self, other: "StreamingChatMessageContent") -> "StreamingChatMessage if not added: self.items.append(other_item) if not isinstance(self.inner_content, list): - self.inner_content = [self.inner_content] - if other.inner_content: - self.inner_content.append(other.inner_content) - else: - if other.inner_content: - self.inner_content.append(other.inner_content) + self.inner_content = [self.inner_content] if self.inner_content else [] + other_content = ( + other.inner_content + if isinstance(other.inner_content, list) + else [other.inner_content] + if other.inner_content + else [] + ) + self.inner_content.extend(other_content) return StreamingChatMessageContent( role=self.role, items=self.items, # type: ignore @@ -196,7 +204,7 @@ def to_element(self) -> "Element": Returns: Element - The XML Element representing the StreamingChatMessageContent. """ - root = Element(CHAT_MESSAGE_CONTENT_TAG) + root = Element(self.tag) for field in self.model_fields_set: if field not in ["role", "name", "encoding", "finish_reason", "ai_model_id", "choice_index"]: continue diff --git a/python/semantic_kernel/contents/streaming_content_mixin.py b/python/semantic_kernel/contents/streaming_content_mixin.py index 065b03f8fffd..4441e92f9fe7 100644 --- a/python/semantic_kernel/contents/streaming_content_mixin.py +++ b/python/semantic_kernel/contents/streaming_content_mixin.py @@ -5,9 +5,9 @@ from typing import Any if sys.version_info >= (3, 11): - from typing import Self + from typing import Self # pragma: no cover else: - from typing_extensions import Self + from typing_extensions import Self # pragma: no cover from semantic_kernel.kernel_pydantic import KernelBaseModel diff --git a/python/semantic_kernel/contents/streaming_text_content.py b/python/semantic_kernel/contents/streaming_text_content.py index 5e33a4e3f330..93313b6f06eb 100644 --- a/python/semantic_kernel/contents/streaming_text_content.py +++ b/python/semantic_kernel/contents/streaming_text_content.py @@ -31,7 +31,7 @@ def __bytes__(self) -> bytes: """Return the content of the response encoded in the encoding.""" return self.text.encode(self.encoding if self.encoding else "utf-8") if self.text else b"" - def __add__(self, other: "TextContent") -> "StreamingTextContent": + def __add__(self, other: TextContent) -> "StreamingTextContent": """When combining two StreamingTextContent instances, the text fields are combined. The inner_content of the first one is used, choice_index, ai_model_id and encoding should be the same. diff --git a/python/semantic_kernel/contents/text_content.py b/python/semantic_kernel/contents/text_content.py index ddf64696c6a5..1fb29391803c 100644 --- a/python/semantic_kernel/contents/text_content.py +++ b/python/semantic_kernel/contents/text_content.py @@ -1,10 +1,16 @@ # Copyright (c) Microsoft. All rights reserved. 
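The `__add__` rework in the streaming hunk above flattens `inner_content` merging, so folding chunks with `+` always yields a single list whether either side holds one item or a list. A minimal sketch of the aggregation pattern this supports, assuming a kernel with a registered "chat" function (the same `reduce` fold the new integration tests use):

from functools import reduce

from semantic_kernel import Kernel
from semantic_kernel.contents import ChatHistory, StreamingChatMessageContent


async def collect_stream(kernel: Kernel, history: ChatHistory) -> StreamingChatMessageContent:
    # Each yielded part is a list of chunks; "+" concatenates the text items
    # and extends inner_content flatly instead of nesting lists.
    invocation = kernel.invoke_stream(function_name="chat", plugin_name="chat", chat_history=history)
    parts = [part[0] async for part in invocation]
    return reduce(lambda first, second: first + second, parts)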
from html import unescape +from typing import ClassVar, Literal, TypeVar from xml.etree.ElementTree import Element # nosec -from semantic_kernel.contents.const import TEXT_CONTENT_TAG +from pydantic import Field + +from semantic_kernel.contents.const import TEXT_CONTENT_TAG, ContentTypes from semantic_kernel.contents.kernel_content import KernelContent +from semantic_kernel.exceptions.content_exceptions import ContentInitializationError + +_T = TypeVar("_T", bound="TextContent") class TextContent(KernelContent): @@ -26,6 +32,8 @@ class TextContent(KernelContent): __str__: Returns the text of the response. """ + content_type: Literal[ContentTypes.TEXT_CONTENT] = Field(TEXT_CONTENT_TAG, init=False) # type: ignore + tag: ClassVar[str] = TEXT_CONTENT_TAG text: str encoding: str | None = None @@ -35,19 +43,19 @@ def __str__(self) -> str: def to_element(self) -> Element: """Convert the instance to an Element.""" - element = Element(TEXT_CONTENT_TAG) + element = Element(self.tag) element.text = self.text if self.encoding: element.set("encoding", self.encoding) return element @classmethod - def from_element(cls, element: Element) -> "TextContent": + def from_element(cls: type[_T], element: Element) -> _T: """Create an instance from an Element.""" - if element.tag != TEXT_CONTENT_TAG: - raise ValueError(f"Element tag is not {TEXT_CONTENT_TAG}") + if element.tag != cls.tag: + raise ContentInitializationError(f"Element tag is not {cls.tag}") - return TextContent(text=unescape(element.text) if element.text else "", encoding=element.get("encoding", None)) + return cls(text=unescape(element.text) if element.text else "", encoding=element.get("encoding", None)) def to_dict(self) -> dict[str, str]: """Convert the instance to a dictionary.""" diff --git a/python/semantic_kernel/contents/types.py b/python/semantic_kernel/contents/types.py deleted file mode 100644 index b994de150a3b..000000000000 --- a/python/semantic_kernel/contents/types.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - -from typing import Literal, Union, get_args - -AZURE_CHAT_MESSAGE_CONTENT_TYPE = Literal["AzureChatMessageContent"] -OPENAI_CHAT_MESSAGE_CONTENT_TYPE = Literal["OpenAIChatMessageContent"] -CHAT_MESSAGE_CONTENT_TYPE = Literal["ChatMessageContent"] - -AZURE_CHAT_MESSAGE_CONTENT: AZURE_CHAT_MESSAGE_CONTENT_TYPE = get_args(AZURE_CHAT_MESSAGE_CONTENT_TYPE)[0] -OPENAI_CHAT_MESSAGE_CONTENT: OPENAI_CHAT_MESSAGE_CONTENT_TYPE = get_args(OPENAI_CHAT_MESSAGE_CONTENT_TYPE)[0] -CHAT_MESSAGE_CONTENT: CHAT_MESSAGE_CONTENT_TYPE = get_args(CHAT_MESSAGE_CONTENT_TYPE)[0] - -CHAT_MESSAGE_CONTENT_TYPE_NAMES = Union[ - CHAT_MESSAGE_CONTENT_TYPE, OPENAI_CHAT_MESSAGE_CONTENT_TYPE, AZURE_CHAT_MESSAGE_CONTENT_TYPE -] diff --git a/python/semantic_kernel/contents/utils/__init__.py b/python/semantic_kernel/contents/utils/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/semantic_kernel/contents/author_role.py b/python/semantic_kernel/contents/utils/author_role.py similarity index 100% rename from python/semantic_kernel/contents/author_role.py rename to python/semantic_kernel/contents/utils/author_role.py diff --git a/python/semantic_kernel/contents/utils/data_uri.py b/python/semantic_kernel/contents/utils/data_uri.py new file mode 100644 index 000000000000..96d603013bbe --- /dev/null +++ b/python/semantic_kernel/contents/utils/data_uri.py @@ -0,0 +1,123 @@ +# Copyright (c) Microsoft. All rights reserved. 
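The refactor above gives every content class a ClassVar `tag` and routes tag mismatches through `ContentInitializationError` instead of `ValueError`. A minimal sketch of the element round-trip this standardizes, using only classes exported by this patch:

from semantic_kernel.contents import TextContent

tc = TextContent(text="hello", encoding="utf-8")
element = tc.to_element()  # <text encoding="utf-8">hello</text>
assert element.tag == TextContent.tag  # "text"
roundtrip = TextContent.from_element(element)  # a wrong tag would raise ContentInitializationError
assert roundtrip.text == "hello"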
+ +import base64 +import binascii +import logging +import re +import sys +from typing import Any, TypeVar + +if sys.version_info < (3, 11): + from typing_extensions import Self # pragma: no cover +else: + from typing import Self # pragma: no cover + +from pydantic import Field, ValidationError, field_validator, model_validator +from pydantic_core import Url + +from semantic_kernel.exceptions import ContentInitializationError +from semantic_kernel.kernel_pydantic import KernelBaseModel + +logger = logging.getLogger(__name__) + +_T = TypeVar("_T", bound="DataUri") + + +class DataUri(KernelBaseModel, validate_assignment=True): + data_bytes: bytes | None = None + data_str: str | None = None + mime_type: str | None = None + parameters: dict[str, str] = Field(default_factory=dict) + data_format: str | None = None + + def update_data(self, value: str | bytes): + """Update the data, using either a string or bytes.""" + if isinstance(value, str): + self.data_str = value + else: + self.data_bytes = value + + @model_validator(mode="before") + @classmethod + def _validate_data(cls, values: dict[str, Any]) -> dict[str, Any]: + """Validate the data.""" + if not values.get("data_bytes") and not values.get("data_str"): + raise ContentInitializationError("Either data_bytes or data_str must be provided.") + return values + + @model_validator(mode="after") + def _parse_data(self) -> Self: + """Parse the data bytes to str.""" + if not self.data_str and self.data_bytes: + if self.data_format and self.data_format.lower() == "base64": + self.data_str = base64.b64encode(self.data_bytes).decode("utf-8") + else: + self.data_str = self.data_bytes.decode("utf-8") + if self.data_format and self.data_format.lower() == "base64" and self.data_str: + try: + if not self.data_bytes: + self.data_bytes = base64.b64decode(self.data_str, validate=True) + else: + base64.b64decode(self.data_str, validate=True) + except binascii.Error as exc: + raise ContentInitializationError("Invalid base64 data.") from exc + return self + + @field_validator("parameters", mode="before") + def _validate_parameters(cls, value: list[str] | dict[str, str] | None = None) -> dict[str, str]: + if not value: + return {} + if isinstance(value, dict): + return value + + new: dict[str, str] = {} + for item in value: + item = item.strip() + if not item: + continue + if "=" not in item: + raise ContentInitializationError("Invalid data uri format. The parameter is missing a value.") + name, val = item.split("=", maxsplit=1) + new[name] = val + return new + + @classmethod + def from_data_uri(cls: type[_T], data_uri: str | Url, default_mime_type: str = "text/plain") -> _T: + """Create a DataUri object from a data URI string or pydantic URL.""" + if isinstance(data_uri, str): + try: + data_uri = Url(data_uri) + except ValidationError as exc: + raise ContentInitializationError("Invalid data uri format.") from exc + + data = data_uri.path + if not data or "," not in data: + raise ContentInitializationError("Invalid data uri format. 
The data is missing.") + + pattern = "(((?P[a-zA-Z]+/[a-zA-Z-]+)(?P(;[a-zA-Z0-9]+=+[a-zA-Z0-9]+)*))?(;+(?P.*)))?(,(?P.*))" # noqa: E501 + match = re.match(pattern, data) + if not match: + raise ContentInitializationError("Invalid data uri format.") + matches = match.groupdict() + if not matches.get("data_format"): + matches.pop("data_format") + if not matches.get("parameters"): + matches.pop("parameters") + else: + matches["parameters"] = matches["parameters"].strip(";").split(";") + if not matches.get("mime_type"): + matches["mime_type"] = default_mime_type + return cls(**matches) + + def to_string(self, metadata: dict[str, str] = {}) -> str: + """Return the data uri as a string.""" + parameters = ";".join([f"{key}={val}" for key, val in metadata.items()]) + parameters = f";{parameters}" if parameters else "" + data_format = f"{self.data_format}" if self.data_format else "" + return f"data:{self.mime_type or ''}{parameters};{data_format},{self.data_str}" + + def __eq__(self, value: object) -> bool: + """Check if the data uri is equal to another.""" + if not isinstance(value, DataUri): + return False + return self.to_string() == value.to_string() diff --git a/python/semantic_kernel/contents/finish_reason.py b/python/semantic_kernel/contents/utils/finish_reason.py similarity index 100% rename from python/semantic_kernel/contents/finish_reason.py rename to python/semantic_kernel/contents/utils/finish_reason.py diff --git a/python/semantic_kernel/core_plugins/sessions_python_tool/sessions_python_plugin.py b/python/semantic_kernel/core_plugins/sessions_python_tool/sessions_python_plugin.py index f52b0a33e914..302e4360c52b 100644 --- a/python/semantic_kernel/core_plugins/sessions_python_tool/sessions_python_plugin.py +++ b/python/semantic_kernel/core_plugins/sessions_python_tool/sessions_python_plugin.py @@ -10,8 +10,8 @@ import httpx from pydantic import ValidationError -from semantic_kernel.connectors.ai.open_ai.const import USER_AGENT from semantic_kernel.connectors.telemetry import HTTP_USER_AGENT, version_info +from semantic_kernel.const import USER_AGENT from semantic_kernel.core_plugins.sessions_python_tool.sessions_python_settings import ( ACASessionsSettings, SessionsPythonSettings, @@ -93,7 +93,7 @@ def _sanitize_input(self, code: str) -> str: code = re.sub(r"^(\s|`)*(?i:python)?\s*", "", code) # Removes whitespace & ` from end return re.sub(r"(\s|`)*$", "", code) - + def _construct_remote_file_path(self, remote_file_path: str) -> str: """Construct the remote file path. @@ -109,8 +109,8 @@ def _construct_remote_file_path(self, remote_file_path: str) -> str: def _build_url_with_version(self, base_url, endpoint, params): """Builds a URL with the provided base URL, endpoint, and query parameters.""" - params['api-version'] = SESSIONS_API_VERSION - query_string = '&'.join([f"{key}={value}" for key, value in params.items()]) + params["api-version"] = SESSIONS_API_VERSION + query_string = "&".join([f"{key}={value}" for key, value in params.items()]) return f"{base_url}{endpoint}?{query_string}" @kernel_function( @@ -178,7 +178,9 @@ async def upload_file( self, *, local_file_path: Annotated[str, "The path to the local file on the machine"], - remote_file_path: Annotated[str | None, "The remote path to the file in the session. Defaults to /mnt/data"] = None, # noqa: E501 + remote_file_path: Annotated[ + str | None, "The remote path to the file in the session. 
Defaults to /mnt/data" + ] = None, ) -> Annotated[SessionsRemoteFileMetadata, "The metadata of the uploaded file"]: """Upload a file to the session pool. @@ -222,7 +224,7 @@ async def upload_file( response.raise_for_status() response_json = response.json() - return SessionsRemoteFileMetadata.from_dict(response_json['$values'][0]) + return SessionsRemoteFileMetadata.from_dict(response_json["$values"][0]) @kernel_function(name="list_files", description="Lists all files in the provided Session ID") async def list_files(self) -> list[SessionsRemoteFileMetadata]: @@ -242,7 +244,7 @@ async def list_files(self) -> list[SessionsRemoteFileMetadata]: url = self._build_url_with_version( base_url=self.pool_management_endpoint, endpoint="python/files", - params={"identifier": self.settings.session_id} + params={"identifier": self.settings.session_id}, ) response = await self.http_client.get( @@ -275,10 +277,7 @@ async def download_file(self, *, remote_file_path: str, local_file_path: str | N url = self._build_url_with_version( base_url=self.pool_management_endpoint, endpoint="python/downloadFile", - params={ - "identifier": self.settings.session_id, - "filename": remote_file_path - } + params={"identifier": self.settings.session_id, "filename": remote_file_path}, ) response = await self.http_client.get( diff --git a/python/semantic_kernel/core_plugins/web_search_engine_plugin.py b/python/semantic_kernel/core_plugins/web_search_engine_plugin.py index 65341a51759f..07fdbda73a20 100644 --- a/python/semantic_kernel/core_plugins/web_search_engine_plugin.py +++ b/python/semantic_kernel/core_plugins/web_search_engine_plugin.py @@ -27,7 +27,7 @@ def __init__(self, connector: "ConnectorBase") -> None: """Initializes a new instance of the WebSearchEnginePlugin class.""" self._connector = connector - @kernel_function(description="Performs a web search for a given query") + @kernel_function(name="search", description="Performs a web search for a given query") async def search( self, query: Annotated[str, "The search query"], diff --git a/python/semantic_kernel/functions/kernel_function_decorator.py b/python/semantic_kernel/functions/kernel_function_decorator.py index 981ba676c47a..1a1698f72ab3 100644 --- a/python/semantic_kernel/functions/kernel_function_decorator.py +++ b/python/semantic_kernel/functions/kernel_function_decorator.py @@ -45,7 +45,6 @@ def kernel_function( if not supplied, the function docstring will be used, can be None. 
""" - def decorator(func: Callable[..., object]) -> Callable[..., object]: """The actual decorator function.""" setattr(func, "__kernel_function__", True) @@ -115,7 +114,7 @@ def _parse_parameter(name: str, param: Any, default: Any) -> dict[str, Any]: logger.debug(f"Parsing param: {name}") logger.debug(f"Parsing annotation: {param}") ret: dict[str, Any] = {"name": name} - if default: + if default is not None: ret["default_value"] = default ret["is_required"] = False else: diff --git a/python/semantic_kernel/kernel_pydantic.py b/python/semantic_kernel/kernel_pydantic.py index 5b5d7dea3787..e2bedb1c8f3f 100644 --- a/python/semantic_kernel/kernel_pydantic.py +++ b/python/semantic_kernel/kernel_pydantic.py @@ -49,6 +49,8 @@ def create(cls: type["T"], **data: Any) -> "T": cls.model_config["env_prefix"] = cls.env_prefix if data.get("env_file_path"): cls.model_config["env_file"] = data["env_file_path"] + else: + cls.model_config["env_file"] = ".env" cls.model_config["env_file_encoding"] = data.get("env_file_encoding", "utf-8") data = {k: v for k, v in data.items() if v is not None} return cls(**data) diff --git a/python/semantic_kernel/prompt_template/kernel_prompt_template.py b/python/semantic_kernel/prompt_template/kernel_prompt_template.py index 44b90fd9eb44..499cada09b66 100644 --- a/python/semantic_kernel/prompt_template/kernel_prompt_template.py +++ b/python/semantic_kernel/prompt_template/kernel_prompt_template.py @@ -75,10 +75,8 @@ def model_post_init(self, __context: Any) -> None: # is a named arg block. self._add_if_missing(sub_block.variable.name, seen) - def _add_if_missing(self, variable_name: str, seen: set | None = None): + def _add_if_missing(self, variable_name: str, seen: set): # Convert variable_name to lower case to handle case-insensitivity - if not seen: - seen = set() if variable_name and variable_name.lower() not in seen: seen.add(variable_name.lower()) self.prompt_template_config.input_variables.append(InputVariable(name=variable_name)) diff --git a/python/semantic_kernel/schema/kernel_json_schema_builder.py b/python/semantic_kernel/schema/kernel_json_schema_builder.py index 95f0f29cb8e0..4b41986c6856 100644 --- a/python/semantic_kernel/schema/kernel_json_schema_builder.py +++ b/python/semantic_kernel/schema/kernel_json_schema_builder.py @@ -70,8 +70,8 @@ def build_model_schema(cls, model: type, description: str | None = None) -> dict for field_name, field_type in hints.items(): field_description = None - if hasattr(model, "__fields__") and field_name in model.__fields__: - field_info = model.__fields__[field_name] + if hasattr(model, "model_fields") and field_name in model.model_fields: + field_info = model.model_fields[field_name] if isinstance(field_info.metadata, dict): field_description = field_info.metadata.get("description") elif isinstance(field_info.metadata, list) and field_info.metadata: @@ -148,9 +148,7 @@ def handle_complex_type(cls, parameter_type: type, description: str | None = Non _, value_type = args additional_properties = cls.build(value_type) if additional_properties == {"type": "object"}: - additional_properties["properties"] = ( - {} - ) # Account for differences in Python 3.10 dict + additional_properties["properties"] = {} # Account for differences in Python 3.10 dict schema = {"type": "object", "additionalProperties": additional_properties} if description: schema["description"] = description diff --git a/python/semantic_kernel/utils/experimental_decorator.py b/python/semantic_kernel/utils/experimental_decorator.py index 2f472b0d68f2..7b28cc0e4064 
100644 --- a/python/semantic_kernel/utils/experimental_decorator.py +++ b/python/semantic_kernel/utils/experimental_decorator.py @@ -1,6 +1,9 @@ # Copyright (c) Microsoft. All rights reserved. from collections.abc import Callable +from typing import TypeVar + +T = TypeVar("T", bound=type) def experimental_function(func: Callable) -> Callable: @@ -16,7 +19,7 @@ def experimental_function(func: Callable) -> Callable: return func -def experimental_class(cls: type) -> type: +def experimental_class(cls: T) -> T: """Decorator to mark a class as experimental.""" if isinstance(cls, type): if cls.__doc__: diff --git a/python/tests/assets/sample_image.jpg b/python/tests/assets/sample_image.jpg new file mode 100644 index 000000000000..ea6486656fd5 Binary files /dev/null and b/python/tests/assets/sample_image.jpg differ diff --git a/python/tests/integration/completions/test_azure_oai_chat_service.py b/python/tests/integration/completions/test_azure_oai_chat_service.py deleted file mode 100644 index 91a015d6716e..000000000000 --- a/python/tests/integration/completions/test_azure_oai_chat_service.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (c) Microsoft. All rights reserved. - - -import pytest -from openai import AsyncAzureOpenAI -from test_utils import retry - -import semantic_kernel.connectors.ai.open_ai as sk_oai -from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior -from semantic_kernel.connectors.ai.open_ai.prompt_execution_settings.azure_chat_prompt_execution_settings import ( - AzureChatPromptExecutionSettings, -) -from semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings import AzureOpenAISettings -from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings -from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent -from semantic_kernel.core_plugins.math_plugin import MathPlugin -from semantic_kernel.functions.kernel_arguments import KernelArguments -from semantic_kernel.kernel import Kernel -from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig - - -@pytest.mark.asyncio -async def test_azure_e2e_chat_completion_with_plugin(setup_tldr_function_for_oai_models): - kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models - - # Configure LLM service - kernel.add_service( - sk_oai.AzureChatCompletion( - service_id="chat", - ), - ) - - exec_settings = PromptExecutionSettings( - service_id="chat", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} - ) - - prompt_template_config = PromptTemplateConfig( - template=prompt, description="Write a short story.", execution_settings=exec_settings - ) - - # Create the semantic function - kernel.add_function(function_name="tldr", plugin_name="plugin", prompt_template_config=prompt_template_config) - - arguments = KernelArguments(input=text_to_summarize) - - summary = await retry(lambda: kernel.invoke(function_name="tldr", plugin_name="plugin", arguments=arguments)) - output = str(summary).strip() - print(f"TLDR using input string: '{output}'") - assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) - assert len(output) < 100 - - -@pytest.mark.asyncio -async def test_azure_e2e_chat_completion_with_plugin_and_provided_client(setup_tldr_function_for_oai_models): - kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models - - azure_openai_settings = AzureOpenAISettings.create() - endpoint = azure_openai_settings.endpoint - 
deployment_name = azure_openai_settings.chat_deployment_name - api_key = azure_openai_settings.api_key.get_secret_value() - api_version = azure_openai_settings.api_version - - client = AsyncAzureOpenAI( - azure_endpoint=endpoint, - azure_deployment=deployment_name, - api_key=api_key, - api_version=api_version, - default_headers={"Test-User-X-ID": "test"}, - ) - - # Configure LLM service - kernel.add_service( - sk_oai.AzureChatCompletion( - service_id="chat_completion", - async_client=client, - ), - ) - - exec_settings = PromptExecutionSettings( - service_id="chat_completion", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} - ) - - prompt_template_config = PromptTemplateConfig( - template=prompt, description="Write a short story.", execution_settings=exec_settings - ) - - # Create the semantic function - kernel.add_function(function_name="tldr", plugin_name="plugin", prompt_template_config=prompt_template_config) - - arguments = KernelArguments(input=text_to_summarize) - - summary = await retry(lambda: kernel.invoke(function_name="tldr", plugin_name="plugin", arguments=arguments)) - output = str(summary).strip() - print(f"TLDR using input string: '{output}'") - assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) - assert len(output) < 100 - - -@pytest.mark.asyncio -async def test_azure_oai_chat_service_with_tool_call(kernel: Kernel): - azure_openai_settings = AzureOpenAISettings.create() - endpoint = azure_openai_settings.endpoint - deployment_name = azure_openai_settings.chat_deployment_name - api_key = azure_openai_settings.api_key.get_secret_value() - api_version = azure_openai_settings.api_version - - client = AsyncAzureOpenAI( - azure_endpoint=endpoint, - azure_deployment=deployment_name, - api_key=api_key, - api_version=api_version, - default_headers={"Test-User-X-ID": "test"}, - ) - - # Configure LLM service - kernel.add_service( - sk_oai.AzureChatCompletion( - service_id="chat_completion", - async_client=client, - ), - ) - - kernel.add_plugin(MathPlugin(), plugin_name="math") - - execution_settings = AzureChatPromptExecutionSettings( - service_id="chat_completion", - max_tokens=2000, - temperature=0.7, - top_p=0.8, - function_call_behavior=FunctionCallBehavior.EnableFunctions( - auto_invoke=True, filters={"excluded_plugins": ["ChatBot"]} - ), - ) - - prompt_template_config = PromptTemplateConfig( - template="{{$input}}", description="Do math.", execution_settings=execution_settings - ) - - # Create the prompt function - kernel.add_function( - function_name="math_fun", plugin_name="math_int_test", prompt_template_config=prompt_template_config - ) - - summary = await retry( - lambda: kernel.invoke(function_name="math_fun", plugin_name="math_int_test", input="what is 1+1?") - ) - output = str(summary).strip() - print(f"Math output: '{output}'") - assert "2" in output - assert len(output) > 0 - - -@pytest.mark.asyncio -async def test_azure_oai_chat_service_with_tool_call_streaming(kernel: Kernel): - azure_openai_settings = AzureOpenAISettings.create() - endpoint = azure_openai_settings.endpoint - deployment_name = azure_openai_settings.chat_deployment_name - api_key = azure_openai_settings.api_key.get_secret_value() - api_version = azure_openai_settings.api_version - - client = AsyncAzureOpenAI( - azure_endpoint=endpoint, - azure_deployment=deployment_name, - api_key=api_key, - api_version=api_version, - default_headers={"Test-User-X-ID": "test"}, - ) - - # Configure LLM service - kernel.add_service( - 
sk_oai.AzureChatCompletion( - service_id="chat_completion", - async_client=client, - ), - ) - - kernel.add_plugin(MathPlugin(), plugin_name="Math") - - # Create the prompt function - kernel.add_function(prompt="Keep the answer short. {{$input}}", function_name="chat", plugin_name="chat") - execution_settings = sk_oai.AzureChatPromptExecutionSettings( - service_id="chat_completion", - max_tokens=2000, - temperature=0.7, - top_p=0.8, - function_call_behavior=FunctionCallBehavior.EnableFunctions( - auto_invoke=True, filters={"excluded_plugins": ["ChatBot"]} - ), - ) - arguments = KernelArguments(input="what is 101+102?", settings=execution_settings) - - result: StreamingChatMessageContent | None = None - async for message in kernel.invoke_stream(function_name="chat", plugin_name="chat", arguments=arguments): - result = message[0] if not result else result + message[0] - output = str(result) - - print(f"Math output: '{output}'") - assert "2" in output - assert 0 < len(output) < 500 diff --git a/python/tests/integration/completions/test_chat_completions.py b/python/tests/integration/completions/test_chat_completions.py new file mode 100644 index 000000000000..1d8359b89944 --- /dev/null +++ b/python/tests/integration/completions/test_chat_completions.py @@ -0,0 +1,357 @@ +# Copyright (c) Microsoft. All rights reserved. + +import os +from functools import partial, reduce +from typing import Any + +import pytest +from openai import AsyncAzureOpenAI + +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase +from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior +from semantic_kernel.connectors.ai.open_ai import ( + AzureChatCompletion, + AzureChatPromptExecutionSettings, + OpenAIChatCompletion, + OpenAIChatPromptExecutionSettings, +) +from semantic_kernel.connectors.ai.open_ai.settings.azure_open_ai_settings import AzureOpenAISettings +from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.contents import ChatHistory, ChatMessageContent, TextContent +from semantic_kernel.contents.function_call_content import FunctionCallContent +from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.core_plugins.math_plugin import MathPlugin +from tests.integration.completions.test_utils import retry + + +def setup( + kernel: Kernel, + service: str, + execution_settings_kwargs: dict[str, Any], + services: dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]], +): + kernel.add_service(services[service][0]) + kernel.add_plugin(MathPlugin(), plugin_name="math") + kernel.add_function( + function_name="chat", + plugin_name="chat", + prompt="If someone asks how you are, always include the word 'well', " + "if you get a direct question, answer the question. 
{{$chat_history}}", + prompt_execution_settings=services[service][1](**execution_settings_kwargs), + ) + + +@pytest.fixture(scope="function") +def history() -> ChatHistory: + return ChatHistory() + + +@pytest.fixture(scope="module") +def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]]: + azure_openai_settings = AzureOpenAISettings.create() + endpoint = azure_openai_settings.endpoint + deployment_name = azure_openai_settings.chat_deployment_name + api_key = azure_openai_settings.api_key.get_secret_value() + api_version = azure_openai_settings.api_version + azure_custom_client = AzureChatCompletion( + async_client=AsyncAzureOpenAI( + azure_endpoint=endpoint, + azure_deployment=deployment_name, + api_key=api_key, + api_version=api_version, + default_headers={"Test-User-X-ID": "test"}, + ), + ) + return { + "openai": (OpenAIChatCompletion(), OpenAIChatPromptExecutionSettings), + "azure": (AzureChatCompletion(), AzureChatPromptExecutionSettings), + "azure_custom_client": (azure_custom_client, AzureChatPromptExecutionSettings), + } + + +pytestmark = pytest.mark.parametrize( + "service, execution_settings_kwargs, inputs, outputs", + [ + pytest.param( + "openai", + {}, + [ + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Hello")]), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), + ], + ["Hello", "well"], + id="openai_text_input", + ), + pytest.param( + "openai", + {}, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent( + uri="https://upload.wikimedia.org/wikipedia/commons/d/d5/Half-timbered_mansion%2C_Zirkel%2C_East_view.jpg" + ), + ], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), + ], + ["house", "germany"], + id="openai_image_input_uri", + ), + pytest.param( + "openai", + {}, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent.from_image_path( + image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") + ), + ], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), + ], + ["house", "germany"], + id="openai_image_input_file", + ), + pytest.param( + "openai", + { + "function_call_behavior": FunctionCallBehavior.EnableFunctions( + auto_invoke=True, filters={"excluded_plugins": ["chat"]} + ) + }, + [ + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), + ], + ["348"], + id="openai_tool_call_auto", + ), + pytest.param( + "openai", + { + "function_call_behavior": FunctionCallBehavior.EnableFunctions( + auto_invoke=False, filters={"excluded_plugins": ["chat"]} + ) + }, + [ + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), + ], + ["348"], + id="openai_tool_call_non_auto", + ), + pytest.param( + "openai", + {}, + [ + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="What was our 2024 revenue?")], + ), + ChatMessageContent( + role=AuthorRole.ASSISTANT, + items=[ + FunctionCallContent( + id="fin", name="finance-search", arguments='{"company": "contoso", "year": 2024}' + ) + ], + ), + ChatMessageContent( + role=AuthorRole.TOOL, + items=[FunctionResultContent(id="fin", name="finance-search", result="1.2B")], + ), + ], + ], + ["1.2"], + id="openai_tool_call_flow", + ), + pytest.param( + "azure", + {}, + [ + ChatMessageContent(role=AuthorRole.USER, 
items=[TextContent(text="Hello")]), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), + ], + ["Hello", "well"], + id="azure_text_input", + ), + pytest.param( + "azure", + {}, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent( + uri="https://upload.wikimedia.org/wikipedia/commons/d/d5/Half-timbered_mansion%2C_Zirkel%2C_East_view.jpg" + ), + ], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), + ], + ["house", "germany"], + id="azure_image_input_uri", + ), + pytest.param( + "azure", + {}, + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="What is in this image?"), + ImageContent.from_image_path( + image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") + ), + ], + ), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Where was it made?")]), + ], + ["house", "germany"], + id="azure_image_input_file", + ), + pytest.param( + "azure", + { + "function_call_behavior": FunctionCallBehavior.EnableFunctions( + auto_invoke=True, filters={"excluded_plugins": ["chat"]} + ) + }, + [ + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), + ], + ["348"], + id="azure_tool_call_auto", + ), + pytest.param( + "azure", + { + "function_call_behavior": FunctionCallBehavior.EnableFunctions( + auto_invoke=False, filters={"excluded_plugins": ["chat"]} + ) + }, + [ + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="What is 3+345?")]), + ], + ["348"], + id="azure_tool_call_non_auto", + ), + pytest.param( + "azure", + {}, + [ + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[TextContent(text="What was our 2024 revenue?")], + ), + ChatMessageContent( + role=AuthorRole.ASSISTANT, + items=[ + FunctionCallContent( + id="fin", name="finance-search", arguments='{"company": "contoso", "year": 2024}' + ) + ], + ), + ChatMessageContent( + role=AuthorRole.TOOL, + items=[FunctionResultContent(id="fin", name="finance-search", result="1.2B")], + ), + ], + ], + ["1.2"], + id="azure_tool_call_flow", + ), + pytest.param( + "azure_custom_client", + {}, + [ + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="Hello")]), + ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]), + ], + ["Hello", "well"], + id="azure_custom_client", + ), + ], +) + + +@pytest.mark.asyncio +async def test_chat_completion( + kernel: Kernel, + service: str, + execution_settings_kwargs: dict[str, Any], + inputs: list[ChatMessageContent | list[ChatMessageContent]], + outputs: list[str], + services: dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]], + history: ChatHistory, +): + setup(kernel, service, execution_settings_kwargs, services) + for message, output in zip(inputs, outputs): + if isinstance(message, list): + for msg in message: + history.add_message(msg) + else: + history.add_message(message) + + cmc = await retry( + partial(execute_invoke, kernel=kernel, history=history, output=output, stream=False), retries=5 + ) + history.add_message(cmc) + + +@pytest.mark.asyncio +async def test_streaming_chat_completion( + kernel: Kernel, + service: str, + execution_settings_kwargs: dict[str, Any], + inputs: list[ChatMessageContent | list[ChatMessageContent]], + outputs: list[str], + services: dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]], + history: ChatHistory, +): + 
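+    # Same flow as test_chat_completion above, but execute_invoke runs with
+    # stream=True, so the kernel yields message chunks that are reduced into a
+    # single response before the content assertions are applied.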
setup(kernel, service, execution_settings_kwargs, services) + for message, output in zip(inputs, outputs): + if isinstance(message, list): + for msg in message: + history.add_message(msg) + else: + history.add_message(message) + cmc = await retry( + partial(execute_invoke, kernel=kernel, history=history, output=output, stream=True), retries=5 + ) + history.add_message(cmc) + + +async def execute_invoke(kernel: Kernel, history: ChatHistory, output: str, stream: bool) -> "ChatMessageContent": + if stream: + invocation = kernel.invoke_stream(function_name="chat", plugin_name="chat", chat_history=history) + parts = [part[0] async for part in invocation] + if parts: + response = reduce(lambda p, r: p + r, parts) + else: + raise AssertionError("No response") + else: + invocation = await kernel.invoke(function_name="chat", plugin_name="chat", chat_history=history) + assert invocation is not None + response = invocation.value[0] + print(response) + if isinstance(response, ChatMessageContent): + for item in response.items: + if isinstance(item, TextContent): + assert item.text is not None + assert output.lower() in item.text.lower() + if isinstance(item, FunctionCallContent): + assert item.arguments + assert kernel.get_function_from_fully_qualified_function_name(item.name) + return response + raise AssertionError(f"Unexpected output: response: {invocation}, type: {type(invocation)}") diff --git a/python/tests/integration/completions/test_oai_chat_service.py b/python/tests/integration/completions/test_oai_chat_service.py index edd2d7ba32ca..9fa452bdd510 100644 --- a/python/tests/integration/completions/test_oai_chat_service.py +++ b/python/tests/integration/completions/test_oai_chat_service.py @@ -3,205 +3,10 @@ import pytest from openai import AsyncOpenAI -from test_utils import retry import semantic_kernel.connectors.ai.open_ai as sk_oai -from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior from semantic_kernel.connectors.ai.open_ai.settings.open_ai_settings import OpenAISettings -from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings from semantic_kernel.contents.chat_history import ChatHistory -from semantic_kernel.core_plugins.math_plugin import MathPlugin -from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig - - -@pytest.mark.asyncio -async def test_oai_chat_service_with_plugins(setup_tldr_function_for_oai_models): - kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models - - kernel.add_service( - sk_oai.OpenAIChatCompletion(service_id="chat-gpt", ai_model_id="gpt-3.5-turbo"), - ) - - exec_settings = PromptExecutionSettings( - service_id="chat-gpt", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} - ) - - prompt_template_config = PromptTemplateConfig( - template=prompt, description="Write a short story.", execution_settings=exec_settings - ) - - # Create the semantic function - tldr_function = kernel.add_function( - function_name="story", plugin_name="plugin", prompt_template_config=prompt_template_config - ) - - summary = await retry(lambda: kernel.invoke(tldr_function, input=text_to_summarize)) - output = str(summary).strip() - print(f"TLDR using input string: '{output}'") - assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) - assert len(output) < 100 - - -@pytest.mark.asyncio -async def test_oai_chat_service_with_tool_call(setup_tldr_function_for_oai_models): - kernel, _, _ = 
setup_tldr_function_for_oai_models - - kernel.add_service( - sk_oai.OpenAIChatCompletion( - service_id="chat-gpt", - ai_model_id="gpt-3.5-turbo-1106", - ), - ) - - kernel.add_plugin(MathPlugin(), plugin_name="math") - - execution_settings = sk_oai.OpenAIChatPromptExecutionSettings( - service_id="chat-gpt", - max_tokens=2000, - temperature=0.7, - top_p=0.8, - function_call_behavior=FunctionCallBehavior.EnableFunctions( - auto_invoke=True, filters={"excluded_plugins": ["ChatBot"]} - ), - ) - - prompt_template_config = PromptTemplateConfig( - template="{{$input}}", description="Do math.", execution_settings=execution_settings - ) - - # Create the prompt function - tldr_function = kernel.add_function( - function_name="math_fun", plugin_name="math_int_test", prompt_template_config=prompt_template_config - ) - - summary = await retry(lambda: kernel.invoke(tldr_function, input="what is 1+1?")) - output = str(summary).strip() - print(f"Math output: '{output}'") - assert "2" in output - assert 0 < len(output) < 100 - - -@pytest.mark.asyncio -async def test_oai_chat_service_with_tool_call_streaming(setup_tldr_function_for_oai_models): - kernel, _, _ = setup_tldr_function_for_oai_models - - kernel.add_service( - sk_oai.OpenAIChatCompletion( - service_id="chat-gpt", - ai_model_id="gpt-3.5-turbo-1106", - ), - ) - - kernel.add_plugin(MathPlugin(), plugin_name="math") - - execution_settings = sk_oai.OpenAIChatPromptExecutionSettings( - service_id="chat-gpt", - max_tokens=2000, - temperature=0.7, - top_p=0.8, - function_call_behavior=FunctionCallBehavior.EnableFunctions( - auto_invoke=True, filters={"excluded_plugins": ["ChatBot"]} - ), - ) - - prompt_template_config = PromptTemplateConfig( - template="{{$input}}", description="Do math.", execution_settings=execution_settings - ) - - # Create the prompt function - tldr_function = kernel.add_function( - function_name="math_fun", plugin_name="math_int_test", prompt_template_config=prompt_template_config - ) - - result = None - async for message in kernel.invoke_stream(tldr_function, input="what is 101+102?"): - result = message[0] if not result else result + message[0] - output = str(result) - - print(f"Math output: '{output}'") - assert "2" in output - assert 0 < len(output) < 100 - - -@pytest.mark.asyncio -async def test_oai_chat_service_with_plugins_with_provided_client(setup_tldr_function_for_oai_models): - kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models - - openai_settings = OpenAISettings.create() - api_key = openai_settings.api_key.get_secret_value() - org_id = openai_settings.org_id - - client = AsyncOpenAI( - api_key=api_key, - organization=org_id, - ) - - kernel.add_service( - sk_oai.OpenAIChatCompletion( - service_id="chat-gpt", - ai_model_id="gpt-3.5-turbo", - async_client=client, - ), - overwrite=True, # Overwrite the service if it already exists since add service says it does - ) - - exec_settings = PromptExecutionSettings( - service_id="chat-gpt", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} - ) - - prompt_template_config = PromptTemplateConfig( - template=prompt, description="Write a short story.", execution_settings=exec_settings - ) - - # Create the semantic function - tldr_function = kernel.add_function( - function_name="story", - plugin_name="story_plugin", - prompt_template_config=prompt_template_config, - ) - - summary = await retry(lambda: kernel.invoke(tldr_function, input=text_to_summarize)) - output = str(summary).strip() - print(f"TLDR using input string: '{output}'") - assert "First 
Law" not in output and ("human" in output or "Human" in output or "preserve" in output) - assert len(output) < 100 - - -@pytest.mark.asyncio -async def test_azure_oai_chat_stream_service_with_plugins(setup_tldr_function_for_oai_models): - kernel, prompt, text_to_summarize = setup_tldr_function_for_oai_models - - # Configure LLM service - kernel.add_service( - sk_oai.AzureChatCompletion( - service_id="chat_completion", - ), - overwrite=True, - ) - - exec_settings = PromptExecutionSettings( - service_id="chat_completion", extension_data={"max_tokens": 200, "temperature": 0, "top_p": 0.5} - ) - - prompt_template_config = PromptTemplateConfig( - template=prompt, description="Write a short story.", execution_settings=exec_settings - ) - - # Create the prompt function - tldr_function = kernel.add_function( - function_name="story", - plugin_name="story_plugin", - prompt_template_config=prompt_template_config, - ) - - result = None - async for message in kernel.invoke_stream(tldr_function, input=text_to_summarize): - result = message[0] if not result else result + message[0] - output = str(result) - - print(f"TLDR using input string: '{output}'") - # assert "First Law" not in output and ("human" in output or "Human" in output or "preserve" in output) - assert 0 < len(output) < 100 @pytest.mark.asyncio diff --git a/python/tests/integration/completions/test_utils.py b/python/tests/integration/completions/test_utils.py index ec0366521445..cb49a892e83d 100644 --- a/python/tests/integration/completions/test_utils.py +++ b/python/tests/integration/completions/test_utils.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft. All rights reserved. +import asyncio import logging -import time logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() @@ -12,13 +12,10 @@ async def retry(func, retries=20): max_delay = 7 for i in range(retries): try: - result = str(await func()) - if "Error" in result: - raise ValueError(result) - return result + return await func() except Exception as e: logger.error(f"Retry {i + 1}: {e}") if i == retries - 1: # Last retry raise - time.sleep(max(min(i, max_delay), min_delay)) + await asyncio.sleep(max(min(i, max_delay), min_delay)) return None diff --git a/python/tests/integration/cross_language/data/prompt_simple_expected.json b/python/tests/integration/cross_language/data/prompt_simple_expected.json index cfbe380355da..07fcc52c95f3 100644 --- a/python/tests/integration/cross_language/data/prompt_simple_expected.json +++ b/python/tests/integration/cross_language/data/prompt_simple_expected.json @@ -6,5 +6,5 @@ } ], "stream": false, - "model": "gpt-3.5-turbo-1106" + "model": "gpt-3.5-turbo" } \ No newline at end of file diff --git a/python/tests/integration/cross_language/data/prompt_with_chat_roles_expected.json b/python/tests/integration/cross_language/data/prompt_with_chat_roles_expected.json index 56a712c36621..7aa1f9122b74 100644 --- a/python/tests/integration/cross_language/data/prompt_with_chat_roles_expected.json +++ b/python/tests/integration/cross_language/data/prompt_with_chat_roles_expected.json @@ -14,5 +14,5 @@ } ], "stream": false, - "model": "gpt-3.5-turbo-1106" + "model": "gpt-3.5-turbo" } \ No newline at end of file diff --git a/python/tests/integration/cross_language/data/prompt_with_complex_objects_expected.json b/python/tests/integration/cross_language/data/prompt_with_complex_objects_expected.json index cfbe380355da..07fcc52c95f3 100644 --- a/python/tests/integration/cross_language/data/prompt_with_complex_objects_expected.json +++ 
b/python/tests/integration/cross_language/data/prompt_with_complex_objects_expected.json @@ -6,5 +6,5 @@ } ], "stream": false, - "model": "gpt-3.5-turbo-1106" + "model": "gpt-3.5-turbo" } \ No newline at end of file diff --git a/python/tests/integration/cross_language/data/prompt_with_helper_functions_expected.json b/python/tests/integration/cross_language/data/prompt_with_helper_functions_expected.json index 8945ef1ac01e..ee5a6e6e18f9 100644 --- a/python/tests/integration/cross_language/data/prompt_with_helper_functions_expected.json +++ b/python/tests/integration/cross_language/data/prompt_with_helper_functions_expected.json @@ -10,5 +10,5 @@ } ], "stream": false, - "model": "gpt-3.5-turbo-1106" + "model": "gpt-3.5-turbo" } \ No newline at end of file diff --git a/python/tests/integration/cross_language/data/prompt_with_simple_variable_expected.json b/python/tests/integration/cross_language/data/prompt_with_simple_variable_expected.json index cfbe380355da..07fcc52c95f3 100644 --- a/python/tests/integration/cross_language/data/prompt_with_simple_variable_expected.json +++ b/python/tests/integration/cross_language/data/prompt_with_simple_variable_expected.json @@ -6,5 +6,5 @@ } ], "stream": false, - "model": "gpt-3.5-turbo-1106" + "model": "gpt-3.5-turbo" } \ No newline at end of file diff --git a/python/tests/integration/cross_language/test_cross_language.py b/python/tests/integration/cross_language/test_cross_language.py index 8faae2c72679..15f8374255ef 100644 --- a/python/tests/integration/cross_language/test_cross_language.py +++ b/python/tests/integration/cross_language/test_cross_language.py @@ -195,7 +195,7 @@ async def test_prompt_with_chat_roles(is_inline, is_streaming, template_format, async_client, logging_client = get_new_client() ai_service = OpenAIChatCompletion( service_id="test", - ai_model_id="gpt-3.5-turbo-1106", + ai_model_id="gpt-3.5-turbo", async_client=async_client, ) @@ -249,7 +249,7 @@ async def test_prompt_with_complex_objects(is_inline, is_streaming, template_for async_client, logging_client = get_new_client() ai_service = OpenAIChatCompletion( service_id="default", - ai_model_id="gpt-3.5-turbo-1106", + ai_model_id="gpt-3.5-turbo", async_client=async_client, ) @@ -308,7 +308,7 @@ async def test_prompt_with_helper_functions(is_inline, is_streaming, template_fo async_client, logging_client = get_new_client() ai_service = OpenAIChatCompletion( service_id="default", - ai_model_id="gpt-3.5-turbo-1106", + ai_model_id="gpt-3.5-turbo", async_client=async_client, ) @@ -378,7 +378,7 @@ async def test_prompt_with_simple_variable(is_inline, is_streaming, template_for async_client, logging_client = get_new_client() ai_service = OpenAIChatCompletion( service_id="default", - ai_model_id="gpt-3.5-turbo-1106", + ai_model_id="gpt-3.5-turbo", async_client=async_client, ) @@ -437,7 +437,7 @@ async def test_simple_prompt(is_inline, is_streaming, template_format, prompt): async_client, logging_client = get_new_client() ai_service = OpenAIChatCompletion( service_id="default", - ai_model_id="gpt-3.5-turbo-1106", + ai_model_id="gpt-3.5-turbo", async_client=async_client, ) @@ -495,7 +495,7 @@ async def test_yaml_prompt(is_streaming, prompt_path, expected_result_path, kern async_client, logging_client = get_new_client() ai_service = OpenAIChatCompletion( service_id="default", - ai_model_id="gpt-3.5-turbo-1106", + ai_model_id="gpt-3.5-turbo", async_client=async_client, ) diff --git 
a/python/tests/integration/planning/function_calling_stepwise_planner/test_int_function_calling_stepwise_planner.py b/python/tests/integration/planning/function_calling_stepwise_planner/test_int_function_calling_stepwise_planner.py index 26bf72eb3890..fd6ca4a4b438 100644 --- a/python/tests/integration/planning/function_calling_stepwise_planner/test_int_function_calling_stepwise_planner.py +++ b/python/tests/integration/planning/function_calling_stepwise_planner/test_int_function_calling_stepwise_planner.py @@ -27,7 +27,7 @@ async def test_can_execute_function_calling_stepwise_plan(kernel: Kernel): kernel.add_service( OpenAIChatCompletion( service_id=service_id, - ai_model_id="gpt-3.5-turbo-1106", + ai_model_id="gpt-3.5-turbo", ), ) diff --git a/python/tests/samples/test_concepts.py b/python/tests/samples/test_concepts.py new file mode 100644 index 000000000000..3692f3761916 --- /dev/null +++ b/python/tests/samples/test_concepts.py @@ -0,0 +1,91 @@ +# Copyright (c) Microsoft. All rights reserved. + +from pytest import mark, param + +from samples.concepts.auto_function_calling.azure_python_code_interpreter_function_calling import ( + main as azure_python_code_interpreter_function_calling, +) +from samples.concepts.auto_function_calling.chat_gpt_api_function_calling import main as chat_gpt_api_function_calling +from samples.concepts.chat_completion.azure_chat_gpt_api import main as azure_chat_gpt_api +from samples.concepts.chat_completion.azure_chat_image_input import main as azure_chat_image_input +from samples.concepts.chat_completion.chat_gpt_api import main as chat_gpt_api +from samples.concepts.chat_completion.chat_streaming import main as chat_streaming +from samples.concepts.chat_completion.openai_logit_bias import main as openai_logit_bias +from samples.concepts.filtering.auto_function_invoke_filters import main as auto_function_invoke_filters +from samples.concepts.filtering.function_invocation_filters import main as function_invocation_filters +from samples.concepts.filtering.function_invocation_filters_stream import main as function_invocation_filters_stream +from samples.concepts.filtering.prompt_filters import main as prompt_filters +from samples.concepts.functions.kernel_arguments import main as kernel_arguments +from samples.concepts.grounding.grounded import main as grounded +from samples.concepts.memory.azure_cognitive_search_memory import main as azure_cognitive_search_memory +from samples.concepts.memory.memory import main as memory +from samples.concepts.planners.azure_openai_function_calling_stepwise_planner import ( + main as azure_openai_function_calling_stepwise_planner, +) +from samples.concepts.planners.openai_function_calling_stepwise_planner import ( + main as openai_function_calling_stepwise_planner, +) +from samples.concepts.planners.sequential_planner import main as sequential_planner +from samples.concepts.plugins.openai_function_calling_with_custom_plugin import ( + main as openai_function_calling_with_custom_plugin, +) +from samples.concepts.plugins.openai_plugin_azure_key_vault import main as openai_plugin_azure_key_vault +from samples.concepts.plugins.openai_plugin_klarna import main as openai_plugin_klarna +from samples.concepts.plugins.plugins_from_dir import main as plugins_from_dir +from samples.concepts.prompt_templates.azure_chat_gpt_api_handlebars import main as azure_chat_gpt_api_handlebars +from samples.concepts.prompt_templates.azure_chat_gpt_api_jinja2 import main as azure_chat_gpt_api_jinja2 +from 
samples.concepts.prompt_templates.configuring_prompts import main as configuring_prompts
+from samples.concepts.prompt_templates.load_yaml_prompt import main as load_yaml_prompt
+from samples.concepts.prompt_templates.template_language import main as template_language
+from samples.concepts.rag.rag_with_text_memory_plugin import main as rag_with_text_memory_plugin
+from samples.concepts.search.bing_search_plugin import main as bing_search_plugin
+from samples.concepts.service_selector.custom_service_selector import main as custom_service_selector
+from tests.samples.test_samples_utils import retry
+
+concepts = [
+    param(
+        azure_python_code_interpreter_function_calling,
+        ["print('Hello, World!')", "exit"],
+        id="azure_python_code_interpreter_function_calling",
+    ),
+    param(chat_gpt_api_function_calling, ["What is 3+3?", "exit"], id="chat_gpt_api_function_calling"),
+    param(azure_chat_gpt_api, ["Why is the sky blue?", "exit"], id="azure_chat_gpt_api"),
+    param(chat_gpt_api, ["What is life?", "exit"], id="chat_gpt_api"),
+    param(chat_streaming, ["Why is the sun hot?", "exit"], id="chat_streaming"),
+    param(openai_logit_bias, [], id="openai_logit_bias"),
+    param(auto_function_invoke_filters, ["What is 3+3?", "exit"], id="auto_function_invoke_filters"),
+    param(function_invocation_filters, ["What is 3+3?", "exit"], id="function_invocation_filters"),
+    param(function_invocation_filters_stream, ["What is 3+3?", "exit"], id="function_invocation_filters_stream"),
+    param(prompt_filters, ["What is the fastest animal?", "exit"], id="prompt_filters"),
+    param(kernel_arguments, [], id="kernel_arguments"),
+    param(grounded, [], id="grounded"),
+    param(azure_cognitive_search_memory, [], id="azure_cognitive_search_memory"),
+    param(memory, ["What are my investments?", "exit"], id="memory"),
+    param(azure_openai_function_calling_stepwise_planner, [], id="azure_openai_function_calling_stepwise_planner"),
+    param(openai_function_calling_stepwise_planner, [], id="openai_function_calling_stepwise_planner"),
+    param(sequential_planner, [], id="sequential_planner"),
+    param(openai_function_calling_with_custom_plugin, [], id="openai_function_calling_with_custom_plugin"),
+    param(
+        openai_plugin_azure_key_vault,
+        ["Create a secret with the name 'Foo' and value 'Bar'", "exit"],
+        id="openai_plugin_azure_key_vault",
+    ),
+    param(openai_plugin_klarna, [], id="openai_plugin_klarna"),
+    param(plugins_from_dir, [], id="plugins_from_dir"),
+    param(azure_chat_gpt_api_handlebars, ["What is 3+3?", "exit"], id="azure_chat_gpt_api_handlebars"),
+    param(azure_chat_gpt_api_jinja2, ["What is 3+3?", "exit"], id="azure_chat_gpt_api_jinja2"),
+    param(configuring_prompts, ["What is my name?", "exit"], id="configuring_prompts"),
+    param(load_yaml_prompt, [], id="load_yaml_prompt"),
+    param(template_language, [], id="template_language"),
+    param(rag_with_text_memory_plugin, [], id="rag_with_text_memory_plugin"),
+    param(bing_search_plugin, [], id="bing_search_plugin"),
+    param(azure_chat_image_input, [], id="azure_chat_image_input"),
+    param(custom_service_selector, [], id="custom_service_selector"),
+]
+
+
+@mark.asyncio
+@mark.parametrize("func, responses", concepts)
+async def test_concepts(func, responses, monkeypatch):
+    monkeypatch.setattr("builtins.input", lambda _: responses.pop(0))
+    await retry(lambda: func())
diff --git a/python/tests/samples/test_learn_resources.py b/python/tests/samples/test_learn_resources.py
index 2f7f00ce8507..82a6e5d00175 100644
--- a/python/tests/samples/test_learn_resources.py
+++ 
b/python/tests/samples/test_learn_resources.py @@ -12,6 +12,7 @@ from samples.learn_resources.templates import main as templates from samples.learn_resources.using_the_kernel import main as using_the_kernel from samples.learn_resources.your_first_prompt import main as your_first_prompt +from tests.samples.test_samples_utils import retry @mark.asyncio @@ -45,6 +46,6 @@ async def test_learn_resources(func, responses, monkeypatch): monkeypatch.setattr("builtins.input", lambda _: responses.pop(0)) if func.__module__ == "samples.learn_resources.your_first_prompt": - await func(delay=10) + await retry(lambda: func(delay=10)) return - await func() + await retry(lambda: func()) diff --git a/python/tests/samples/test_samples_utils.py b/python/tests/samples/test_samples_utils.py new file mode 100644 index 000000000000..d04b39d3656b --- /dev/null +++ b/python/tests/samples/test_samples_utils.py @@ -0,0 +1,23 @@ +# Copyright (c) Microsoft. All rights reserved. + +import asyncio +import logging + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger() + + +async def retry(func, max_retries=3): + """Retry a function a number of times before raising an exception.""" + attempt = 0 + while attempt < max_retries: + try: + await func() + break + except Exception as e: + attempt += 1 + logger.error(f"Attempt {attempt} for {func.__name__} failed: {e}") + if attempt == max_retries: + logger.error(f"All {max_retries} attempts for {func.__name__} failed") + raise e + await asyncio.sleep(1) diff --git a/python/tests/unit/connectors/google_palm/services/test_palm_chat_completion.py b/python/tests/unit/connectors/google_palm/services/test_palm_chat_completion.py index 5380a176a340..99cb9e6b9c43 100644 --- a/python/tests/unit/connectors/google_palm/services/test_palm_chat_completion.py +++ b/python/tests/unit/connectors/google_palm/services/test_palm_chat_completion.py @@ -29,6 +29,7 @@ def test_google_palm_chat_completion_init_with_empty_api_key(google_palm_unit_te with pytest.raises(ServiceInitializationError): GooglePalmChatCompletion( ai_model_id=ai_model_id, + env_file_path="test.env", ) diff --git a/python/tests/unit/connectors/google_palm/services/test_palm_text_completion.py b/python/tests/unit/connectors/google_palm/services/test_palm_text_completion.py index 91596bf54427..7334f335df21 100644 --- a/python/tests/unit/connectors/google_palm/services/test_palm_text_completion.py +++ b/python/tests/unit/connectors/google_palm/services/test_palm_text_completion.py @@ -30,6 +30,7 @@ def test_google_palm_text_completion_init_with_empty_api_key(google_palm_unit_te with pytest.raises(ServiceInitializationError): GooglePalmTextCompletion( ai_model_id=ai_model_id, + env_file_path="test.env", ) diff --git a/python/tests/unit/connectors/google_palm/services/test_palm_text_embedding.py b/python/tests/unit/connectors/google_palm/services/test_palm_text_embedding.py index aa1b61ee09e4..c70986c2b122 100644 --- a/python/tests/unit/connectors/google_palm/services/test_palm_text_embedding.py +++ b/python/tests/unit/connectors/google_palm/services/test_palm_text_embedding.py @@ -30,6 +30,7 @@ def test_google_palm_text_embedding_init_with_empty_api_key(google_palm_unit_tes with pytest.raises(ServiceInitializationError): GooglePalmTextEmbedding( ai_model_id=ai_model_id, + env_file_path="test.env", ) diff --git a/python/tests/unit/connectors/open_ai/services/test_azure_chat_completion.py b/python/tests/unit/connectors/open_ai/services/test_azure_chat_completion.py index 0d8a8f89889e..938fa1243441 100644 --- 
a/python/tests/unit/connectors/open_ai/services/test_azure_chat_completion.py +++ b/python/tests/unit/connectors/open_ai/services/test_azure_chat_completion.py @@ -12,7 +12,6 @@ from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase from semantic_kernel.connectors.ai.function_call_behavior import FunctionCallBehavior from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion -from semantic_kernel.connectors.ai.open_ai.const import USER_AGENT from semantic_kernel.connectors.ai.open_ai.exceptions.content_filter_ai_exception import ( ContentFilterAIException, ContentFilterResultSeverity, @@ -22,6 +21,7 @@ AzureChatPromptExecutionSettings, ExtraBody, ) +from semantic_kernel.const import USER_AGENT from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.exceptions import ServiceInitializationError, ServiceInvalidExecutionSettingsError from semantic_kernel.exceptions.service_exceptions import ServiceResponseException @@ -58,19 +58,25 @@ def test_azure_chat_completion_init_base_url(azure_openai_unit_test_env) -> None @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_CHAT_DEPLOYMENT_NAME"]], indirect=True) def test_azure_chat_completion_init_with_empty_deployment_name(azure_openai_unit_test_env) -> None: with pytest.raises(ServiceInitializationError): - AzureChatCompletion() + AzureChatCompletion( + env_file_path="test.env", + ) @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_KEY"]], indirect=True) def test_azure_chat_completion_init_with_empty_api_key(azure_openai_unit_test_env) -> None: with pytest.raises(ServiceInitializationError): - AzureChatCompletion() + AzureChatCompletion( + env_file_path="test.env", + ) @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_BASE_URL"]], indirect=True) def test_azure_chat_completion_init_with_empty_endpoint_and_base_url(azure_openai_unit_test_env) -> None: with pytest.raises(ServiceInitializationError): - AzureChatCompletion() + AzureChatCompletion( + env_file_path="test.env", + ) @pytest.mark.parametrize("override_env_param_dict", [{"AZURE_OPENAI_ENDPOINT": "http://test.com"}], indirect=True) @@ -450,9 +456,7 @@ async def test_azure_chat_completion_auto_invoke_false_no_kernel_provided_throws prompt = "some prompt that would trigger the content filtering" chat_history.add_user_message(prompt) complete_prompt_execution_settings = AzureChatPromptExecutionSettings( - function_call_behavior=FunctionCallBehavior.EnableFunctions( - auto_invoke=False, filters={} - ) + function_call_behavior=FunctionCallBehavior.EnableFunctions(auto_invoke=False, filters={}) ) test_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT") diff --git a/python/tests/unit/connectors/open_ai/services/test_azure_text_completion.py b/python/tests/unit/connectors/open_ai/services/test_azure_text_completion.py index 137b7fa50439..061572bca095 100644 --- a/python/tests/unit/connectors/open_ai/services/test_azure_text_completion.py +++ b/python/tests/unit/connectors/open_ai/services/test_azure_text_completion.py @@ -43,21 +43,28 @@ def test_azure_text_completion_init_with_custom_header(azure_openai_unit_test_en @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_TEXT_DEPLOYMENT_NAME"]], indirect=True) -def test_azure_text_completion_init_with_empty_deployment_name(azure_openai_unit_test_env) -> None: +def test_azure_text_completion_init_with_empty_deployment_name(monkeypatch, azure_openai_unit_test_env) -> None: + monkeypatch.delenv("AZURE_OPENAI_TEXT_DEPLOYMENT_NAME", 
raising=False) with pytest.raises(ServiceInitializationError): - AzureTextCompletion() + AzureTextCompletion( + env_file_path="test.env", + ) @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_KEY"]], indirect=True) def test_azure_text_completion_init_with_empty_api_key(azure_openai_unit_test_env) -> None: with pytest.raises(ServiceInitializationError): - AzureTextCompletion() + AzureTextCompletion( + env_file_path="test.env", + ) @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_BASE_URL"]], indirect=True) def test_azure_text_completion_init_with_empty_endpoint_and_base_url(azure_openai_unit_test_env) -> None: with pytest.raises(ServiceInitializationError): - AzureTextCompletion() + AzureTextCompletion( + env_file_path="test.env", + ) @pytest.mark.parametrize("override_env_param_dict", [{"AZURE_OPENAI_ENDPOINT": "http://test.com"}], indirect=True) diff --git a/python/tests/unit/connectors/open_ai/services/test_azure_text_embedding.py b/python/tests/unit/connectors/open_ai/services/test_azure_text_embedding.py index 77bd4ec55004..bdb97b1b0070 100644 --- a/python/tests/unit/connectors/open_ai/services/test_azure_text_embedding.py +++ b/python/tests/unit/connectors/open_ai/services/test_azure_text_embedding.py @@ -24,19 +24,25 @@ def test_azure_text_embedding_init(azure_openai_unit_test_env) -> None: @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME"]], indirect=True) def test_azure_text_embedding_init_with_empty_deployment_name(azure_openai_unit_test_env) -> None: with pytest.raises(ServiceInitializationError): - AzureTextEmbedding() + AzureTextEmbedding( + env_file_path="test.env", + ) @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_API_KEY"]], indirect=True) def test_azure_text_embedding_init_with_empty_api_key(azure_openai_unit_test_env) -> None: with pytest.raises(ServiceInitializationError): - AzureTextEmbedding() + AzureTextEmbedding( + env_file_path="test.env", + ) @pytest.mark.parametrize("exclude_list", [["AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_BASE_URL"]], indirect=True) def test_azure_text_embedding_init_with_empty_endpoint_and_base_url(azure_openai_unit_test_env) -> None: with pytest.raises(ServiceInitializationError): - AzureTextEmbedding() + AzureTextEmbedding( + env_file_path="test.env", + ) @pytest.mark.parametrize("override_env_param_dict", [{"AZURE_OPENAI_ENDPOINT": "http://test.com"}], indirect=True) diff --git a/python/tests/unit/connectors/open_ai/services/test_open_ai_chat_completion_base.py b/python/tests/unit/connectors/open_ai/services/test_open_ai_chat_completion_base.py index 2e2cb8903502..fd360cd890f9 100644 --- a/python/tests/unit/connectors/open_ai/services/test_open_ai_chat_completion_base.py +++ b/python/tests/unit/connectors/open_ai/services/test_open_ai_chat_completion_base.py @@ -192,7 +192,7 @@ async def test_process_tool_calls_with_continuation_on_malformed_arguments(): FunctionCallBehavior.AutoInvokeKernelFunctions(), ) - logger_mock.exception.assert_any_call( + logger_mock.info.assert_any_call( "Received invalid arguments for function test_function: Malformed arguments. Trying tool call again." 
) diff --git a/python/tests/unit/connectors/open_ai/services/test_openai_chat_completion.py b/python/tests/unit/connectors/open_ai/services/test_openai_chat_completion.py index b535bb849303..481feee774ac 100644 --- a/python/tests/unit/connectors/open_ai/services/test_openai_chat_completion.py +++ b/python/tests/unit/connectors/open_ai/services/test_openai_chat_completion.py @@ -4,8 +4,8 @@ import pytest from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase -from semantic_kernel.connectors.ai.open_ai.const import USER_AGENT from semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion import OpenAIChatCompletion +from semantic_kernel.const import USER_AGENT from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError @@ -46,7 +46,9 @@ def test_open_ai_chat_completion_init_with_default_header(openai_unit_test_env) @pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) def test_open_ai_chat_completion_init_with_empty_model_id(openai_unit_test_env) -> None: with pytest.raises(ServiceInitializationError): - OpenAIChatCompletion() + OpenAIChatCompletion( + env_file_path="test.env", + ) @pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) @@ -56,6 +58,7 @@ def test_open_ai_chat_completion_init_with_empty_api_key(openai_unit_test_env) - with pytest.raises(ServiceInitializationError): OpenAIChatCompletion( ai_model_id=ai_model_id, + env_file_path="test.env", ) diff --git a/python/tests/unit/connectors/open_ai/services/test_openai_text_completion.py b/python/tests/unit/connectors/open_ai/services/test_openai_text_completion.py index 4be7199cf708..fda23f1dec70 100644 --- a/python/tests/unit/connectors/open_ai/services/test_openai_text_completion.py +++ b/python/tests/unit/connectors/open_ai/services/test_openai_text_completion.py @@ -43,7 +43,9 @@ def test_open_ai_text_completion_init_with_default_header(openai_unit_test_env) @pytest.mark.parametrize("exclude_list", [["OPENAI_API_KEY"]], indirect=True) def test_open_ai_text_completion_init_with_empty_api_key(openai_unit_test_env) -> None: with pytest.raises(ServiceInitializationError): - OpenAITextCompletion() + OpenAITextCompletion( + env_file_path="test.env", + ) def test_open_ai_text_completion_serialize(openai_unit_test_env) -> None: diff --git a/python/tests/unit/contents/test_binary_content.py b/python/tests/unit/contents/test_binary_content.py new file mode 100644 index 000000000000..14719d5f5754 --- /dev/null +++ b/python/tests/unit/contents/test_binary_content.py @@ -0,0 +1,108 @@ +# Copyright (c) Microsoft. All rights reserved. 
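+# A data URI has the form "data:<mime-type>[;<param>=<value>][;base64],<payload>";
+# e.g. "data:image/jpeg;base64,dGVzdF9kYXRh" carries b"test_data" base64-encoded.
+# The cases below construct BinaryContent from a plain uri, from raw data, and
+# from data URIs with parameters and metadata.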
+
+
+import pytest
+
+from semantic_kernel.contents.binary_content import BinaryContent
+
+test_cases = [
+    pytest.param(BinaryContent(uri="http://test_uri"), id="uri"),
+    pytest.param(BinaryContent(data=b"test_data", mime_type="image/jpeg", data_format="base64"), id="data"),
+    pytest.param(BinaryContent(data="test_data", mime_type="image/jpeg"), id="data_str"),
+    pytest.param(BinaryContent(uri="http://test_uri", data=b"test_data", mime_type="image/jpeg"), id="both"),
+    pytest.param(BinaryContent(data_uri="data:image/jpeg;base64,dGVzdF9kYXRh"), id="data_uri"),
+    pytest.param(BinaryContent(data_uri="data:image/jpeg;foo=bar;base64,dGVzdF9kYXRh"), id="data_uri_with_params"),
+    pytest.param(
+        BinaryContent(data_uri="data:image/jpeg;foo=bar;base64,dGVzdF9kYXRh", metadata={"bar": "baz"}),
+        id="data_uri_with_params_and_metadata",
+    ),
+]
+
+
+def test_create_empty():
+    binary = BinaryContent()
+    assert binary.uri is None
+    assert binary.data == b""
+    assert binary.mime_type == "text/plain"
+    assert binary.metadata == {}
+
+
+def test_create_uri():
+    binary = BinaryContent(uri="http://test_uri")
+    assert str(binary.uri) == "http://test_uri/"
+
+
+def test_create_data():
+    binary = BinaryContent(data=b"test_data", mime_type="application/json")
+    assert binary.mime_type == "application/json"
+    assert binary.data == b"test_data"
+
+
+def test_create_data_uri():
+    binary = BinaryContent(data_uri="data:application/json;base64,dGVzdF9kYXRh")
+    assert binary.mime_type == "application/json"
+    assert binary.data.decode() == "test_data"
+
+
+def test_create_data_uri_with_params():
+    binary = BinaryContent(data_uri="data:image/jpeg;foo=bar;base64,dGVzdF9kYXRh")
+    assert binary.metadata == {"foo": "bar"}
+
+
+def test_create_data_uri_with_params_and_metadata():
+    binary = BinaryContent(data_uri="data:image/jpeg;foo=bar;base64,dGVzdF9kYXRh", metadata={"bar": "baz"})
+    assert binary.metadata == {"foo": "bar", "bar": "baz"}
+
+
+def test_update_data():
+    binary = BinaryContent()
+    binary.data = b"test_data"
+    binary.mime_type = "application/json"
+    assert binary.mime_type == "application/json"
+    assert binary.data == b"test_data"
+
+
+def test_update_data_str():
+    binary = BinaryContent()
+    binary.data = "test_data"
+    binary.mime_type = "application/json"
+    assert binary.mime_type == "application/json"
+    assert binary.data == b"test_data"
+
+
+def test_update_existing_data():
+    binary = BinaryContent(data_uri="data:image/jpeg;foo=bar;base64,dGVzdF9kYXRh", metadata={"bar": "baz"})
+    binary._data_uri.data_format = None
+    # exercise both the str and the bytes setter paths once the format is cleared
+    binary.data = "test_data"
+    binary.data = b"test_data"
+    assert binary.data == b"test_data"
+
+
+def test_update_data_uri():
+    binary = BinaryContent()
+    binary.data_uri = "data:image/jpeg;foo=bar;base64,dGVzdF9kYXRh"
+    assert binary.mime_type == "image/jpeg"
+    assert binary.data.decode() == "test_data"
+    assert binary.metadata == {"foo": "bar"}
+
+
+def test_to_str_uri():
+    binary = BinaryContent(uri="http://test_uri")
+    assert str(binary) == "http://test_uri/"
+
+
+def test_to_str_data():
+    binary = BinaryContent(data=b"test_data", mime_type="image/jpeg", data_format="base64")
+    assert str(binary) == "data:image/jpeg;base64,dGVzdF9kYXRh"
+
+
+@pytest.mark.parametrize("binary", test_cases)
+def test_element_roundtrip(binary):
+    element = binary.to_element()
+    new_image = BinaryContent.from_element(element)
+    assert new_image == binary
+
+
+@pytest.mark.parametrize("binary", test_cases)
+def test_to_dict(binary):
+    assert binary.to_dict() == {"type": "binary", "binary": {"uri": str(binary)}}
diff --git 
a/python/tests/unit/contents/test_chat_history.py b/python/tests/unit/contents/test_chat_history.py index 89fdb1925b8e..d4b4a3bd07d6 100644 --- a/python/tests/unit/contents/test_chat_history.py +++ b/python/tests/unit/contents/test_chat_history.py @@ -3,12 +3,13 @@ import pytest -from semantic_kernel.contents.author_role import AuthorRole from semantic_kernel.contents.chat_history import ChatHistory from semantic_kernel.contents.chat_message_content import ChatMessageContent from semantic_kernel.contents.function_call_content import FunctionCallContent from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole from semantic_kernel.exceptions import ContentInitializationError from semantic_kernel.functions.kernel_arguments import KernelArguments from semantic_kernel.kernel import Kernel @@ -117,6 +118,26 @@ def test_add_message(chat_history: ChatHistory): assert chat_history.messages[-1].metadata == {"test": "test"} +def test_add_message_with_image(chat_history: ChatHistory): + content = "Test message" + role = AuthorRole.USER + encoding = "utf-8" + chat_history.add_message( + ChatMessageContent( + role=role, + items=[ + TextContent(text=content), + ImageContent(uri="https://test/"), + ], + encoding=encoding, + ) + ) + assert chat_history.messages[-1].content == content + assert chat_history.messages[-1].role == role + assert chat_history.messages[-1].encoding == encoding + assert str(chat_history.messages[-1].items[1].uri) == "https://test/" + + def test_add_message_invalid_message(chat_history: ChatHistory): content = "Test message" with pytest.raises(ContentInitializationError): @@ -222,13 +243,24 @@ def test_dump(): def test_serialize(): system_msg = "a test system prompt" chat_history = ChatHistory( - messages=[ChatMessageContent(role=AuthorRole.USER, content="Message")], system_message=system_msg + messages=[ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="Message"), + ImageContent(uri="http://test.com/image.jpg"), + ImageContent(data_uri="data:image/jpeg;base64,dGVzdF9kYXRh"), + ], + ) + ], + system_message=system_msg, ) + json_str = chat_history.serialize() assert json_str is not None assert ( json_str - == '{\n "messages": [\n {\n "metadata": {},\n "role": "system",\n "items": [\n {\n "metadata": {},\n "text": "a test system prompt"\n }\n ]\n },\n {\n "metadata": {},\n "role": "user",\n "items": [\n {\n "metadata": {},\n "text": "Message"\n }\n ]\n }\n ]\n}' # noqa: E501 + == '{\n "messages": [\n {\n "metadata": {},\n "content_type": "message",\n "role": "system",\n "items": [\n {\n "metadata": {},\n "content_type": "text",\n "text": "a test system prompt"\n }\n ]\n },\n {\n "metadata": {},\n "content_type": "message",\n "role": "user",\n "items": [\n {\n "metadata": {},\n "content_type": "text",\n "text": "Message"\n },\n {\n "metadata": {},\n "content_type": "image",\n "uri": "http://test.com/image.jpg",\n "data_uri": ""\n },\n {\n "metadata": {},\n "content_type": "image",\n "data_uri": "data:image/jpeg;base64,dGVzdF9kYXRh"\n }\n ]\n }\n ]\n}' # noqa: E501 ) diff --git a/python/tests/unit/contents/test_chat_message_content.py b/python/tests/unit/contents/test_chat_message_content.py index 38ee93d8e6bb..cdc3177dc71f 100644 --- a/python/tests/unit/contents/test_chat_message_content.py +++ 
b/python/tests/unit/contents/test_chat_message_content.py @@ -3,12 +3,13 @@ import pytest from defusedxml.ElementTree import XML -from semantic_kernel.contents.author_role import AuthorRole from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.finish_reason import FinishReason from semantic_kernel.contents.function_call_content import FunctionCallContent from semantic_kernel.contents.function_result_content import FunctionResultContent +from semantic_kernel.contents.image_content import ImageContent from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents.utils.finish_reason import FinishReason def test_cmc(): @@ -61,14 +62,11 @@ def test_cmc_items_and_content(): def test_cmc_multiple_items(): message = ChatMessageContent( role=AuthorRole.SYSTEM, - items=[ - TextContent(text="Hello, world!"), - TextContent(text="Hello, world!"), - ], + items=[TextContent(text="Hello, world!"), TextContent(text="Hello, world!"), ImageContent(uri="http://test/")], ) assert message.role == AuthorRole.SYSTEM assert message.content == "Hello, world!" - assert len(message.items) == 2 + assert len(message.items) == 3 def test_cmc_content_set(): @@ -103,6 +101,48 @@ def test_cmc_to_element(): assert child.text == "Hello, world!" +@pytest.mark.parametrize( + "message", + [ + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="test"), + ], + ), + ChatMessageContent( + role=AuthorRole.USER, + items=[ImageContent(uri="http://test/")], + ), + ChatMessageContent( + role=AuthorRole.USER, + items=[ImageContent(data=b"test_data", mime_type="image/jpeg")], + ), + ChatMessageContent( + role=AuthorRole.USER, items=[FunctionCallContent(id="test", name="func_name", arguments="args")] + ), + ChatMessageContent( + role=AuthorRole.USER, + items=[FunctionResultContent(id="test", name="func_name", result="result")], + ), + ChatMessageContent( + role=AuthorRole.USER, + items=[ + TextContent(text="Hello, world!"), + FunctionCallContent(id="test", name="func_name", arguments="args"), + FunctionResultContent(id="test", name="func_name", result="result"), + ImageContent(uri="http://test/"), + ], + ), + ], + ids=["text", "image_uri", "image_data", "function_call", "function_result", "all"], +) +def test_cmc_to_from_element(message): + element = message.to_element() + new_message = ChatMessageContent.from_element(element) + assert message == new_message + + def test_cmc_to_prompt(): message = ChatMessageContent(role=AuthorRole.USER, content="Hello, world!") prompt = message.to_prompt() @@ -162,6 +202,7 @@ def test_cmc_from_element_content(): 1, # TODO: review this case ), ('Hello, world!', "user", "Hello, world!", 1), + ('data:image/jpeg;base64,dGVzdF9kYXRh', "user", "", 1), ], ids=[ "no_tag", @@ -172,6 +213,7 @@ def test_cmc_from_element_content(): "combined", "unknown_tag", "streaming", + "image", ], ) def test_cmc_from_element_content_parse(xml_content, user, text_content, length): diff --git a/python/tests/unit/contents/test_data_uri.py b/python/tests/unit/contents/test_data_uri.py new file mode 100644 index 000000000000..c4879b305593 --- /dev/null +++ b/python/tests/unit/contents/test_data_uri.py @@ -0,0 +1,232 @@ +# Copyright (c) Microsoft. All rights reserved. 
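+# DataUri.from_data_uri splits "data:<mime-type>;<params>;<data-format>,<payload>"
+# into mime_type, parameters, data_format, and the decoded data_bytes/data_str;
+# the cases below cover each component plus malformed inputs that must raise
+# ContentInitializationError.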
+
+from typing import Any
+
+import pytest
+
+from semantic_kernel.contents.utils.data_uri import DataUri
+from semantic_kernel.exceptions.content_exceptions import ContentInitializationError
+
+
+@pytest.mark.parametrize(
+    "uri, data_bytes, data_str, mime_type, parameters, data_format",
+    [
+        pytest.param(
+            "data:image/jpeg;base64,dGVzdF9kYXRh",
+            b"test_data",
+            "dGVzdF9kYXRh",
+            "image/jpeg",
+            {},
+            "base64",
+            id="basic_image",
+        ),
+        pytest.param("data:text/plain;,test_data", None, "test_data", "text/plain", {}, None, id="basic_text"),
+        pytest.param(
+            "data:application/octet-stream;base64,AQIDBA==",
+            b"\x01\x02\x03\x04",
+            "AQIDBA==",
+            "application/octet-stream",
+            {},
+            "base64",
+            id="basic_binary",
+        ),
+        pytest.param(
+            "data:text/plain;base64,U29t\r\nZQ==\t",
+            b"Some",
+            "U29tZQ==",
+            "text/plain",
+            {},
+            "base64",
+            id="strip_whitespace",
+        ),
+        pytest.param(
+            "data:application/octet-stream;utf8,01-02-03-04",
+            None,
+            "01-02-03-04",
+            "application/octet-stream",
+            {},
+            "utf8",
+            id="utf8",
+        ),
+    ],
+)
+def test_data_uri_from_data_uri_str(
+    uri: str,
+    data_bytes: bytes | None,
+    data_str: str | None,
+    mime_type: str | None,
+    parameters: dict[str, str],
+    data_format: str | None,
+):
+    data_uri = DataUri.from_data_uri(uri)
+    assert data_uri.data_bytes == data_bytes
+    assert data_uri.data_str == data_str
+    assert data_uri.mime_type == mime_type
+    assert data_uri.parameters == parameters
+    assert data_uri.data_format == data_format
+
+
+@pytest.mark.parametrize(
+    "uri, exception",
+    [
+        pytest.param("", ContentInitializationError, id="empty"),
+        pytest.param("data", ContentInitializationError, id="missing_colon"),
+        pytest.param("data:", ContentInitializationError, id="missing_comma"),
+        pytest.param("data:something,", ContentInitializationError, id="mime_type_without_subtype"),
+        pytest.param("data:something;else,data", ContentInitializationError, id="mime_type_without_subtype2"),
+        pytest.param(
+            "data:type/subtype;parameterwithoutvalue;else,", ContentInitializationError, id="param_without_value"
+        ),
+        pytest.param("data:type/subtype;parameter=va=lue;else,", ContentInitializationError, id="param_multiple_eq"),
+        pytest.param("data:type/subtype;=value;else,", ContentInitializationError, id="param_without_name"),
+        pytest.param("data:image/jpeg;base64,dGVzdF9kYXRh;foo=bar", ContentInitializationError, id="wrong_order"),
+        pytest.param("data:text/plain;test_data", ContentInitializationError, id="missing_comma_after_mime"),
+        pytest.param(
+            "data:text/plain;base64,something!",
+            ContentInitializationError,
+            id="invalid_char",
+        ),
+        pytest.param(
+            "data:text/plain;base64,U29",
+            ContentInitializationError,
+            id="missing_padding",
+        ),
+    ],
+)
+def test_data_uri_from_data_uri_fail(uri: str, exception: type[Exception]):
+    with pytest.raises(exception):
+        DataUri.from_data_uri(uri)
+
+
+def test_data_uri_to_string_with_extra_metadata():
+    uri = DataUri.from_data_uri("data:image/jpeg;base64,dGVzdF9kYXRh")
+    assert uri.to_string(metadata={"foo": "bar"}) == "data:image/jpeg;foo=bar;base64,dGVzdF9kYXRh"
+
+
+def test_default_mime_type():
+    uri = DataUri.from_data_uri("data:;base64,dGVzdF9kYXRh", default_mime_type="image/jpeg")
+    assert uri.mime_type == "image/jpeg"
+
+
+@pytest.mark.parametrize(
+    "fields, uri",
+    [
+        pytest.param(
+            {
+                "data_bytes": b"test_data",
+                "mime_type": "image/jpeg",
+                "data_format": "base64",
+            },
+            "data:image/jpeg;base64,dGVzdF9kYXRh",
+            id="basic_image",
+        ),
+        pytest.param(
+            {"data_str": "test_data", "mime_type": "text/plain"},
"data:text/plain;,test_data", id="basic_text" + ), + pytest.param( + { + "data_bytes": b"\x01\x02\x03\x04", + "mime_type": "application/octet-stream", + "data_format": "base64", + }, + "data:application/octet-stream;base64,AQIDBA==", + id="basic_binary", + ), + pytest.param( + { + "data_str": "test_data/r/t", + "mime_type": "image/jpeg", + }, + "data:image/jpeg;,test_data/r/t", + id="whitespace_not_base64", + ), + pytest.param( + { + "data_bytes": b"test_data", + "mime_type": "image/jpeg", + "data_format": "base64", + "parameters": None, + }, + "data:image/jpeg;base64,dGVzdF9kYXRh", + id="param_none", + ), + pytest.param( + { + "data_bytes": b"test_data", + "mime_type": "image/jpeg", + "data_format": "base64", + "parameters": [], + }, + "data:image/jpeg;base64,dGVzdF9kYXRh", + id="param_empty_list", + ), + pytest.param( + { + "data_bytes": b"test_data", + "mime_type": "image/jpeg", + "data_format": "base64", + "parameters": [""], + }, + "data:image/jpeg;base64,dGVzdF9kYXRh", + id="param_empty_list", + ), + pytest.param( + { + "data_bytes": b"test_data", + "mime_type": "image/jpeg", + "data_format": "base64", + "parameters": {}, + }, + "data:image/jpeg;base64,dGVzdF9kYXRh", + id="param_empty_dict", + ), + pytest.param( + { + "data_bytes": b"test_data", + "mime_type": "image/jpeg", + "data_format": "base64", + "parameters": {"foo": "bar"}, + }, + "data:image/jpeg;base64,dGVzdF9kYXRh", + id="param_dict", + ), + ], +) +def test_data_uri_from_fields(fields: dict[str, Any], uri: str): + data_uri = DataUri(**fields) + assert data_uri.to_string() == uri + + +@pytest.mark.parametrize( + "fields", + [ + pytest.param( + { + "data_str": "test_data/r/t", + "mime_type": "image/jpeg", + "data_format": "base64", + }, + id="whitespace", + ), + pytest.param( + { + "data_str": "test_data/r/t", + "mime_type": "image/jpeg", + "data_format": "base64", + "parameters": ["foo"], + }, + id="invalid_params", + ), + ], +) +def test_data_uri_from_fields_fail(fields: dict[str, Any]): + with pytest.raises(ContentInitializationError): + DataUri(**fields) + + +def test_eq(): + data_uri1 = DataUri.from_data_uri("data:image/jpeg;base64,dGVzdF9kYXRh") + data_uri2 = DataUri.from_data_uri("data:image/jpeg;base64,dGVzdF9kYXRh") + assert data_uri1 == data_uri2 + assert data_uri1 != "data:image/jpeg;base64,dGVzdF9kYXRh" + assert data_uri1 != DataUri.from_data_uri("data:image/jpeg;base64,dGVzdF9kYXRi") diff --git a/python/tests/unit/contents/test_function_call.py b/python/tests/unit/contents/test_function_call.py index a9445804d4ce..75aee374e109 100644 --- a/python/tests/unit/contents/test_function_call.py +++ b/python/tests/unit/contents/test_function_call.py @@ -90,10 +90,19 @@ def test_split_name_none(): def test_fc_dump(function_call: FunctionCallContent): # Test dumping the function call to dictionary dumped = function_call.model_dump(exclude_none=True) - assert dumped == {"id": "test", "name": "Test-Function", "arguments": '{"input": "world"}', "metadata": {}} + assert dumped == { + "content_type": "function_call", + "id": "test", + "name": "Test-Function", + "arguments": '{"input": "world"}', + "metadata": {}, + } def test_fc_dump_json(function_call: FunctionCallContent): # Test dumping the function call to dictionary dumped = function_call.model_dump_json(exclude_none=True) - assert dumped == """{"metadata":{},"id":"test","name":"Test-Function","arguments":"{\\"input\\": \\"world\\"}"}""" + assert ( + dumped + == """{"metadata":{},"content_type":"function_call","id":"test","name":"Test-Function","arguments":"{\\"input\\": 
\\"world\\"}"}""" # noqa: E501 + ) diff --git a/python/tests/unit/contents/test_image_content.py b/python/tests/unit/contents/test_image_content.py new file mode 100644 index 000000000000..7d84a1746271 --- /dev/null +++ b/python/tests/unit/contents/test_image_content.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft. All rights reserved. + +import os + +import pytest + +from semantic_kernel.contents.image_content import ImageContent + +test_cases = [ + pytest.param(ImageContent(uri="http://test_uri"), id="uri"), + pytest.param(ImageContent(data=b"test_data", mime_type="image/jpeg", data_format="base64"), id="data"), + pytest.param(ImageContent(uri="http://test_uri", data=b"test_data", mime_type="image/jpeg"), id="both"), + pytest.param( + ImageContent.from_image_path( + image_path=os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") + ), + id="image_file", + ), +] + + +def test_create_uri(): + image = ImageContent(uri="http://test_uri") + assert str(image.uri) == "http://test_uri/" + + +def test_create_file_from_path(): + image_path = os.path.join(os.path.dirname(__file__), "../../", "assets/sample_image.jpg") + image = ImageContent.from_image_path(image_path=image_path) + assert image.mime_type == "image/jpeg" + assert image.data_uri.startswith("data:image/jpeg;") + assert image.data is not None + + +def test_create_data(): + image = ImageContent(data=b"test_data", mime_type="image/jpeg") + assert image.mime_type == "image/jpeg" + assert image.data == b"test_data" + + +def test_to_str_uri(): + image = ImageContent(uri="http://test_uri") + assert str(image) == "http://test_uri/" + + +def test_to_str_data(): + image = ImageContent(data=b"test_data", mime_type="image/jpeg", data_format="base64") + assert str(image) == "data:image/jpeg;base64,dGVzdF9kYXRh" + + +@pytest.mark.parametrize("image", test_cases) +def test_element_roundtrip(image): + element = image.to_element() + new_image = ImageContent.from_element(element) + assert new_image == image + + +@pytest.mark.parametrize("image", test_cases) +def test_to_dict(image): + assert image.to_dict() == {"type": "image_url", "image_url": {"url": str(image)}} diff --git a/python/tests/unit/contents/test_streaming_chat_message_content.py b/python/tests/unit/contents/test_streaming_chat_message_content.py index f09f6c3408be..fbc093ebb048 100644 --- a/python/tests/unit/contents/test_streaming_chat_message_content.py +++ b/python/tests/unit/contents/test_streaming_chat_message_content.py @@ -3,14 +3,14 @@ import pytest from defusedxml.ElementTree import XML -from semantic_kernel.contents.author_role import AuthorRole from semantic_kernel.contents.chat_message_content import ChatMessageContent -from semantic_kernel.contents.finish_reason import FinishReason from semantic_kernel.contents.function_call_content import FunctionCallContent from semantic_kernel.contents.function_result_content import FunctionResultContent from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent from semantic_kernel.contents.streaming_text_content import StreamingTextContent from semantic_kernel.contents.text_content import TextContent +from semantic_kernel.contents.utils.author_role import AuthorRole +from semantic_kernel.contents.utils.finish_reason import FinishReason from semantic_kernel.exceptions.content_exceptions import ContentAdditionException diff --git a/python/tests/unit/core_plugins/test_sessions_python_plugin.py b/python/tests/unit/core_plugins/test_sessions_python_plugin.py index 
198e3545fea5..05456ebe00dc 100644 --- a/python/tests/unit/core_plugins/test_sessions_python_plugin.py +++ b/python/tests/unit/core_plugins/test_sessions_python_plugin.py @@ -10,17 +10,17 @@ from semantic_kernel.kernel import Kernel -def test_auth_callback(): +def auth_callback_test(): return "sample_token" def test_it_can_be_instantiated(aca_python_sessions_unit_test_env): - plugin = SessionsPythonTool(auth_callback=test_auth_callback) + plugin = SessionsPythonTool(auth_callback=auth_callback_test) assert plugin is not None def test_validate_endpoint(aca_python_sessions_unit_test_env): - plugin = SessionsPythonTool(auth_callback=test_auth_callback) + plugin = SessionsPythonTool(auth_callback=auth_callback_test) assert plugin is not None assert str(plugin.pool_management_endpoint) == aca_python_sessions_unit_test_env["ACA_POOL_MANAGEMENT_ENDPOINT"] @@ -34,7 +34,7 @@ def test_validate_endpoint(aca_python_sessions_unit_test_env): indirect=True, ) def test_validate_endpoint_with_execute(aca_python_sessions_unit_test_env): - plugin = SessionsPythonTool(auth_callback=test_auth_callback) + plugin = SessionsPythonTool(auth_callback=auth_callback_test) assert plugin is not None assert "/python/execute" not in str(plugin.pool_management_endpoint) @@ -45,7 +45,7 @@ def test_validate_endpoint_with_execute(aca_python_sessions_unit_test_env): indirect=True, ) def test_validate_endpoint_no_final_slash(aca_python_sessions_unit_test_env): - plugin = SessionsPythonTool(auth_callback=test_auth_callback) + plugin = SessionsPythonTool(auth_callback=auth_callback_test) assert plugin is not None assert str(plugin.pool_management_endpoint) == "https://test.endpoint/" @@ -53,11 +53,14 @@ def test_validate_endpoint_no_final_slash(aca_python_sessions_unit_test_env): @pytest.mark.parametrize("exclude_list", [["ACA_POOL_MANAGEMENT_ENDPOINT"]], indirect=True) def test_validate_settings_fail(aca_python_sessions_unit_test_env): with pytest.raises(FunctionInitializationError): - SessionsPythonTool(auth_callback=test_auth_callback) + SessionsPythonTool( + auth_callback=auth_callback_test, + env_file_path="test.env", + ) def test_it_can_be_imported(kernel: Kernel, aca_python_sessions_unit_test_env): - plugin = SessionsPythonTool(auth_callback=test_auth_callback) + plugin = SessionsPythonTool(auth_callback=auth_callback_test) assert kernel.add_plugin(plugin=plugin, plugin_name="PythonCodeInterpreter") assert kernel.get_plugin(plugin_name="PythonCodeInterpreter") is not None assert kernel.get_plugin(plugin_name="PythonCodeInterpreter").name == "PythonCodeInterpreter" @@ -81,7 +84,7 @@ async def async_return(result): mock_post.return_value = await async_return(mock_response) - plugin = SessionsPythonTool(auth_callback=test_auth_callback) + plugin = SessionsPythonTool(auth_callback=auth_callback_test) result = await plugin.execute_code("print('hello world')") assert result is not None @@ -104,7 +107,7 @@ async def async_return(result): mock_post.return_value = await async_return(mock_response) - plugin = SessionsPythonTool(auth_callback=test_auth_callback) + plugin = SessionsPythonTool(auth_callback=auth_callback_test) with pytest.raises(Exception): _ = await plugin.execute_code("print('hello world')") @@ -112,7 +115,7 @@ async def async_return(result): @pytest.mark.asyncio async def test_empty_call_to_container_fails_raises_exception(aca_python_sessions_unit_test_env): - plugin = SessionsPythonTool(auth_callback=test_auth_callback) + plugin = SessionsPythonTool(auth_callback=auth_callback_test) with 
pytest.raises(FunctionExecutionException): await plugin.execute_code(code="") @@ -135,21 +138,26 @@ async def async_return(result): mock_request = httpx.Request(method="POST", url="https://example.com/python/uploadFile?identifier=None") mock_response = httpx.Response( - status_code=200, json={ - '$id': '1', - '$values': [ + status_code=200, + json={ + "$id": "1", + "$values": [ { - '$id': '2', - 'filename': 'test.txt', - 'size': 123, - 'last_modified_time': '2024-06-03T17:48:46.2672398Z' + "$id": "2", + "filename": "test.txt", + "size": 123, + "last_modified_time": "2024-06-03T17:48:46.2672398Z", } - ] - }, request=mock_request + ], + }, + request=mock_request, ) mock_post.return_value = await async_return(mock_response) - plugin = SessionsPythonTool(auth_callback=lambda: "sample_token") + plugin = SessionsPythonTool( + auth_callback=lambda: "sample_token", + env_file_path="test.env", + ) result = await plugin.upload_file(local_file_path="test.txt", remote_file_path="uploaded_test.txt") assert result.filename == "test.txt" @@ -176,21 +184,26 @@ async def async_return(result): mock_request = httpx.Request(method="POST", url="https://example.com/python/uploadFile?identifier=None") mock_response = httpx.Response( - status_code=200, json={ - '$id': '1', - '$values': [ + status_code=200, + json={ + "$id": "1", + "$values": [ { - '$id': '2', - 'filename': 'test.txt', - 'size': 123, - 'last_modified_time': '2024-06-03T17:00:00.0000000Z' + "$id": "2", + "filename": "test.txt", + "size": 123, + "last_modified_time": "2024-06-03T17:00:00.0000000Z", } - ] - }, request=mock_request + ], + }, + request=mock_request, ) mock_post.return_value = await async_return(mock_response) - plugin = SessionsPythonTool(auth_callback=lambda: "sample_token") + plugin = SessionsPythonTool( + auth_callback=lambda: "sample_token", + env_file_path="test.env", + ) result = await plugin.upload_file(local_file_path="test.txt") assert result.filename == "test.txt" @@ -203,7 +216,7 @@ async def async_return(result): [ ("./file.py", "uploaded_test.txt", "/mnt/data/uploaded_test.txt"), ("./file.py", "/mnt/data/input.py", "/mnt/data/input.py"), - ] + ], ) @pytest.mark.asyncio @patch("httpx.AsyncClient.post") @@ -215,25 +228,27 @@ async def test_upload_file_with_buffer( async def async_return(result): return result - with patch( - "semantic_kernel.core_plugins.sessions_python_tool.sessions_python_plugin.SessionsPythonTool._ensure_auth_token", - return_value="test_token", - ), patch("builtins.open", mock_open(read_data="print('hello, world~')")): - + with ( + patch( + "semantic_kernel.core_plugins.sessions_python_tool.sessions_python_plugin.SessionsPythonTool._ensure_auth_token", + return_value="test_token", + ), + patch("builtins.open", mock_open(read_data="print('hello, world~')")), + ): mock_request = httpx.Request(method="POST", url="https://example.com/python/uploadFile?identifier=None") mock_response = httpx.Response( status_code=200, json={ - '$id': '1', - '$values': [ + "$id": "1", + "$values": [ { - '$id': '2', - 'filename': expected_remote_file_path, - 'size': 456, - 'last_modified_time': '2024-06-03T17:00:00.0000000Z' + "$id": "2", + "filename": expected_remote_file_path, + "size": 456, + "last_modified_time": "2024-06-03T17:00:00.0000000Z", } - ] + ], }, request=mock_request, ) @@ -276,11 +291,21 @@ async def async_return(result): mock_response = httpx.Response( status_code=200, json={ - '$id': '1', - '$values': [ - {'$id': '2', 'filename': 'test1.txt', 'size': 123, 'last_modified_time': '2024-06-03T17:00:00.0000000Z'}, 
# noqa: E501 - {'$id': '3', 'filename': 'test2.txt', 'size': 456, 'last_modified_time': '2024-06-03T18:00:00.0000000Z'} # noqa: E501 - ] + "$id": "1", + "$values": [ + { + "$id": "2", + "filename": "test1.txt", + "size": 123, + "last_modified_time": "2024-06-03T17:00:00.0000000Z", + }, # noqa: E501 + { + "$id": "3", + "filename": "test2.txt", + "size": 456, + "last_modified_time": "2024-06-03T18:00:00.0000000Z", + }, # noqa: E501 + ], }, request=mock_request, ) @@ -322,7 +347,10 @@ async def mock_auth_callback(): mock_response = httpx.Response(status_code=200, content=b"file data", request=mock_request) mock_get.return_value = await async_return(mock_response) - plugin = SessionsPythonTool(auth_callback=mock_auth_callback) + plugin = SessionsPythonTool( + auth_callback=mock_auth_callback, + env_file_path="test.env", + ) await plugin.download_file(remote_file_path="remote_test.txt", local_file_path="local_test.txt") mock_get.assert_awaited_once() diff --git a/python/tests/unit/functions/test_kernel_plugins.py b/python/tests/unit/functions/test_kernel_plugins.py index 84776359e2a8..627357c23526 100644 --- a/python/tests/unit/functions/test_kernel_plugins.py +++ b/python/tests/unit/functions/test_kernel_plugins.py @@ -13,6 +13,7 @@ from semantic_kernel.connectors.openai_plugin.openai_function_execution_parameters import ( OpenAIFunctionExecutionParameters, ) +from semantic_kernel.connectors.telemetry import HTTP_USER_AGENT from semantic_kernel.exceptions.function_exceptions import PluginInitializationError from semantic_kernel.functions import kernel_function from semantic_kernel.functions.kernel_function import KernelFunction @@ -558,7 +559,7 @@ async def test_from_openai_plugin_from_url(mock_parse_openai_manifest, mock_get) assert plugin.functions.get("GetSecret") is not None assert plugin.functions.get("SetSecret") is not None - mock_get.assert_awaited_once_with(fake_plugin_url, headers={"User-Agent": "Semantic-Kernel"}) + mock_get.assert_awaited_once_with(fake_plugin_url, headers={"User-Agent": HTTP_USER_AGENT}) @pytest.mark.asyncio diff --git a/python/tests/unit/kernel/test_kernel.py b/python/tests/unit/kernel/test_kernel.py index b97a9ca774db..a426542c3316 100644 --- a/python/tests/unit/kernel/test_kernel.py +++ b/python/tests/unit/kernel/test_kernel.py @@ -1,7 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. import os -import sys from typing import Union from unittest.mock import AsyncMock, patch @@ -460,7 +459,6 @@ def test_get_service_with_multiple_types(kernel_with_service: Kernel): assert service_get == kernel_with_service.services["service"] -@pytest.mark.skipif(sys.version_info < (3, 10), reason="This is valid syntax only in python 3.10+.") def test_get_service_with_multiple_types_union(kernel_with_service: Kernel): """This is valid syntax only in python 3.10+. 
It is skipped for older versions.""" service_get = kernel_with_service.get_service("service", type=Union[AIServiceClientBase, ChatCompletionClientBase]) diff --git a/python/tests/unit/prompt_template/test_prompt_templates.py b/python/tests/unit/prompt_template/test_prompt_templates.py index 4955d1700f8c..38ff81c19a57 100644 --- a/python/tests/unit/prompt_template/test_prompt_templates.py +++ b/python/tests/unit/prompt_template/test_prompt_templates.py @@ -6,6 +6,7 @@ from pytest import raises from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings +from semantic_kernel.functions.kernel_function_from_prompt import KernelFunctionFromPrompt from semantic_kernel.functions.kernel_parameter_metadata import KernelParameterMetadata from semantic_kernel.prompt_template.input_variable import InputVariable from semantic_kernel.prompt_template.prompt_template_config import PromptTemplateConfig @@ -256,3 +257,8 @@ def test_from_json_validate_fail(): } ) ) + + +def test_multiple_param_in_prompt(): + func = KernelFunctionFromPrompt("test", prompt="{{$param}}{{$param}}") + assert len(func.parameters) == 1 diff --git a/python/tests/unit/schema/test_schema_builder.py b/python/tests/unit/schema/test_schema_builder.py index 0f4e14697640..3d2751980a33 100644 --- a/python/tests/unit/schema/test_schema_builder.py +++ b/python/tests/unit/schema/test_schema_builder.py @@ -41,7 +41,7 @@ class MockModel: "status": Union[int, str], "optional_field": Optional[str], } - __fields__ = { + model_fields = { "id": Mock(description="The ID of the model"), "name": Mock(description="The name of the model"), "is_active": Mock(description="Whether the model is active"), @@ -110,17 +110,13 @@ def test_build_model_schema(): "required": ["name", "age"], "description": "A model", } - result = KernelJsonSchemaBuilder.build_model_schema( - ExampleModel, description="A model" - ) + result = KernelJsonSchemaBuilder.build_model_schema(ExampleModel, description="A model") assert result == expected_schema def test_build_from_type_name(): expected_schema = {"type": "string", "description": "A simple string"} - result = KernelJsonSchemaBuilder.build_from_type_name( - "str", description="A simple string" - ) + result = KernelJsonSchemaBuilder.build_from_type_name("str", description="A simple string") assert result == expected_schema diff --git a/python/tests/unit/telemetry/test_user_agent.py b/python/tests/unit/telemetry/test_user_agent.py new file mode 100644 index 000000000000..738610ed3975 --- /dev/null +++ b/python/tests/unit/telemetry/test_user_agent.py @@ -0,0 +1,69 @@ +# Copyright (c) Microsoft. All rights reserved. 
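+# Unit tests for prepend_semantic_kernel_to_user_agent and the module-level APP_INFO telemetry constant.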
+
+import importlib
+
+from semantic_kernel.connectors.telemetry import (
+    HTTP_USER_AGENT,
+    TELEMETRY_DISABLED_ENV_VAR,
+    prepend_semantic_kernel_to_user_agent,
+)
+from semantic_kernel.const import USER_AGENT
+
+
+def test_append_to_existing_user_agent(monkeypatch):
+    monkeypatch.setenv(TELEMETRY_DISABLED_ENV_VAR, "false")
+    monkeypatch.setattr("importlib.metadata.version", lambda _: "1.0.0")
+    monkeypatch.setattr("semantic_kernel.connectors.telemetry.version_info", "1.0.0")
+
+    headers = {USER_AGENT: "existing-agent"}
+    expected = {USER_AGENT: f"{HTTP_USER_AGENT}/1.0.0 existing-agent"}
+    result = prepend_semantic_kernel_to_user_agent(headers)
+    assert result == expected
+
+
+def test_create_new_user_agent(monkeypatch):
+    monkeypatch.setenv(TELEMETRY_DISABLED_ENV_VAR, "false")
+    monkeypatch.setattr("importlib.metadata.version", lambda _: "1.0.0")
+    monkeypatch.setattr("semantic_kernel.connectors.telemetry.version_info", "1.0.0")
+
+    headers = {}
+    expected = {USER_AGENT: f"{HTTP_USER_AGENT}/1.0.0"}
+    result = prepend_semantic_kernel_to_user_agent(headers)
+    assert result == expected
+
+
+def test_telemetry_disabled(monkeypatch):
+    monkeypatch.setenv(TELEMETRY_DISABLED_ENV_VAR, "true")
+    monkeypatch.setattr("importlib.metadata.version", lambda _: "1.0.0")
+    monkeypatch.setattr("semantic_kernel.connectors.telemetry.version_info", "1.0.0")
+
+    headers = {}
+    result = prepend_semantic_kernel_to_user_agent(headers)
+    assert result == headers
+
+
+def test_app_info_when_telemetry_enabled(monkeypatch):
+    monkeypatch.setenv(TELEMETRY_DISABLED_ENV_VAR, "false")
+    monkeypatch.setattr("importlib.metadata.version", lambda _: "1.0.0")
+    monkeypatch.setattr("semantic_kernel.connectors.telemetry.version_info", "1.0.0")
+
+    # need to reload the module to get the updated APP_INFO
+    import semantic_kernel.connectors.telemetry
+
+    importlib.reload(semantic_kernel.connectors.telemetry)
+
+    expected = {"semantic-kernel-version": "python/1.0.0"}
+    assert expected == semantic_kernel.connectors.telemetry.APP_INFO
+
+
+def test_app_info_when_telemetry_disabled(monkeypatch):
+    monkeypatch.setenv(TELEMETRY_DISABLED_ENV_VAR, "true")
+    monkeypatch.setattr("importlib.metadata.version", lambda _: "1.0.0")
+    monkeypatch.setattr("semantic_kernel.connectors.telemetry.version_info", "1.0.0")
+
+    # need to reload the module to get the updated APP_INFO
+    import semantic_kernel.connectors.telemetry
+
+    importlib.reload(semantic_kernel.connectors.telemetry)
+
+    assert semantic_kernel.connectors.telemetry.APP_INFO is None