Skip to content

Commit

Permalink
updated annotations of PES's and fixed tests and addressed comments
Browse files Browse the repository at this point in the history
  • Loading branch information
eavanvalkenburg committed Nov 27, 2024
1 parent f37f9eb commit 00df0b3
Show file tree
Hide file tree
Showing 36 changed files with 418 additions and 395 deletions.
26 changes: 9 additions & 17 deletions .github/workflows/python-integration-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ permissions:
env:
# Configure a constant location for the uv cache
UV_CACHE_DIR: /tmp/.uv-cache
HNSWLIB_NO_NATIVE: 1
Python_Integration_Tests: Python_Integration_Tests
AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME: ${{ vars.AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME }} # azure-text-embedding-ada-002
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ vars.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
Expand Down Expand Up @@ -92,8 +91,8 @@ jobs:
if: steps.filter.outputs.python != 'true'
run: echo "NOT python file"

python-merge-gate-completions:
name: Python Pre-Merge Integration Tests - Completions & Embeddings
python-merge-gate-ai-services:
name: Python Pre-Merge Integration Tests - AI Services (incl samples using those)
needs: paths-filter
if: github.event_name != 'pull_request' && github.event_name != 'schedule' && needs.paths-filter.outputs.pythonChanges == 'true'
strategy:
Expand Down Expand Up @@ -156,8 +155,8 @@ jobs:
client-id: ${{ secrets.AZURE_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Run Integration Tests - Completions
id: run_tests_completions
- name: Run Integration Tests
id: run_tests_ai_services
timeout-minutes: 25
shell: bash
run: |
Expand All @@ -175,7 +174,7 @@ jobs:
run: uv cache prune --ci

python-merge-gate-memory:
name: Python Pre-Merge Integration Tests - Memory
name: Python Pre-Merge Integration Tests - Memory (incl samples using those)
needs: paths-filter
if: github.event_name != 'pull_request' && github.event_name != 'schedule' && needs.paths-filter.outputs.pythonChanges == 'true'
strategy:
Expand Down Expand Up @@ -232,7 +231,7 @@ jobs:
client-id: ${{ secrets.AZURE_CLIENT_ID }}
tenant-id: ${{ secrets.AZURE_TENANT_ID }}
subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
- name: Run Integration Tests - Memory
- name: Run Integration Tests
id: run_tests_memory
timeout-minutes: 10
shell: bash
Expand All @@ -251,6 +250,7 @@ jobs:
run: uv cache prune --ci

python-integration-tests:
name: Python Integration Tests - Scheduled run
needs: paths-filter
if: (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && needs.paths-filter.outputs.pythonChanges == 'true'
strategy:
Expand All @@ -265,8 +265,6 @@ jobs:
runs-on: ${{ matrix.os }}
environment: "integration"
env:
HNSWLIB_NO_NATIVE: 1
Python_Integration_Tests: Python_Integration_Tests
UV_PYTHON: ${{ matrix.python-version }}
MEMORY_CONCEPT_SAMPLE: "true"
COMPLETIONS_CONCEPT_SAMPLE: "true"
Expand All @@ -278,13 +276,7 @@ jobs:
version: "0.5.x"
enable-cache: true
cache-suffix: ${{ runner.os }}-${{ matrix.python-version }}
- name: Install dependencies with hnswlib native disabled
if: matrix.os == 'macos-latest' && matrix.python-version == '3.11'
run: |
export HNSWLIB_NO_NATIVE=1
uv sync --all-extras --dev
- name: Install dependencies with hnswlib native enabled
if: matrix.os != 'macos-latest' || matrix.python-version != '3.11'
- name: Install dependencies
run: |
uv sync --all-extras --dev
- name: Install Ollama
Expand Down Expand Up @@ -389,7 +381,7 @@ jobs:
strategy:
max-parallel: 1
fail-fast: false
needs: [python-merge-gate-completions, python-merge-gate-memory, python-integration-tests]
needs: [python-merge-gate-ai-services, python-merge-gate-memory, python-integration-tests]
steps:
- name: Get Date
shell: bash
Expand Down
11 changes: 6 additions & 5 deletions python/samples/concepts/chat_completion/simple_chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
Services,
get_chat_completion_service_and_request_settings,
)
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.contents import ChatHistory

# This sample shows how to create a chatbot. This sample uses the following two main components:
# - a ChatCompletionService: This component is responsible for generating responses to user messages.
Expand All @@ -26,7 +26,7 @@
# - Services.ONNX
# - Services.VERTEX_AI
# Please make sure you have configured your environment correctly for the selected chat completion service.
chat_completion_service, request_settings = get_chat_completion_service_and_request_settings(Services.AZURE_OPENAI)
chat_completion_service, request_settings = get_chat_completion_service_and_request_settings(Services.OPENAI)

# This is the system message that gives the chatbot its personality.
system_message = """
Expand Down Expand Up @@ -64,10 +64,11 @@ async def chat() -> bool:
chat_history=chat_history,
settings=request_settings,
)
print(f"Mosscap:> {response}")
if response:
print(f"Mosscap:> {response}")

# Add the chat message to the chat history to keep track of the conversation.
chat_history.add_assistant_message(str(response))
# Add the chat message to the chat history to keep track of the conversation.
chat_history.add_message(response)

return True

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@
Services,
get_chat_completion_service_and_request_settings,
)
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.functions.kernel_arguments import KernelArguments
from semantic_kernel.kernel import Kernel
from semantic_kernel import Kernel
from semantic_kernel.contents import ChatHistory
from semantic_kernel.functions import KernelArguments

# This sample shows how to create a chatbot using a kernel function.
# This sample uses the following two main components:
Expand Down Expand Up @@ -97,15 +97,15 @@ async def chat() -> bool:
user_input=user_input,
)

answer = await kernel.invoke(chat_function, kernel_arguments)
answer = await kernel.invoke(plugin_name="ChatBot", function_name="Chat", arguments=kernel_arguments)
# Alternatively, you can invoke the function directly with the kernel as an argument:
# answer = await chat_function.invoke(kernel, kernel_arguments)

print(f"Mosscap:> {answer}")

# Add the chat message to the chat history to keep track of the conversation.
chat_history.add_user_message(user_input)
chat_history.add_assistant_message(str(answer))
if answer:
print(f"Mosscap:> {answer}")
# Since the user_input is rendered by the template, it is not yet part of the chat history, so we add it here.
chat_history.add_user_message(user_input)
# Add the chat message to the chat history to keep track of the conversation.
chat_history.add_message(answer.value)

return True

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
Services,
get_chat_completion_service_and_request_settings,
)
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.contents import ChatHistory

# This sample shows how to create a chatbot whose output can be biased using logit bias.
# This sample uses the following three main components:
Expand All @@ -33,7 +33,6 @@

# Create a chat history object with the system message.
chat_history = ChatHistory(system_message=system_message)

# Create a list of tokens whose bias value will be reduced.
# The token ids of these words can be obtained using the GPT Tokenizer: https://platform.openai.com/tokenizer
# the targeted model series is GPT-4o & GPT-4o mini
Expand Down Expand Up @@ -61,7 +60,7 @@
]
# Configure the logit bias settings to minimize the likelihood of the
# tokens in the banned_tokens list appearing in the output.
request_settings.logit_bias = {k: -100 for k in banned_tokens}
request_settings.logit_bias = {k: -100 for k in banned_tokens} # type: ignore


async def chat() -> bool:
Expand All @@ -86,10 +85,11 @@ async def chat() -> bool:
chat_history=chat_history,
settings=request_settings,
)
print(f"Mosscap:> {response}")
if response:
print(f"Mosscap:> {response}")

# Add the chat message to the chat history to keep track of the conversation.
chat_history.add_assistant_message(str(response))
# Add the chat message to the chat history to keep track of the conversation.
chat_history.add_message(response)

return True

Expand Down
Original file line number Diff line number Diff line change
@@ -1,13 +1,12 @@
# Copyright (c) Microsoft. All rights reserved.

import asyncio
from functools import reduce

from samples.concepts.setup.chat_completion_services import (
Services,
get_chat_completion_service_and_request_settings,
)
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.contents import ChatHistory, StreamingChatMessageContent

# This sample shows how to create a chatbot that streams responses.
# This sample uses the following two main components:
Expand Down Expand Up @@ -70,15 +69,16 @@ async def chat() -> bool:
)

# Capture the chunks of the response and print them as they come in.
chunks = []
chunks: list[StreamingChatMessageContent] = []
print("Mosscap:> ", end="")
async for chunk in response:
chunks.append(chunk)
print(chunk, end="")
if chunk:
chunks.append(chunk)
print(chunk, end="")
print("")

# Combine the chunks into a single message to add to the chat history.
full_message = reduce(lambda first, second: first + second, chunks)
full_message = sum(chunks[1:], chunks[0])
# Add the chat message to the chat history to keep track of the conversation.
chat_history.add_message(full_message)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,7 @@
Services,
get_chat_completion_service_and_request_settings,
)
from semantic_kernel.contents.chat_history import ChatHistory
from semantic_kernel.contents.chat_message_content import ChatMessageContent
from semantic_kernel.contents.image_content import ImageContent
from semantic_kernel.contents.text_content import TextContent
from semantic_kernel.contents import AuthorRole, ChatHistory, ChatMessageContent, ImageContent, TextContent

# This sample shows how to create a chatbot that responds to user messages with image input.
# This sample uses the following three main components:
Expand All @@ -30,6 +27,11 @@
# - Services.ONNX
# - Services.VERTEX_AI
# Please make sure you have configured your environment correctly for the selected chat completion service.

# [NOTE]
# Not all models support image input. Make sure to select a model that supports image input.
# Not all services support image input from an image URI. If your image is saved in a remote location,
# make sure to use a service that supports image input from a URI.
chat_completion_service, request_settings = get_chat_completion_service_and_request_settings(Services.AZURE_OPENAI)

IMAGE_URI = "https://upload.wikimedia.org/wikipedia/commons/d/d5/Half-timbered_mansion%2C_Zirkel%2C_East_view.jpg"
Expand All @@ -41,12 +43,6 @@
image_content_local = ImageContent.from_image_file(IMAGE_PATH)


# [NOTE]
# Not all models support image input. Make sure to select a model that supports image input.
# Not all services support image input from an image URI. If your image is saved in a remote location,
# make sure to use a service that supports image input from a URI.


# This is the system message that gives the chatbot its personality.
system_message = """
You are an image reviewing chat bot. Your name is Mosscap and you have one goal critiquing images that are supplied.
Expand All @@ -56,7 +52,7 @@
chat_history = ChatHistory(system_message=system_message)
chat_history.add_message(
ChatMessageContent(
role="user",
role=AuthorRole.USER,
items=[TextContent(text="What is in this image?"), image_content_local],
)
)
Expand Down Expand Up @@ -90,10 +86,11 @@ async def chat(skip_user_input: bool = False) -> bool:
chat_history=chat_history,
settings=request_settings,
)
print(f"Mosscap:> {response}")
if response:
print(f"Mosscap:> {response}")

# Add the chat message to the chat history to keep track of the conversation.
chat_history.add_assistant_message(str(response))
# Add the chat message to the chat history to keep track of the conversation.
chat_history.add_message(response)

return True

Expand Down
Loading

0 comments on commit 00df0b3

Please sign in to comment.