Skip to content

Commit

Permalink
Python: Add OpenTelemetry to Python SK (#6914)
Browse files Browse the repository at this point in the history
### Motivation and Context
We want observability into usage of SK

### Description
Add OpenTelemetry to Python SK

### Contribution Checklist
- [ ] The code builds clean without any errors or warnings
- [ ] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations

---------

Co-authored-by: Tao Chen <[email protected]>
  • Loading branch information
glahaye and TaoChenOSU authored Jul 25, 2024
1 parent 5c7f9ba commit ca78ff7
Show file tree
Hide file tree
Showing 31 changed files with 634 additions and 89 deletions.
3 changes: 2 additions & 1 deletion python/poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 5 additions & 0 deletions python/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,11 @@ openai = ">=1.0"

# openapi and swagger
openapi_core = ">=0.18,<0.20"

# OpenTelemetry
opentelemetry-api = "^1.24.0"
opentelemetry-sdk = "^1.24.0"

prance = "^23.6.21.0"

# templating
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from functools import reduce
from typing import TYPE_CHECKING, Any

from semantic_kernel.connectors.telemetry import SEMANTIC_KERNEL_USER_AGENT
from semantic_kernel.utils.telemetry.user_agent import SEMANTIC_KERNEL_USER_AGENT

if sys.version_info >= (3, 12):
from typing import override # pragma: no cover
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,9 @@
from semantic_kernel.connectors.ai.azure_ai_inference.azure_ai_inference_settings import AzureAIInferenceSettings
from semantic_kernel.connectors.ai.azure_ai_inference.services.azure_ai_inference_base import AzureAIInferenceBase
from semantic_kernel.connectors.ai.embeddings.embedding_generator_base import EmbeddingGeneratorBase
from semantic_kernel.connectors.telemetry import SEMANTIC_KERNEL_USER_AGENT
from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError
from semantic_kernel.utils.experimental_decorator import experimental_class
from semantic_kernel.utils.telemetry.user_agent import SEMANTIC_KERNEL_USER_AGENT

if TYPE_CHECKING:
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ async def get_chat_message_content(
Returns:
A string representing the response from the LLM.
"""
results = await self.get_chat_message_contents(chat_history, settings, **kwargs)
results = await self.get_chat_message_contents(chat_history=chat_history, settings=settings, **kwargs)
if results:
return results[0]
# this should not happen, should error out before returning an empty list
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,10 @@

from semantic_kernel.connectors.ai.open_ai.const import DEFAULT_AZURE_API_VERSION
from semantic_kernel.connectors.ai.open_ai.services.open_ai_handler import OpenAIHandler, OpenAIModelTypes
from semantic_kernel.connectors.telemetry import APP_INFO, prepend_semantic_kernel_to_user_agent
from semantic_kernel.const import USER_AGENT
from semantic_kernel.exceptions import ServiceInitializationError
from semantic_kernel.kernel_pydantic import HttpsUrl
from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent

logger: logging.Logger = logging.getLogger(__name__)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
import sys
from collections.abc import AsyncGenerator
from functools import reduce
from typing import TYPE_CHECKING, Any
from typing import TYPE_CHECKING, Any, ClassVar

if sys.version_info >= (3, 12):
from typing import override # pragma: no cover
Expand Down Expand Up @@ -38,6 +38,7 @@
from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
AutoFunctionInvocationContext,
)
from semantic_kernel.utils.telemetry.decorators import trace_chat_completion

if TYPE_CHECKING:
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
Expand All @@ -56,6 +57,8 @@ class InvokeTermination(Exception):
class OpenAIChatCompletionBase(OpenAIHandler, ChatCompletionClientBase):
"""OpenAI Chat completion class."""

MODEL_PROVIDER_NAME: ClassVar[str] = "openai"

# region Overriding base class methods
# most of the methods are overridden from the ChatCompletionClientBase class, otherwise it is mentioned

Expand All @@ -64,6 +67,7 @@ def get_prompt_execution_settings_class(self) -> type["PromptExecutionSettings"]
return OpenAIChatPromptExecutionSettings

@override
@trace_chat_completion(MODEL_PROVIDER_NAME)
async def get_chat_message_contents(
self,
chat_history: ChatHistory,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,9 @@

from semantic_kernel.connectors.ai.open_ai.services.open_ai_handler import OpenAIHandler
from semantic_kernel.connectors.ai.open_ai.services.open_ai_model_types import OpenAIModelTypes
from semantic_kernel.connectors.telemetry import APP_INFO, prepend_semantic_kernel_to_user_agent
from semantic_kernel.const import USER_AGENT
from semantic_kernel.exceptions import ServiceInitializationError
from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent

logger: logging.Logger = logging.getLogger(__name__)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import logging
import sys
from collections.abc import AsyncGenerator
from typing import TYPE_CHECKING, Any
from typing import TYPE_CHECKING, Any, ClassVar

if sys.version_info >= (3, 12):
from typing import override # pragma: no cover
Expand All @@ -26,6 +26,7 @@
from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase
from semantic_kernel.contents.streaming_text_content import StreamingTextContent
from semantic_kernel.contents.text_content import TextContent
from semantic_kernel.utils.telemetry.decorators import trace_text_completion

if TYPE_CHECKING:
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
Expand All @@ -34,11 +35,14 @@


class OpenAITextCompletionBase(OpenAIHandler, TextCompletionClientBase):
MODEL_PROVIDER_NAME: ClassVar[str] = "openai"

@override
def get_prompt_execution_settings_class(self) -> type["PromptExecutionSettings"]:
return OpenAITextPromptExecutionSettings

@override
@trace_text_completion(MODEL_PROVIDER_NAME)
async def get_text_contents(
self,
prompt: str,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ async def get_text_content(self, prompt: str, settings: "PromptExecutionSettings
Returns:
TextContent: A string or list of strings representing the response(s) from the LLM.
"""
result = await self.get_text_contents(prompt, settings)
result = await self.get_text_contents(prompt=prompt, settings=settings)
if result:
return result[0]
# this should not happen, should error out before returning an empty list
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,9 @@
import aiohttp

from semantic_kernel.connectors.memory.astradb.utils import AsyncSession
from semantic_kernel.connectors.telemetry import APP_INFO
from semantic_kernel.exceptions import ServiceResponseException
from semantic_kernel.utils.experimental_decorator import experimental_class
from semantic_kernel.utils.telemetry.user_agent import APP_INFO

ASTRA_CALLER_IDENTITY: str
SEMANTIC_KERNEL_VERSION = APP_INFO.get("Semantic-Kernel-Version")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ class OpenAPIFunctionExecutionParameters(KernelBaseModel):

def model_post_init(self, __context: Any) -> None:
"""Post initialization method for the model."""
from semantic_kernel.connectors.telemetry import HTTP_USER_AGENT
from semantic_kernel.utils.telemetry.user_agent import HTTP_USER_AGENT

if self.server_url_override:
parsed_url = urlparse(self.server_url_override)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,10 @@
)
from semantic_kernel.connectors.openapi_plugin.models.rest_api_operation_payload import RestApiOperationPayload
from semantic_kernel.connectors.openapi_plugin.models.rest_api_operation_run_options import RestApiOperationRunOptions
from semantic_kernel.connectors.telemetry import APP_INFO, prepend_semantic_kernel_to_user_agent
from semantic_kernel.exceptions.function_exceptions import FunctionExecutionException
from semantic_kernel.functions.kernel_arguments import KernelArguments
from semantic_kernel.utils.experimental_decorator import experimental_class
from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent

logger: logging.Logger = logging.getLogger(__name__)

Expand Down
2 changes: 1 addition & 1 deletion python/semantic_kernel/connectors/utils/document_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@

from httpx import AsyncClient, HTTPStatusError, RequestError

from semantic_kernel.connectors.telemetry import HTTP_USER_AGENT
from semantic_kernel.exceptions import ServiceInvalidRequestError
from semantic_kernel.utils.telemetry.user_agent import HTTP_USER_AGENT

logger: logging.Logger = logging.getLogger(__name__)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@
from httpx import AsyncClient, HTTPStatusError
from pydantic import ValidationError

from semantic_kernel.connectors.telemetry import HTTP_USER_AGENT, version_info
from semantic_kernel.const import USER_AGENT
from semantic_kernel.core_plugins.sessions_python_tool.sessions_python_settings import (
ACASessionsSettings,
Expand All @@ -20,6 +19,7 @@
from semantic_kernel.exceptions.function_exceptions import FunctionExecutionException, FunctionInitializationError
from semantic_kernel.functions.kernel_function_decorator import kernel_function
from semantic_kernel.kernel_pydantic import HttpsUrl, KernelBaseModel
from semantic_kernel.utils.telemetry.user_agent import HTTP_USER_AGENT, version_info

logger = logging.getLogger(__name__)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,8 @@ async def _invoke_internal(self, context: FunctionInvocationContext) -> None:
if isinstance(prompt_render_result.ai_service, TextCompletionClientBase):
try:
texts = await prompt_render_result.ai_service.get_text_contents(
unescape(prompt_render_result.rendered_prompt), prompt_render_result.execution_settings
prompt=unescape(prompt_render_result.rendered_prompt),
settings=prompt_render_result.execution_settings,
)
except Exception as exc:
raise FunctionExecutionException(f"Error occurred while invoking function {self.name}: {exc}") from exc
Expand Down
Empty file.
28 changes: 28 additions & 0 deletions python/semantic_kernel/utils/telemetry/const.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Copyright (c) Microsoft. All rights reserved.
#
# Constants for tracing activities with semantic conventions.
#
# Attribute and event names follow the OpenTelemetry semantic conventions
# for generative-AI clients (the "gen_ai.*" namespace). NOTE(review): the
# gen_ai conventions are still experimental/incubating upstream — confirm
# these names against the pinned semconv version before relying on them
# for cross-service correlation.

# Activity tags (span attributes set on each model-invocation span)
SYSTEM = "gen_ai.system"
OPERATION = "gen_ai.operation.name"
# Values recorded under OPERATION to distinguish the two completion kinds.
CHAT_COMPLETION_OPERATION = "chat.completions"
TEXT_COMPLETION_OPERATION = "text.completions"
# Request-side attributes: the model asked for and its sampling settings.
MODEL = "gen_ai.request.model"
MAX_TOKENS = "gen_ai.request.max_tokens" # nosec
TEMPERATURE = "gen_ai.request.temperature"
TOP_P = "gen_ai.request.top_p"
# Response-side attributes returned by the service.
RESPONSE_ID = "gen_ai.response.id"
FINISH_REASON = "gen_ai.response.finish_reason"
# Token-usage counters; "# nosec" suppresses bandit's hard-coded-secret
# false positive triggered by the word "token" in the literal.
PROMPT_TOKENS = "gen_ai.response.prompt_tokens" # nosec
COMPLETION_TOKENS = "gen_ai.response.completion_tokens" # nosec
# Destination endpoint attributes (standard OTel server.* conventions).
ADDRESS = "server.address"
PORT = "server.port"
ERROR_TYPE = "error.type"

# Activity events (span events used to carry prompt/completion content)
PROMPT_EVENT = "gen_ai.content.prompt"
COMPLETION_EVENT = "gen_ai.content.completion"

# Activity event attributes (the payload attribute attached to each event)
PROMPT_EVENT_PROMPT = "gen_ai.prompt"
COMPLETION_EVENT_COMPLETION = "gen_ai.completion"
Loading

0 comments on commit ca78ff7

Please sign in to comment.